-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathForm1.cs
More file actions
876 lines (735 loc) · 34.2 KB
/
Form1.cs
File metadata and controls
876 lines (735 loc) · 34.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
using System.Diagnostics; // Debug.WriteLine
using System.Text.Json;
namespace LMStudioExampleFormApp
{
public partial class Form1 : Form
{
// Client used for every LLM API call (chat, embeddings, model listing).
private LMStudioExample _aiClient;
// Token source that lets the user abort the request currently in flight.
private CancellationTokenSource? _cts;

public Form1()
{
    // Build the controls declared in the designer file.
    InitializeComponent();

    // Wire the client up against a local LM Studio server. Endpoint, model id
    // and system prompt are fixed at startup.
    _aiClient = new LMStudioExample(
        "http://localhost:1234/v1/chat/completions", // API endpoint URL
        "lfm2-vl-1.6b",                              // Model to use
        "you are a professional assistant"           // System instructions
    );
    _aiClient.initialize();
}
// Subscribes to the AI client's lifecycle events once the form exists.
// Each handler re-dispatches itself onto the UI thread, so subscribing here is
// safe. The client's lifetime equals the form's, so no unsubscription is needed.
private void Form1_Load(object sender, EventArgs e)
{
_aiClient.OnContentReceived += AiClient_OnContentReceived; // Streamed content chunks as they arrive
_aiClient.OnComplete += AiClient_OnComplete; // Fired when an entire response has finished
_aiClient.OnError += AiClient_OnError; // Fired on any request failure
_aiClient.OnStatusUpdate += AiClient_OnStatusUpdate; // Progress / status text changes
}
// Sends the prompt via the non-streaming path (full response in one go).
// NOTE(review): this "Send" button takes the NON-streaming path while the
// "SendNonStreaming" button streams — confirm the designer wiring is intentional.
private async void btnSend_Click(object sender, EventArgs e) => await SendMessage(streaming: false);
// Sends the prompt via the streaming path (chunks arrive through events).
// NOTE(review): despite its name this button requests streaming: true — the two
// send buttons appear swapped relative to their names; confirm in the designer.
// The client also offers image-capable overloads (SendMessageWithImagesAsync and
// SendMessageWithImagesNonStreamingAsync) for vision models.
private async void btnSendNonStreaming_Click(object sender, EventArgs e)
{
    await SendMessage(streaming: true);
}
// Cancels the in-flight request (if any) and restores the idle button state.
private void btnCancel_Click(object sender, EventArgs e)
{
    _cts?.Cancel();                     // Signal cancellation to the running request
    btnCancel.Enabled = false;          // FIX: nothing is running any more, so Cancel must be disabled
    btnSend.Enabled = true;             // Re-enable both send buttons
    btnSendNonStreaming.Enabled = true;
    Debug.WriteLine("Request canceled by user");
}
/// <summary>
/// Sends the text in txtPrompt to the model, either streaming (content arrives
/// incrementally via OnContentReceived) or non-streaming (full response at once).
/// </summary>
/// <param name="streaming">true to stream the response chunk by chunk.</param>
private async Task SendMessage(bool streaming)
{
    string userMessage = txtPrompt.Text.Trim();
    if (string.IsNullOrEmpty(userMessage))
        return; // Nothing to send

    ChatMessage($"\nUser:{userMessage}\n");
    if (streaming)
    {
        // FIX: only print the label here when streaming; the non-streaming
        // branch prints "Assistant:{response}" itself, so printing the label
        // unconditionally duplicated it (the original commented-out guard
        // shows this was the intent).
        ChatMessage($"Assistant:");
    }

    string response = "";
    txtPrompt.Clear();

    // Lock out further sends while a request is in flight; allow cancellation.
    btnCancel.Enabled = true;
    btnSend.Enabled = false;
    btnSendNonStreaming.Enabled = false;

    // Replace any previous request's token source with a fresh one.
    _cts?.Cancel();
    _cts = new CancellationTokenSource();

    // FIX: capture the token locally. AiClient_OnComplete sets _cts to null,
    // so reading _cts.Token inside the background task could race and throw
    // a NullReferenceException.
    CancellationToken token = _cts.Token;

    try
    {
        // Run the API call off the UI thread so the form stays responsive.
        await Task.Run(async () =>
        {
            if (streaming)
            {
                _aiClient.SetTimeout(15); // Short timeout; arriving chunks keep the connection alive
                response = await _aiClient.SendMessageAsync(userMessage, token);
            }
            else
            {
                _aiClient.SetTimeout(200); // Long timeout; the whole response arrives at once
                response = await _aiClient.SendMessageNonStreamingAsync(userMessage, token);
                ChatMessage($"Assistant:{response}\n");
            }
        });
    }
    catch (Exception ex)
    {
        Debug.WriteLine($"Error sending message: {ex.Message}");
        // FIX: append instead of replacing the whole transcript — this matches
        // AiClient_OnError and preserves the conversation history.
        txtResponse.AppendText($"\r\n\r\nError: {ex.Message}");
        btnCancel.Enabled = false;
    }
    finally
    {
        // Always re-enable the send buttons, success or failure.
        btnSend.Enabled = true;
        btnSendNonStreaming.Enabled = true;
    }
}
/// <summary>
/// Appends one message to the chat transcript; safe to call from any thread.
/// </summary>
private void ChatMessage(string Message)
{
    if (InvokeRequired)
    {
        // Re-dispatch onto the UI thread before touching the control.
        Invoke(new Action(() => ChatMessage(Message)));
        return;
    }
    // FIX: AppendText instead of Text += — consistent with the event handlers,
    // and avoids rebuilding/reselecting the entire transcript on every message.
    // (Scrolling is handled by txtResponse_TextChanged either way.)
    txtResponse.AppendText(Message);
    Debug.WriteLine($"ChatMessage: {Message}");
}
// Shows the client's status text in the window title bar.
private void AiClient_OnStatusUpdate(object? sender, string e)
{
    // Marshal onto the UI thread before touching any control.
    if (InvokeRequired)
    {
        Invoke(new Action(() => AiClient_OnStatusUpdate(sender, e)));
        return;
    }

    Debug.WriteLine($"Status updated: {e}");
    this.Text = e;
}
// Reports a client error in the transcript and resets the UI to idle.
private void AiClient_OnError(object? sender, Exception e)
{
    // Marshal onto the UI thread before touching any control.
    if (InvokeRequired)
    {
        Invoke(new Action(() => AiClient_OnError(sender, e)));
        return;
    }

    txtResponse.AppendText($"\r\n\r\nError: {e.Message}");

    // The failed request is over: new sends allowed, nothing left to cancel.
    btnSend.Enabled = true;
    btnSendNonStreaming.Enabled = true;
    btnCancel.Enabled = false;

    Debug.WriteLine($"Error occurred: {e.Message}");
    this.Text = "LMStudio Example";
}
// Runs after a full response has been delivered; restores the idle UI state.
private void AiClient_OnComplete(object? sender, string e)
{
    // Marshal onto the UI thread before touching any control.
    if (InvokeRequired)
    {
        Invoke(new Action(() => AiClient_OnComplete(sender, e)));
        return;
    }

    // Back to idle: sends allowed, nothing to cancel.
    btnSend.Enabled = true;
    btnSendNonStreaming.Enabled = true;
    btnCancel.Enabled = false;
    _cts = null; // Drop the finished request's token source

    Debug.WriteLine("Response completed");
    this.Text = "LMStudio Example";
}
// Appends one streamed content chunk to the transcript as it arrives.
private void AiClient_OnContentReceived(object? sender, string e)
{
    // Marshal onto the UI thread before touching any control.
    if (InvokeRequired)
    {
        Invoke(new Action(() => AiClient_OnContentReceived(sender, e)));
        return;
    }

    btnCancel.Enabled = true; // A stream is active, so cancelling is possible
    txtResponse.AppendText(e);
    Debug.WriteLine($"Content received: {e}");
}
// Tears down any outstanding work when the window closes.
private void Form1_FormClosing(object sender, FormClosingEventArgs e)
{
    _cts?.Cancel();      // Abort any in-flight request
    _cts?.Dispose();     // FIX: CancellationTokenSource is IDisposable and was never disposed
    _cts = null;
    _aiClient.Dispose(); // Release the client's underlying resources
}
// Keeps the transcript scrolled to the newest text whenever it changes.
private void txtResponse_TextChanged(object sender, EventArgs e)
{
    txtResponse.Select(txtResponse.TextLength, 0); // Caret to the end, nothing selected
    txtResponse.ScrollToCaret();
}
// Requests an embedding vector for the prompt text and prints it in full.
private async Task GetEmbedding()
{
    try
    {
        string textToEmbed = txtPrompt.Text.Trim();
        if (string.IsNullOrEmpty(textToEmbed))
        {
            MessageBox.Show("Please enter some text to embed");
            return;
        }

        // Single-text embedding against a dedicated embedding model.
        var embedding = await _aiClient.GetEmbeddingAsync(
            textToEmbed,
            "text-embedding-qwen3-embedding-0.6b"
        );
        if (embedding == null)
            return;

        txtResponse.AppendText($"Embedding generated successfully!\n");
        txtResponse.AppendText($"Dimensions: {embedding.Length}\n");
        txtResponse.AppendText($"[{string.Join(", ", embedding.Select(v => v.ToString("F6")))}...]\n");
    }
    catch (Exception ex)
    {
        MessageBox.Show($"Error getting embedding: {ex.Message}");
        Debug.WriteLine($"Embedding error: {ex}");
    }
}
// Embeds a fixed set of sample sentences in one batch call, prints every vector,
// then renders a pairwise cosine-similarity matrix over all of them.
private async Task BatchEmbeddings()
{
    try
    {
        string[] textsToEmbed = new[]
        {
            "Machine learning is fascinating",
            "Artificial intelligence is the future",
            "Deep learning uses neural networks",
            "Natural language processing enables chatbots"
        };

        txtResponse.AppendText("Processing batch embeddings...\n\n");
        var embeddings = await _aiClient.GetEmbeddingsBatchAsync(textsToEmbed);
        if (embeddings == null)
            return;

        txtResponse.AppendText($"Generated {embeddings.Length} embeddings\n\n");
        for (int idx = 0; idx < textsToEmbed.Length; idx++)
        {
            txtResponse.AppendText($"Text {idx + 1}: \"{textsToEmbed[idx]}\"\n");
            txtResponse.AppendText($" Embedding dimensions: {embeddings[idx].Length}\n");
            txtResponse.AppendText($"[{string.Join(", ", embeddings[idx].Select(v => v.ToString("F4")))}...]\n\n");
        }

        // Header row of the similarity matrix.
        txtResponse.AppendText("\nSimilarity Matrix:\n");
        txtResponse.AppendText(" ");
        for (int col = 0; col < embeddings.Length; col++)
        {
            txtResponse.AppendText($"Text{col + 1} ");
        }
        txtResponse.AppendText("\n");

        // One row per text; diagonal entries compare a text with itself.
        for (int row = 0; row < embeddings.Length; row++)
        {
            txtResponse.AppendText($"Text{row + 1} ");
            for (int col = 0; col < embeddings.Length; col++)
            {
                float similarity = LMStudioExample.CalculateCosineSimilarity(
                    embeddings[row],
                    embeddings[col]
                );
                txtResponse.AppendText($"{similarity:F3} ");
            }
            txtResponse.AppendText("\n");
        }
    }
    catch (Exception ex)
    {
        MessageBox.Show($"Error with batch embeddings: {ex.Message}");
        Debug.WriteLine($"Batch embeddings error: {ex}");
    }
}
// Embeds three fixed sentences and prints their pairwise cosine similarities.
// Texts 1 and 2 are paraphrases; text 3 is unrelated.
private async Task CompareSimilarity()
{
    try
    {
        string text1 = "The cat sat on the mat";
        string text2 = "A feline rested on the rug";
        string text3 = "Python is a programming language";

        txtResponse.AppendText("Calculating embeddings and similarity...\n\n");

        var embedding1 = await _aiClient.GetEmbeddingAsync(text1);
        var embedding2 = await _aiClient.GetEmbeddingAsync(text2);
        var embedding3 = await _aiClient.GetEmbeddingAsync(text3);

        // Bail out unless all three embeddings came back.
        if (embedding1 == null || embedding2 == null || embedding3 == null)
            return;

        float similarity1_2 = LMStudioExample.CalculateCosineSimilarity(embedding1, embedding2);
        float similarity1_3 = LMStudioExample.CalculateCosineSimilarity(embedding1, embedding3);
        float similarity2_3 = LMStudioExample.CalculateCosineSimilarity(embedding2, embedding3);

        txtResponse.AppendText($"Text 1: \"{text1}\"\n");
        txtResponse.AppendText($"Text 2: \"{text2}\"\n");
        txtResponse.AppendText($"Text 3: \"{text3}\"\n\n");
        txtResponse.AppendText($"Similarity between Text 1 and Text 2: {similarity1_2:F4}\n");
        txtResponse.AppendText($"Similarity between Text 1 and Text 3: {similarity1_3:F4}\n");
        txtResponse.AppendText($"Similarity between Text 2 and Text 3: {similarity2_3:F4}\n\n");
        txtResponse.AppendText("Note: Values closer to 1.0 indicate higher similarity\n");
    }
    catch (Exception ex)
    {
        MessageBox.Show($"Error comparing similarity: {ex.Message}");
        Debug.WriteLine($"Similarity comparison error: {ex}");
    }
}
// Ranks a fixed document collection against the user's query by cosine
// similarity of embeddings (a minimal semantic-search demo).
private async Task SemanticSearch()
{
    try
    {
        string query = txtPrompt.Text.Trim();
        if (string.IsNullOrEmpty(query))
        {
            MessageBox.Show("Please enter a search query");
            return;
        }

        string[] documents = new[]
        {
            "The quick brown fox jumps over the lazy dog",
            "Machine learning models require training data",
            "Python is a popular programming language",
            "Natural language processing enables computers to understand text",
            "The weather today is sunny and warm",
            "Deep neural networks can learn complex patterns"
        };

        txtResponse.AppendText($"Searching for: \"{query}\"\n\n");
        txtResponse.AppendText("Processing documents...\n");

        // One embedding for the query, one batch for the whole corpus.
        var queryEmbedding = await _aiClient.GetEmbeddingAsync(query);
        var documentEmbeddings = await _aiClient.GetEmbeddingsBatchAsync(documents);
        if (queryEmbedding == null || documentEmbeddings == null)
            return;

        // Score every document against the query, best match first.
        var results = documents
            .Select((doc, i) => (Document: doc, Similarity: LMStudioExample.CalculateCosineSimilarity(queryEmbedding, documentEmbeddings[i])))
            .OrderByDescending(r => r.Similarity)
            .ToList();

        txtResponse.AppendText("\nSearch Results (ranked by relevance):\n\n");
        for (int rank = 0; rank < results.Count; rank++)
        {
            txtResponse.AppendText($"{rank + 1}. [{results[rank].Similarity:F4}] {results[rank].Document}\n");
        }
    }
    catch (Exception ex)
    {
        MessageBox.Show($"Error during semantic search: {ex.Message}");
        Debug.WriteLine($"Semantic search error: {ex}");
    }
}
// Fetches every model known to the server and prints its full details.
private async Task ListAllModels()
{
    try
    {
        txtResponse.Text = "Fetching all models...\n\n";

        var models = await _aiClient.GetAllModelsAsync();
        if (models == null || models.Length == 0)
        {
            txtResponse.AppendText("No models found.\n");
            return;
        }

        txtResponse.AppendText($"Found {models.Length} total models:\n");
        txtResponse.AppendText("=".PadRight(80, '=') + "\n\n");
        foreach (var model in models)
        {
            txtResponse.AppendText($"Model ID: {model.Id}\n");
            txtResponse.AppendText($" Type: {model.Type}\n");
            txtResponse.AppendText($" Publisher: {model.Publisher}\n");
            txtResponse.AppendText($" Architecture: {model.Arch}\n");
            txtResponse.AppendText($" Quantization: {model.Quantization}\n");
            txtResponse.AppendText($" State: {model.State}\n");
            txtResponse.AppendText($" Max Context: {model.MaxContextLength:N0} tokens\n");
            txtResponse.AppendText($" Compatibility: {model.CompatibilityType}\n");
            txtResponse.AppendText("\n");
        }
    }
    catch (Exception ex)
    {
        MessageBox.Show($"Error listing models: {ex.Message}");
        Debug.WriteLine($"List models error: {ex}");
    }
}
// Lists only the models currently loaded into memory on the server.
private async Task ListLoadedModels()
{
    try
    {
        txtResponse.Text = "Fetching loaded models...\n\n";

        var loadedModels = await _aiClient.GetLoadedModelsAsync();
        if (loadedModels == null || loadedModels.Length == 0)
        {
            txtResponse.AppendText("No models are currently loaded.\n");
            txtResponse.AppendText("Load a model in LM Studio to use it.\n");
            return;
        }

        txtResponse.AppendText($"Currently loaded models: {loadedModels.Length}\n");
        txtResponse.AppendText("=".PadRight(80, '=') + "\n\n");
        foreach (var model in loadedModels)
        {
            txtResponse.AppendText($"✓ {model.Id}\n");
            txtResponse.AppendText($" Type: {model.Type} | Quantization: {model.Quantization}\n");
            txtResponse.AppendText($" Max Context: {model.MaxContextLength:N0} tokens\n");
            txtResponse.AppendText("\n");
        }
    }
    catch (Exception ex)
    {
        MessageBox.Show($"Error listing loaded models: {ex.Message}");
        Debug.WriteLine($"List loaded models error: {ex}");
    }
}
// Looks up one model by the id typed into the prompt box and prints its details.
private async Task GetModelInfo()
{
    try
    {
        // The prompt text box doubles as the model-id input here.
        string modelId = txtPrompt.Text.Trim();
        if (string.IsNullOrEmpty(modelId))
        {
            MessageBox.Show("Please enter a model ID in the prompt field.\nExample: qwen2-vl-7b-instruct");
            return;
        }

        txtResponse.Text = $"Fetching info for model: {modelId}...\n\n";

        var modelInfo = await _aiClient.GetModelInfoAsync(modelId);
        if (modelInfo == null)
            return;

        txtResponse.AppendText("Model Information\n");
        txtResponse.AppendText("=".PadRight(80, '=') + "\n\n");
        txtResponse.AppendText($"Model ID: {modelInfo.Id}\n");
        txtResponse.AppendText($"Type: {modelInfo.Type}\n");
        txtResponse.AppendText($"Publisher: {modelInfo.Publisher}\n");
        txtResponse.AppendText($"Architecture: {modelInfo.Arch}\n");
        txtResponse.AppendText($"Quantization: {modelInfo.Quantization}\n");
        txtResponse.AppendText($"State: {modelInfo.State} {(modelInfo.IsLoaded ? "✓" : "✗")}\n");
        txtResponse.AppendText($"Max Context Length: {modelInfo.MaxContextLength:N0} tokens\n");
        txtResponse.AppendText($"Compatibility Type: {modelInfo.CompatibilityType}\n\n");

        // Capability note — same precedence as the original if/else chain:
        // embedding takes priority over vision, vision over plain language.
        string? note = modelInfo switch
        {
            { IsEmbeddingModel: true } => "📊 This is an embedding model for vector representations.\n",
            { IsVisionModel: true } => "👁️ This is a vision-language model that can process images.\n",
            { IsLanguageModel: true } => "💬 This is a text-only language model.\n",
            _ => null
        };
        if (note != null)
        {
            txtResponse.AppendText(note);
        }
    }
    catch (Exception ex)
    {
        MessageBox.Show($"Error getting model info: {ex.Message}");
        Debug.WriteLine($"Get model info error: {ex}");
    }
}
/// <summary>
/// Lists all known models grouped into embedding / language / vision sections.
/// </summary>
private async Task ListModelsByType()
{
    try
    {
        txtResponse.Text = "Categorizing models by type...\n\n";

        var embeddingModels = await _aiClient.GetEmbeddingModelsAsync();
        var languageModels = await _aiClient.GetLanguageModelsAsync();
        var visionModels = await _aiClient.GetVisionModelsAsync();

        // The three sections share one rendering routine (decomposed from the
        // original copy-pasted loops; output is unchanged).
        AppendModelSection("📊 EMBEDDING MODELS", embeddingModels,
            showContext: false, emptyMessage: "No embedding models found.\n");
        txtResponse.AppendText("\n");

        AppendModelSection("💬 LANGUAGE MODELS (LLMs)", languageModels,
            showContext: true, emptyMessage: "No language models found.\n");
        txtResponse.AppendText("\n");

        AppendModelSection("👁️ VISION-LANGUAGE MODELS (VLMs)", visionModels,
            showContext: true, emptyMessage: "No vision models found.\n");
    }
    catch (Exception ex)
    {
        MessageBox.Show($"Error categorizing models: {ex.Message}");
        Debug.WriteLine($"Categorize models error: {ex}");
    }
}

// Renders one titled section of the categorized model list into txtResponse.
// showContext additionally prints the max context length for each model.
private void AppendModelSection(string title, ModelInfo[]? models, bool showContext, string emptyMessage)
{
    txtResponse.AppendText(title + "\n");
    txtResponse.AppendText("=".PadRight(80, '=') + "\n");

    if (models == null || models.Length == 0)
    {
        txtResponse.AppendText(emptyMessage);
        return;
    }

    foreach (var model in models)
    {
        string status = model.IsLoaded ? "✓ LOADED" : "○ Not loaded";
        txtResponse.AppendText($"{status} | {model.Id} ({model.Quantization})\n");
        if (showContext)
        {
            txtResponse.AppendText($" Context: {model.MaxContextLength:N0} tokens\n");
        }
    }
}
/// <summary>
/// Shows a modal dialog with a ComboBox of all available models and prints the
/// chosen model's details into the transcript.
/// </summary>
private async Task CreateModelSelector()
{
    try
    {
        var models = await _aiClient.GetAllModelsAsync();
        if (models == null || models.Length == 0)
        {
            MessageBox.Show("No models available.");
            return;
        }

        // FIX: forms shown with ShowDialog are NOT auto-disposed; a using
        // declaration releases the dialog (and its child controls) on exit.
        using Form modelSelectorForm = new Form
        {
            Text = "Select a Model",
            Width = 500,
            Height = 200,
            StartPosition = FormStartPosition.CenterParent
        };

        ComboBox comboBox = new ComboBox
        {
            Left = 20,
            Top = 20,
            Width = 440,
            DropDownStyle = ComboBoxStyle.DropDownList
        };

        // Anonymous items expose a Text property for DisplayMember and carry
        // the ModelInfo alongside it.
        foreach (var model in models)
        {
            string displayText = $"{model.Id} ({model.Type}, {model.State})";
            comboBox.Items.Add(new { Text = displayText, Model = model });
        }
        comboBox.DisplayMember = "Text";
        comboBox.SelectedIndex = 0; // Safe: models.Length > 0 was checked above

        Button selectButton = new Button
        {
            Text = "Select Model",
            Left = 20,
            Top = 60,
            Width = 100
        };
        selectButton.Click += (s, ev) =>
        {
            if (comboBox.SelectedItem != null)
            {
                dynamic selectedItem = comboBox.SelectedItem;
                ModelInfo selectedModel = selectedItem.Model;
                txtResponse.Text = $"Selected Model: {selectedModel.Id}\n";
                txtResponse.AppendText($"Type: {selectedModel.Type}\n");
                txtResponse.AppendText($"State: {selectedModel.State}\n");
                txtResponse.AppendText($"Max Context: {selectedModel.MaxContextLength:N0} tokens\n");
                modelSelectorForm.Close();
            }
        };

        modelSelectorForm.Controls.Add(comboBox);
        modelSelectorForm.Controls.Add(selectButton);

        // FIX: pass an owner so StartPosition.CenterParent can actually center
        // the dialog over this form.
        modelSelectorForm.ShowDialog(this);
    }
    catch (Exception ex)
    {
        MessageBox.Show($"Error creating model selector: {ex.Message}");
        Debug.WriteLine($"Model selector error: {ex}");
    }
}
// Returns true when at least one embedding model is currently loaded.
// Any failure while querying the server counts as "not available".
private async Task<bool> IsEmbeddingModelAvailable()
{
    try
    {
        var embeddingModels = await _aiClient.GetEmbeddingModelsAsync();
        var loadedEmbeddingModel = embeddingModels?.FirstOrDefault(m => m.IsLoaded);

        if (loadedEmbeddingModel == null)
        {
            Debug.WriteLine("No embedding model is currently loaded");
            return false;
        }

        Debug.WriteLine($"Embedding model available: {loadedEmbeddingModel.Id}");
        return true;
    }
    catch (Exception ex)
    {
        Debug.WriteLine($"Error checking embedding model: {ex.Message}");
        return false;
    }
}
// Reports which capabilities (chat / embeddings / vision) the currently loaded
// models provide, so the user knows what operations are possible.
private async Task SmartOperation()
{
    try
    {
        txtResponse.Text = "Checking available models...\n\n";

        var loadedModels = await _aiClient.GetLoadedModelsAsync();
        if (loadedModels == null || loadedModels.Length == 0)
        {
            txtResponse.AppendText("❌ No models are loaded!\n");
            txtResponse.AppendText("Please load a model in LM Studio first.\n");
            return;
        }

        // Capability flags derived from the loaded models' types.
        bool hasLLM = loadedModels.Any(m => m.IsLanguageModel);
        bool hasEmbedding = loadedModels.Any(m => m.IsEmbeddingModel);
        bool hasVision = loadedModels.Any(m => m.IsVisionModel);

        txtResponse.AppendText("Available capabilities:\n");
        txtResponse.AppendText($" {(hasLLM ? "✓" : "✗")} Text Generation (LLM)\n");
        txtResponse.AppendText($" {(hasEmbedding ? "✓" : "✗")} Embeddings\n");
        txtResponse.AppendText($" {(hasVision ? "✓" : "✗")} Vision Understanding (VLM)\n\n");

        if (hasLLM)
        {
            txtResponse.AppendText("Ready to chat! You can ask questions.\n");
        }
        if (hasEmbedding)
        {
            txtResponse.AppendText("Ready for semantic search and embeddings!\n");
        }
        if (hasVision)
        {
            txtResponse.AppendText("Ready to analyze images!\n");
        }
    }
    catch (Exception ex)
    {
        MessageBox.Show($"Error checking models: {ex.Message}");
        Debug.WriteLine($"Smart operation error: {ex}");
    }
}
// Menu item: runs every embedding demo in sequence — single embedding, batch
// embeddings, pairwise similarity, then semantic search. Each awaits the
// previous so their output is not interleaved in the transcript.
private async void embeddingsToolStripMenuItem_Click(object sender, EventArgs e)
{
await GetEmbedding();
await BatchEmbeddings();
await CompareSimilarity();
await SemanticSearch();
}
// Menu item: demonstrates vision requests — a multi-image prompt followed by a
// single-image prompt.
// NOTE(review): image paths are hard-coded to one machine — confirm they exist.
private async void visionToolStripMenuItem_Click(object sender, EventArgs e)
{
    string[] multipleImages =
    {
        @"C:\Users\Administrator\Pictures\aug.jpg",
        @"C:\Users\Administrator\Pictures\cap.jpg"
    };
    await _aiClient.SendMessageWithImagesAsync("What do you see?", multipleImages);

    string[] singleImage = { @"C:\Users\Administrator\Pictures\aug.jpg" };
    await _aiClient.SendMessageWithImagesAsync("What do you see?", singleImage);
}
// Menu wiring: each item simply forwards to the matching demo routine.
private async void listAllModelsToolStripMenuItem_Click(object sender, EventArgs e) => await ListAllModels();

private async void listLoadedModelsToolStripMenuItem_Click(object sender, EventArgs e) => await ListLoadedModels();

private async void getModelInfoToolStripMenuItem_Click(object sender, EventArgs e) => await GetModelInfo();

private async void listModelsByTypeToolStripMenuItem_Click(object sender, EventArgs e) => await ListModelsByType();

private async void createModelSelectorToolStripMenuItem_Click(object sender, EventArgs e) => await CreateModelSelector();

// Checks for a loaded embedding model and reports the result in a message box.
private async void isEmbeddingModelAvailableToolStripMenuItem_Click(object sender, EventArgs e)
{
    bool available = await IsEmbeddingModelAvailable();
    MessageBox.Show($"Is embedding model available? {available}");
}

private async void smartOperationToolStripMenuItem_Click(object sender, EventArgs e) => await SmartOperation();
}
}