Jofthomas committed
Commit 0e59015 · verified · 1 Parent(s): 18c2241

Upload 15 files

README.md CHANGED
@@ -1,12 +1,12 @@
 ---
 title: Ministrals Demo
-emoji: 🐢
-colorFrom: indigo
-colorTo: purple
+emoji: 💻
+colorFrom: yellow
+colorTo: red
 sdk: gradio
-sdk_version: 6.0.2
+sdk_version: 5.50.0
 app_file: app.py
 pinned: false
 ---

-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,874 @@
+ import base64
+ from http import HTTPStatus
+ import os
+ import uuid
+ import time
+ from datetime import datetime, timedelta
+ import torch
+ import gradio as gr
+ from gradio_client import utils as client_utils
+ import modelscope_studio.components.antd as antd
+ import modelscope_studio.components.antdx as antdx
+ import modelscope_studio.components.base as ms
+ import modelscope_studio.components.pro as pro
+ from config import DEFAULT_THEME, LIGHT_THEME, DEFAULT_SYS_PROMPT, save_history, user_config, bot_config, welcome_config, markdown_config, upload_config, MINISTRAL_MODELS
+ from ui_components.thinking_button import ThinkingButton
+ import spaces
+
+ try:
+     from transformers import Mistral3ForConditionalGeneration, AutoProcessor, AutoTokenizer, TextIteratorStreamer
+     from huggingface_hub import hf_hub_download
+     from threading import Thread
+     TRANSFORMERS_AVAILABLE = True
+ except ImportError:
+     TRANSFORMERS_AVAILABLE = False
+     print("Warning: transformers not available. Running in demo mode only.")
+
+ # Process-wide caches so each model/processor/system prompt is only loaded once.
+ MODEL_CACHE = {}
+ SYSTEM_PROMPT_CACHE = {}
+ PROCESSOR_CACHE = {}
+
+ print("=" * 50)
+ print("🚀 Ministral Demo Starting")
+ print("   Model cache initialized (empty)")
+ print("   Processor cache initialized (empty)")
+ print("   System prompt cache initialized (empty)")
+ print("=" * 50)
+
+ def log_with_time(message: str):
+     timestamp = datetime.now().strftime("%H:%M:%S.%f")[:-3]
+     print(f"[{timestamp}] {message}")
+
+ def log_cache_status():
+     log_with_time(f"📦 Cache status: {len(MODEL_CACHE)} models, {len(PROCESSOR_CACHE)} processors, {len(SYSTEM_PROMPT_CACHE)} prompts cached")
+
+ def load_system_prompt(model_id: str) -> str:
+     """Fetch the model's SYSTEM_PROMPT.txt from the Hub (cached) and fill in its placeholders."""
+     cache_key = model_id
+     if cache_key in SYSTEM_PROMPT_CACHE:
+         log_with_time(f"📋 System prompt cache hit for {model_id.split('/')[-1]}")
+         cached_prompt = SYSTEM_PROMPT_CACHE[cache_key]
+         today = datetime.today().strftime("%Y-%m-%d")
+         yesterday = (datetime.today() - timedelta(days=1)).strftime("%Y-%m-%d")
+         model_name = model_id.split("/")[-1]
+         return cached_prompt.format(name=model_name, today=today, yesterday=yesterday)
+
+     try:
+         log_with_time(f"📥 Downloading system prompt for {model_id.split('/')[-1]}...")
+         start = time.time()
+         file_path = hf_hub_download(repo_id=model_id, filename="SYSTEM_PROMPT.txt")
+         with open(file_path, "r") as file:
+             system_prompt = file.read()
+
+         SYSTEM_PROMPT_CACHE[cache_key] = system_prompt
+
+         today = datetime.today().strftime("%Y-%m-%d")
+         yesterday = (datetime.today() - timedelta(days=1)).strftime("%Y-%m-%d")
+         model_name = model_id.split("/")[-1]
+         log_with_time(f"✅ System prompt loaded in {time.time() - start:.2f}s")
+         return system_prompt.format(name=model_name, today=today, yesterday=yesterday)
+     except Exception as e:
+         log_with_time(f"⚠️ Could not load system prompt: {e}")
+         return DEFAULT_SYS_PROMPT
+
+ def get_processor_and_tokenizer(model_id: str):
+     if model_id in PROCESSOR_CACHE:
+         log_with_time(f"📋 Processor cache hit for {model_id.split('/')[-1]}")
+         return PROCESSOR_CACHE[model_id]
+
+     try:
+         log_with_time(f"📥 Loading processor for {model_id.split('/')[-1]}...")
+         start = time.time()
+         processor = AutoProcessor.from_pretrained(model_id)
+         tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True)
+         PROCESSOR_CACHE[model_id] = (processor, tokenizer)
+         log_with_time(f"✅ Processor loaded in {time.time() - start:.2f}s")
+         return processor, tokenizer
+     except Exception as e:
+         log_with_time(f"❌ Error loading processor: {e}")
+         return None, None
+
+ def get_model_and_processor(model_id: str, show_notification=False):
+     if not TRANSFORMERS_AVAILABLE:
+         log_with_time("⚠️ Transformers not available")
+         return None, None, None
+
+     if model_id in MODEL_CACHE:
+         log_with_time(f"📋 Model cache hit for {model_id.split('/')[-1]}")
+         return MODEL_CACHE[model_id]
+
+     model_name = model_id.split("/")[-1]
+
+     try:
+         if show_notification:
+             gr.Info(f"📥 Loading {model_name}... This may take a few minutes on first use.", duration=15)
+
+         total_start = time.time()
+
+         processor, tokenizer = get_processor_and_tokenizer(model_id)
+         if processor is None or tokenizer is None:
+             return None, None, None
+
+         log_with_time(f"📥 Loading model weights for {model_name}...")
+         model_start = time.time()
+         model = Mistral3ForConditionalGeneration.from_pretrained(
+             model_id,
+             torch_dtype=torch.bfloat16,
+             device_map="auto"
+         ).eval()
+         log_with_time(f"✅ Model weights loaded in {time.time() - model_start:.2f}s")
+
+         MODEL_CACHE[model_id] = (model, processor, tokenizer)
+         total_time = time.time() - total_start
+         log_with_time(f"🎉 {model_name} fully loaded in {total_time:.2f}s (cached for future use)")
+
+         if show_notification:
+             gr.Info(f"✅ {model_name} loaded and ready!", duration=3)
+
+         return model, processor, tokenizer
+     except Exception as e:
+         log_with_time(f"❌ Error loading model {model_id}: {e}")
+         if show_notification:
+             gr.Warning(f"❌ Failed to load model: {str(e)}", duration=10)
+         return None, None, None
+
+ def encode_file_to_base64(file_path):
+     with open(file_path, "rb") as file:
+         mime_type = client_utils.get_mimetype(file_path)
+         base64_data = base64.b64encode(file.read()).decode("utf-8")
+         return f"data:{mime_type};base64,{base64_data}"
+
+ def format_history_for_transformers(history, model_id):
+     """Convert the chatbot history into the chat-template message format."""
+     system_prompt = load_system_prompt(model_id)
+
+     messages = [{
+         "role": "system",
+         "content": [{"type": "text", "text": system_prompt}]
+     }]
+
+     for item in history:
+         if item["role"] == "user":
+             content = []
+             text_content = item["content"][1]["content"]
+             content.append({"type": "text", "text": text_content})
+
+             for file_path in item["content"][0]["content"]:
+                 if file_path.startswith("http"):
+                     content.append({"type": "image", "url": file_path})
+                 elif os.path.exists(file_path):
+                     mime_type = client_utils.get_mimetype(file_path)
+                     if mime_type.startswith("image"):
+                         content.append({"type": "image", "url": file_path})
+
+             messages.append({
+                 "role": "user",
+                 "content": content
+             })
+         elif item["role"] == "assistant":
+             text_contents = [content["content"] for content in item["content"] if content["type"] == "text"]
+             if text_contents:
+                 messages.append({
+                     "role": "assistant",
+                     "content": [{"type": "text", "text": " ".join(text_contents)}]
+                 })
+
+     return messages
+
+ def prepare_inputs(processor, messages):
+     log_with_time("📋 Preparing inputs...")
+     inputs = processor.apply_chat_template(
+         messages,
+         add_generation_prompt=True,
+         tokenize=True,
+         return_dict=True,
+         return_tensors="pt"
+     )
+
+     if 'token_type_ids' in inputs:
+         del inputs['token_type_ids']
+
+     return inputs
+
+ @spaces.GPU(duration=180)
+ def generate_streaming(model, processor, tokenizer, messages):
+     """Run model.generate in a background thread and yield tokens as they stream in."""
+     log_with_time("🚀 Starting streaming generation...")
+
+     inputs = prepare_inputs(processor, messages)
+
+     log_with_time("📤 Moving tensors to GPU...")
+     inputs = {
+         k: (v.to(model.device, dtype=torch.bfloat16) if v.is_floating_point() else v.to(model.device))
+         for k, v in inputs.items()
+     }
+
+     streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
+
+     generation_kwargs = dict(
+         **inputs,
+         streamer=streamer,
+         max_new_tokens=2048,
+         temperature=0.15,
+         do_sample=True,
+     )
+
+     log_with_time("🧵 Starting generation thread...")
+     thread = Thread(target=model.generate, kwargs=generation_kwargs)
+     thread.start()
+
+     log_with_time("📝 Streaming tokens...")
+     generated_text = ""
+     for token in streamer:
+         generated_text += token
+         yield {"token": token, "full_text": generated_text, "done": False}
+
+     thread.join()
+     log_with_time(f"✅ Generation complete: {len(generated_text)} chars")
+     yield {"token": "", "full_text": generated_text, "done": True}
+
+
+ class Gradio_Events:
+
+     @staticmethod
+     def submit(state_value):
+         history = state_value["conversation_contexts"][state_value["conversation_id"]]["history"]
+         enable_thinking = state_value["conversation_contexts"][state_value["conversation_id"]]["enable_thinking"]
+         model_size = state_value["conversation_contexts"][state_value["conversation_id"]].get("model_size", "14B")
+
+         model_type = "reasoning" if enable_thinking else "instruct"
+         model_id = MINISTRAL_MODELS[model_size][model_type]
+
+         log_cache_status()
+
+         log_with_time(f"📝 Formatting {len(history)} messages for {model_id.split('/')[-1]}")
+         messages = format_history_for_transformers(history, model_id)
+         log_with_time(f"📨 {len(messages)} messages prepared (including system prompt)")
+
+         # Placeholder assistant message that gets filled in as tokens stream.
+         history.append({
+             "role": "assistant",
+             "content": [],
+             "key": str(uuid.uuid4()),
+             "loading": True,
+             "header": f"Ministral-3-{model_size}",
+             "status": "pending"
+         })
+
+         yield {
+             chatbot: gr.update(value=history),
+             state: gr.update(value=state_value),
+         }
+         try:
+             start_time = time.time()
+             answer_content = ""
+
+             if TRANSFORMERS_AVAILABLE:
+                 needs_download = model_id not in MODEL_CACHE
+                 model, processor, tokenizer = get_model_and_processor(model_id, show_notification=needs_download)
+             else:
+                 model, processor, tokenizer = None, None, None
+
+             if model is not None and processor is not None and tokenizer is not None:
+                 log_with_time(f"🚀 Starting inference with {model_id.split('/')[-1]}")
+
+                 try:
+                     inference_start = time.time()
+
+                     history[-1]["content"] = [{
+                         "type": "text",
+                         "content": "",
+                     }]
+                     history[-1]["loading"] = False
+
+                     for chunk in generate_streaming(model, processor, tokenizer, messages):
+                         if chunk.get("token"):
+                             answer_content = chunk["full_text"]
+                             history[-1]["content"][0]["content"] = answer_content
+                             yield {
+                                 chatbot: gr.update(value=history),
+                                 state: gr.update(value=state_value)
+                             }
+
+                         if chunk.get("done"):
+                             answer_content = chunk["full_text"]
+                             break
+
+                     inference_time = time.time() - inference_start
+                     log_with_time(f"✅ Streaming generation completed in {inference_time:.2f}s")
+
+                 except Exception as e:
+                     log_with_time(f"❌ Model inference error: {e}")
+                     error_str = str(e)
+
+                     if "timeout" in error_str.lower() or "aborted" in error_str.lower():
+                         answer_content = "⏱️ GPU timeout: The request took too long to process. Please try:\n\n1. Using a shorter prompt\n2. Reducing image sizes\n3. Trying again in a moment"
+                     elif "memory" in error_str.lower() or "oom" in error_str.lower():
+                         answer_content = "💾 GPU out of memory. Try using a smaller model or reducing input size."
+                     else:
+                         answer_content = f"❌ Model inference failed: {error_str}\n\nPlease try again or check the console for more details."
+
+                     history[-1]["content"] = [{
+                         "type": "text",
+                         "content": answer_content,
+                     }]
+                     history[-1]["loading"] = False
+                     yield {
+                         chatbot: gr.update(value=history),
+                         state: gr.update(value=state_value)
+                     }
+
+             else:
+                 # Demo mode: stream a canned response character by character.
+                 log_with_time(f"⚠️ Using demo mode for: {model_id}")
+                 demo_answer = f"This is a demo response from {model_id}. The application is running in demo mode.\n\nTo use real models, install transformers: `pip install transformers torch`\n\nYour message: {messages[-1]['content'][0]['text'] if messages and messages[-1]['content'] else 'N/A'}"
+
+                 history[-1]["content"] = [{
+                     "type": "text",
+                     "content": "",
+                 }]
+
+                 for char in demo_answer:
+                     answer_content += char
+                     history[-1]["content"][0]["content"] = answer_content
+                     history[-1]["loading"] = False
+                     yield {
+                         chatbot: gr.update(value=history),
+                         state: gr.update(value=state_value)
+                     }
+                     time.sleep(0.01)
+
+             log_with_time(f"📊 Response generated: {len(answer_content)} chars")
+             history[-1]["status"] = "done"
+             cost_time = "{:.2f}".format(time.time() - start_time)
+             log_with_time(f"⏱️ Total request time: {cost_time}s")
+             history[-1]["footer"] = f"{cost_time}s"
+             yield {
+                 chatbot: gr.update(value=history),
+                 state: gr.update(value=state_value),
+             }
+         except Exception as e:
+             log_with_time(f"❌ Request failed for {model_id.split('/')[-1]}: {e}")
+             history[-1]["loading"] = False
+             history[-1]["status"] = "done"
+             if not history[-1]["content"]:
+                 history[-1]["content"] = []
+             history[-1]["content"].append({
+                 "type": "text",
+                 "content": f'<span style="color: var(--color-red-500)">Error: {str(e)}</span>'
+             })
+             yield {
+                 chatbot: gr.update(value=history),
+                 state: gr.update(value=state_value)
+             }
+
+     @staticmethod
+     def add_message(input_value, thinking_btn_state_value, model_selector_state_value, state_value):
+         text = input_value["text"]
+         files = input_value["files"]
+         if not state_value["conversation_id"]:
+             random_id = str(uuid.uuid4())
+             history = []
+             state_value["conversation_id"] = random_id
+             state_value["conversation_contexts"][state_value["conversation_id"]] = {"history": history}
+             state_value["conversations"].append({"label": text, "key": random_id})
+
+         history = state_value["conversation_contexts"][state_value["conversation_id"]]["history"]
+
+         state_value["conversation_contexts"][state_value["conversation_id"]] = {
+             "history": history,
+             "enable_thinking": thinking_btn_state_value["enable_thinking"],
+             "model_size": model_selector_state_value["model_size"]
+         }
+
+         history.append({
+             "key": str(uuid.uuid4()),
+             "role": "user",
+             "content": [{"type": "file", "content": list(files)}, {"type": "text", "content": text}]
+         })
+         yield Gradio_Events.preprocess_submit(clear_input=True)(state_value)
+
+         try:
+             for chunk in Gradio_Events.submit(state_value):
+                 yield chunk
+         finally:
+             yield Gradio_Events.postprocess_submit(state_value)
+
+     @staticmethod
+     def preprocess_submit(clear_input=True):
+         def preprocess_submit_handler(state_value):
+             history = state_value["conversation_contexts"][state_value["conversation_id"]]["history"]
+             return {
+                 input: gr.update(value=None, loading=True) if clear_input else gr.update(loading=True),
+                 conversations: gr.update(active_key=state_value["conversation_id"],
+                                          items=list(map(lambda item: {**item, "disabled": item["key"] != state_value["conversation_id"]}, state_value["conversations"]))),
+                 add_conversation_btn: gr.update(disabled=True),
+                 clear_btn: gr.update(disabled=True),
+                 conversation_delete_menu_item: gr.update(disabled=True),
+                 chatbot: gr.update(value=history, bot_config=bot_config(disabled_actions=['edit', 'retry', 'delete']), user_config=user_config(disabled_actions=['edit', 'delete'])),
+                 state: gr.update(value=state_value),
+             }
+         return preprocess_submit_handler
+
+     @staticmethod
+     def postprocess_submit(state_value):
+         history = state_value["conversation_contexts"][state_value["conversation_id"]]["history"]
+         return {
+             input: gr.update(loading=False),
+             conversation_delete_menu_item: gr.update(disabled=False),
+             clear_btn: gr.update(disabled=False),
+             conversations: gr.update(items=state_value["conversations"]),
+             add_conversation_btn: gr.update(disabled=False),
+             chatbot: gr.update(value=history, bot_config=bot_config(), user_config=user_config()),
+             state: gr.update(value=state_value),
+         }
+
+     @staticmethod
+     def cancel(state_value):
+         history = state_value["conversation_contexts"][state_value["conversation_id"]]["history"]
+         history[-1]["loading"] = False
+         history[-1]["status"] = "done"
+         history[-1]["footer"] = "Chat completion paused"
+         return Gradio_Events.postprocess_submit(state_value)
+
+     @staticmethod
+     def delete_message(state_value, e: gr.EventData):
+         index = e._data["payload"][0]["index"]
+         history = state_value["conversation_contexts"][state_value["conversation_id"]]["history"]
+         history = history[:index] + history[index + 1:]
+         state_value["conversation_contexts"][state_value["conversation_id"]]["history"] = history
+         return gr.update(value=state_value)
+
+     @staticmethod
+     def edit_message(state_value, chatbot_value, e: gr.EventData):
+         index = e._data["payload"][0]["index"]
+         history = state_value["conversation_contexts"][state_value["conversation_id"]]["history"]
+         history[index]["content"] = chatbot_value[index]["content"]
+         if not history[index].get("edited"):
+             history[index]["edited"] = True
+             history[index]["footer"] = ((history[index]["footer"]) + " " if history[index].get("footer") else "") + "Edited"
+         return gr.update(value=state_value), gr.update(value=history)
+
+     @staticmethod
+     def regenerate_message(thinking_btn_state_value, model_selector_state_value, state_value, e: gr.EventData):
+         # Truncate the history at the retried message, then resubmit.
+         index = e._data["payload"][0]["index"]
+         history = state_value["conversation_contexts"][state_value["conversation_id"]]["history"]
+         history = history[:index]
+
+         state_value["conversation_contexts"][state_value["conversation_id"]] = {
+             "history": history,
+             "enable_thinking": thinking_btn_state_value["enable_thinking"],
+             "model_size": model_selector_state_value["model_size"]
+         }
+
+         yield Gradio_Events.preprocess_submit()(state_value)
+         try:
+             for chunk in Gradio_Events.submit(state_value):
+                 yield chunk
+         finally:
+             yield Gradio_Events.postprocess_submit(state_value)
+
+     @staticmethod
+     def apply_prompt(e: gr.EventData, input_value):
+         input_value["text"] = e._data["payload"][0]["value"]["description"]
+         input_value["files"] = e._data["payload"][0]["value"]["urls"]
+         return gr.update(value=input_value)
+
+     @staticmethod
+     def new_chat(thinking_btn_state, model_selector_state, state_value):
+         if not state_value["conversation_id"]:
+             return gr.skip()
+         state_value["conversation_id"] = ""
+         thinking_btn_state["enable_thinking"] = True
+         model_selector_state["model_size"] = "14B"
+         return gr.update(active_key=state_value["conversation_id"]), gr.update(value=None), gr.update(value=thinking_btn_state), gr.update(value="14B"), gr.update(value=state_value)
+
+     @staticmethod
+     def select_conversation(thinking_btn_state_value, model_selector_state_value, state_value, e: gr.EventData):
+         active_key = e._data["payload"][0]
+         if state_value["conversation_id"] == active_key or (active_key not in state_value["conversation_contexts"]):
+             return gr.skip()
+         state_value["conversation_id"] = active_key
+         thinking_btn_state_value["enable_thinking"] = state_value["conversation_contexts"][active_key].get("enable_thinking", False)
+         model_size = state_value["conversation_contexts"][active_key].get("model_size", "14B")
+         model_selector_state_value["model_size"] = model_size
+         return gr.update(active_key=active_key), gr.update(value=state_value["conversation_contexts"][active_key]["history"]), gr.update(value=thinking_btn_state_value), gr.update(value=model_size), gr.update(value=state_value)
+
+     @staticmethod
+     def click_conversation_menu(state_value, e: gr.EventData):
+         conversation_id = e._data["payload"][0]["key"]
+         operation = e._data["payload"][1]["key"]
+         if operation == "delete":
+             del state_value["conversation_contexts"][conversation_id]
+             state_value["conversations"] = [item for item in state_value["conversations"] if item["key"] != conversation_id]
+             if state_value["conversation_id"] == conversation_id:
+                 state_value["conversation_id"] = ""
+             return gr.update(items=state_value["conversations"], active_key=state_value["conversation_id"]), gr.update(value=None), gr.update(value=state_value)
+         else:
+             return gr.update(items=state_value["conversations"]), gr.skip(), gr.update(value=state_value)
+
+     @staticmethod
+     def clear_conversation_history(state_value):
+         if not state_value["conversation_id"]:
+             return gr.skip()
+         state_value["conversation_contexts"][state_value["conversation_id"]]["history"] = []
+         return gr.update(value=None), gr.update(value=state_value)
+
+     @staticmethod
+     def update_browser_state(state_value):
+         return gr.update(value=dict(conversations=state_value["conversations"], conversation_contexts=state_value["conversation_contexts"]))
+
+     @staticmethod
+     def apply_browser_state(browser_state_value, state_value):
+         state_value["conversations"] = browser_state_value["conversations"]
+         state_value["conversation_contexts"] = browser_state_value["conversation_contexts"]
+         return gr.update(items=browser_state_value["conversations"]), gr.update(value=state_value)
+
+
+ css = """
+ body, html {
+     background-color: var(--ms-gr-ant-color-bg-layout, #FFFAEB) !important;
+     color: var(--ms-gr-ant-color-text, #000000) !important;
+ }
+
+ .gradio-container, .gradio-container.dark {
+     padding: 0 !important;
+     background-color: var(--ms-gr-ant-color-bg-layout, #FFFAEB) !important;
+     color: var(--ms-gr-ant-color-text, #000000) !important;
+ }
+ .gradio-container > main.fillable {
+     padding: 0 !important;
+     background-color: var(--ms-gr-ant-color-bg-layout, #FFFAEB) !important;
+ }
+
+ #chatbot .ms-gr-ant-col,
+ #chatbot .ms-gr-antd-col,
+ #chatbot [class*="ms-gr-ant-col"] {
+     padding-left: 0 !important;
+     padding-right: 0 !important;
+     background-color: transparent !important;
+ }
+
+ #chatbot {
+     height: calc(100vh - 21px - 16px);
+     max-height: 1500px;
+     background-color: var(--ms-gr-ant-color-bg-layout, #FFFAEB) !important;
+ }
+ #chatbot .chatbot-conversations {
+     height: 100vh;
+     background-color: var(--ms-gr-ant-color-bg-container, #FFF0C3) !important;
+     padding-left: 4px;
+     padding-right: 4px;
+ }
+ #chatbot .chatbot-conversations .chatbot-conversations-list {
+     padding-left: 0;
+     padding-right: 0;
+ }
+ #chatbot .chatbot-chat {
+     padding: 32px;
+     padding-bottom: 0;
+     height: 100%;
+     background-color: var(--ms-gr-ant-color-bg-layout, #FFFAEB) !important;
+     color: var(--ms-gr-ant-color-text, #000000) !important;
+ }
+ @media (max-width: 768px) {
+     #chatbot .chatbot-chat {
+         padding: 10px;
+     }
+ }
+ #chatbot .chatbot-chat .chatbot-chat-messages {
+     flex: 1;
+     background-color: transparent !important;
+ }
+
+ .gradio-container .contain {
+     background-color: transparent !important;
+     padding: 0 !important;
+ }
+
+ .user-message-content {
+     background-color: #ffffff !important;
+     background: #ffffff !important;
+     border-radius: 16px !important;
+     padding: 14px 18px !important;
+     border: 1px solid #E9E2CB !important;
+     color: #1E1E1E !important;
+     box-shadow: 0 2px 8px rgba(0, 0, 0, 0.06) !important;
+     max-width: fit-content;
+ }
+
+ [class*="chatbot"] [class*="user"] [class*="content"],
+ [class*="chatbot"] [class*="user"] [class*="bubble"],
+ [class*="chatbot"] [class*="user"] [class*="message"],
+ [class*="pro-chatbot"] [class*="user"] {
+     background-color: transparent !important;
+     background: transparent !important;
+ }
+
+ .user-message-content,
+ .user-message-content *:not(code):not(pre) {
+     background-color: inherit !important;
+ }
+
+ .chatbot-welcome-prompts {
+     background-color: var(--ms-gr-ant-color-bg-container, #FFF0C3) !important;
+ }
+
+ [class*="welcome"] [class*="prompt"],
+ [class*="welcome"] [class*="prompts"],
+ [class*="prompts"] [class*="item"],
+ [class*="prompts"] [class*="card"],
+ .ms-gr-antdx-prompts-item,
+ .ms-gr-pro-chatbot-welcome-prompts {
+     color: #1E1E1E !important;
+ }
+
+ [class*="welcome"] span,
+ [class*="welcome"] p,
+ [class*="welcome"] div,
+ [class*="prompts"] span,
+ [class*="prompts"] p,
+ [class*="prompts"] div,
+ [class*="prompts"] [class*="title"],
+ [class*="prompts"] [class*="description"],
+ .ms-gr-antdx-prompts-item span,
+ .ms-gr-antdx-prompts-item p,
+ .ms-gr-antdx-prompts-item div {
+     color: #1E1E1E !important;
+ }
+
+ [class*="prompts"] [class*="item"] {
+     background-color: #FFFAEB !important;
+     border: 1px solid #E9E2CB !important;
+ }
+
+ .chatbot-conversations {
+     background-color: var(--ms-gr-ant-color-bg-container, #FFF0C3) !important;
+ }
+
+ .chatbot-conversations .ms-gr-ant-typography {
+     color: var(--ms-gr-ant-color-text, #000000) !important;
+ }
+
+ .chatbot-conversations .ms-gr-ant-btn-color-primary.ms-gr-ant-btn-variant-filled {
+     background-color: var(--ms-gr-ant-color-primary, #FF8205) !important;
+     color: #ffffff !important;
+     border: none !important;
+ }
+
+ .chatbot-conversations .ms-gr-ant-btn-color-primary.ms-gr-ant-btn-variant-filled:hover {
+     background-color: #FA500F !important;
+     transform: translateY(-1px);
+ }
+
+ .chatbot-conversations .ms-gr-ant-conversations {
+     color: var(--ms-gr-ant-color-text, #000000) !important;
+ }
+
+ .chatbot-conversations .ms-gr-ant-conversations-item {
+     color: var(--ms-gr-ant-color-text, #000000) !important;
+     background-color: transparent !important;
+ }
+
+ .chatbot-conversations .ms-gr-ant-conversations-item:hover {
+     background-color: var(--ms-gr-ant-color-bg-elevated, #E9E2CB) !important;
+ }
+
+ .ant-typography {
+     color: var(--ms-gr-ant-color-text, #000000) !important;
+ }
+
+ .ant-flex {
+     color: var(--ms-gr-ant-color-text, #000000) !important;
+ }
+
+ #chatbot > .ant-col {
+     background-color: var(--ms-gr-ant-color-bg-layout, #FFFAEB) !important;
+ }
+
+ h1, h2, h3, h4, h5, h6, p, span {
+     color: var(--ms-gr-ant-color-text);
+ }
+
+ .ms-gr-pro-chatbot-bot,
+ .ms-gr-pro-chatbot-bot *,
+ .ms-gr-pro-chatbot-bot .ms-gr-pro-chatbot-message-content,
+ .ms-gr-pro-chatbot-bot [class*="content"],
+ [class*="chatbot"] [class*="bot"],
+ [class*="chatbot"] [class*="bot"] * {
+     color: #1E1E1E !important;
+ }
+
+ .ms-gr-pro-chatbot-bot h1,
+ .ms-gr-pro-chatbot-bot h2,
+ .ms-gr-pro-chatbot-bot h3,
+ .ms-gr-pro-chatbot-bot h4,
+ .ms-gr-pro-chatbot-bot h5,
+ .ms-gr-pro-chatbot-bot h6,
+ .ms-gr-pro-chatbot-bot strong,
+ .ms-gr-pro-chatbot-bot b,
+ .ms-gr-pro-chatbot-bot em,
+ .ms-gr-pro-chatbot-bot i,
+ .ms-gr-pro-chatbot-bot p,
+ .ms-gr-pro-chatbot-bot span,
+ .ms-gr-pro-chatbot-bot li,
+ .ms-gr-pro-chatbot-bot ul,
+ .ms-gr-pro-chatbot-bot ol,
+ .ms-gr-pro-chatbot-bot a,
+ [class*="chatbot"] [class*="bot"] h1,
+ [class*="chatbot"] [class*="bot"] h2,
+ [class*="chatbot"] [class*="bot"] h3,
+ [class*="chatbot"] [class*="bot"] strong,
+ [class*="chatbot"] [class*="bot"] b,
+ [class*="chatbot"] [class*="bot"] p,
+ [class*="chatbot"] [class*="bot"] span,
+ [class*="chatbot"] [class*="bot"] li {
+     color: #1E1E1E !important;
+ }
+
+ .ms-gr-pro-chatbot-bot [style*="color"],
+ [class*="chatbot"] [class*="bot"] [style*="color"],
+ [class*="chatbot"] [class*="bot"] [style] {
+     color: #1E1E1E !important;
+ }
+
+ .ms-gr-pro-chatbot-bot pre,
+ .ms-gr-pro-chatbot-bot pre code {
+     background-color: #E9E2CB !important;
+     color: #1E1E1E !important;
+ }
+
+ .ms-gr-pro-chatbot-bot code:not(pre code) {
+     background-color: #E9E2CB !important;
+     color: #1E1E1E !important;
+     padding: 2px 6px;
+     border-radius: 4px;
+ }
+
+ footer {
+     display: none !important;
+ }
+ .footer {
+     display: none !important;
+ }
+
+ *, *::before, *::after {
+     transition: none !important;
+ }
+ """
+
+ with gr.Blocks(
+         fill_width=True,
+         css=css,
+         theme=gr.themes.Default(primary_hue="orange", secondary_hue="gray", neutral_hue="gray")
+ ) as demo:
+
+     state = gr.State({"conversation_contexts": {}, "conversations": [], "conversation_id": "", "oss_cache": {}})
+     thinking_btn_state = gr.State({"enable_thinking": False})
+     model_selector_state = gr.State({"model_size": "14B"})
+
+     with ms.Application(), antdx.XProvider(theme=LIGHT_THEME), ms.AutoLoading():
+         with antd.Row(gutter=[20, 20], wrap=False, elem_id="chatbot"):
+             # Sidebar: logo, "New Conversation" button, conversation list.
+             with antd.Col(md=dict(flex="0 0 260px", span=24, order=0), span=0, order=1, elem_style=dict(width=0)):
+                 with ms.Div(elem_classes="chatbot-conversations"):
+                     with antd.Flex(vertical=True, gap="small", elem_style=dict(height="100%")):
+                         with antd.Flex(vertical=True, gap="small", align="center", elem_style=dict(padding=8)):
+                             with antd.Typography.Title(level=1, elem_style=dict(fontSize=24, margin=0)):
+                                 with antd.Flex(align="center", gap="small", justify="center"):
+                                     antd.Image('./assets/m-boxed-rainbow.png', preview=False, alt="logo", width=24, height=24)
+                                     ms.Span("Ministrals Demo")
+
+                         with antd.Button(value=None, color="primary", variant="filled", block=True) as add_conversation_btn:
+                             ms.Text("New Conversation")
+                             with ms.Slot("icon"):
+                                 antd.Icon("PlusOutlined")
+
+                         with antdx.Conversations(elem_classes="chatbot-conversations-list") as conversations:
+                             with ms.Slot('menu.items'):
+                                 with antd.Menu.Item(label="Delete", key="delete", danger=True) as conversation_delete_menu_item:
+                                     with ms.Slot("icon"):
+                                         antd.Icon("DeleteOutlined")
+
+             # Main column: header, chat messages, multimodal input.
+             with antd.Col(flex=1, elem_style=dict(height="100%")):
+                 with antd.Flex(vertical=True, gap="small", elem_classes="chatbot-chat"):
+                     with antd.Flex(align="center", gap="large", elem_style=dict(paddingBottom=10)):
+                         antd.Typography.Title("Hello, I'm Ministral", level=3, elem_style=dict(margin=0))
+                         with antd.Flex(align="center", gap="small"):
+                             ms.Span("currently using:", elem_style=dict(fontSize=12))
+                             model_display = antd.Typography.Text(
+                                 value="mistralai/Ministral-3-14B-Instruct-2512",
+                                 copyable=True, code=True,
+                                 elem_style=dict(fontSize=12, color="var(--ms-gr-ant-color-text-secondary)")
+                             )
+
+                     chatbot = pro.Chatbot(elem_classes="chatbot-chat-messages", height=0,
+                                           markdown_config=markdown_config(), welcome_config=welcome_config(),
+                                           user_config=user_config(), bot_config=bot_config())
+
+                     with pro.MultimodalInput(placeholder="How can I help you today?", upload_config=upload_config()) as input:
+                         with ms.Slot("prefix"):
+                             with antd.Flex(gap=4, wrap=True, elem_style=dict(maxWidth='40vw', display="inline-flex")):
+                                 with antd.Button(value=None, type="text") as clear_btn:
+                                     with ms.Slot("icon"):
+                                         antd.Icon("ClearOutlined")
+
+                                 model_selector = antd.Select(
+                                     value="14B", default_value="14B",
+                                     options=[{"label": "Ministral-3-14B", "value": "14B"}, {"label": "Ministral-3-8B", "value": "8B"}, {"label": "Ministral-3-3B", "value": "3B"}],
+                                     elem_style=dict(width=180)
+                                 )
+
+                                 with antd.Button("Thinking", shape="round", color="primary") as thinking_btn:
+                                     with ms.Slot("icon"):
+                                         antd.Icon("SunOutlined")
+
+     def toggle_thinking(state_value):
+         state_value["enable_thinking"] = not state_value["enable_thinking"]
+         return gr.update(value=state_value)
+
+     def apply_thinking_style(state_value):
+         return gr.update(variant="solid" if state_value["enable_thinking"] else "")
+
+     def update_model_size(value, state_value):
+         state_value["model_size"] = value
+         return gr.update(value=state_value)
+
+     def update_model_display(thinking_state, model_state):
+         model_size = model_state.get("model_size", "14B")
+         model_type = "reasoning" if thinking_state.get("enable_thinking", False) else "instruct"
+         model_name = MINISTRAL_MODELS[model_size][model_type]
+         return gr.update(value=model_name)
+
+     thinking_btn_state.change(fn=apply_thinking_style, inputs=[thinking_btn_state], outputs=[thinking_btn])
+     thinking_btn_state.change(fn=update_model_display, inputs=[thinking_btn_state, model_selector_state], outputs=[model_display])
+     thinking_btn.click(fn=toggle_thinking, inputs=[thinking_btn_state], outputs=[thinking_btn_state])
+
+     model_selector.change(fn=update_model_size, inputs=[model_selector, model_selector_state], outputs=[model_selector_state])
+     model_selector_state.change(fn=update_model_display, inputs=[thinking_btn_state, model_selector_state], outputs=[model_display])
+
+     if save_history:
+         browser_state = gr.BrowserState({"conversation_contexts": {}, "conversations": []}, storage_key="ministral_demo_storage")
+         state.change(fn=Gradio_Events.update_browser_state, inputs=[state], outputs=[browser_state])
+         demo.load(fn=Gradio_Events.apply_browser_state, inputs=[browser_state, state], outputs=[conversations, state])
+
+     add_conversation_btn.click(fn=Gradio_Events.new_chat, inputs=[thinking_btn_state, model_selector_state, state], outputs=[conversations, chatbot, thinking_btn_state, model_selector, state])
+     conversations.active_change(fn=Gradio_Events.select_conversation, inputs=[thinking_btn_state, model_selector_state, state], outputs=[conversations, chatbot, thinking_btn_state, model_selector, state])
+     conversations.menu_click(fn=Gradio_Events.click_conversation_menu, inputs=[state], outputs=[conversations, chatbot, state])
+
+     chatbot.welcome_prompt_select(fn=Gradio_Events.apply_prompt, inputs=[input], outputs=[input])
+     chatbot.delete(fn=Gradio_Events.delete_message, inputs=[state], outputs=[state])
+     chatbot.edit(fn=Gradio_Events.edit_message, inputs=[state, chatbot], outputs=[state, chatbot])
+
+     regenerating_event = chatbot.retry(fn=Gradio_Events.regenerate_message, inputs=[thinking_btn_state, model_selector_state, state],
+                                        outputs=[input, clear_btn, conversation_delete_menu_item, add_conversation_btn, conversations, chatbot, state])
+
+     submit_event = input.submit(fn=Gradio_Events.add_message, inputs=[input, thinking_btn_state, model_selector_state, state],
+                                 outputs=[input, clear_btn, conversation_delete_menu_item, add_conversation_btn, conversations, chatbot, state])
+     input.cancel(fn=Gradio_Events.cancel, inputs=[state],
+                  outputs=[input, conversation_delete_menu_item, clear_btn, conversations, add_conversation_btn, chatbot, state],
+                  cancels=[submit_event, regenerating_event], queue=False)
+
+     clear_btn.click(fn=Gradio_Events.clear_conversation_history, inputs=[state], outputs=[chatbot, state])
+
+ if __name__ == "__main__":
+     demo.queue(default_concurrency_limit=100, max_size=100).launch(ssr_mode=False, max_threads=100, show_api=False)
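Note: generate_streaming above uses transformers' standard threaded-streaming idiom — model.generate runs in a background thread while TextIteratorStreamer yields decoded text chunks to the caller. A minimal standalone sketch of that idiom, using gpt2 purely as a small stand-in checkpoint (not one of the Ministral models):

    # Sketch of the TextIteratorStreamer pattern used by generate_streaming().
    # gpt2 is a stand-in checkpoint so the sketch runs anywhere.
    from threading import Thread
    from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2")

    inputs = tokenizer("The quick brown fox", return_tensors="pt")
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

    # generate() blocks, so it runs in a thread while the main thread consumes tokens.
    thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=30))
    thread.start()

    for chunk in streamer:  # yields decoded text pieces as they are produced
        print(chunk, end="", flush=True)
    thread.join()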
assets/Ministral.png ADDED
assets/m-boxed-black.png ADDED
assets/m-boxed-orange.png ADDED
assets/m-boxed-rainbow.png ADDED
assets/m-orange.png ADDED
assets/m-white.png ADDED
assets/pokemon_battle.jpg ADDED
config.py ADDED
@@ -0,0 +1,129 @@
+ import os
+ from modelscope_studio.components.pro.chatbot import ChatbotActionConfig, ChatbotBotConfig, ChatbotUserConfig, ChatbotWelcomeConfig, ChatbotMarkdownConfig
+ from modelscope_studio.components.pro.multimodal_input import MultimodalInputUploadConfig
+ from example_prompts import get_example_prompts
+
+ save_history = True
+
+ # Checkpoint table: model size -> instruct/reasoning repo ids on the Hub.
+ MINISTRAL_MODELS = {
+     "14B": {
+         "instruct": "mistralai/Ministral-3-14B-Instruct-2512-BF16",
+         "reasoning": "mistralai/Ministral-3-14B-Reasoning-2512"
+     },
+     "8B": {
+         "instruct": "mistralai/Ministral-3-8B-Instruct-2512-BF16",
+         "reasoning": "mistralai/Ministral-3-8B-Reasoning-2512"
+     },
+     "3B": {
+         "instruct": "mistralai/Ministral-3-3B-Instruct-2512-BF16",
+         "reasoning": "mistralai/Ministral-3-3B-Reasoning-2512"
+     }
+ }
+
+ DEFAULT_MODEL_SIZE = "14B"
+
+ def markdown_config():
+     return ChatbotMarkdownConfig()
+
+ def user_config(disabled_actions=None):
+     return ChatbotUserConfig(
+         class_names=dict(content="user-message-content"),
+         actions=[
+             "copy", "edit",
+             ChatbotActionConfig(
+                 action="delete",
+                 popconfirm=dict(title="Delete the message",
+                                 description="Are you sure you want to delete this message?",
+                                 okButtonProps=dict(danger=True)))
+         ],
+         disabled_actions=disabled_actions)
+
+ def bot_config(disabled_actions=None):
+     return ChatbotBotConfig(actions=[
+         "copy", "edit",
+         ChatbotActionConfig(
+             action="retry",
+             popconfirm=dict(
+                 title="Regenerate the message",
+                 description="Regenerating this message will also delete all subsequent messages.",
+                 okButtonProps=dict(danger=True))),
+         ChatbotActionConfig(action="delete",
+                             popconfirm=dict(
+                                 title="Delete the message",
+                                 description="Are you sure you want to delete this message?",
+                                 okButtonProps=dict(danger=True)))
+     ],
+         avatar="./assets/m-boxed-rainbow.png",
+         disabled_actions=disabled_actions)
+
+ def welcome_config():
+     return ChatbotWelcomeConfig(
+         variant="borderless",
+         icon="./assets/m-boxed-rainbow.png",
+         title="",
+         description="Enter text and upload images or videos to get started.",
+         prompts=dict(
+             title="How can I help you today?",
+             styles={
+                 "list": {
+                     "width": '100%',
+                 },
+                 "item": {
+                     "flex": 1,
+                 },
+             },
+             items=get_example_prompts()),
+     )
+
+ def upload_config():
+     return MultimodalInputUploadConfig(
+         accept="image/*,video/*",
+         placeholder={
+             "inline": {
+                 "title": "Upload files",
+                 "description": "Click or drag files to this area to upload images or videos"
+             },
+             "drop": {
+                 "title": "Drop files here",
+             }
+         })
+
+ DEFAULT_SYS_PROMPT = "You are a helpful and harmless assistant."
+
+ LIGHT_THEME = {
+     "token": {
+         "colorPrimary": "#FF8205",
+         "colorSuccess": "#FFAF00",
+         "colorWarning": "#FFD800",
+         "colorError": "#E10500",
+         "colorInfo": "#FA500F",
+         "colorBgLayout": "#FFFAEB",
+         "colorBgContainer": "#FFF0C3",
+         "colorBgElevated": "#FFFAEB",
+         "colorBorder": "#E9E2CB",
+         "colorText": "#000000",
+         "colorTextSecondary": "#1E1E1E",
+         "borderRadius": 8,
+     },
+     "algorithm": "default"
+ }
+
+ DARK_THEME = {
+     "token": {
+         "colorPrimary": "#FF8205",
+         "colorSuccess": "#FFAF00",
+         "colorWarning": "#FFD800",
+         "colorError": "#E10500",
+         "colorInfo": "#FA500F",
+         "colorBgLayout": "#000000",
+         "colorBgContainer": "#1E1E1E",
+         "colorBgElevated": "#2a2a2a",
+         "colorBorder": "#3a3a3a",
+         "colorText": "#FFFAEB",
+         "colorTextSecondary": "#E9E2CB",
+         "borderRadius": 8,
+     },
+     "algorithm": "dark"
+ }
+
+ DEFAULT_THEME = LIGHT_THEME
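For reference, app.py's submit() resolves a checkpoint from this table by model size and thinking mode; a short sketch of that lookup:

    # How submit() in app.py picks a repo id from MINISTRAL_MODELS.
    from config import MINISTRAL_MODELS

    enable_thinking = True
    model_size = "8B"
    model_type = "reasoning" if enable_thinking else "instruct"
    print(MINISTRAL_MODELS[model_size][model_type])
    # -> mistralai/Ministral-3-8B-Reasoning-2512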
example_prompts.py ADDED
@@ -0,0 +1,26 @@
+ def get_example_prompts():
+     return [{
+         "label": "🖼️ Vision & Reasoning",
+         "children": [{
+             "urls": ["./assets/pokemon_battle.jpg"],
+             "description": "What action do you think I should take in this situation? List all the possible actions and explain why you think they are good or bad."
+         }, {
+             "urls": ["https://math-coaching.com/img/fiche/46/expressions-mathematiques.jpg"],
+             "description": "Using your calculator, compute the results of the equations involving the numbers displayed in the image."
+         }, {
+             "urls": ["https://i.ytimg.com/vi/5Y3xLHeyKZU/hqdefault.jpg"],
+             "description": "Solve the equations. If they contain only numbers, use your calculator; otherwise, reason through them. Answer in the language of the image."
+         }]
+     }, {
+         "label": "📝 Instruction Following",
+         "children": [
+             {
+                 "urls": [],
+                 "description": "Write me a sentence where every word starts with the next letter in the alphabet - start with 'a' and end with 'z'."
+             },
+             {
+                 "urls": [],
+                 "description": "Use each number in 2,5,6,3 exactly once, along with any combination of +, -, ×, ÷ (and parentheses for grouping), to make the number 24."
+             },
+         ]
+     }]
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ gradio
+ modelscope_studio
+ torch
+ accelerate
+ huggingface_hub
+ spaces
+ git+https://github.com/huggingface/transformers.git@refs/pull/42498/head
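The last line pins transformers to a pull-request build (PR #42498), which is what provides the Mistral3 model class. A quick way to verify the installed build actually ships it, mirroring the import guard at the top of app.py:

    # Mirrors the try/except guard in app.py: checks that the pinned
    # transformers build exposes the Ministral-3 model class.
    try:
        from transformers import Mistral3ForConditionalGeneration
        print("Mistral3 support available")
    except ImportError:
        print("Mistral3 class missing - app.py would fall back to demo mode")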
ui_components/__pycache__/thinking_button.cpython-313.pyc ADDED
Binary file (1.72 kB)
ui_components/logo.py ADDED
@@ -0,0 +1,22 @@
+ import modelscope_studio.components.antd as antd
+ import modelscope_studio.components.base as ms
+
+
+ def Logo(model_display=None):
+     """Logo component with optional model display"""
+     with antd.Flex(vertical=True, gap="small", align="center", elem_style=dict(padding=8)):
+         with antd.Typography.Title(level=1,
+                                    elem_style=dict(fontSize=24,
+                                                    margin=0)):
+             with antd.Flex(align="center", gap="small", justify="center"):
+                 antd.Image('./assets/m-boxed-rainbow.png',
+                            preview=False,
+                            alt="logo",
+                            width=24,
+                            height=24)
+                 ms.Span("Ministrals Demo")
+
+         # Display model name if provided
+         if model_display is not None:
+             with antd.Flex(justify="center"):
+                 model_display
ui_components/thinking_button.py ADDED
@@ -0,0 +1,26 @@
+ import modelscope_studio.components.antd as antd
+ import modelscope_studio.components.base as ms
+ import gradio as gr
+
+
+ def ThinkingButton():
+     # Holds whether thinking (reasoning) mode is enabled; returned to the caller.
+     state = gr.State({"enable_thinking": False})
+     with antd.Button("Thinking",
+                      shape="round",
+                      color="primary") as thinking_btn:
+         with ms.Slot("icon"):
+             antd.Icon("SunOutlined")
+
+     def toggle_thinking(state_value):
+         state_value["enable_thinking"] = not state_value["enable_thinking"]
+         return gr.update(value=state_value)
+
+     def apply_state_change(state_value):
+         # Fill the button when thinking mode is on, outline it when off.
+         return gr.update(
+             variant="solid" if state_value["enable_thinking"] else "")
+
+     state.change(fn=apply_state_change, inputs=[state], outputs=[thinking_btn])
+
+     thinking_btn.click(fn=toggle_thinking, inputs=[state], outputs=[state])
+
+     return state
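A minimal usage sketch of this component (hypothetical host app; the committed app.py inlines the same button logic instead of importing it). It assumes, as elsewhere in this repo, that antd components must sit inside an ms.Application context:

    # Hypothetical host app: reads ThinkingButton()'s state in a handler.
    import gradio as gr
    import modelscope_studio.components.base as ms
    from ui_components.thinking_button import ThinkingButton

    with gr.Blocks() as demo:
        with ms.Application():
            thinking_state = ThinkingButton()  # gr.State({"enable_thinking": bool})

        out = gr.Textbox(label="mode")

        def report(state_value):
            return "thinking on" if state_value["enable_thinking"] else "thinking off"

        thinking_state.change(fn=report, inputs=[thinking_state], outputs=[out])

    demo.launch()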