Update app.py
app.py CHANGED
@@ -29,7 +29,7 @@ class XylariaChat:
             raise ValueError("HuggingFace token not found in environment variables")

         self.client = InferenceClient(
-            model="Qwen/
+            model="Qwen/Qwen-32B-Preview",
             api_key=self.hf_token
         )

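Note: for reference, the updated constructor call as a self-contained sketch. It assumes the huggingface_hub InferenceClient import and an HF_TOKEN environment variable, neither of which is visible in this hunk; the model id is the one this commit introduces.

import os

from huggingface_hub import InferenceClient  # assumed import, not shown in the hunk

hf_token = os.getenv("HF_TOKEN")  # assumed token source, mirroring the error message above
if not hf_token:
    raise ValueError("HuggingFace token not found in environment variables")

client = InferenceClient(
    model="Qwen/Qwen-32B-Preview",  # model id introduced by this commit
    api_key=hf_token,
)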
@@ -395,7 +395,7 @@ class XylariaChat:

         try:
             self.client = InferenceClient(
-                model="Qwen/
+                model="Qwen/Qwen-32B-Preview",
                 api_key=self.hf_token
             )
         except Exception as e:
@@ -524,7 +524,7 @@ class XylariaChat:

             stream = self.client.chat_completion(
                 messages=messages,
-                model="Qwen/
+                model="Qwen/Qwen-32B-Preview",
                 temperature=0.7,
                 max_tokens=max_new_tokens,
                 top_p=0.9,
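Note: the stream variable suggests this call is consumed incrementally. A minimal sketch of reading such a stream, assuming stream=True is passed in the part of the call not shown here and that chunks follow huggingface_hub's OpenAI-style shape (choices[0].delta.content); the messages list and max_tokens value are placeholders.

# Hypothetical consumption loop; stream=True and the chunk attributes are
# assumptions based on huggingface_hub's chat_completion streaming API.
stream = client.chat_completion(
    messages=[{"role": "user", "content": "Hello"}],  # placeholder messages
    model="Qwen/Qwen-32B-Preview",
    temperature=0.7,
    max_tokens=256,  # placeholder for max_new_tokens
    top_p=0.9,
    stream=True,
)
for chunk in stream:
    delta = chunk.choices[0].delta.content  # incremental text, may be None
    if delta:
        print(delta, end="", flush=True)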
@@ -614,40 +614,12 @@ class XylariaChat:
             raise ValueError("Invalid chat index")

     def create_interface(self):
-        loading_svg = """<svg width="256" height="256" viewBox="0 0 256 256" xmlns="http://www.w3.org/2000/svg">
-        <style>
-            rect {
-                animation: fillAnimation 3s ease-in-out infinite;
-            }
-            @keyframes fillAnimation {
-                0% { fill: #626262; }
-                50% { fill: #111111; }
-                100% { fill: #626262; }
-            }
-            text {
-                font-family: 'Helvetica Neue', Arial, sans-serif; /* Choose a good font */
-                font-weight: 300; /* Slightly lighter font weight */
-                text-shadow: 0px 2px 4px rgba(0, 0, 0, 0.4); /* Subtle shadow */
-            }
-        </style>
-        <rect width="256" height="256" rx="20" fill="#888888" />
-        <text x="50%" y="50%" dominant-baseline="middle" text-anchor="middle" font-size="24" fill="white" opacity="0.8">
-            <tspan>creating your image</tspan>
-            <tspan x="50%" dy="1.2em">with xylaria iris</tspan>
-        </text>
-        </svg>"""
-
         def streaming_response(message, chat_history, image_filepath, math_ocr_image_path):
-            if message.strip().lower()
-                # Extract image generation prompt from the
-                image_prompt = ""
-                for i in range(len(chat_history) - 1, -1, -1):
-                    if chat_history[i][0] and chat_history[i][0].strip().lower() != "/image":
-                        image_prompt = chat_history[i][0]
-                        break
-
+            if message.strip().lower().startswith("/image"):
+                # Extract image generation prompt from the message
+                image_prompt = message.strip().lower()[len("/image"):].strip()
                 if not image_prompt:
-                    image_prompt = "A realistic image"
+                    image_prompt = "A realistic image"  # Default prompt

                 # Generate image based on the extracted prompt
                 image_bytes = self.generate_image(image_prompt)
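Note: the behavioral change in this hunk is that the image prompt is no longer recovered by scanning chat_history backwards for the last non-"/image" user turn; it is now sliced directly off the command itself. The same parsing as a standalone sketch (extract_image_prompt is a hypothetical helper name, not in the app):

def extract_image_prompt(message):
    """Hypothetical helper mirroring the new /image parsing above."""
    text = message.strip().lower()
    if not text.startswith("/image"):
        return None  # not an image command
    prompt = text[len("/image"):].strip()
    return prompt or "A realistic image"  # default prompt, as in the diff

# extract_image_prompt("/image A cat in the snow") -> "a cat in the snow"
# extract_image_prompt("/image")                   -> "A realistic image"

One side effect worth noting: because lower() is applied to the whole message before slicing, the extracted prompt itself reaches generate_image lowercased.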
@@ -655,9 +627,9 @@ class XylariaChat:
                 if isinstance(image_bytes, bytes):
                     base64_image = base64.b64encode(image_bytes).decode("utf-8")
                     image_html = f'<img src="data:image/png;base64,{base64_image}" alt="Generated Image" style="max-width: 100%; max-height: 400px;">'
-
+
                     # Append the /image command and generated image to chat history
-                    chat_history.append([message, ""])
+                    chat_history.append([message, ""])
                     chat_history.append(["", image_html])

                     # Update conversation history
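Note: the inline rendering above avoids temporary files by embedding the PNG bytes as a base64 data URI in an <img> tag, which the Gradio chatbot can display as HTML. The same logic as a small helper (image_to_html is a hypothetical name):

import base64

def image_to_html(image_bytes):
    # Encode the PNG bytes as a base64 data URI so the chatbot can
    # render the image inline, exactly as in the hunk above.
    b64 = base64.b64encode(image_bytes).decode("utf-8")
    return (
        f'<img src="data:image/png;base64,{b64}" '
        f'alt="Generated Image" style="max-width: 100%; max-height: 400px;">'
    )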
@@ -668,13 +640,13 @@ class XylariaChat:
                     self.save_chat()
                     all_chats = self.load_all_chats()
                     chat_titles = [f"{chat['timestamp']}: {chat['conversation'][0]['content'][:30]}..." if len(chat['conversation']) > 0 and chat['conversation'][0]['content'] else f"{chat['timestamp']}: Empty Chat" for chat in all_chats]
-
+
                     yield "", chat_history, None, None, gr.update(choices=chat_titles, visible=True)
                 else:
                     chat_history.append([message, image_bytes])
                     yield "", chat_history, None, None, None
                 return
-
+
             ocr_text = ""
             if math_ocr_image_path:
                 ocr_text = self.perform_math_ocr(math_ocr_image_path)