Update app.py
app.py CHANGED
@@ -328,7 +328,7 @@ def process_summary_with_openai(summary):
         return final_summary
     except Exception as e:
         return str(e)
-
+
 def process_and_query(input_language=None, audio_input=None, image_input=None, text_input=None):
     try:
         # Initialize the combined text
@@ -341,6 +341,7 @@ def process_and_query(input_language=None, audio_input=None, image_input=None, text_input=None):
         # Process audio input
         if audio_input is not None:
             audio_text = process_speech(input_language, audio_input)
+            print("Audio Text:", audio_text) # Debug print
             combined_text += "\n" + audio_text
 
         # Check if only an image is provided without text
@@ -349,28 +350,33 @@ def process_and_query(input_language=None, audio_input=None, image_input=None, text_input=None):
 
         # Process image input
         if image_input is not None:
-            # Use the current combined text (which includes the processed text input) for image processing
             image_text = process_image(image_input, combined_text)
+            print("Image Text:", image_text) # Debug print
             combined_text += "\n" + image_text
 
         # Use the text to query Vectara
         vectara_response_json = query_vectara(combined_text)
+        print("Vectara Response:", vectara_response_json) # Debug print
 
         # Convert the Vectara response to Markdown
         markdown_output = convert_to_markdown(vectara_response_json)
 
         # Process the summary with OpenAI
         final_response = process_summary_with_openai(markdown_output)
+        print("Final Response:", final_response) # Debug print
 
         # Evaluate hallucination
         hallucination_label = evaluate_hallucination(final_response, markdown_output)
+        print("Hallucination Label:", hallucination_label) # Debug print
 
         return final_response, hallucination_label
 
     except Exception as e:
+        print("Exception:", e) # Debug print
         return str(e), "Error in processing"
 
 
+
 welcome_message = """
 # 👋🏻Welcome to ⚕🗣️😷MultiMed - Access Chat ⚕🗣️😷
 ### How To Use ⚕🗣️😷MultiMed⚕:
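
The change is pure instrumentation: every stage of `process_and_query` (speech, image, Vectara query, OpenAI summary, hallucination check) now echoes its intermediate result with a `print(...) # Debug print`, so the cause of the Space's runtime error can be read off the container logs. A minimal sketch of the same trace points routed through Python's standard `logging` module instead of bare `print` calls; the logger name and the `_trace` helper below are illustrative assumptions, not part of app.py:

```python
# Hypothetical sketch, not part of this commit: the commit's print-based
# trace points rewritten with the standard logging module, so the output
# can be silenced or redirected by log level rather than a code change.
import logging

logging.basicConfig(level=logging.DEBUG,
                    format="%(asctime)s %(levelname)s %(name)s: %(message)s")
logger = logging.getLogger("multimed.pipeline")  # assumed logger name

def _trace(label, value):
    # Same shape as the commit's print("<Label>:", value) # Debug print
    logger.debug("%s: %s", label, value)

# Inside process_and_query, each debug print would become, e.g.:
#     _trace("Audio Text", audio_text)
#     _trace("Vectara Response", vectara_response_json)
#     _trace("Final Response", final_response)
```

Either form ends up in the same place on a Hugging Face Space: stdout and logging output both land in the container logs shown on the Space page, so the plain `print` calls in the commit are a reasonable first pass for chasing the runtime error.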