IniNLP247 committed
Commit 7bfb3bc · verified · 1 Parent(s): 80c9092

Update app.py

Files changed (1)
  1. app.py +9 -8
app.py CHANGED
```diff
@@ -1,4 +1,5 @@
 #INFERENCE NLP+EMOTION DETECTION CV+TTS+Memory Management
+#INFERENCE NLP+EMOTION DETECTION CV+TTS+Memory Management
 import spaces
 
 import gradio as gr
```
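The `import spaces` at the top is the Hugging Face ZeroGPU helper. A minimal sketch of the usual pattern, assuming the Space reserves a GPU per call (the decorated function below is illustrative, not taken from this file):

```python
import spaces

@spaces.GPU  # reserve an on-demand ZeroGPU slice for the duration of the call
def run_inference(prompt: str) -> str:
    # illustrative stand-in for the app's real model call
    return f"(model output for {prompt!r})"
```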
```diff
@@ -466,7 +467,7 @@ def analyze_emotion(image):
 
     output = {}
     for emotion, score in sorted(emotions.items(), key=lambda x: x[1], reverse=True):
-        output[emotion.capitalize()] = score
+        output[emotion.capitalize()] = score
 
     return output
 
```
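For context on the hunk above: the loop turns raw detector scores into the `{label: confidence}` mapping that a `gr.Label` output renders, highest score first. A minimal sketch with illustrative values (the `emotions` dict stands in for whatever the detector returns):

```python
emotions = {"happy": 0.72, "neutral": 0.19, "sad": 0.09}  # illustrative scores

output = {}
for emotion, score in sorted(emotions.items(), key=lambda x: x[1], reverse=True):
    output[emotion.capitalize()] = score

print(output)  # {'Happy': 0.72, 'Neutral': 0.19, 'Sad': 0.09}
```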
```diff
@@ -497,11 +498,11 @@ def chat_with_kenko(message, history):
 
     emotion_context = get_emotion_context()
 
-    threat_context = get_threat_context()
+    # Removed the call to the undefined get_threat_context() function
 
     prompt = f"""### Instruction:
 You are Kenko, a compassionate mental health therapist. Provide empathetic, helpful, and professional responses to support the user's mental wellbeing.
-{emotion_context}{threat_context}
+{emotion_context}
 {conversation}User: {message}
 ### Response:
 """
```
```diff
@@ -597,7 +598,7 @@ with gr.Blocks(
         audio_output = gr.Audio(
             label="Kenko's Voice Response",
             autoplay=True,
-            show_label=True
+            show_label=True
         )
 
         with gr.Row():
```
```diff
@@ -632,7 +633,7 @@ with gr.Blocks(
 
         emotion_status = gr.Markdown("*Waiting for emotion data...*")
 
-
+
         with gr.Row(visible=False) as examples_row:
             gr.Examples(
                 examples=[
```
```diff
@@ -707,8 +708,8 @@ with gr.Blocks(
         analyze_emotion,
         inputs=webcam_input,
         outputs=emotion_output,
-        stream_every=1,
-        time_limit=60
+        stream_every=1,
+        time_limit=60
     )
 
     timer = gr.Timer(value=5)
```
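The two restored keyword arguments belong to Gradio's streaming event: `stream_every` controls how often a webcam frame is sent to the handler, and `time_limit` caps how long one streaming session runs. A hedged sketch of the wiring, assuming the call being patched is `webcam_input.stream(...)` and that `emotion_output` is a `gr.Label` (the layout is guessed; the argument values come from the diff):

```python
import gradio as gr

def analyze_emotion(image):
    # stand-in for the real detector; gr.Label expects {label: confidence}
    return {"Neutral": 1.0} if image is not None else {}

with gr.Blocks() as demo:
    webcam_input = gr.Image(sources=["webcam"], streaming=True)
    emotion_output = gr.Label(label="Detected Emotion")

    webcam_input.stream(
        analyze_emotion,
        inputs=webcam_input,
        outputs=emotion_output,
        stream_every=1,   # send roughly one frame per second
        time_limit=60,    # stop the stream after 60 seconds per session
    )
```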
```diff
@@ -724,6 +725,6 @@ with gr.Blocks(
 if __name__ == "__main__":
     print("🚀 Starting Kenko Mental Health Assistant with Emotion Detection...")
     demo.launch(
-        share=True,
+        share=True,
         show_error=True
     )
```
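The `gr.Timer(value=5)` from the earlier hunk fires a tick every five seconds, the usual way to poll a status widget, while `share=True` requests a temporary public link and `show_error=True` surfaces exceptions in the UI. A sketch of that pattern (the tick callback is illustrative, not this repo's code):

```python
import gradio as gr
import time

with gr.Blocks() as demo:
    emotion_status = gr.Markdown("*Waiting for emotion data...*")
    timer = gr.Timer(value=5)  # tick every 5 seconds
    timer.tick(
        lambda: f"*Last emotion refresh: {time.strftime('%H:%M:%S')}*",
        outputs=emotion_status,
    )

if __name__ == "__main__":
    demo.launch(share=True, show_error=True)
```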
 