Update app.py
app.py
CHANGED
@@ -86,23 +86,20 @@ class AdvancedMemorySystem:
         self.embedding_model = embedding_model
         self.vector_store = vector_store

         self.working_memory = []

         self.short_term_memory = []

-        self.semantic_clusters = defaultdict(list)  # {cluster_id: [memories]}
+        self.semantic_clusters = defaultdict(list)

-        # Emotional memory graph
         self.emotional_memory = {
             "emotion_transitions": [],
             "trigger_patterns": defaultdict(list),
             "coping_effectiveness": {}
         }

-        # Meta-cognitive tracking
         self.conversation_themes = []
         self.user_model = {
             "communication_style": None,
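The constructor lays out a tiered design: a small working-memory list for the immediate exchange, a short-term list whose items carry decaying importance, defaultdict-backed semantic clusters as long-term storage, plus side structures for emotional patterns and a user model. A sketch of one short-term record, with fields inferred from how memories are accessed in the hunks below:

    # Inferred shape of a short-term memory record; every key below is
    # read somewhere later in this diff.
    memory = {
        "text": "User: ...\nKenko: ...",
        "embedding": embedding_model.encode("User: ...\nKenko: ..."),
        "importance": 0.7,          # from calculate_importance, decays over time
        "timestamp": 1700000000.0,  # seconds; the decay step converts to minutes
        "emotion": "sad",
    }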
@@ -113,18 +110,18 @@ class AdvancedMemorySystem:

     def calculate_importance(self, text, emotion, user_engagement):
         """Calculate memory importance using multiple factors"""
         importance = 0.5

         high_intensity_emotions = ["fear", "angry", "sad", "surprise"]
         if emotion in high_intensity_emotions:
             importance += 0.3

         if len(text.split()) > 30:
             importance += 0.2

         therapeutic_keywords = [
             "trauma", "suicide", "self-harm", "abuse", "panic",
             "breakthrough", "progress", "better", "worse", "relationship"
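The scoring is additive over independent heuristics: a 0.5 baseline, +0.3 for high-intensity emotions, +0.2 for long messages, plus a keyword bonus whose weight falls outside this hunk. A minimal sketch of how the visible factors compose (the 0.1 keyword bonus and the final clamp are assumptions, not shown in the diff):

    def importance_sketch(text: str, emotion: str) -> float:
        importance = 0.5  # baseline
        if emotion in {"fear", "angry", "sad", "surprise"}:
            importance += 0.3  # high-intensity emotion
        if len(text.split()) > 30:
            importance += 0.2  # long, detailed message
        keywords = {"trauma", "suicide", "self-harm", "abuse", "panic"}
        if any(k in text.lower() for k in keywords):
            importance += 0.1  # assumed keyword bonus; actual weight not in this hunk
        return min(importance, 1.0)  # assumed clamp

    print(importance_sketch("I had a panic attack again", "fear"))  # 0.9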
@@ -143,9 +140,8 @@ class AdvancedMemorySystem:
             "timestamp": timestamp
         })

         if len(self.working_memory) > 5:
-            # Consolidate oldest to short-term before removing
             oldest = self.working_memory.pop(0)
             self._consolidate_to_short_term(oldest)

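Working memory is a FIFO capped at five exchanges; overflow is consolidated into short-term memory rather than discarded. A toy illustration of that flow, with the consolidation call stubbed out:

    working_memory, short_term_memory = [], []
    for i in range(7):
        working_memory.append({"user": f"msg {i}", "bot": f"reply {i}"})
        if len(working_memory) > 5:
            # stand-in for _consolidate_to_short_term(oldest)
            short_term_memory.append(working_memory.pop(0))
    print(len(working_memory), len(short_term_memory))  # 5 2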
@@ -154,7 +150,6 @@ class AdvancedMemorySystem:
         text = f"User: {memory_item['user']}\nKenko: {memory_item['bot']}"
         embedding = self.embedding_model.encode(text)

-        # Calculate importance
         importance = self.calculate_importance(
             memory_item['user'],
             memory_item['emotion'],
@@ -169,7 +164,6 @@ class AdvancedMemorySystem:
             "emotion": memory_item['emotion']
         })

-        # Add to vector store with importance weighting
         try:
             self.vector_store.add_texts(
                 texts=[text],
@@ -180,31 +174,27 @@ class AdvancedMemorySystem:

     def apply_temporal_decay(self, current_time):
         """Apply decay to short-term memories over time"""
         decay_rate = 0.01

         for memory in self.short_term_memory:
             time_elapsed = (current_time - memory['timestamp']) / 60
             decay_factor = np.exp(-decay_rate * time_elapsed)
             memory['importance'] *= decay_factor

-            # If importance drops below threshold, cluster into long-term
             if memory['importance'] < 0.15:
                 self._consolidate_to_long_term(memory)

     def _consolidate_to_long_term(self, memory):
         """Cluster similar memories into semantic long-term memory"""
-        # Get embeddings of all long-term memories
         if not self.semantic_clusters:
             self.semantic_clusters[0] = [memory]
             self.short_term_memory.remove(memory)
             return

-        # Find semantic cluster using cosine similarity
         best_cluster = 0
         best_similarity = -1

         for cluster_id, cluster_memories in self.semantic_clusters.items():
-            # Compare with cluster centroid
             cluster_embeddings = [m['embedding'] for m in cluster_memories]
             centroid = np.mean(cluster_embeddings, axis=0)

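The decay is exponential in minutes. With decay_rate = 0.01, importance halves every ln(2)/0.01, about 69 minutes, and an item starting at 1.0 crosses the 0.15 consolidation threshold after roughly ln(1/0.15)/0.01, about 190 minutes. A quick check:

    import numpy as np

    decay_rate = 0.01                                     # per minute, as in the diff
    half_life = np.log(2) / decay_rate                    # ~69.3 minutes
    minutes_to_threshold = np.log(1 / 0.15) / decay_rate  # ~189.7 minutes from 1.0
    print(round(half_life, 1), round(minutes_to_threshold, 1))

One caveat worth flagging: _consolidate_to_long_term removes items from self.short_term_memory while apply_temporal_decay is iterating over that same list, which can skip elements; iterating over a copy (for memory in list(self.short_term_memory):) would avoid this.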
@@ -216,14 +206,12 @@ class AdvancedMemorySystem:
                 best_similarity = similarity
                 best_cluster = cluster_id

-        # Add to cluster if similar enough, else create new cluster
         if best_similarity > 0.7:
             self.semantic_clusters[best_cluster].append(memory)
         else:
             new_cluster_id = max(self.semantic_clusters.keys()) + 1
             self.semantic_clusters[new_cluster_id] = [memory]

-        # Remove from short-term
         if memory in self.short_term_memory:
             self.short_term_memory.remove(memory)

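Cluster assignment is a greedy nearest-centroid match with a 0.7 cosine cutoff: join the closest cluster if it is similar enough, otherwise open a new one. A self-contained sketch of that decision (the cosine helper is mine; the diff inlines the same formula):

    import numpy as np

    def cosine(a, b):
        return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

    clusters = {0: [np.array([1.0, 0.0])], 1: [np.array([0.0, 1.0])]}
    new_vec = np.array([0.9, 0.1])

    best_id = max(clusters, key=lambda cid: cosine(new_vec, np.mean(clusters[cid], axis=0)))
    if cosine(new_vec, np.mean(clusters[best_id], axis=0)) > 0.7:
        clusters[best_id].append(new_vec)        # joins cluster 0 here (cosine ~0.99)
    else:
        clusters[max(clusters) + 1] = [new_vec]  # would open a new cluster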
@@ -236,7 +224,6 @@ class AdvancedMemorySystem:
             "timestamp": time.time()
         })

-        # Analyze if certain topics trigger emotional shifts
         if prev_emotion != current_emotion:
             self.emotional_memory["trigger_patterns"][current_emotion].append(context)

@@ -245,10 +232,9 @@ class AdvancedMemorySystem:
         if len(self.short_term_memory) < 3:
             return []

         all_text = " ".join([m['text'] for m in self.short_term_memory])

-        # Extract key phrases
         words = all_text.lower().split()
         word_freq = defaultdict(int)

@@ -257,7 +243,7 @@ class AdvancedMemorySystem:
             if word not in stopwords and len(word) > 4:
                 word_freq[word] += 1

         themes = sorted(word_freq.items(), key=lambda x: x[1], reverse=True)[:5]
         self.conversation_themes = [theme[0] for theme in themes]

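Theme detection is a plain frequency count over stopword-filtered words longer than four characters, keeping the top five. For example (stopword set abbreviated here; the real one is defined just above this hunk):

    from collections import defaultdict

    stopwords = {"about", "after", "because", "maybe", "still"}  # abbreviated
    text = "feeling anxious about work deadlines and anxious about sleep"
    word_freq = defaultdict(int)
    for word in text.lower().split():
        if word not in stopwords and len(word) > 4:
            word_freq[word] += 1
    themes = [w for w, _ in sorted(word_freq.items(), key=lambda x: x[1], reverse=True)[:5]]
    print(themes)  # ['anxious', 'feeling', 'deadlines', 'sleep']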
@@ -273,34 +259,28 @@ class AdvancedMemorySystem:
             "themes": []
         }

         context["working"] = self.working_memory[-3:]

         if self.short_term_memory:
             query_embedding = self.embedding_model.encode(query)
             scored_memories = []
             for memory in self.short_term_memory:
                 similarity = np.dot(query_embedding, memory['embedding']) / (
                     np.linalg.norm(query_embedding) * np.linalg.norm(memory['embedding'])
                 )

-                # Boost by importance score
                 final_score = similarity * memory['importance']
-                # Emotional congruence bonus
                 if memory['emotion'] == current_emotion:
                     final_score *= 1.2

                 scored_memories.append((final_score, memory))

-            # Get top 3 short-term memories
             scored_memories.sort(reverse=True, key=lambda x: x[0])
             context["short_term"] = [m[1] for m in scored_memories[:3]]

-        # 3. Long-term memory (semantic clusters)
         if self.semantic_clusters:
             query_embedding = self.embedding_model.encode(query)
             best_cluster_id = None
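Short-term retrieval ranks by cosine similarity weighted by decayed importance, with a 1.2x multiplier when the memory's emotion matches the current one. A worked example with illustrative numbers:

    # score = cosine_similarity * importance, then *1.2 on emotional congruence
    congruent   = 0.70 * 0.60 * 1.2   # = 0.504
    incongruent = 0.80 * 0.60         # = 0.480
    # The emotionally congruent memory outranks the more similar but
    # incongruent one, which is the intended effect of the bonus.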
@@ -319,23 +299,19 @@ class AdvancedMemorySystem:
                 best_cluster_id = cluster_id

         if best_cluster_id is not None and best_cluster_score > 0.6:
-            # Return summary of cluster
             cluster = self.semantic_clusters[best_cluster_id]
             context["long_term"] = cluster[:2]

-        # 4. Emotional memory patterns
         if current_emotion in self.emotional_memory["trigger_patterns"]:
             triggers = self.emotional_memory["trigger_patterns"][current_emotion]
             context["emotional"] = triggers[-2:]

-        # 5. Conversation themes
         context["themes"] = self.analyze_conversation_themes()

         return context

     def update_user_model(self, message, emotion):
         """Build a psychological profile of the user over time"""
-        # Communication style detection
         if len(message.split()) > 50:
             style = "detailed"
         elif len(message.split()) < 10:
@@ -345,7 +321,6 @@ class AdvancedMemorySystem:

         self.user_model["communication_style"] = style

-        # Track recurring concerns
         concern_keywords = {
             "anxiety": ["anxious", "worried", "panic", "nervous", "anxiety"],
             "depression": ["sad", "depressed", "hopeless", "empty", "depression"],
@@ -362,7 +337,6 @@ class AdvancedMemorySystem:
         """Format retrieved memories into prompt context"""
         context_parts = []

-        # Working memory (recent conversation)
         if contextual_memory["working"]:
             recent = "\n".join([
                 f"User: {m['user']}\nKenko: {m['bot']}"
@@ -370,27 +344,22 @@ class AdvancedMemorySystem:
             ])
             context_parts.append(f"### Recent Conversation:\n{recent}")

-        # Important short-term memories
         if contextual_memory["short_term"]:
             important = "\n".join([m['text'] for m in contextual_memory["short_term"]])
             context_parts.append(f"### Important Recent Context:\n{important}")

-        # Long-term thematic memories
         if contextual_memory["long_term"]:
             longterm = "\n".join([m['text'] for m in contextual_memory["long_term"]])
             context_parts.append(f"### Related Past Discussions:\n{longterm}")

-        # Emotional patterns
         if contextual_memory["emotional"]:
             emotional = ", ".join(contextual_memory["emotional"][:3])
             context_parts.append(f"### Emotional Pattern: Previously triggered by: {emotional}")

-        # Conversation themes
         if contextual_memory["themes"]:
             themes = ", ".join(contextual_memory["themes"])
             context_parts.append(f"### Session Themes: {themes}")

-        # User model insights
         if self.user_model["recurring_concerns"]:
             concerns = ", ".join(self.user_model["recurring_concerns"])
             context_parts.append(f"### Recurring Concerns: {concerns}")
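Each populated memory tier becomes one "###"-headed section in context_parts, so the assembled prompt context (presumably joined with newlines outside this hunk) reads something like this hypothetical output:

    ### Recent Conversation:
    User: I couldn't sleep again last night.
    Kenko: That sounds exhausting. What was on your mind?
    ### Session Themes: anxious, feeling, deadlines, sleep
    ### Recurring Concerns: anxiety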
@@ -416,12 +385,10 @@ class AdvancedMemorySystem:
 }


-# Initialize the advanced memory system
 print("🔄 Initializing Advanced Memory System...")
 advanced_memory = AdvancedMemorySystem(embedding_model, global_vector_store)
 print("✅ Advanced Memory System initialized!")

-# Track previous emotion for transition analysis
 previous_emotion = "neutral"

 def update_emotion_status():
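Here embedding_model and global_vector_store are defined earlier in app.py, outside the hunks shown. Of the class's public surface, only calculate_importance and apply_temporal_decay are visible in this diff; a hypothetical call against those confirmed signatures:

    import time

    # Both calls match signatures visible in this diff; the engagement value is made up.
    score = advanced_memory.calculate_importance("I had a panic attack again", "fear", 0.5)
    advanced_memory.apply_temporal_decay(time.time())  # decays and consolidates cold memories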
@@ -497,7 +464,6 @@ def chat_with_kenko(message, history):

     emotion_context = get_emotion_context()

-    # Removed the call to the undefined get_threat_context() function

     prompt = f"""### Instruction:
 You are Kenko, a compassionate mental health therapist. Provide empathetic, helpful, and professional responses to support the user's mental wellbeing.
@@ -558,7 +524,6 @@ def generate_tts(text):
         traceback.print_exc()
         return None

-# Custom CSS for a calming interface
 css = """
 .gradio-container {
     font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;