"""
Emotion Detection App - Hugging Face Spaces

Multi-label emotion classification using DeBERTa-v3-base
"""

import gradio as gr
import torch
import torch.nn as nn
from transformers import AutoTokenizer, AutoModel
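
# --- Configuration ---
# EMOTION_LABELS must match the label order used when the checkpoint was trained,
# since each output logit is tied to a position in this list.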
MODEL_NAME = "microsoft/deberta-v3-base"
EMOTION_LABELS = ['anger', 'fear', 'joy', 'sadness', 'surprise']
MAX_LENGTH = 160
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
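

# --- Model ---
# DeBERTa-v3 encoder with a dropout layer and a single linear head that emits
# one logit per emotion (multi-label setup, no softmax across labels).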
class EmotionClassifier(nn.Module):
    """DeBERTa-v3-base for multi-label emotion classification"""

    def __init__(self, model_name, num_labels=5, dropout=0.1):
        super().__init__()
        self.encoder = AutoModel.from_pretrained(model_name)
        hidden_size = self.encoder.config.hidden_size
        self.dropout = nn.Dropout(dropout)
        self.classifier = nn.Linear(hidden_size, num_labels)

    def forward(self, input_ids, attention_mask):
        outputs = self.encoder(
            input_ids=input_ids,
            attention_mask=attention_mask
        )
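        # Pool by taking the first ([CLS]) token's hidden state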
        pooled_output = outputs.last_hidden_state[:, 0]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits
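

# --- Load tokenizer and model ---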
print("Loading model...") |
|
|
print(f"Loading tokenizer from {MODEL_NAME}...") |
|
|
tokenizer = AutoTokenizer.from_pretrained( |
|
|
MODEL_NAME, |
|
|
use_fast=True, |
|
|
trust_remote_code=False |
|
|
) |
|
|
print("β Tokenizer loaded") |
|
|
|
|
|
print(f"Initializing model architecture...") |
|
|
model = EmotionClassifier(MODEL_NAME, len(EMOTION_LABELS)).to(DEVICE) |
|
|
print("β Model architecture created") |
|
|
|
|
|
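
# Load fine-tuned weights if they are available next to app.py; otherwise the app
# falls back to a randomly initialized classification head (useful for smoke tests).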
try:
    model.load_state_dict(torch.load('best_deberta_model.pth', map_location=DEVICE))
    print("Model loaded successfully!")
except Exception as e:
    print(f"Warning: Could not load model weights: {e}")
    print("Using randomly initialized model (for testing)")

model.eval()
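

# --- Inference ---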
def predict_emotions(text):
    """
    Predict emotions for input text

    Args:
        text (str): Input text to analyze

    Returns:
        tuple: (dict mapping each emotion to its probability,
                str summarizing the detected emotions)
    """
    if not text or len(text.strip()) == 0:
        # Return an empty label dict plus a prompt so both Gradio outputs get a value
        return {}, "Please enter some text to analyze"

    encoding = tokenizer(
        text,
        max_length=MAX_LENGTH,
        padding='max_length',
        truncation=True,
        return_tensors='pt'
    )

    input_ids = encoding['input_ids'].to(DEVICE)
    attention_mask = encoding['attention_mask'].to(DEVICE)

    with torch.no_grad():
        logits = model(input_ids, attention_mask)
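        # Sigmoid (not softmax): each emotion is scored independently for multi-label output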
        probabilities = torch.sigmoid(logits).cpu().numpy()[0]

    results = {}
    detected_emotions = []

    for label, prob in zip(EMOTION_LABELS, probabilities):
        results[label.capitalize()] = float(prob)
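        # Treat probabilities above 0.5 as detected emotions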
        if prob > 0.5:
            detected_emotions.append(f"{label.capitalize()} ({prob:.1%})")

    results = dict(sorted(results.items(), key=lambda x: x[1], reverse=True))

    return results, ", ".join(detected_emotions) if detected_emotions else "No strong emotions detected"
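

# Example inputs surfaced in the gr.Examples widget below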
examples = [
    ["I just got accepted into my dream university! I can't believe it!"],
    ["I'm so worried about the exam tomorrow. I haven't studied enough."],
    ["This is the worst day of my life. Everything went wrong."],
    ["I can't believe they did this to me. I'm furious!"],
    ["Wow! I never expected that to happen!"],
    ["The sunset today was beautiful. It made me feel peaceful."],
    ["I'm frustrated with this project but also excited about the possibilities."]
]
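

# --- Gradio UI ---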
with gr.Blocks(theme=gr.themes.Soft(), title="Emotion Detection") as demo:
    gr.Markdown(
        """
        # Multi-Label Emotion Detection

        Detect multiple emotions in text using **DeBERTa-v3-base** fine-tuned for emotion classification.

        **Emotions detected**: Anger, Fear, Joy, Sadness, Surprise

        This model can detect multiple emotions simultaneously in a single text.
        """
    )
    with gr.Row():
        with gr.Column():
            text_input = gr.Textbox(
                label="Enter text to analyze",
                placeholder="Type or paste your text here...",
                lines=5
            )

            analyze_btn = gr.Button("Analyze Emotions", variant="primary", size="lg")

            gr.Markdown("### Try these examples:")
            gr.Examples(
                examples=examples,
                inputs=text_input,
                label="Click an example to try it"
            )
        with gr.Column():
            gr.Markdown("### Results")

            detected_output = gr.Textbox(
                label="Detected Emotions",
                placeholder="Results will appear here...",
                lines=2
            )

            prob_output = gr.Label(
                label="Emotion Probabilities",
                num_top_classes=5
            )

            gr.Markdown(
                """
                ---
                **How it works:**
                - Probabilities above 50% indicate detected emotions
                - Multiple emotions can be present in the same text
                - Higher probability = stronger emotion signal

                **Model:** DeBERTa-v3-base (184M parameters)

                **Performance:** F1 Score ~0.85 on validation set
                """
            )
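
    # Run the prediction on button click and when the user submits the textbox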
    analyze_btn.click(
        fn=predict_emotions,
        inputs=text_input,
        outputs=[prob_output, detected_output]
    )

    text_input.submit(
        fn=predict_emotions,
        inputs=text_input,
        outputs=[prob_output, detected_output]
    )


if __name__ == "__main__":
    demo.launch()