"""
Usage examples for LAPEFT Financial Sentiment Analysis
"""
from transformers import pipeline, BertTokenizer, BertForSequenceClassification
from peft import PeftModel
import torch
def simple_usage():
    """Simple pipeline usage - recommended for most users."""
    classifier = pipeline(
        "text-classification",
        model="Hananguyen12/LAPEFT-Financial-Sentiment-Analysis"
    )

    # Example texts
    examples = [
        "The company exceeded earnings expectations with strong revenue growth.",
        "Market volatility continues with mixed signals from investors.",
        "Bankruptcy filing has caused significant concern among stakeholders."
    ]

    for text in examples:
        result = classifier(text)[0]
        print(f"Text: {text}")
        print(f"Sentiment: {result['label']} (Confidence: {result['score']:.3f})")
        print("-" * 50)
def advanced_usage():
    """Advanced usage with direct model access."""
    # Load model components: base BERT classifier plus the LAPEFT adapter and tokenizer
    base_model = BertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=3)
    model = PeftModel.from_pretrained(base_model, "Hananguyen12/LAPEFT-Financial-Sentiment-Analysis")
    tokenizer = BertTokenizer.from_pretrained("Hananguyen12/LAPEFT-Financial-Sentiment-Analysis")
    model.eval()  # ensure inference mode (dropout disabled)

    def predict_detailed(text):
        """Return per-class probabilities for a single text."""
        inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
        with torch.no_grad():
            outputs = model(**inputs)
            probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
        return {
            "negative": probs[0][0].item(),
            "neutral": probs[0][1].item(),
            "positive": probs[0][2].item()
        }

    # Example
    text = "The quarterly report shows promising growth indicators."
    scores = predict_detailed(text)
    print(f"Text: {text}")
    print(f"Detailed scores: {scores}")
if __name__ == "__main__":
    print("=== Simple Usage ===")
    simple_usage()

    print("\n=== Advanced Usage ===")
    advanced_usage()