# Minimal Flask API that serves a small conversational model.
import torch
from flask import Flask, request, jsonify
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

app = Flask(__name__)

# Small distilled BlenderBot checkpoint; downloaded on first run.
model_id = "facebook/blenderbot-400M-distill"

print("Loading chat model...")
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

# Use the GPU when one is available; fall back to CPU otherwise.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
model.eval()  # inference mode: disables dropout

print("Model loaded.")

@app.route("/chat", methods=["POST"])
def chat():
    try:
        # silent=True returns None (instead of raising) when the body is not valid JSON.
        data = request.get_json(silent=True) or {}
        msg = data.get("message", "")

        if not msg:
            return jsonify({"error": "No message sent"}), 400

        # Tokenize the message and move the tensors to the model's device.
        inputs = tokenizer(msg, return_tensors="pt").to(device)
        output = model.generate(
            **inputs,
            max_length=200,
            do_sample=True,   # sample rather than decode greedily
            top_p=0.92,       # nucleus sampling: keep the top 92% of probability mass
            temperature=0.7,  # <1.0 sharpens the distribution (less random)
        )

        reply = tokenizer.decode(output[0], skip_special_tokens=True)
        return jsonify({"reply": reply})

    except Exception as e:
        return jsonify({"error": str(e)}), 500

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=7860)
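
# Example request, assuming the server is running locally on port 7860:
#   curl -X POST http://localhost:7860/chat \
#        -H "Content-Type: application/json" \
#        -d '{"message": "Hello!"}'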