import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
|
|
# Load the Typhoon (LLaMA-3-based) model and its tokenizer from the Hugging Face Hub.
# Note: this is an 8B-parameter model; a GPU and a half-precision dtype are strongly recommended.
model_name = "scb10x/llama-3-typhoon-v1.5-8b"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
|
|
def ask_question(question):
    # Tokenize the question and generate up to 500 new tokens
    inputs = tokenizer(question, return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=500)
    # Decode only the newly generated tokens, dropping the echoed prompt
    answer = tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)
    return answer
|
|
# Wrap the model in a simple text-in / text-out Gradio interface
iface = gr.Interface(fn=ask_question, inputs="text", outputs="text", title="LLaMA Chatbot")
|
|
iface.launch()