Spaces status: Running
import os

import gradio as gr
from openai import OpenAI

# OpenAI-compatible client pointed at the Hugging Face inference router.
# HF_TOKEN must be set in the Space's "Secrets" settings.
client = OpenAI(
    api_key=os.getenv("HF_TOKEN"),
    base_url="https://router.huggingface.co/v1",
)
def chat_with_model(message: str, model: str = "openai/gpt-oss-20b:nebius") -> str:
    """Send one user message through the HF router and return the reply text.

    Args:
        message: The user's prompt.
        model: Router model identifier; defaults to GPT-OSS 20B on Nebius
            (parameterized so other router models can reuse this helper).

    Returns:
        The assistant's reply, or a human-readable error string — errors are
        returned rather than raised so the Gradio UI can display them.
    """
    # Guard: skip the API round-trip entirely for an empty/blank prompt.
    if not message or not message.strip():
        return "Error: empty message"
    try:
        response = client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": message}],
        )
        # Reply text lives on the first choice's message.
        return response.choices[0].message.content
    except Exception as e:  # broad by design: surface any API failure in the UI
        return f"Error: {e}"
# Minimal, Spaces-friendly Gradio UI: one prompt box, one reply box, one button.
with gr.Blocks(title="GPT-OSS 20B Chat") as demo:
    gr.Markdown("## Chat with GPT-OSS 20B (HF Router)")
    user_message = gr.Textbox(lines=4, label="Your Message")
    model_reply = gr.Textbox(label="Model Reply")
    send_button = gr.Button("Send")
    # Wire the button: prompt in, model reply out.
    send_button.click(chat_with_model, inputs=user_message, outputs=model_reply)

# Bind to all interfaces on 7860, the standard HF Spaces port;
# show_error surfaces tracebacks in the browser for easier debugging.
demo.launch(
    server_name="0.0.0.0",
    server_port=7860,
    show_error=True,
)