A minimal Gradio Space (`app.py`) that chats with GPT-OSS 20B through the Hugging Face Inference Providers router:
```python
import gradio as gr
from openai import OpenAI
import os

# Initialize an OpenAI-compatible client pointed at the Hugging Face router
client = OpenAI(
    base_url="https://router.huggingface.co/v1",
    api_key=os.getenv("HF_TOKEN"),
)

# Function to interact with the model
def chat_with_model(user_input):
    completion = client.chat.completions.create(
        model="openai/gpt-oss-20b:nebius",
        messages=[
            {"role": "user", "content": user_input}
        ],
    )
    # The OpenAI v1 SDK returns message objects, not dicts
    return completion.choices[0].message.content

# Gradio interface
iface = gr.Interface(
    fn=chat_with_model,
    inputs=gr.Textbox(lines=5, placeholder="Type your message here..."),
    outputs=gr.Textbox(label="Response"),
    title="Hugging Face GPT-OSS Chat",
    description="Chat with the GPT-OSS 20B model deployed via the Hugging Face API",
)

# Launch the interface
iface.launch()
```
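Responses from a 20B model can take a few seconds, so streaming partial output is often nicer in the UI. Below is a minimal sketch, assuming the same `client` and model as above; `chat_with_model_streaming` is a hypothetical drop-in replacement for `fn=chat_with_model` that relies on Gradio's support for generator functions:

```python
# Streaming variant: yields the partial response as tokens arrive.
# Assumes the same `client` and model as in the app above (an assumption,
# not part of the original code). Gradio updates the output textbox on
# each `yield` when the wrapped function is a generator.
def chat_with_model_streaming(user_input):
    stream = client.chat.completions.create(
        model="openai/gpt-oss-20b:nebius",
        messages=[{"role": "user", "content": user_input}],
        stream=True,
    )
    partial = ""
    for chunk in stream:
        # Some chunks carry no text (e.g. the final one), so guard for that
        if chunk.choices and chunk.choices[0].delta.content:
            partial += chunk.choices[0].delta.content
            yield partial
```

Assuming a standard Spaces setup, the app also needs `gradio` and `openai` declared in `requirements.txt`, and `HF_TOKEN` stored as a Space secret so that `os.getenv("HF_TOKEN")` resolves at runtime.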