Muhammadidrees committed (verified)
Commit 8cf42f3 · 1 Parent(s): e00e822

Update app.py

Files changed (1):
  1. app.py (+22 -51)
app.py CHANGED
@@ -1,60 +1,31 @@
-import asyncio
-asyncio.set_event_loop_policy(asyncio.DefaultEventLoopPolicy())  # avoid fd errors
-
 import os
 import gradio as gr
 from openai import OpenAI
-from fastapi import FastAPI
-import uvicorn
-
-HF_API_KEY = os.getenv("HF_TOKEN")
 
+# Initialize the Hugging Face API client
 client = OpenAI(
     base_url="https://router.huggingface.co/v1",
-    api_key=HF_API_KEY
+    api_key=os.getenv("HF_TOKEN")
 )
 
-def chat_with_model(user_message, history):
-    if history is None:
-        history = []
-
-    messages = [{"role": "system", "content": "You are a helpful assistant."}]
-
-    for human, bot in history:
-        messages.append({"role": "user", "content": human})
-        messages.append({"role": "assistant", "content": bot})
-
-    messages.append({"role": "user", "content": user_message})
-
-    try:
-        completion = client.chat.completions.create(
-            model="openai/gpt-oss-20b:nebius",
-            messages=messages
-        )
-        reply = completion.choices[0].message["content"]
-    except Exception as e:
-        reply = f"Error: {e}"
-
-    history.append((user_message, reply))
-    return history
-
-
-with gr.Blocks() as interface:
-    gr.Markdown("## 🤖 Chat with HuggingFace Router API (OpenAI-Compatible)")
-    chatbot = gr.Chatbot()
-    msg = gr.Textbox(label="Message")
-
-    msg.submit(chat_with_model, [msg, chatbot], chatbot)
-    msg.submit(lambda: "", None, msg)
-
-# FastAPI wrapper to avoid file-descriptor errors
-app = FastAPI()
-
-@app.get("/")
-def read_main():
-    return {"message": "Gradio app is running"}
-
-app = gr.mount_gradio_app(app, interface, path="/chat")
+# Function to interact with the model
+def chat_with_model(user_input):
+    completion = client.chat.completions.create(
+        model="openai/gpt-oss-20b:nebius",
+        messages=[
+            {"role": "user", "content": user_input}
+        ],
+    )
+    return completion.choices[0].message.content
+
+# Gradio interface
+iface = gr.Interface(
+    fn=chat_with_model,
+    inputs=gr.Textbox(lines=5, placeholder="Type your message here..."),
+    outputs=gr.Textbox(label="Response"),
+    title="Hugging Face GPT-OSS Chat",
+    description="Chat with GPT-OSS 20B model deployed via Hugging Face API"
+)
 
-if __name__ == "__main__":
-    uvicorn.run(app, host="0.0.0.0", port=7860)
+# Launch the interface
+iface.launch()
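
For local verification, a minimal smoke-test sketch along the following lines can be run before pushing; it is not part of the commit. It assumes HF_TOKEN is exported in the environment (or configured as a Space secret) and reuses the same router endpoint and model id as the new app.py; the script name and the sample prompt are illustrative only.

# smoke_test.py (hypothetical helper, not included in this commit)
import os
import sys

from openai import OpenAI

token = os.getenv("HF_TOKEN")
if not token:
    # Fail fast with a clear message instead of a late authentication error.
    sys.exit("HF_TOKEN is not set; export it locally or add it as a Space secret.")

# Same OpenAI-compatible router and model id used in app.py above.
client = OpenAI(base_url="https://router.huggingface.co/v1", api_key=token)

completion = client.chat.completions.create(
    model="openai/gpt-oss-20b:nebius",
    messages=[{"role": "user", "content": "Say hello in one short sentence."}],
)
print(completion.choices[0].message.content)

Running python smoke_test.py once confirms that the token and the router call work before the Gradio UI is involved.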