Muhammadidrees committed on
Commit
c4f1dd4
·
verified ·
1 Parent(s): 4ee5aad

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -15
app.py CHANGED
@@ -1,15 +1,17 @@
1
  import asyncio
2
- asyncio.set_event_loop_policy(asyncio.DefaultEventLoopPolicy()) # <<< FIXES THE ERROR
3
 
4
  import os
5
  import gradio as gr
6
  from openai import OpenAI
 
 
7
 
8
  HF_API_KEY = os.getenv("HF_TOKEN")
9
 
10
  client = OpenAI(
11
  base_url="https://router.huggingface.co/v1",
12
- api_key=HF_API_KEY,
13
  )
14
 
15
  def chat_with_model(user_message, history):
@@ -17,6 +19,7 @@ def chat_with_model(user_message, history):
17
  history = []
18
 
19
  messages = [{"role": "system", "content": "You are a helpful assistant."}]
 
20
  for human, bot in history:
21
  messages.append({"role": "user", "content": human})
22
  messages.append({"role": "assistant", "content": bot})
@@ -28,24 +31,30 @@ def chat_with_model(user_message, history):
28
  model="openai/gpt-oss-20b:nebius",
29
  messages=messages
30
  )
31
-
32
  reply = completion.choices[0].message["content"]
 
 
33
 
34
- history.append((user_message, reply))
35
- return history
36
 
37
- except Exception as e:
38
- history.append((user_message, f"Error: {str(e)}"))
39
- return history
40
 
 
 
 
 
 
 
 
41
 
42
- with gr.Blocks() as demo:
43
- gr.Markdown("# 🤖 Chat with HuggingFace Router (OpenAI API Compatible)")
44
 
45
- chatbot = gr.Chatbot(height=500)
46
- text_input = gr.Textbox(label="Your message")
 
47
 
48
- text_input.submit(chat_with_model, [text_input, chatbot], chatbot)
49
- text_input.submit(lambda: "", None, text_input)
50
 
51
- demo.launch()
 
 
1
import asyncio

# Keep the stock (default) event loop policy; set it before any
# third-party import can install a different one — per the original
# author's note, this avoids file-descriptor errors at startup.
asyncio.set_event_loop_policy(asyncio.DefaultEventLoopPolicy())

import os

import gradio as gr
import uvicorn
from fastapi import FastAPI
from openai import OpenAI

# Token is injected by the environment (e.g. a Hugging Face Space secret).
HF_API_KEY = os.getenv("HF_TOKEN")

# OpenAI-compatible client pointed at the Hugging Face inference router.
client = OpenAI(
    base_url="https://router.huggingface.co/v1",
    api_key=HF_API_KEY,
)
16
 
17
def chat_with_model(user_message, history):
    """Send the conversation plus *user_message* to the model; return updated history.

    Parameters
    ----------
    user_message : str
        The latest message typed by the user.
    history : list[tuple[str, str]] | None
        Prior (user, assistant) turns as kept by ``gr.Chatbot``; ``None``
        is treated as an empty conversation.

    Returns
    -------
    list[tuple[str, str]]
        History with the new ``(user_message, reply)`` pair appended; on
        API failure the reply slot carries an ``"Error: ..."`` string so
        the UI keeps working instead of crashing.
    """
    if history is None:
        history = []

    # Rebuild the full conversation in chat-completions message format.
    messages = [{"role": "system", "content": "You are a helpful assistant."}]
    for human, bot in history:
        messages.append({"role": "user", "content": human})
        messages.append({"role": "assistant", "content": bot})
    messages.append({"role": "user", "content": user_message})

    try:
        completion = client.chat.completions.create(
            model="openai/gpt-oss-20b:nebius",
            messages=messages
        )
        # BUG FIX: with openai>=1.0 the message is a ChatCompletionMessage
        # object, which is NOT subscriptable — message["content"] raises
        # TypeError. Attribute access is the supported form.
        reply = completion.choices[0].message.content
    except Exception as e:
        # Surface the failure inline in the chat window.
        reply = f"Error: {e}"

    history.append((user_message, reply))
    return history
40
 
 
 
 
41
 
42
# --- Gradio UI -------------------------------------------------------------
with gr.Blocks() as interface:
    gr.Markdown("## 🤖 Chat with HuggingFace Router API (OpenAI-Compatible)")
    chat_window = gr.Chatbot()
    user_box = gr.Textbox(label="Message")

    # First submit handler posts the message; the second clears the input box.
    user_box.submit(chat_with_model, [user_box, chat_window], chat_window)
    user_box.submit(lambda: "", None, user_box)

# --- FastAPI wrapper (per the original note: avoids file-descriptor errors) -
app = FastAPI()


@app.get("/")
def read_main():
    # Plain JSON response at the root path; the chat UI is mounted at /chat.
    return {"message": "Gradio app is running"}


# Mount the Gradio interface onto the same ASGI app under /chat.
app = gr.mount_gradio_app(app, interface, path="/chat")

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)