Update app.py
app.py
CHANGED
@@ -4,12 +4,12 @@ import json
 import requests
 
 #Streaming endpoint
-API_URL = "https://api.openai.com/v1/chat/completions"
+API_URL = "https://api.openai.com/v1/chat/completions"
 
 #Testing with my Open AI Key
 #OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
 
-def predict(inputs, top_p, temperature, openai_api_key, chat_counter, chatbot=[], history=[]):
+def predict(inputs, top_p, temperature, openai_api_key, chat_counter, chatbot=[], history=[]):
 
     payload = {
         "model": "gpt-3.5-turbo",
@@ -61,7 +61,6 @@ def predict(inputs, top_p, temperature, openai_api_key, chat_counter, chatbot=[]
     print(f"payload is - {payload}")
     # make a POST request to the API endpoint using the requests.post method, passing in stream=True
     response = requests.post(API_URL, headers=headers, json=payload, stream=True)
-    #response = requests.post(API_URL, headers=headers, json=payload, stream=True)
     token_counter = 0
     partial_words = ""
 
@@ -91,22 +90,12 @@ def predict(inputs, top_p, temperature, openai_api_key, chat_counter, chatbot=[]
 def reset_textbox():
     return gr.update(value='')
 
-title = """<
-description = """
-```
-User: <utterance>
-Assistant: <utterance>
-User: <utterance>
-Assistant: <utterance>
-...
-```
-In this app, you can explore the outputs of a gpt-3.5-turbo LLM.
-"""
+title = """<h3 align="center">chatGPT API 테스트</h3>"""
+description = """gpt-3.5-turbo LLM."""
 
 with gr.Blocks(css = """#col_container {width: 700px; margin-left: auto; margin-right: auto;}
-                #chatbot {height:
+                #chatbot {height: 600px; overflow: auto; line-height: 140%} code {background-color:black; display:block; padding: 8px; border-radius: 4px; }""") as demo:
     gr.HTML(title)
-    gr.HTML('''<center><a href="https://huggingface.co/spaces/ysharma/ChatGPTwithAPI?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space and run securely with your OpenAI API Key</center>''')
     with gr.Column(elem_id = "col_container"):
         openai_api_key = gr.Textbox(type='password', label="Enter your OpenAI API key here")
         chatbot = gr.Chatbot(elem_id='chatbot') #c
@@ -127,5 +116,5 @@ with gr.Blocks(css = """#col_container {width: 700px; margin-left: auto; margin-
     b1.click(reset_textbox, [], [inputs])
     inputs.submit(reset_textbox, [], [inputs])
 
-
+    gr.Markdown(description)
     demo.queue().launch(debug=True)
|