Update backend/query_llm.py
backend/query_llm.py CHANGED (+2, -0)
@@ -104,6 +104,7 @@ def generate_hf(prompt: str, history: str, temperature: float = 0.9, max_new_tok
         stream=True, details=True, return_full_text=False)
     output = ""
     for response in stream:
+        print(response.token.text)
         output += response.token.text
         yield output

@@ -205,6 +206,7 @@ def generate_gemini(prompt: str, history: str, temperature: float = 0.9, max_new
         stream=True)
     output = ""
     for response in stream:
+        print(response.text)
         output += response.text
         yield output
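For context, both added print calls log each streamed chunk as it arrives, a common way to debug a token stream. Below is a minimal sketch of the generate_hf loop the first hunk sits in, assuming the stream comes from huggingface_hub's InferenceClient.text_generation (the stream=True, details=True, return_full_text=False keywords and the response.token.text field match that API). The model name, and completing the truncated max_new_tok... parameter as max_new_tokens, are assumptions, not taken from the repository.

from huggingface_hub import InferenceClient

# Assumed model; the actual Space may target a different endpoint.
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.1")

def generate_hf(prompt: str, history: str, temperature: float = 0.9,
                max_new_tokens: int = 256):
    # history handling omitted in this sketch
    stream = client.text_generation(
        prompt,
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        stream=True, details=True, return_full_text=False)
    output = ""
    for response in stream:
        print(response.token.text)     # the debug print added by this commit
        output += response.token.text
        yield output                   # yield the running text so far

Because each yield returns the full accumulated text rather than the single new token, a UI layer such as a Gradio chat callback can simply re-render the partial response on every iteration.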
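The second hunk applies the same pattern to the Gemini path. Here is a matching sketch, assuming the google-generativeai SDK, whose streaming chunks expose a .text field as the diff's response.text suggests; the model name, API-key setup, and generation config are illustrative assumptions.

import google.generativeai as genai

genai.configure(api_key="...")               # assumed setup
model = genai.GenerativeModel("gemini-pro")  # assumed model name

def generate_gemini(prompt: str, history: str, temperature: float = 0.9,
                    max_new_tokens: int = 256):
    # history handling omitted in this sketch
    stream = model.generate_content(
        prompt,
        generation_config=genai.types.GenerationConfig(
            temperature=temperature,
            max_output_tokens=max_new_tokens),
        stream=True)
    output = ""
    for response in stream:
        print(response.text)           # the debug print added by this commit
        output += response.text
        yield output                   # yield the running text so far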