Spaces:
Running
Running
GitHub Action
committed on
Commit
·
bb2bd12
1
Parent(s):
74ebe5c
Sync ling-space changes from GitHub commit b5051bd
Browse files
smart_writer_kit/agent_for_streaming_completion.py
CHANGED
|
@@ -1,40 +1,46 @@
|
|
| 1 |
from model_handler import ModelHandler
|
| 2 |
from config import LING_FLASH_2_0
|
| 3 |
|
| 4 |
-
def fetch_flow_suggestion_agent(editor_content: str):
|
| 5 |
"""
|
| 6 |
Agent for fetching a short, real-time continuation.
|
| 7 |
This agent calls a real LLM.
|
| 8 |
"""
|
| 9 |
|
| 10 |
-
if not editor_content or len(editor_content.strip()) <
|
| 11 |
return "(请输入更多内容以获取建议...)"
|
| 12 |
|
| 13 |
try:
|
| 14 |
model_handler = ModelHandler()
|
| 15 |
-
|
| 16 |
# For a simple continuation, we can use a concise system prompt.
|
| 17 |
-
system_prompt = "
|
| 18 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 19 |
|
| 20 |
-
|
| 21 |
# We use editor_content as the user prompt.
|
| 22 |
-
#
|
| 23 |
-
|
| 24 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 25 |
# Use generate_code as it's a simple generator for direct content.
|
| 26 |
# We need to provide a dummy code_type and a model_choice.
|
| 27 |
# The model_choice here is the display name, but we can pass the constant.
|
| 28 |
response_generator = model_handler.generate_code(
|
| 29 |
system_prompt=system_prompt,
|
| 30 |
user_prompt=user_prompt,
|
| 31 |
-
model_choice=LING_FLASH_2_0
|
| 32 |
)
|
| 33 |
-
|
| 34 |
# Assemble the streamed response
|
| 35 |
full_response = "".join(chunk for chunk in response_generator)
|
| 36 |
-
print("【收到的完整上下文】")
|
| 37 |
-
print("full_response:", repr(full_response))
|
| 38 |
return full_response.strip()
|
| 39 |
|
| 40 |
except Exception as e:
|
|
@@ -51,6 +57,4 @@ def accept_flow_suggestion_agent(current_text: str, suggestion: str):
|
|
| 51 |
result = current_text
|
| 52 |
else:
|
| 53 |
result = current_text + suggestion
|
| 54 |
-
print("【收到的完整上下文】")
|
| 55 |
-
print("result:", repr(result))
|
| 56 |
return result
|
|
|
|
| 1 |
from model_handler import ModelHandler
|
| 2 |
from config import LING_FLASH_2_0
|
| 3 |
|
| 4 |
+
def fetch_flow_suggestion_agent(editor_content: str, style: str = "", kb=None, short_outline=None, long_outline=None):
|
| 5 |
"""
|
| 6 |
Agent for fetching a short, real-time continuation.
|
| 7 |
This agent calls a real LLM.
|
| 8 |
"""
|
| 9 |
|
| 10 |
+
if not editor_content or len(editor_content.strip()) < 4:
|
| 11 |
return "(请输入更多内容以获取建议...)"
|
| 12 |
|
| 13 |
try:
|
| 14 |
model_handler = ModelHandler()
|
| 15 |
+
|
| 16 |
# For a simple continuation, we can use a concise system prompt.
|
| 17 |
+
system_prompt = f"""你是一个写作助手,根据用户输入的内容,紧接着写一句 **简短、流畅** 的续写。
|
| 18 |
+
- 不要重复用户已输入的内容,直接开始写你续写的部分。
|
| 19 |
+
- 遵循 **整体章程** 中的风格和指导原则。
|
| 20 |
+
|
| 21 |
+
整体章程:
|
| 22 |
+
{style}"""
|
| 23 |
+
|
| 24 |
|
|
|
|
| 25 |
# We use editor_content as the user prompt.
|
| 26 |
+
# 我们的上下文编排方式,根据当前光标所在位置,来决定续写的位置。
|
| 27 |
+
# ---
|
| 28 |
+
# <之前的内容>...</之前的内容>{续写这里}
|
| 29 |
+
# <之后的内容>...</之后的内容>
|
| 30 |
+
# 写出 {续写这里} 部分的内容。
|
| 31 |
+
# ---
|
| 32 |
+
user_prompt = f"""===之前的内容===\n{editor_content[-80:]}"""
|
| 33 |
# Use generate_code as it's a simple generator for direct content.
|
| 34 |
# We need to provide a dummy code_type and a model_choice.
|
| 35 |
# The model_choice here is the display name, but we can pass the constant.
|
| 36 |
response_generator = model_handler.generate_code(
|
| 37 |
system_prompt=system_prompt,
|
| 38 |
user_prompt=user_prompt,
|
| 39 |
+
model_choice=LING_FLASH_2_0
|
| 40 |
)
|
| 41 |
+
|
| 42 |
# Assemble the streamed response
|
| 43 |
full_response = "".join(chunk for chunk in response_generator)
|
|
|
|
|
|
|
| 44 |
return full_response.strip()
|
| 45 |
|
| 46 |
except Exception as e:
|
|
|
|
| 57 |
result = current_text
|
| 58 |
else:
|
| 59 |
result = current_text + suggestion
|
|
|
|
|
|
|
| 60 |
return result
|
tab_smart_writer.py
CHANGED
|
@@ -51,7 +51,7 @@ def dismiss_inspiration():
|
|
| 51 |
# --- UI Construction ---
|
| 52 |
|
| 53 |
def create_smart_writer_tab():
|
| 54 |
-
debounce_state = gr.State({"last_change": 0, "active": False})
|
| 55 |
debounce_timer = gr.Timer(0.5, active=False)
|
| 56 |
|
| 57 |
with gr.Row(equal_height=False, elem_id="indicator-writing-tab"):
|
|
@@ -188,25 +188,25 @@ def create_smart_writer_tab():
|
|
| 188 |
cancel_insp_btn.click(fn=dismiss_inspiration, outputs=inspiration_modal, show_progress="hidden")
|
| 189 |
|
| 190 |
# 3. Flow Suggestion with Debounce
|
| 191 |
-
def start_debounce(editor_content):
|
| 192 |
-
return {"last_change": time.time(), "active": True}, gr.update(active=True), gr.update(visible=True, value="<progress value='0' max='100'></progress> 补全中... 3.0s")
|
| 193 |
|
| 194 |
def update_debounce(debounce_state, editor_content):
|
| 195 |
if not debounce_state["active"]:
|
| 196 |
return gr.update(), gr.update(), debounce_state, gr.update()
|
| 197 |
elapsed = time.time() - debounce_state["last_change"]
|
| 198 |
if elapsed >= 3:
|
| 199 |
-
suggestion = fetch_flow_suggestion_agent(editor_content)
|
| 200 |
-
return gr.update(visible=False), suggestion, {"last_change": 0, "active": False}, gr.update(active=False)
|
| 201 |
else:
|
| 202 |
progress = int((elapsed / 3) * 100)
|
| 203 |
remaining = 3 - elapsed
|
| 204 |
progress_html = f"<progress value='{progress}' max='100'></progress> 补全中... {remaining:.1f}s"
|
| 205 |
return gr.update(value=progress_html), gr.update(), debounce_state, gr.update()
|
| 206 |
|
| 207 |
-
editor.change(fn=start_debounce, inputs=editor, outputs=[debounce_state, debounce_timer, debounce_progress])
|
| 208 |
debounce_timer.tick(fn=update_debounce, inputs=[debounce_state, editor], outputs=[debounce_progress, flow_suggestion_display, debounce_state, debounce_timer])
|
| 209 |
-
refresh_flow_btn.click(fn=fetch_flow_suggestion_agent, inputs=editor, outputs=flow_suggestion_display)
|
| 210 |
|
| 211 |
# Accept Flow (Triggered by visible Button or hidden Tab Key trigger)
|
| 212 |
accept_flow_fn_inputs = [editor, flow_suggestion_display]
|
|
|
|
| 51 |
# --- UI Construction ---
|
| 52 |
|
| 53 |
def create_smart_writer_tab():
|
| 54 |
+
debounce_state = gr.State({"last_change": 0, "active": False, "style": "", "kb": [], "short_outline": [], "long_outline": []})
|
| 55 |
debounce_timer = gr.Timer(0.5, active=False)
|
| 56 |
|
| 57 |
with gr.Row(equal_height=False, elem_id="indicator-writing-tab"):
|
|
|
|
| 188 |
cancel_insp_btn.click(fn=dismiss_inspiration, outputs=inspiration_modal, show_progress="hidden")
|
| 189 |
|
| 190 |
# 3. Flow Suggestion with Debounce
|
| 191 |
+
def start_debounce(editor_content, style, kb, short_outline, long_outline):
    """Arm the debounce window after an editor change.

    Snapshots the writing context (style charter, knowledge base, outlines)
    into the debounce state, activates the tick timer, and shows the
    progress indicator reset to 0%.

    Returns:
        A 3-tuple of (new debounce state dict, timer activation update,
        progress-bar visibility/value update) for the bound Gradio outputs.
    """
    # editor_content is received from the change event but the snapshot only
    # needs the timestamp; the editor text itself is re-read on each tick.
    armed_state = {
        "last_change": time.time(),
        "active": True,
        "style": style,
        "kb": kb,
        "short_outline": short_outline,
        "long_outline": long_outline,
    }
    progress_html = "<progress value='0' max='100'></progress> 补全中... 3.0s"
    return armed_state, gr.update(active=True), gr.update(visible=True, value=progress_html)
|
| 193 |
|
| 194 |
def update_debounce(debounce_state, editor_content):
    """Timer tick handler driving the debounced flow suggestion.

    While the 3-second window is still open, only the progress bar is
    refreshed; once it elapses, the flow-suggestion agent is invoked with
    the context captured in *debounce_state*, and the state and timer are
    reset to idle.

    Returns:
        A 4-tuple of updates for (progress bar, suggestion display,
        debounce state, timer).
    """
    if not debounce_state["active"]:
        # Tick arrived while idle: leave every output untouched.
        return gr.update(), gr.update(), debounce_state, gr.update()

    waited = time.time() - debounce_state["last_change"]

    if waited < 3:
        # Still counting down: refresh the progress indicator only.
        pct = int((waited / 3) * 100)
        left = 3 - waited
        bar = f"<progress value='{pct}' max='100'></progress> 补全中... {left:.1f}s"
        return gr.update(value=bar), gr.update(), debounce_state, gr.update()

    # Window elapsed: fetch the suggestion and disarm state + timer.
    suggestion = fetch_flow_suggestion_agent(
        editor_content,
        debounce_state["style"],
        debounce_state["kb"],
        debounce_state["short_outline"],
        debounce_state["long_outline"],
    )
    idle_state = {"last_change": 0, "active": False, "style": "", "kb": [], "short_outline": [], "long_outline": []}
    return gr.update(visible=False), suggestion, idle_state, gr.update(active=False)
|
| 206 |
|
| 207 |
+
editor.change(fn=start_debounce, inputs=[editor, style_input, kb_input, short_outline_input, long_outline_input], outputs=[debounce_state, debounce_timer, debounce_progress])
|
| 208 |
debounce_timer.tick(fn=update_debounce, inputs=[debounce_state, editor], outputs=[debounce_progress, flow_suggestion_display, debounce_state, debounce_timer])
|
| 209 |
+
refresh_flow_btn.click(fn=fetch_flow_suggestion_agent, inputs=[editor, style_input, kb_input, short_outline_input, long_outline_input], outputs=flow_suggestion_display)
|
| 210 |
|
| 211 |
# Accept Flow (Triggered by visible Button or hidden Tab Key trigger)
|
| 212 |
accept_flow_fn_inputs = [editor, flow_suggestion_display]
|