arkai2025 committed
Commit c7829ce · 1 Parent(s): 21d72c5

feat(mcp): integrate local FastMCP client with logging, improve service management, and add display cache

Files changed (6)
  1. README.md +8 -4
  2. agent.py +43 -12
  3. app.py +238 -95
  4. fire_rescue_mcp/mcp_client.py +114 -0
  5. fire_rescue_mcp/mcp_server.py +81 -28
  6. service.py +239 -62
README.md CHANGED

@@ -27,10 +27,11 @@ tags:
 
 ### Key Highlights
 
-- **Multi-stage advisor (Assess → Plan → Execute → Cycle Summary → After-Action)** powered by `openai/gpt-oss-120b` via HuggingFace Inference.
+- **Multi-stage advisor (Assess → Plan → Execute → Cycle Summary → After-Action)**
 - **10 FastMCP tools** cover scenario control, deployment, repositioning, removal, and analytics (idle units, coverage, building threats).
 - **Real-time Gradio 6 UI**: 10×10 emoji grid, animated status HUD, AI chat timeline, event logs, and clickable placement controls.
 - **Auto-execute toggle** lets players hand full control to the AI or approve every deployment manually—including adding new fires for stress tests.
+- **Advisor model switcher** directly in the control panel lets you hop between GPT-OSS 120B/20B, Llama-3.1 8B, and OpenAI GPT-5.1 backends.
 - **AI Battle Report Overlay**: After each victory/defeat the agent compiles charts, highlights, risks, and next actions before you rerun.
 - **Submission-ready metadata**: README + prompts + PRD/dev plan + Space tags prepared for the hackathon checklist.
 
@@ -132,7 +133,7 @@ Prompts for every stage live in `prompts.yaml`, making it easy to retune instruc
 
 - **`app.py`** – Gradio 6 Blocks UI, CSS-heavy HUD, timers, and event handlers for start/pause/reset, auto-execute toggles, and grid interactions.
 - **`service.py`** – Threaded simulation service with 1s ticks, 10-tick advisor cadence, change-tracked UI diffing, player action tracking, and after-action orchestration.
-- **`agent.py`** – Multi-stage LLM pipeline using HuggingFace’s OpenAI-compatible endpoint (`openai/gpt-oss-120b`) plus JSON-repair helpers and retry/backoff logic.
+- **`agent.py`** – Multi-stage LLM pipeline that bridges HuggingFace’s OpenAI-compatible endpoint and native OpenAI APIs, alongside JSON-repair helpers and retry/backoff logic.
 - **`simulation.py`** – 10×10 grid-based simulation (fire spread, win/lose rules, unit power/range, building cluster generator, seedable randomness).
 - **`models.py`** – Dataclasses for world/units/fires/events and serialization helpers used by both Gradio and MCP servers.
 - **`prompts.yaml`** – Configuration for every stage (Assess/Plan/Execute/Summary/After-Action) and model defaults (`max_completion_tokens=10000`).
@@ -181,12 +182,15 @@ pip install -e .
 
 ```bash
 export HF_TOKEN=your-huggingface-token-here
+# Optional: enable OpenAI GPT-5 models via the new dropdown
+export OPENAI_API_KEY=your-openai-key-here
 ```
 
 Or create `.env` next to `agent.py`:
 
 ```
 HF_TOKEN=hf_xxx
+OPENAI_API_KEY=sk-...
 ```
 
 ### Launch the Gradio app
@@ -241,8 +245,8 @@ fire-rescue-mcp/
 |------------|---------|
 | **Gradio 6** | Responsive Blocks UI, timers, and theming |
 | **FastMCP** | Lightweight MCP server for exposing tools |
-| **HuggingFace Inference (OpenAI-compatible)** | Runs `openai/gpt-oss-120b` without vendor lock-in |
-| **OpenAI Python SDK** | Client for HF’s router (JSON mode + retries) |
+| **HuggingFace Inference (OpenAI-compatible)** | Runs the GPT-OSS 120B/20B and Llama-3.1 8B lineup without vendor lock-in |
+| **OpenAI Python SDK** | Native client for the GPT-5.1 advisor option plus JSON-mode retries |
 | **UV** | Dependency + virtualenv manager |
 | **Python 3.10+ / dataclasses** | Core simulation + service logic |
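The README hunk above now documents two credential paths: `HF_TOKEN` for the HuggingFace Router models and `OPENAI_API_KEY` for the native OpenAI backend. A minimal preflight sketch (the helper name is hypothetical, not part of this commit) that mirrors the lookup order `agent.py` uses:

```python
import os

def check_advisor_credentials() -> dict:
    """Hypothetical preflight helper; mirrors the env vars the README documents."""
    return {
        # HuggingFace Router: GPT-OSS 120B/20B and Llama-3.1 8B choices
        "hf_available": os.getenv("HF_TOKEN") is not None,
        # Native OpenAI backend: the GPT-5.1 advisor option in the dropdown
        "openai_available": os.getenv("OPENAI_API_KEY") is not None,
    }

if __name__ == "__main__":
    print(check_advisor_credentials())
```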
agent.py CHANGED

@@ -37,6 +37,11 @@ except ImportError:
 HF_INFERENCE_BASE_URL = "https://router.huggingface.co/v1"
 HF_DEFAULT_MODEL = "openai/gpt-oss-120b"
 
+# OpenAI native configuration
+OPENAI_DEFAULT_MODEL = "gpt-5-mini"
+OPENAI_BASE_URL = os.getenv("OPENAI_BASE_URL", "https://api.openai.com/v1")
+OPENAI_API_ENV_VAR = "OPENAI_API_KEY"
+
 def get_hf_token() -> str | None:
     """
     Get HuggingFace token from environment variable.
@@ -47,6 +52,16 @@ def get_hf_token() -> str | None:
     return os.getenv("HF_TOKEN")
 
 
+def get_openai_api_key() -> str | None:
+    """
+    Get OpenAI API key from environment variable.
+
+    Returns:
+        OPENAI_API_KEY if available, None otherwise
+    """
+    return os.getenv(OPENAI_API_ENV_VAR)
+
+
 # =============================================================================
 # Stage 3 action cap + Prompt loading
 # =============================================================================
@@ -250,38 +265,52 @@ class AdvisorAgent:
         self,
         api_key: Optional[str] = None,
         model: Optional[str] = None,
+        provider: str = "hf",
+        base_url: Optional[str] = None,
     ):
         """
         Initialize the advisor agent.
 
         Args:
-            api_key: HuggingFace token (defaults to HF_TOKEN env var)
-            model: Model to use for inference (defaults to openai/gpt-oss-120b)
+            api_key: Optional override for provider API key
+            model: Model to use for inference (defaults depend on provider)
+            provider: "hf" for HuggingFace Router or "openai" for native OpenAI
+            base_url: Optional override for API base URL
         """
-        # Get HuggingFace token
-        self.api_key = api_key or get_hf_token()
-        self.base_url = HF_INFERENCE_BASE_URL
+        self.provider = (provider or "hf").lower()
+        if self.provider not in {"hf", "openai"}:
+            self.provider = "hf"
+
+        if self.provider == "openai":
+            self.api_key = api_key or get_openai_api_key()
+            self.base_url = base_url or OPENAI_BASE_URL
+        else:
+            self.api_key = api_key or get_hf_token()
+            self.base_url = base_url or HF_INFERENCE_BASE_URL
 
         # Load model config from prompts.yaml
         model_config = PROMPTS_CONFIG.get("model", {})
 
         # Model priority: explicit param > prompts.yaml > default
-        yaml_model = model_config.get("default")
-        self.model = model or yaml_model or HF_DEFAULT_MODEL
+        yaml_model = model_config.get("default") if self.provider == "hf" else None
+        provider_default_model = OPENAI_DEFAULT_MODEL if self.provider == "openai" else HF_DEFAULT_MODEL
+        self.model = model or yaml_model or provider_default_model
 
         self.temperature = model_config.get("temperature")  # None if not set
         self.max_completion_tokens = model_config.get("max_completion_tokens", 2000)
 
         # Initialize client
         if self.api_key:
-            print(f"🤖 AI Advisor initialized with HuggingFace Inference API, model: {self.model}")
+            provider_label = "OpenAI" if self.provider == "openai" else "HuggingFace Inference"
+            print(f"🤖 AI Advisor initialized with {provider_label} API, model: {self.model}")
            self.client = OpenAI(
                api_key=self.api_key,
                base_url=self.base_url
            )
        else:
            self.client = None
-            print("⚠️ Warning: No HF_TOKEN found. AI analysis will not work.")
+            missing_env = OPENAI_API_ENV_VAR if self.provider == "openai" else "HF_TOKEN"
+            print(f"⚠️ Warning: Missing {missing_env}. AI analysis will not work.")
 
     # =========================================================================
     # JSON Repair Helper
@@ -348,7 +377,8 @@ class AdvisorAgent:
         Parsed JSON dict, or None if failed
         """
         if not self.client:
-            print("Error: No API client available (HF_TOKEN not set)")
+            missing_env = OPENAI_API_ENV_VAR if self.provider == "openai" else "HF_TOKEN"
+            print(f"Error: No API client available ({missing_env} not set)")
             return None
 
         # Retry logic for rate limiting (429 errors)
@@ -357,16 +387,17 @@ class AdvisorAgent:
 
         for attempt in range(max_retries):
             try:
-                # Build API call parameters for HuggingFace Inference Provider
+                # Build API call parameters for current provider
+                token_param = "max_completion_tokens" if self.provider == "openai" else "max_tokens"
                 api_params = {
                     "model": self.model,
                     "messages": [
                         {"role": "system", "content": system_prompt},
                         {"role": "user", "content": user_message}
                     ],
-                    "max_tokens": self.max_completion_tokens,
                     "response_format": {"type": "json_object"}
                 }
+                api_params[token_param] = self.max_completion_tokens
 
                 # Only add temperature if explicitly set
                 if self.temperature is not None:
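Taken together, the agent.py hunks let one constructor serve both backends. A usage sketch based only on the signature and defaults shown in this diff (it assumes `HF_TOKEN` and `OPENAI_API_KEY` are exported as the README describes):

```python
from agent import AdvisorAgent

# Default path: HuggingFace Router, model from prompts.yaml or HF_DEFAULT_MODEL.
hf_advisor = AdvisorAgent(provider="hf")

# Native OpenAI path: falls back to OPENAI_DEFAULT_MODEL ("gpt-5-mini") and
# sends max_completion_tokens instead of max_tokens in the request body.
openai_advisor = AdvisorAgent(provider="openai")

# Unknown providers are silently coerced back to "hf" by the constructor.
assert AdvisorAgent(provider="anthropic").provider == "hf"
```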
app.py CHANGED

@@ -11,16 +11,53 @@ import uuid
 import gradio as gr
 from typing import Optional
 
-from service import get_service, SimulationService
+from service import (
+    ADVISOR_MODEL_CHOICES,
+    DEFAULT_ADVISOR_MODEL_CHOICE,
+    SimulationService,
+)
 
 AUTO_EXECUTE_DEFAULT = True  # Keep UI + backend aligned on initial load
+ADVISOR_MODEL_LABELS = list(ADVISOR_MODEL_CHOICES.keys())
 
 
-def _reset_auto_execute_to_default() -> bool:
+def get_or_create_service(service: Optional[SimulationService]) -> SimulationService:
+    """Return existing SimulationService or create a new one for the session."""
+    if service is None:
+        return SimulationService()
+    return service
+
+
+def _create_display_cache() -> dict:
+    """Create a per-session cache for advisor/history/status renders."""
+    return {
+        "last_status_html": None,
+        "last_advisor_signature": (),
+        "history_messages_cache": [],
+        "current_cycle_messages_cache": [],
+        "result_freeze_state": "",
+    }
+
+
+def _reset_advisor_display_cache(cache: Optional[dict] = None) -> dict:
+    """Clear cached advisor/chatbot renders (prevents stale history after reset)."""
+    if cache is None:
+        cache = _create_display_cache()
+    cache["last_status_html"] = None
+    cache["last_advisor_signature"] = ()
+    cache["history_messages_cache"] = []
+    cache["current_cycle_messages_cache"] = []
+    cache["result_freeze_state"] = ""
+    return cache
+
+
+def _reset_auto_execute_to_default(
+    service: Optional[SimulationService] = None,
+) -> tuple[bool, SimulationService]:
     """Restore auto-execute to the configured default and sync the checkbox."""
-    service = get_service()
+    service = get_or_create_service(service)
     service.set_auto_execute(AUTO_EXECUTE_DEFAULT)
-    return AUTO_EXECUTE_DEFAULT
+    return AUTO_EXECUTE_DEFAULT, service
 
 
 def _generate_session_token() -> str:
@@ -34,14 +71,43 @@ def _generate_session_token() -> str:
     return f"{timestamp_ms}_{unique_id}"
 
 
-def _initialize_session_defaults() -> tuple[bool, bool, str]:
+def _initialize_session_defaults(
+    service: Optional[SimulationService] = None,
+    display_cache: Optional[dict] = None,
+):
     """
     Reset auto-execute back to default and mark the new Gradio session
     as fresh so the next Start press always creates a brand-new run.
     Also issues a unique session token to bind backend ownership.
+    Returns initial values for all UI components to ensure clean state.
     """
-    auto_value = _reset_auto_execute_to_default()
-    return auto_value, False, _generate_session_token()
+    auto_value, service = _reset_auto_execute_to_default(service)
+    service = get_or_create_service(service)
+    if display_cache is None:
+        display_cache = _create_display_cache()
+    display_cache = _reset_advisor_display_cache(display_cache)
+    default_model_choice = service.reset_advisor_model_choice()
+    advisor_messages = [
+        {
+            "role": "assistant",
+            "content": "No AI analysis yet. Press **Start** to begin the advisor cycle.",
+        }
+    ]
+    event_log = "No events yet..."
+    status_html = render_status_html({"status": "idle"})
+    grid_updates = [gr.update(value="🌲") for _ in range(100)]
+    return (
+        auto_value,
+        False,
+        _generate_session_token(),
+        default_model_choice,
+        service,
+        display_cache,
+        advisor_messages,
+        event_log,
+        status_html,
+        *grid_updates,
+    )
 
 
 # =============================================================================
@@ -552,7 +618,7 @@ def render_game_result(status: str, report_payload: Optional[dict] = None) -> str
         <div class="result-title" style="color: {outcome_config['color']};">{outcome_config['title']}</div>
         <div class="result-subtitle">{outcome_config['subtitle']}</div>
         {report_section}
-        <button class="result-ok-btn" onclick="document.getElementById('result-overlay').style.display='none'">OK</button>
+        <button class="result-ok-btn" onclick="const overlay=document.getElementById('result-overlay');if(overlay){{overlay.style.display='none';}}">OK</button>
     </div>
 </div>
 """
@@ -582,9 +648,13 @@ def start_or_resume_simulation(
     seed: Optional[int],
     should_resume: bool,
     session_token: Optional[str],
+    service: Optional[SimulationService],
+    display_cache: Optional[dict],
 ):
     """Handle Start/Resume button click."""
-    service = get_service()
+    service = get_or_create_service(service)
+    if display_cache is None:
+        display_cache = _create_display_cache()
     token = (session_token or "").strip()
 
     resume_requested = bool(should_resume) and service.can_resume_session(token)
@@ -598,7 +668,7 @@ def start_or_resume_simulation(
     else:
         # Start new simulation - ALWAYS generate fresh token to ensure uniqueness
         # This prevents any possibility of matching old paused sessions
-        _reset_advisor_display_cache()
+        display_cache = _reset_advisor_display_cache(display_cache)
         new_token = _generate_session_token()
         actual_seed = int(seed) if seed and seed > 0 else None
         state = service.start(
@@ -626,12 +696,17 @@ def start_or_resume_simulation(
         gr.update(interactive=False),  # start btn
         gr.update(interactive=True),  # pause btn
         render_status_html(state, is_thinking, thinking_stage),
-    ] + updates + [False, new_token]
+    ] + updates + [False, new_token, service, display_cache]
 
 
-def pause_simulation():
+def pause_simulation(
+    service: Optional[SimulationService],
+    display_cache: Optional[dict],
+):
     """Handle Pause button click."""
-    service = get_service()
+    service = get_or_create_service(service)
+    if display_cache is None:
+        display_cache = _create_display_cache()
     state = service.pause()
 
     updates = get_all_button_updates(state)
@@ -645,13 +720,24 @@ def pause_simulation():
         gr.update(interactive=True),  # start btn (can resume)
         gr.update(interactive=False),  # pause btn
         render_status_html(state),
-    ] + updates + [True]
+    ] + updates + [True, service, display_cache]
 
 
-def reset_simulation(fire_count: int, fire_intensity: float, building_count: int, max_units: int, seed: Optional[int], session_token: Optional[str]):
+def reset_simulation(
+    fire_count: int,
+    fire_intensity: float,
+    building_count: int,
+    max_units: int,
+    seed: Optional[int],
+    session_token: Optional[str],
+    service: Optional[SimulationService],
+    display_cache: Optional[dict],
+):
     """Handle Reset button click."""
-    service = get_service()
-    _reset_advisor_display_cache()
+    service = get_or_create_service(service)
+    if display_cache is None:
+        display_cache = _create_display_cache()
+    display_cache = _reset_advisor_display_cache(display_cache)
     # Always generate fresh token on reset to ensure clean state
     new_token = _generate_session_token()
     actual_seed = int(seed) if seed and seed > 0 else None
@@ -675,12 +761,30 @@ def reset_simulation(...)
         gr.update(interactive=True),
         gr.update(interactive=False),
         render_status_html(state),
-    ] + updates + [False, new_token]
+    ] + updates + [False, new_token, service, display_cache]
 
 
-def deploy_at_cell(x: int, y: int, selection: str):
+def prepare_reset(
+    service: Optional[SimulationService],
+    display_cache: Optional[dict],
+):
+    """Clean up the current service instance before reloading."""
+    if service:
+        service.shutdown()
+    return None, None
+
+
+def deploy_at_cell(
+    x: int,
+    y: int,
+    selection: str,
+    service: Optional[SimulationService],
+    display_cache: Optional[dict],
+):
     """Deploy unit or fire at specific cell, or remove if unit already exists there."""
-    service = get_service()
+    service = get_or_create_service(service)
+    if display_cache is None:
+        display_cache = _create_display_cache()
 
     # Get thinking state
     is_thinking = service.is_thinking()
@@ -730,12 +834,12 @@ def deploy_at_cell(...)
         _get_combined_advisor_messages(service),
         service.get_event_log_text(),
         render_status_html(state, is_thinking, thinking_stage),
-    ] + updates
+    ] + updates + [service, display_cache]
 
 
-def poll_after_action_report():
+def poll_after_action_report(service: Optional[SimulationService]):
     """Poll after-action report status independently of the main simulation timer."""
-    service = get_service()
+    service = get_or_create_service(service)
     state = service.get_state()
     status = state.get("status", "idle")
     report_payload = state.get("after_action_report")
@@ -748,6 +852,7 @@ def poll_after_action_report()
     return [
         report_timer_update,
         render_game_result(status, report_payload),
+        service,
     ]
 
 
@@ -761,30 +866,17 @@ def get_all_button_updates(state: dict) -> list:
     return updates
 
 
-# Cache for preventing unnecessary UI updates (flicker prevention)
-_last_status_html = None
-_last_advisor_signature = ()
-_history_messages_cache: list[dict] = []
-_current_cycle_messages_cache: list[dict] = []
-_result_freeze_state = ""
-
-
-def _reset_advisor_display_cache():
-    """Clear cached advisor/chatbot renders (prevents stale history after reset)."""
-    global _last_advisor_signature, _history_messages_cache, _current_cycle_messages_cache, _last_status_html, _result_freeze_state
-    _last_advisor_signature = ()
-    _history_messages_cache = []
-    _current_cycle_messages_cache = []
-    _last_status_html = None
-    _result_freeze_state = ""
-
-
-def refresh_display():
+def refresh_display(
+    service: Optional[SimulationService],
+    display_cache: Optional[dict],
+):
     """
     Single timer refresh (every 1 second).
     Uses unified change tracking to update only changed components.
     """
-    service = get_service()
+    service = get_or_create_service(service)
+    if display_cache is None:
+        display_cache = _create_display_cache()
 
     # Get all changes in one call
     changes = service.get_changed_components()
@@ -798,17 +890,13 @@ def refresh_display(...)
     thinking_stage = service.get_thinking_stage()
 
     # Freeze background UI once a win/lose overlay has already been shown
-    global _result_freeze_state
-    overlay_outcome = status if status in ["success", "fail"] else ""
-    freeze_background = False
-    if overlay_outcome:
-        if _result_freeze_state == overlay_outcome:
-            freeze_background = True
-        else:
-            _result_freeze_state = overlay_outcome
-    else:
-        if _result_freeze_state:
-            _result_freeze_state = ""
+    overlay_state = changes.get("result_state", "")
+    freeze_background = bool(overlay_state)
+    if freeze_background:
+        if display_cache["result_freeze_state"] != overlay_state:
+            display_cache["result_freeze_state"] = overlay_state
+    elif display_cache["result_freeze_state"]:
+        display_cache["result_freeze_state"] = ""
 
     # Timer control - stop when game ends
     timer_update = gr.update()
@@ -832,19 +920,20 @@ def refresh_display(...)
         advisor_display = gr.skip()
     else:
         # Advisor display (chatbot) showing combined history + current cycle
-        global _last_advisor_signature, _history_messages_cache, _current_cycle_messages_cache
         if changes["history_changed"]:
             history_messages = changes.get("advisor_history")
             if history_messages is not None:
-                _history_messages_cache = history_messages
+                display_cache["history_messages_cache"] = history_messages
         current_cycle_messages = changes.get("advisor_messages")
         if current_cycle_messages is not None:
-            _current_cycle_messages_cache = current_cycle_messages
-        combined_messages = (_history_messages_cache or []) + (_current_cycle_messages_cache or [])
+            display_cache["current_cycle_messages_cache"] = current_cycle_messages
+        combined_messages = (display_cache["history_messages_cache"] or []) + (
+            display_cache["current_cycle_messages_cache"] or []
+        )
         if combined_messages:
             signature = _chat_signature(combined_messages)
-            if signature != _last_advisor_signature:
-                _last_advisor_signature = signature
+            if signature != display_cache["last_advisor_signature"]:
+                display_cache["last_advisor_signature"] = signature
                 advisor_display = gr.update(value=combined_messages)
             else:
                 advisor_display = gr.skip()
@@ -856,11 +945,12 @@ def refresh_display(...)
         event_log = gr.update()
     else:
         event_log = changes["event_log"] if changes["event_log_changed"] else gr.update()
+
 
     # Buttons - only when state changes
     if freeze_background:
-        start_btn_update = gr.update()
-        pause_btn_update = gr.update()
+        start_btn_update = gr.update(interactive=True)
+        pause_btn_update = gr.update(interactive=False)
     else:
         if changes["buttons_changed"]:
             start_enabled, pause_enabled = changes["button_states"]
@@ -871,15 +961,14 @@ def refresh_display(...)
             pause_btn_update = gr.update()
 
     # Status bar - use cache to prevent flicker (only update when content changes)
-    global _last_status_html
     if freeze_background:
         status_html_update = gr.skip()
     else:
         new_status_html = render_status_html(state, is_thinking, thinking_stage)
-        if new_status_html == _last_status_html:
+        if new_status_html == display_cache["last_status_html"]:
            status_html_update = gr.skip()
        else:
-            _last_status_html = new_status_html
+            display_cache["last_status_html"] = new_status_html
            status_html_update = new_status_html
 
     # Grid buttons - only update if changed
@@ -900,7 +989,7 @@ def refresh_display(...)
         start_btn_update,
         pause_btn_update,
         status_html_update,
-    ] + updates
+    ] + updates + [service, display_cache]
 
 
 # =============================================================================
@@ -1738,6 +1827,8 @@ def create_app() -> gr.Blocks:
         gr.HTML(f"<style>{CUSTOM_CSS}</style>")
         session_resume_state = gr.State(False)
         session_token_state = gr.State("")
+        service_state = gr.State(None)
+        display_cache_state = gr.State(None)
 
         # Header with title and instructions
         gr.Markdown("# 🔥 Fire Rescue Simulator Game")
@@ -1803,18 +1894,28 @@ def create_app() -> gr.Blocks:
         with gr.Row(elem_classes=["section-gap"]):
             # Left column: AI Advisor - THE STAR OF THE SHOW
             with gr.Column(scale=2, min_width=300):
-                service = get_service()
-                advisor_interval_ticks = getattr(service, "advisor_interval", 10)
+                advisor_interval_ticks = 10
                 gr.Markdown(
                     f"## 🤖 AI Tactical Advisor · (refreshes every {advisor_interval_ticks} ticks)"
                 )
+                model_selector = gr.Dropdown(
+                    label="🧠 Advisor Model Source",
+                    choices=ADVISOR_MODEL_LABELS,
+                    value=DEFAULT_ADVISOR_MODEL_CHOICE,
+                    interactive=True,
+                )
                 auto_execute_toggle = gr.Checkbox(
                     label="🎮 Auto-Execute",
                     value=AUTO_EXECUTE_DEFAULT,
                     info="Automatically execute AI recommendations",
                 )
 
-                advisor_initial_messages = _get_combined_advisor_messages(service)
+                advisor_initial_messages = [
+                    {
+                        "role": "assistant",
+                        "content": "No AI analysis yet. Press **Start** to begin the advisor cycle.",
+                    }
+                ]
 
                 with gr.Accordion("📜 AI Analysis History", open=True):
                     gr.Markdown(
@@ -1908,75 +2009,117 @@ def create_app() -> gr.Blocks:
         # Event handlers for simulation controls
         start_btn.click(
             fn=start_or_resume_simulation,
-            inputs=[fire_count, fire_intensity, building_count, max_units, seed_input, session_resume_state, session_token_state],
-            outputs=[timer, report_timer, result_popup, advisor_display, event_log_display, start_btn, pause_btn, status_display] + all_buttons + [session_resume_state, session_token_state]
+            inputs=[
+                fire_count,
+                fire_intensity,
+                building_count,
+                max_units,
+                seed_input,
+                session_resume_state,
+                session_token_state,
+                service_state,
+                display_cache_state,
+            ],
+            outputs=[timer, report_timer, result_popup, advisor_display, event_log_display, start_btn, pause_btn, status_display]
+            + all_buttons
+            + [session_resume_state, session_token_state, service_state, display_cache_state],
         )
 
         pause_btn.click(
             fn=pause_simulation,
-            inputs=[],
-            outputs=[timer, report_timer, result_popup, advisor_display, event_log_display, start_btn, pause_btn, status_display] + all_buttons + [session_resume_state]
+            inputs=[service_state, display_cache_state],
+            outputs=[timer, report_timer, result_popup, advisor_display, event_log_display, start_btn, pause_btn, status_display]
+            + all_buttons
+            + [session_resume_state, service_state, display_cache_state],
        )
 
         reset_btn.click(
-            fn=reset_simulation,
-            inputs=[fire_count, fire_intensity, building_count, max_units, seed_input, session_token_state],
-            outputs=[timer, report_timer, result_popup, advisor_display, event_log_display, start_btn, pause_btn, status_display] + all_buttons + [session_resume_state, session_token_state]
+            fn=prepare_reset,
+            inputs=[service_state, display_cache_state],
+            outputs=[service_state, display_cache_state],
+            queue=False,
+            js="() => { window.location.reload(); return []; }",
        )
 
         # Event handlers for grid buttons (click to place)
         for x, y, btn in grid_buttons:
             btn.click(
-                fn=lambda sel, _x=x, _y=y: deploy_at_cell(_x, _y, sel),
-                inputs=[place_selector],
-                outputs=[result_popup, advisor_display, event_log_display, status_display] + all_buttons
+                fn=lambda sel, _x=x, _y=y, svc=None, cache=None: deploy_at_cell(_x, _y, sel, svc, cache),
+                inputs=[place_selector, service_state, display_cache_state],
+                outputs=[result_popup, advisor_display, event_log_display, status_display] + all_buttons + [service_state, display_cache_state],
            )
 
         # Timer tick handler - updates all components with change tracking
         timer.tick(
             fn=refresh_display,
-            outputs=[timer, report_timer, result_popup, advisor_display, event_log_display, start_btn, pause_btn, status_display] + all_buttons
+            inputs=[service_state, display_cache_state],
+            outputs=[timer, report_timer, result_popup, advisor_display, event_log_display, start_btn, pause_btn, status_display]
+            + all_buttons
+            + [service_state, display_cache_state],
        )
 
         report_timer.tick(
             fn=poll_after_action_report,
-            outputs=[report_timer, result_popup]
+            inputs=[service_state],
+            outputs=[report_timer, result_popup, service_state],
        )
 
         # Auto-execute toggle handler
-        def on_auto_execute_toggle(enabled: bool):
-            service = get_service()
+        def on_auto_execute_toggle(enabled: bool, service: Optional[SimulationService]):
+            service = get_or_create_service(service)
            service.set_auto_execute(enabled)
-            # No return value needed since outputs=[]
+            return service
+
+        def on_model_choice_change(selection: str, service: Optional[SimulationService]):
+            service = get_or_create_service(service)
+            result = service.set_advisor_model_choice(selection)
+            if result.get("status") != "ok":
+                raise gr.Error(result.get("message", "Failed to switch advisor model"))
+            return service
 
         auto_execute_toggle.change(
             fn=on_auto_execute_toggle,
-            inputs=[auto_execute_toggle],
-            outputs=[]
+            inputs=[auto_execute_toggle, service_state],
+            outputs=[service_state],
        )
 
+        model_selector.change(
+            fn=on_model_choice_change,
+            inputs=[model_selector, service_state],
+            outputs=[service_state],
+        )
         app.load(
             fn=_initialize_session_defaults,
-            inputs=None,
-            outputs=[auto_execute_toggle, session_resume_state, session_token_state],
+            inputs=[service_state, display_cache_state],
+            outputs=[
+                auto_execute_toggle,
+                session_resume_state,
+                session_token_state,
+                model_selector,
+                service_state,
+                display_cache_state,
+                advisor_display,
+                event_log_display,
+                status_display,
+            ]
+            + all_buttons,
        )
 
     return app
 
 
+demo = create_app()
+
+
 def launch_simple():
     """Launch simple Gradio app (for HF Spaces / local development)."""
-    gradio_app = create_app()
-    gradio_app.launch(
+    demo.launch(
         server_name="0.0.0.0",
         server_port=7860,
-        ssr_mode=False,  # Disable SSR for better compatibility
+        ssr_mode=False,  # Disable SSR for better compatibility
+        mcp_server=True,
         footer_links=["gradio", "settings"],  # Hide API docs button
     )
 
-
-# For Hugging Face Spaces - must be named 'demo' for HF Spaces auto-detection
-demo = create_app()
-
 if __name__ == "__main__":
     launch_simple()
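The dominant change in app.py is structural: the module-level `get_service()` singleton and the five flicker-prevention globals are replaced by two `gr.State` holders (`service_state`, `display_cache_state`) that every handler receives and returns, so each browser session gets its own service and render cache. A self-contained sketch of that pattern (toy `Counter` class; illustrative, not from this commit):

```python
import gradio as gr

class Counter:
    """Toy per-session object standing in for SimulationService."""
    def __init__(self) -> None:
        self.value = 0

def increment(counter: Counter | None):
    counter = counter or Counter()  # lazily create one instance per session
    counter.value += 1
    # Return the state object so Gradio stores it back into gr.State
    return f"clicks: {counter.value}", counter

with gr.Blocks() as demo:
    counter_state = gr.State(None)  # one value per browser session, not global
    label = gr.Textbox(label="count")
    btn = gr.Button("Click")
    btn.click(increment, inputs=[counter_state], outputs=[label, counter_state])

if __name__ == "__main__":
    demo.launch()
```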
fire_rescue_mcp/mcp_client.py ADDED

@@ -0,0 +1,114 @@
+"""
+Local FastMCP client used by the Gradio simulator service.
+
+This module provides a lightweight in-process client that routes every request
+through the registered FastMCP server instance so the application can rely on
+actual MCP tool invocations (and capture request/response telemetry) instead
+of calling helper functions directly.
+"""
+
+from __future__ import annotations
+
+import json
+import threading
+import time
+from copy import deepcopy
+from datetime import datetime
+from typing import Any, Callable, Sequence
+
+import anyio
+
+from mcp.types import ContentBlock
+
+
+class LocalFastMCPClient:
+    """Synchronous helper that forwards calls to a FastMCP server instance."""
+
+    def __init__(self, server, log_callback: Callable[[dict[str, Any]], None] | None = None):
+        self._server = server
+        self._log_callback = log_callback
+        self._lock = threading.Lock()
+
+    def list_tools(self) -> Any:
+        """Expose server tool metadata (used for debugging/tests)."""
+
+        async def _list_tools():
+            return await self._server.list_tools()
+
+        with self._lock:
+            return anyio.run(_list_tools)
+
+    def call_tool(self, name: str, **arguments: Any) -> dict[str, Any]:
+        """Invoke an MCP tool and return a normalized dict response."""
+        clean_args = {k: v for k, v in arguments.items() if v is not None}
+        start = time.perf_counter()
+
+        async def _call():
+            return await self._server.call_tool(name, clean_args)
+
+        with self._lock:
+            raw_result = anyio.run(_call)
+
+        normalized = self._normalize_result(raw_result)
+        self._log(name, clean_args, normalized, start)
+        return normalized
+
+    # --------------------------------------------------------------------- #
+    # Internal utilities
+    # --------------------------------------------------------------------- #
+    def _normalize_result(self, result: Any) -> dict[str, Any]:
+        """Convert FastMCP responses into standard dicts for easier handling."""
+        if isinstance(result, dict):
+            return result
+        if isinstance(result, Sequence):
+            parsed = self._maybe_parse_json_from_blocks(result)
+            if parsed is not None:
+                return parsed
+            blocks: list[dict[str, Any]] = []
+            for block in result:
+                if isinstance(block, ContentBlock):
+                    blocks.append(block.model_dump(mode="json"))
+                elif hasattr(block, "model_dump"):
+                    blocks.append(block.model_dump(mode="json"))
+                else:
+                    blocks.append({"type": "text", "text": str(block)})
+            return {"status": "ok", "content": blocks}
+        return {"status": "ok", "data": deepcopy(result)}
+
+    def _maybe_parse_json_from_blocks(self, blocks: Sequence[Any]) -> dict[str, Any] | None:
+        """If the MCP server returned a single text block containing JSON, parse it."""
+        if not blocks:
+            return None
+        first = blocks[0]
+        text = None
+        if isinstance(first, ContentBlock) and getattr(first, "type", None) == "text":
+            text = first.model_dump().get("text")
+        elif hasattr(first, "text"):
+            text = getattr(first, "text")
+        elif isinstance(first, dict) and first.get("type") == "text":
+            text = first.get("text")
+        if text is None:
+            return None
+        stripped = text.strip()
+        if not stripped or stripped[0] not in "{[":
+            return None
+        try:
+            return json.loads(stripped)
+        except json.JSONDecodeError:
+            return None
+
+    def _log(self, name: str, arguments: dict[str, Any], result: dict[str, Any], start: float) -> None:
+        """Send invocation metadata to the optional callback."""
+        if not self._log_callback:
+            return
+        duration_ms = round((time.perf_counter() - start) * 1000, 1)
+        entry = {
+            "timestamp": datetime.utcnow().isoformat(),
+            "tool": name,
+            "arguments": deepcopy(arguments),
+            "result": deepcopy(result),
+            "duration_ms": duration_ms,
+        }
+        self._log_callback(entry)
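A wiring sketch for the new client (the session id and log callback are illustrative; it assumes an engine has already been attached for that session, as mcp_server.py now requires):

```python
from fire_rescue_mcp.mcp_client import LocalFastMCPClient
from fire_rescue_mcp.mcp_server import mcp, attach_engine
from simulation import SimulationEngine

def print_log(entry: dict) -> None:
    # Receives the telemetry dict built in LocalFastMCPClient._log
    print(f"[MCP] {entry['tool']}({entry['arguments']}) -> {entry['duration_ms']} ms")

attach_engine(SimulationEngine(), session_id="demo")
client = LocalFastMCPClient(mcp, log_callback=print_log)

# Goes through the real FastMCP tool path, then normalizes the content blocks
result = client.call_tool("reset_scenario", fire_count=5, session_id="demo")
print(result.get("status"))
```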
fire_rescue_mcp/mcp_server.py CHANGED
@@ -19,6 +19,7 @@ Data Query Tools:
19
  """
20
 
21
  import sys
 
22
  from pathlib import Path
23
  from typing import Optional
24
 
@@ -33,20 +34,40 @@ from simulation import SimulationEngine, SimulationConfig
33
  # Create FastMCP server instance
34
  mcp = FastMCP("Fire-Rescue Simulation")
35
 
36
- # Shared simulation engine instance
37
- _engine: Optional[SimulationEngine] = None
 
 
 
 
 
 
 
 
 
38
 
39
  # Unit effective ranges (matching SimulationConfig, Chebyshev/square distance)
40
  FIRE_TRUCK_RANGE = 1 # Square coverage radius (includes 8 neighbors)
41
  HELICOPTER_RANGE = 2 # Square coverage radius (extends two cells in all directions)
42
 
43
 
44
- def get_engine() -> SimulationEngine:
45
- """Get or create the simulation engine singleton."""
46
- global _engine
47
- if _engine is None:
48
- _engine = SimulationEngine()
49
- return _engine
 
 
 
 
 
 
 
 
 
 
 
50
 
51
 
52
  def _get_unit_effective_range(unit_type: str) -> int:
@@ -137,13 +158,22 @@ def generate_emoji_map(engine: SimulationEngine) -> str:
137
  return "\n".join(lines)
138
 
139
 
 
 
 
 
 
 
 
 
140
  @mcp.tool()
141
  def reset_scenario(
142
  seed: Optional[int] = None,
143
  fire_count: int = 10,
144
  fire_intensity: float = 0.5,
145
  building_count: int = 20,
146
- max_units: int = 10
 
147
  ) -> dict:
148
  """
149
  Reset and initialize a new fire rescue simulation scenario.
@@ -158,7 +188,9 @@ def reset_scenario(
158
  Returns:
159
  Status, summary, and emoji map of the initial state
160
  """
161
- engine = get_engine()
 
 
162
 
163
  world = engine.reset(
164
  seed=seed,
@@ -182,7 +214,7 @@ def reset_scenario(
182
 
183
 
184
  @mcp.tool()
185
- def get_world_state() -> dict:
186
  """
187
  Get the current world state snapshot with emoji map visualization.
188
 
@@ -198,7 +230,9 @@ def get_world_state() -> dict:
198
  🌲 Forest | 🏢 Building | 🔥 Fire (>=10%) | 💨 Smoke (<10%)
199
  🚒 Fire Truck | 🚁 Helicopter
200
  """
201
- engine = get_engine()
 
 
202
 
203
  if engine.world is None:
204
  return {
@@ -217,7 +251,8 @@ def deploy_unit(
217
  unit_type: str,
218
  x: int,
219
  y: int,
220
- source: str = "player"
 
221
  ) -> dict:
222
  """
223
  Deploy a firefighting unit at the specified position.
@@ -231,12 +266,14 @@ def deploy_unit(
231
  Returns:
232
  Status and details of the deployed unit
233
  """
234
- engine = get_engine()
 
 
235
  return engine.deploy_unit(unit_type, x, y, source)
236
 
237
 
238
  @mcp.tool()
239
- def step_simulation(ticks: int = 1) -> dict:
240
  """
241
  Advance the simulation by the specified number of ticks.
242
 
@@ -246,7 +283,9 @@ def step_simulation(ticks: int = 1) -> dict:
246
  Returns:
247
  Current world state with emoji map after advancing
248
  """
249
- engine = get_engine()
 
 
250
 
251
  if engine.world is None:
252
  return {
@@ -268,7 +307,8 @@ def move_unit(
268
  source_x: int,
269
  source_y: int,
270
  target_x: int,
271
- target_y: int
 
272
  ) -> dict:
273
  """
274
  Move an existing unit from source position to target position.
@@ -283,7 +323,9 @@ def move_unit(
283
  Returns:
284
  Status and details of the move operation
285
  """
286
- engine = get_engine()
 
 
287
 
288
  if engine.world is None:
289
  return {
@@ -336,7 +378,8 @@ def move_unit(
336
  @mcp.tool()
337
  def remove_unit(
338
  x: int,
339
- y: int
 
340
  ) -> dict:
341
  """
342
  Remove an existing unit at the specified position.
@@ -354,7 +397,9 @@ def remove_unit(
354
  - Free up deployment slot when unit is no longer needed
355
  - Reposition unit: remove_unit + deploy_unit at new location
356
  """
357
- engine = get_engine()
 
 
358
 
359
  if engine.world is None:
360
  return {
@@ -394,7 +439,7 @@ def remove_unit(
394
 
395
 
396
  @mcp.tool()
397
- def find_idle_units() -> dict:
398
  """
399
  Find units that are not covering any fires (idle/ineffective units).
400
 
@@ -405,7 +450,9 @@ def find_idle_units() -> dict:
405
  Returns:
406
  List of idle units and effective units with their positions
407
  """
408
- engine = get_engine()
 
 
409
 
410
  if engine.world is None:
411
  return {
@@ -453,7 +500,7 @@ def find_idle_units() -> dict:
453
 
454
 
455
  @mcp.tool()
456
- def find_uncovered_fires() -> dict:
457
  """
458
  Find fires that have NO unit coverage.
459
 
@@ -464,7 +511,9 @@ def find_uncovered_fires() -> dict:
464
  Returns:
465
  List of uncovered fires with their positions, intensity, and building threat status
466
  """
467
- engine = get_engine()
 
 
468
 
469
  if engine.world is None:
470
  return {
@@ -521,14 +570,16 @@ def find_uncovered_fires() -> dict:
521
 
522
 
523
  @mcp.tool()
524
- def find_building_threats() -> dict:
525
  """
526
  Find fires that are threatening buildings (within 2 cells of any building).
527
 
528
  Returns:
529
  List of building-threatening fires with their positions, threatened buildings, and coverage status
530
  """
531
- engine = get_engine()
 
 
532
 
533
  if engine.world is None:
534
  return {
@@ -583,7 +634,7 @@ def find_building_threats() -> dict:
583
 
584
 
585
  @mcp.tool()
586
- def analyze_coverage() -> dict:
587
  """
588
  Get comprehensive coverage analysis data.
589
 
@@ -596,7 +647,9 @@ def analyze_coverage() -> dict:
596
  Returns:
597
  Comprehensive data about fires, units, and coverage status
598
  """
599
- engine = get_engine()
 
 
600
 
601
  if engine.world is None:
602
  return {
 
19
  """
20
 
21
  import sys
22
+ import threading
23
  from pathlib import Path
24
  from typing import Optional
25
 
 
34
  # Create FastMCP server instance
35
  mcp = FastMCP("Fire-Rescue Simulation")
36
 
37
+ # Shared simulation engines keyed by session_id
38
+ _engines: dict[str, SimulationEngine] = {}
39
+ _engine_lock = threading.RLock()
40
+
41
+
42
+ def attach_engine(engine: SimulationEngine, session_id: str) -> None:
43
+ """Allow external callers to reuse their engine inside the MCP server."""
44
+ if not session_id:
45
+ raise ValueError("session_id is required to attach engine")
46
+ with _engine_lock:
47
+ _engines[session_id] = engine
48
 
49
  # Unit effective ranges (matching SimulationConfig, Chebyshev/square distance)
50
  FIRE_TRUCK_RANGE = 1 # Square coverage radius (includes 8 neighbors)
51
  HELICOPTER_RANGE = 2 # Square coverage radius (extends two cells in all directions)
52
 
53
 
54
+ def detach_engine(session_id: str) -> None:
55
+ """Remove engine mapping for a session (best-effort)."""
56
+ if not session_id:
57
+ return
58
+ with _engine_lock:
59
+ _engines.pop(session_id, None)
60
+
61
+
62
+ def get_engine(session_id: Optional[str]) -> SimulationEngine:
63
+ """Get the simulation engine for a specific session."""
64
+ if not session_id:
65
+ raise ValueError("session_id is required")
66
+ with _engine_lock:
67
+ engine = _engines.get(session_id)
68
+ if engine is None:
69
+ raise ValueError(f"No simulation engine attached for session '{session_id}'")
70
+ return engine
71
 
72
 
73
  def _get_unit_effective_range(unit_type: str) -> int:
 
158
  return "\n".join(lines)
159
 
160
 
161
+ def _resolve_engine(session_id: Optional[str]):
162
+ """Return (engine, error_dict) tuple for tool handlers."""
163
+ try:
164
+        return get_engine(session_id), None
+    except ValueError as exc:
+        return None, {"status": "error", "message": str(exc)}
+
+
 @mcp.tool()
 def reset_scenario(
     seed: Optional[int] = None,
     fire_count: int = 10,
     fire_intensity: float = 0.5,
     building_count: int = 20,
+    max_units: int = 10,
+    session_id: Optional[str] = None,
 ) -> dict:
     """
     Reset and initialize a new fire rescue simulation scenario.

     Returns:
         Status, summary, and emoji map of the initial state
     """
+    engine, error = _resolve_engine(session_id)
+    if error:
+        return error

     world = engine.reset(
         seed=seed,


 @mcp.tool()
+def get_world_state(session_id: Optional[str] = None) -> dict:
     """
     Get the current world state snapshot with emoji map visualization.

     🌲 Forest | 🏢 Building | 🔥 Fire (>=10%) | 💨 Smoke (<10%)
     🚒 Fire Truck | 🚁 Helicopter
     """
+    engine, error = _resolve_engine(session_id)
+    if error:
+        return error

     if engine.world is None:
         return {

     unit_type: str,
     x: int,
     y: int,
+    source: str = "player",
+    session_id: Optional[str] = None,
 ) -> dict:
     """
     Deploy a firefighting unit at the specified position.

     Returns:
         Status and details of the deployed unit
     """
+    engine, error = _resolve_engine(session_id)
+    if error:
+        return error
     return engine.deploy_unit(unit_type, x, y, source)


 @mcp.tool()
+def step_simulation(ticks: int = 1, session_id: Optional[str] = None) -> dict:
     """
     Advance the simulation by the specified number of ticks.

     Returns:
         Current world state with emoji map after advancing
     """
+    engine, error = _resolve_engine(session_id)
+    if error:
+        return error

     if engine.world is None:
         return {

     source_x: int,
     source_y: int,
     target_x: int,
+    target_y: int,
+    session_id: Optional[str] = None,
 ) -> dict:
     """
     Move an existing unit from source position to target position.

     Returns:
         Status and details of the move operation
     """
+    engine, error = _resolve_engine(session_id)
+    if error:
+        return error

     if engine.world is None:
         return {

 @mcp.tool()
 def remove_unit(
     x: int,
+    y: int,
+    session_id: Optional[str] = None,
 ) -> dict:
     """
     Remove an existing unit at the specified position.

     - Free up deployment slot when unit is no longer needed
     - Reposition unit: remove_unit + deploy_unit at new location
     """
+    engine, error = _resolve_engine(session_id)
+    if error:
+        return error

     if engine.world is None:
         return {


 @mcp.tool()
+def find_idle_units(session_id: Optional[str] = None) -> dict:
     """
     Find units that are not covering any fires (idle/ineffective units).

     Returns:
         List of idle units and effective units with their positions
     """
+    engine, error = _resolve_engine(session_id)
+    if error:
+        return error

     if engine.world is None:
         return {


 @mcp.tool()
+def find_uncovered_fires(session_id: Optional[str] = None) -> dict:
     """
     Find fires that have NO unit coverage.

     Returns:
         List of uncovered fires with their positions, intensity, and building threat status
     """
+    engine, error = _resolve_engine(session_id)
+    if error:
+        return error

     if engine.world is None:
         return {


 @mcp.tool()
+def find_building_threats(session_id: Optional[str] = None) -> dict:
     """
     Find fires that are threatening buildings (within 2 cells of any building).

     Returns:
         List of building-threatening fires with their positions, threatened buildings, and coverage status
     """
+    engine, error = _resolve_engine(session_id)
+    if error:
+        return error

     if engine.world is None:
         return {


 @mcp.tool()
+def analyze_coverage(session_id: Optional[str] = None) -> dict:
     """
     Get comprehensive coverage analysis data.

     Returns:
         Comprehensive data about fires, units, and coverage status
     """
+    engine, error = _resolve_engine(session_id)
+    if error:
+        return error

     if engine.world is None:
         return {
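
All ten tools above now share the same `_resolve_engine(session_id)` preamble: resolve a per-session engine from a registry, and turn an unknown session into a structured `{"status": "error"}` payload instead of a raised exception, so MCP clients always receive JSON. A minimal sketch of the registry contract the tools assume — the real `attach_engine` / `detach_engine` / `get_engine` live in `fire_rescue_mcp/mcp_server.py`, so this body is illustrative only:

```python
from typing import Optional

# Illustrative sketch only; the actual registry lives in fire_rescue_mcp/mcp_server.py.
_ENGINES: dict[str, "SimulationEngine"] = {}

def attach_engine(engine: "SimulationEngine", session_id: str) -> None:
    """Bind the engine that owns this session."""
    _ENGINES[session_id] = engine

def detach_engine(session_id: str) -> None:
    """Release a session's engine on shutdown or rebind."""
    _ENGINES.pop(session_id, None)

def get_engine(session_id: Optional[str]) -> "SimulationEngine":
    """Resolve a session to its engine; unknown sessions raise ValueError."""
    engine = _ENGINES.get(session_id or "default")
    if engine is None:
        raise ValueError(f"No engine attached for session '{session_id or 'default'}'")
    return engine
```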
service.py CHANGED
@@ -9,11 +9,12 @@ import asyncio
 import concurrent.futures
 import html
 import json
+import os
 import threading
 import time
 from dataclasses import dataclass, field
 from datetime import datetime
-from typing import Callable, Optional
+from typing import Any, Callable, Optional

 from agent import (
     AdvisorAgent,
@@ -23,10 +24,38 @@ from agent import (
     PlanResult,
     CycleSummary,
 )
+from fire_rescue_mcp.mcp_client import LocalFastMCPClient
+from fire_rescue_mcp.mcp_server import attach_engine, detach_engine, mcp as fastmcp_server
 from models import SimulationStatus, CellType
 from simulation import SimulationEngine


+ADVISOR_MODEL_CHOICES = {
+    "GPT-OSS · HuggingFace (openai/gpt-oss-120b)": {
+        "provider": "hf",
+        "model": "openai/gpt-oss-120b",
+        "description": "Default OSS advisor routed through HuggingFace Inference",
+    },
+    "GPT-OSS-20B · HuggingFace (openai/gpt-oss-20b)": {
+        "provider": "hf",
+        "model": "openai/gpt-oss-20b",
+        "description": "OpenAI GPT-OSS 20B model via HuggingFace Inference",
+    },
+    "Llama-3.1 · HuggingFace (meta-llama/Llama-3.1-8B-Instruct)": {
+        "provider": "hf",
+        "model": "meta-llama/Llama-3.1-8B-Instruct",
+        "description": "Meta Llama-3.1 8B Instruct model via HuggingFace Inference",
+    },
+    "OpenAI · gpt-5.1": {
+        "provider": "openai",
+        "model": "gpt-5.1",
+        "description": "Flagship GPT-5.1 via native OpenAI API",
+    },
+}
+
+DEFAULT_ADVISOR_MODEL_CHOICE = "GPT-OSS · HuggingFace (openai/gpt-oss-120b)"
+
+
 def generate_emoji_map(engine: SimulationEngine) -> str:
     """
     Generate an emoji-based visualization of the current world state.
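
The dict keys double as the literal dropdown labels in the UI, so a selection resolves to a backend without any parsing. A hypothetical direct lookup, assuming the `provider`/`model` kwargs that `AdvisorAgent` is constructed with later in this diff:

```python
# Hypothetical use of the preset table; set_advisor_model_choice below
# performs the same lookup with validation and logging.
preset = ADVISOR_MODEL_CHOICES[DEFAULT_ADVISOR_MODEL_CHOICE]
advisor = AdvisorAgent(provider=preset["provider"], model=preset["model"])
```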
@@ -124,7 +153,7 @@
     # Runtime state
     _running: bool = False
     _thread: Optional[threading.Thread] = None
-    _lock: threading.Lock = field(default_factory=threading.Lock)
+    _lock: threading.RLock = field(default_factory=threading.RLock)

     # Logs and recommendations
     _logs: list[LogEntry] = field(default_factory=list)
@@ -144,6 +173,12 @@
     _threat_history: list[dict] = field(default_factory=list)
     _action_history: list[dict] = field(default_factory=list)
    _player_actions: list[dict] = field(default_factory=list)
+
+    # MCP integration
+    _mcp_client: LocalFastMCPClient | None = None
+    _mcp_call_log: list[dict] = field(default_factory=list)
+    _mcp_log_dirty: bool = False
+    _last_mcp_log: str = ""

     # Advisor call control
     _advisor_running: bool = False  # Prevent concurrent advisor calls
@@ -163,6 +198,8 @@
     _auto_execute: bool = True  # Whether to automatically execute AI recommendations
     _executed_recommendations: set = field(default_factory=set)  # Track executed recommendations
     _session_owner_id: str = ""
+    _attached_session_id: str = ""
+    _model_choice: str = DEFAULT_ADVISOR_MODEL_CHOICE

     # Simulation loop state (preserved across pause/resume)
     _tick_count: int = 0  # Current tick count in simulation loop
@@ -177,7 +214,7 @@
     _last_result_state: str = ""  # Last result popup state ("", "success", "fail")

     def __post_init__(self):
-        self._lock = threading.Lock()
+        self._lock = threading.RLock()
         self._logs = []
         self._result_shown = False
         self._result_dismissed = False
@@ -204,6 +241,10 @@
         self._last_event_log = ""
         self._last_button_states = (True, False)
         self._last_result_state = ""
+        self._mcp_client = LocalFastMCPClient(fastmcp_server, self._record_mcp_call)
+        self._mcp_call_log = []
+        self._mcp_log_dirty = False
+        self._last_mcp_log = ""

     def start(
         self,
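
The client is built once in `__post_init__` against the imported server object, so every tool call stays in-process; the second constructor argument is a hook fired after each call. A self-contained sketch — the hook body is illustrative, though the `tool` and `duration_ms` keys match what `_get_mcp_log_text_locked` reads below:

```python
from fire_rescue_mcp.mcp_client import LocalFastMCPClient
from fire_rescue_mcp.mcp_server import mcp as fastmcp_server

def on_call(entry: dict) -> None:
    # Entry is the same dict later rendered in the MCP log panel.
    print(f"{entry.get('tool')} finished in {entry.get('duration_ms', 0)} ms")

client = LocalFastMCPClient(fastmcp_server, on_call)
```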
@@ -591,6 +632,32 @@
         "markdown": markdown,
     }

+    def _record_mcp_call(self, entry: dict[str, Any]) -> None:
+        """Capture MCP tool invocations for UI display."""
+        with self._lock:
+            cloned = dict(entry)
+            cloned.setdefault("local_timestamp", datetime.now().strftime("%H:%M:%S"))
+            self._mcp_call_log.append(cloned)
+            if len(self._mcp_call_log) > 80:
+                self._mcp_call_log = self._mcp_call_log[-80:]
+            self._mcp_log_dirty = True
+
+    def _get_mcp_log_text_locked(self, limit: int = 20) -> str:
+        """Format MCP tool logs for UI display."""
+        if not self._mcp_call_log:
+            return "No MCP tool calls yet..."
+        lines = []
+        for entry in self._mcp_call_log[-limit:]:
+            ts = entry.get("local_timestamp", entry.get("timestamp", ""))
+            tool = entry.get("tool", "unknown")
+            args = entry.get("arguments", {})
+            args_preview = ", ".join(f"{k}={v}" for k, v in list(args.items())[:3])
+            result = entry.get("result", {})
+            status = result.get("status", "ok") if isinstance(result, dict) else "ok"
+            duration = entry.get("duration_ms", 0)
+            lines.append(f"[{ts}] {tool}({args_preview}) → {status} ({duration} ms)")
+        return "\n".join(lines)
+
     def _get_after_action_report_payload_locked(self) -> dict:
         """Serialize after-action report state for UI consumption."""
         payload: dict = {"status": self._result_report_status}
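
Each entry collapses a call into one line: timestamp, tool name, up to three arguments, result status, and latency. With invented values, the rendered panel looks like:

```
[14:02:31] deploy_unit(unit_type=fire_truck, x=3, y=7) → ok (12 ms)
[14:02:41] find_uncovered_fires(session_id=default) → ok (4 ms)
```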
@@ -599,6 +666,58 @@
         elif self._result_report_status == "error":
             payload["error"] = self._result_report_error or "Unknown error"
         return payload
+
+    def _call_mcp_tool(self, name: str, **kwargs: Any) -> dict[str, Any]:
+        """Invoke a tool through the shared FastMCP client."""
+        session_id = self._session_owner_id or "default"
+        if not self._mcp_client:
+            self._mcp_client = LocalFastMCPClient(fastmcp_server, self._record_mcp_call)
+        if self._attached_session_id and self._attached_session_id != session_id:
+            detach_engine(self._attached_session_id)
+            self._attached_session_id = ""
+        if self._attached_session_id != session_id:
+            attach_engine(self.engine, session_id)
+            self._attached_session_id = session_id
+        try:
+            call_kwargs = dict(kwargs)
+            call_kwargs["session_id"] = session_id
+            return self._mcp_client.call_tool(name, **call_kwargs)
+        except Exception as exc:  # pragma: no cover
+            self._add_log("error", f"MCP tool {name} failed: {exc}")
+            return {"status": "error", "message": str(exc)}
+
+    def shutdown(self) -> None:
+        """Stop background threads and detach from MCP server."""
+        thread = None
+        with self._lock:
+            if self._running:
+                self._running = False
+            thread = self._thread
+            self._thread = None
+        if thread and thread.is_alive():
+            thread.join(timeout=2.0)
+        with self._lock:
+            if self._attached_session_id:
+                detach_engine(self._attached_session_id)
+                self._attached_session_id = ""
+            self._session_owner_id = ""
+            self._on_update = None
+
+    def _get_mcp_world_state(self) -> dict:
+        """Fetch world state via the MCP tools with local fallback."""
+        state = self._call_mcp_tool("get_world_state")
+        if not isinstance(state, dict) or state.get("status") == "error":
+            state = self.engine.get_state()
+
+        def _maybe(name: str) -> dict[str, Any]:
+            data = self._call_mcp_tool(name)
+            return data if isinstance(data, dict) else {}
+
+        state["mcp_idle_units"] = _maybe("find_idle_units")
+        state["mcp_uncovered_fires"] = _maybe("find_uncovered_fires")
+        state["mcp_building_threats"] = _maybe("find_building_threats")
+        state["mcp_coverage"] = _maybe("analyze_coverage")
+        return state

     def should_show_result(self) -> tuple[bool, bool]:
         """
@@ -782,6 +901,11 @@
         else:
             lines.append(f"[Tick {log.tick}] {log.message}")
         return "\n".join(lines)
+
+    def get_mcp_log_text(self, limit: int = 20) -> str:
+        """Expose MCP call history."""
+        with self._lock:
+            return self._get_mcp_log_text_locked(limit)

     def get_advisor_text(self, limit: int = 5) -> str:
         """Get AI advisor display with rich reasoning (legacy text format)."""
@@ -1120,6 +1244,17 @@
         result["event_log"] = event_log if result["event_log_changed"] else None
         if result["event_log_changed"]:
             self._last_event_log = event_log
+
+        # 3b. MCP tool log
+        if self._mcp_log_dirty:
+            mcp_log = self._get_mcp_log_text_locked()
+            result["mcp_log_changed"] = True
+            result["mcp_log"] = mcp_log
+            self._last_mcp_log = mcp_log
+            self._mcp_log_dirty = False
+        else:
+            result["mcp_log_changed"] = False
+            result["mcp_log"] = None

         # 4. Button states
         status = state.get("status", "idle")
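
The `mcp_log_changed`/`mcp_log` pair follows the same change-tracked diffing contract as the event log: consumers repaint a component only when its dirty flag is set. A sketch of the consumer side, with a hypothetical `updates` payload and component name:

```python
# Hypothetical UI-side consumer of the change-tracked payload.
if updates["mcp_log_changed"]:
    mcp_log_box.value = updates["mcp_log"]  # repaint only when the log actually changed
```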
@@ -1181,6 +1316,8 @@
             "advisor_history": ui["advisor_history"],
             "event_log_changed": ui["event_log_changed"],
             "event_log": ui["event_log"],
+            "mcp_log_changed": ui["mcp_log_changed"],
+            "mcp_log": ui["mcp_log"],
             "buttons_changed": ui["buttons_changed"],
             "button_states": ui["button_states"],
             "result_changed": ui["result_changed"],
@@ -1266,7 +1403,7 @@
     def deploy_unit(self, unit_type: str, x: int, y: int, source: str = "player") -> dict:
         """Deploy a unit (thread-safe)."""
         with self._lock:
-            result = self.engine.deploy_unit(unit_type, x, y, source)
+            result = self._call_mcp_tool("deploy_unit", unit_type=unit_type, x=x, y=y, source=source)

             if result.get("status") == "ok":
                 self._add_log(
@@ -1292,7 +1429,7 @@
     def remove_unit(self, x: int, y: int) -> dict:
         """Remove a unit at position (thread-safe)."""
         with self._lock:
-            result = self.engine.remove_unit_at(x, y)
+            result = self._call_mcp_tool("remove_unit", x=x, y=y)

             if result.get("status") == "ok":
                 self._add_log(
@@ -1300,8 +1437,11 @@
                     f"Removed unit at ({x}, {y})",
                     {"unit": result.get("unit")}
                 )
-                removed_unit = result.get("unit") or {}
-                unit_type = removed_unit.get("type", "")
+                removed_unit = result.get("unit") or {
+                    "type": result.get("removed_unit_type"),
+                    **(result.get("position") or {"x": x, "y": y}),
+                }
+                unit_type = removed_unit.get("type") or result.get("removed_unit_type", "")
                 unit_label = (
                     "fire truck"
                     if unit_type == "fire_truck"
@@ -1385,6 +1525,50 @@
         """Check if auto-execute is enabled."""
         return self._auto_execute

+    def get_advisor_model_choice(self) -> str:
+        """Return the currently selected advisor model label."""
+        with self._lock:
+            return self._model_choice
+
+    def set_advisor_model_choice(self, choice: str) -> dict:
+        """
+        Switch the advisor backend/model based on UI selection.
+        Returns status dict usable by the UI for feedback.
+        """
+        preset = ADVISOR_MODEL_CHOICES.get(choice)
+        if not preset:
+            return {"status": "error", "message": "Unknown model selection."}
+
+        if preset["provider"] == "openai" and not os.getenv("OPENAI_API_KEY"):
+            return {
+                "status": "error",
+                "message": "Please set OPENAI_API_KEY before selecting an OpenAI model.",
+            }
+
+        with self._lock:
+            self.advisor = AdvisorAgent(
+                provider=preset["provider"],
+                model=preset["model"],
+            )
+            self._model_choice = choice
+            self._add_log("status", f"Advisor model switched to {choice}")
+
+        return {"status": "ok", "selection": choice}
+
+    def reset_advisor_model_choice(self) -> str:
+        """Reset advisor selection back to the default preset."""
+        default_choice = DEFAULT_ADVISOR_MODEL_CHOICE
+        preset = ADVISOR_MODEL_CHOICES[default_choice]
+        with self._lock:
+            self.advisor = AdvisorAgent(
+                provider=preset["provider"],
+                model=preset["model"],
+            )
+            self._model_choice = default_choice
+            self._advisor_first_run = True
+            self._add_log("status", f"Advisor model reset to {default_choice}")
+        return default_choice
+
     def is_thinking(self) -> bool:
         """Check if AI advisor is currently thinking."""
         return self._is_thinking
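
A hypothetical dropdown handler built on these selectors; the error branch covers both an unknown label and a missing `OPENAI_API_KEY`:

```python
# Hypothetical UI handler; the label must be a key of ADVISOR_MODEL_CHOICES.
status = service.set_advisor_model_choice("OpenAI · gpt-5.1")
if status["status"] == "error":
    print(status["message"])  # e.g. surface this message in the UI instead
```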
@@ -1408,8 +1592,7 @@
         # Run advisor immediately on first tick (only once per simulation)
         if self._advisor_first_run:
             self._advisor_first_run = False
-            state = self.engine.get_state()
-            self._run_advisor(state)
+            self._run_advisor(self._get_mcp_world_state())

         # Advance simulation
         self.engine.step()
@@ -1431,7 +1614,7 @@
             else:
                 # Periodic advisor evaluation (every advisor_interval ticks)
                 if self._tick_count % self.advisor_interval == 0:
-                    self._run_advisor(state)
+                    self._run_advisor(self._get_mcp_world_state())

             # Notify UI
             if self._on_update:
@@ -1902,87 +2085,81 @@ mcp.deploy_unit(
         """Execute AI recommendations (must be called with lock held)."""
         executed_count = 0
         for rec in response.recommendations:
-            # Create unique key for this recommendation
-            action = getattr(rec, 'action', 'deploy')
+            action = getattr(rec, "action", "deploy")
             rec_key = f"{tick}_{action}_{rec.suggested_unit_type}_{rec.target_x}_{rec.target_y}"
-
-            # Skip if already executed
+
             if rec_key in self._executed_recommendations:
                 continue
-
+
             if action == "move":
-                # Move action: remove from source, deploy at target
-                source_x = getattr(rec, 'source_x', -1)
-                source_y = getattr(rec, 'source_y', -1)
-
-                if source_x >= 0 and source_y >= 0:
-                    # First remove the unit from source
-                    remove_result = self.engine.remove_unit_at(source_x, source_y)
-
-                    if remove_result.get("status") == "ok":
-                        # Then deploy at target
-                        deploy_result = self.engine.deploy_unit(rec.suggested_unit_type, rec.target_x, rec.target_y, "ai")
-
-                        if deploy_result.get("status") == "ok":
-                            executed_count += 1
-                            unit_name = "Fire Truck" if rec.suggested_unit_type == "fire_truck" else "Helicopter"
-                            self._add_log(
-                                "deploy",
-                                f"🤖 AI moved {unit_name}: ({source_x},{source_y}) → ({rec.target_x},{rec.target_y})",
-                                {"unit": deploy_result.get("unit"), "source": "ai", "reason": rec.reason, "action": "move"}
-                            )
-                            self._executed_recommendations.add(rec_key)
-                        else:
-                            # Deploy failed, try to restore the removed unit
-                            self.engine.deploy_unit(rec.suggested_unit_type, source_x, source_y, "ai")
-                            self._add_log(
-                                "error",
-                                f"🤖 AI move failed: {deploy_result.get('message')} - restored unit at ({source_x},{source_y})"
-                            )
-                    else:
-                        self._add_log(
-                            "error",
-                            f"🤖 AI move failed: No unit at source ({source_x},{source_y})"
-                        )
+                source_x = getattr(rec, "source_x", -1)
+                source_y = getattr(rec, "source_y", -1)
+                if source_x < 0 or source_y < 0:
+                    self._add_log("error", f"🤖 AI move failed: missing source ({source_x},{source_y})")
+                    continue
+                result = self._call_mcp_tool(
+                    "move_unit",
+                    source_x=source_x,
+                    source_y=source_y,
+                    target_x=rec.target_x,
+                    target_y=rec.target_y,
+                )
+                if result.get("status") == "ok":
+                    executed_count += 1
+                    unit_name = "Fire Truck" if rec.suggested_unit_type == "fire_truck" else "Helicopter"
+                    self._add_log(
+                        "deploy",
+                        f"🤖 AI moved {unit_name}: ({source_x},{source_y}) → ({rec.target_x},{rec.target_y})",
+                        {"source": "ai", "reason": rec.reason, "action": "move"},
+                    )
+                    self._executed_recommendations.add(rec_key)
+                else:
+                    self._add_log(
+                        "error",
+                        f"🤖 AI move failed: {result.get('message', 'unknown error')} "
+                        f"(source=({source_x},{source_y}) target=({rec.target_x},{rec.target_y}))",
+                    )
             elif action == "remove":
-                # Remove action: remove unit at position (frees slot for redeployment elsewhere)
-                remove_result = self.engine.remove_unit_at(rec.target_x, rec.target_y)
-
-                if remove_result.get("status") == "ok":
+                result = self._call_mcp_tool("remove_unit", x=rec.target_x, y=rec.target_y)
+                if result.get("status") == "ok":
                     executed_count += 1
                     unit_name = "Fire Truck" if rec.suggested_unit_type == "fire_truck" else "Helicopter"
                     self._add_log(
                         "deploy",
                         f"🤖 AI removed {unit_name} at ({rec.target_x},{rec.target_y}) - ready to redeploy",
-                        {"source": "ai", "reason": rec.reason, "action": "remove"}
+                        {"source": "ai", "reason": rec.reason, "action": "remove"},
                     )
                     self._executed_recommendations.add(rec_key)
                 else:
                     self._add_log(
                         "error",
-                        f"🤖 AI remove failed: {remove_result.get('message')} at ({rec.target_x},{rec.target_y})"
+                        f"🤖 AI remove failed: {result.get('message', 'unknown error')} at "
+                        f"({rec.target_x},{rec.target_y})",
                     )
             else:
-                # Deploy action
-                result = self.engine.deploy_unit(rec.suggested_unit_type, rec.target_x, rec.target_y, "ai")
-
+                result = self._call_mcp_tool(
+                    "deploy_unit",
+                    unit_type=rec.suggested_unit_type,
+                    x=rec.target_x,
+                    y=rec.target_y,
+                    source="ai",
+                )
                 if result.get("status") == "ok":
                     executed_count += 1
                     unit_name = "Fire Truck" if rec.suggested_unit_type == "fire_truck" else "Helicopter"
                     self._add_log(
                         "deploy",
                         f"🤖 AI deployed {unit_name} at ({rec.target_x}, {rec.target_y})",
-                        {"unit": result.get("unit"), "source": "ai", "reason": rec.reason, "action": "deploy"}
+                        {"source": "ai", "reason": rec.reason, "action": "deploy"},
                    )
                     self._executed_recommendations.add(rec_key)
                 else:
-                    # Log failure but don't stop
                     self._add_log(
                         "error",
-                        f"🤖 AI deploy failed: {result.get('message')} at ({rec.target_x}, {rec.target_y})"
+                        f"🤖 AI deploy failed: {result.get('message', 'unknown error')} "
+                        f"at ({rec.target_x}, {rec.target_y})",
                     )
-
-            # Keep executed recommendations bounded
+
             if len(self._executed_recommendations) > 100:
                 self._executed_recommendations = set(list(self._executed_recommendations)[-50:])
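
Net effect of this hunk: the old remove-then-redeploy move emulation, with its restore-on-failure fallback, is gone. Moves now delegate to the server's `move_unit` tool, so a failed move leaves the unit at its source, and every AI action lands in the same MCP call log as player actions. For reference, the equivalent direct call through the same choke point the executor uses (coordinates invented):

```python
# Hypothetical move issued via the shared client path.
result = service._call_mcp_tool("move_unit", source_x=2, source_y=2, target_x=6, target_y=3)
```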