first version
- .gitignore +23 -0
- README.md +262 -3
- agent.py +1280 -0
- app.py +1865 -0
- fire_rescue_mcp/__init__.py +13 -0
- fire_rescue_mcp/mcp_server.py +706 -0
- models.py +356 -0
- prompts.yaml +378 -0
- pyproject.toml +31 -0
- requirements.txt +8 -0
- restart.sh +12 -0
- service.py +1995 -0
- simulation.py +338 -0
- uv.lock +0 -0
.gitignore
ADDED
@@ -0,0 +1,23 @@
# Environment
.env.*

# Python
__pycache__/
*.py[cod]
*.so
.venv/
venv/
*.egg-info/
dist/
build/

# IDE
.idea/
*.swp
*.swo

# OS
.DS_Store
Thumbs.db
.cursor/
.env
README.md
CHANGED
@@ -1,14 +1,273 @@
---
title: Fire Rescue Simulator Game
-emoji:
+emoji: 🔥
colorFrom: red
-colorTo:
+colorTo: blue
sdk: gradio
sdk_version: 6.0.1
app_file: app.py
pinned: false
license: mit
short_description: AI Agent autonomously fights fires using MCP tools
+tags:
+- mcp-in-action-track-creative
+- mcp-1st-birthday
+- game
---

# 🔥 Fire Rescue Simulator Game

An AI-powered fire rescue simulation where an **autonomous agent** analyzes tactical situations and deploys firefighting units using **Model Context Protocol (MCP)** tools.

---

## 🎥 Demo Video

[](https://youtu.be/YOUR_VIDEO_ID)

> 📹 *Coming soon! Demo video will be added before the submission deadline.*

---

## 🌟 Project Overview

> *An interactive game where you watch an AI agent autonomously fight fires using MCP tools!*
> Take control, collaborate with the AI, or let it run fully autonomously. Your choice!

This project demonstrates how AI agents can leverage **MCP (Model Context Protocol)** tools to make autonomous decisions in real-time tactical scenarios. The AI Advisor:

1. 📊 **Queries** the simulation state using MCP tools
2. 🎯 **Analyzes** fire threats, building integrity, and unit coverage
3. ⚡ **Executes** deployment decisions through MCP tool calls
4. 🔄 **Adapts** strategy based on changing battlefield conditions

### Key Highlights

- **🤖 Autonomous AI Agent**: Watches the battlefield and makes tactical decisions without human intervention
- **🔧 10 MCP Tools**: Rich set of tools for state queries, analysis, and unit deployment
- **🎨 Beautiful Gradio 6 UI**: Real-time visualization with an emoji-based grid map
- **📊 Transparent Reasoning**: Watch the AI's thought process in real time
- **🎮 Hybrid Control**: Toggle between full AI autonomy and manual control

---

## 🛠️ MCP Tools

The Fire-Rescue MCP Server exposes **10 functional tools** for AI agents:

### Core Operations

| Tool | Description |
|------|-------------|
| `reset_scenario` | Initialize a new simulation with configurable parameters |
| `get_world_state` | Get complete world state snapshot with emoji map |
| `deploy_unit` | Deploy a firefighting unit at specified coordinates |
| `move_unit` | Reposition an existing unit to a new location |
| `remove_unit` | Remove a unit (frees slot for redeployment elsewhere) |
| `step_simulation` | Advance simulation by N ticks |

### Data Query & Analysis

| Tool | Description |
|------|-------------|
| `find_idle_units` | Identify units not covering any active fires |
| `find_uncovered_fires` | Find fires with no unit coverage |
| `find_building_threats` | Detect fires threatening buildings |
| `analyze_coverage` | Comprehensive coverage analysis data |
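
As a rough illustration of how an agent issues these calls, an MCP tool invocation is just a tool name plus a JSON arguments object. The argument names below (`unit_type`, `x`, `y`, `ticks`) are illustrative assumptions, not the server's confirmed signatures:

```python
# Hypothetical tool-call payloads an MCP client might send to this server.
deploy_call = {"name": "deploy_unit", "arguments": {"unit_type": "fire_truck", "x": 4, "y": 5}}
query_call = {"name": "find_uncovered_fires", "arguments": {}}
step_call = {"name": "step_simulation", "arguments": {"ticks": 5}}
```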

---

## 🔄 Architecture & Workflow

```
┌─────────────────────────────────────────────────────────────────┐
│                        Gradio 6 Web UI                          │
│  ┌──────────────┐  ┌──────────────┐  ┌───────────────────────┐  │
│  │ Control Panel│  │  10x10 Grid  │  │   AI Advisor Panel    │  │
│  │ Start/Pause  │  │  🌲🔥🚒🏢🚁  │  │  • Reasoning          │  │
│  │ Reset/Config │  │ Click Deploy │  │  • Tool Calls         │  │
│  └──────────────┘  └──────────────┘  │  • Recommendations    │  │
│                                      └───────────────────────┘  │
└─────────────────────────────────────────────────────────────────┘
                              │
                              ▼
┌─────────────────────────────────────────────────────────────────┐
│                       Background Service                        │
│  ┌──────────────────┐    ┌────────────────────────────────────┐ │
│  │ Simulation Engine│◄──►│        AI Advisor Agent            │ │
│  │ • Fire spread    │    │ • HuggingFace Inference Provider   │ │
│  │ • Unit behavior  │    │ • MCP Tool Invocation              │ │
│  │ • Win/Lose logic │    │ • Strategy Planning                │ │
│  └──────────────────┘    └────────────────────────────────────┘ │
└─────────────────────────────────────────────────────────────────┘
                              │
                              ▼
┌─────────────────────────────────────────────────────────────────┐
│                      MCP Server (FastMCP)                        │
│  ┌──────────────┐ ┌─────────────┐ ┌─────────────┐ ┌───────────┐ │
│  │reset_scenario│ │get_world_   │ │deploy_unit  │ │move_unit  │ │
│  └──────────────┘ │state        │ └─────────────┘ └───────────┘ │
│  ┌──────────────┐ └─────────────┘ ┌─────────────┐ ┌───────────┐ │
│  │step_         │ ┌─────────────┐ │find_uncov-  │ │find_build-│ │
│  │simulation    │ │find_idle_   │ │ered_fires   │ │ing_threats│ │
│  └──────────────┘ │units        │ └─────────────┘ └───────────┘ │
│                   └─────────────┘ ┌─────────────────────────────┐│
│                                   │      analyze_coverage       ││
│                                   └─────────────────────────────┘│
└─────────────────────────────────────────────────────────────────┘
```
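
A minimal sketch of the advisor loop implied by this diagram. The agent methods match `agent.py`; the `call_tool` helper and the argument names are illustrative assumptions about how the background service forwards requests to the MCP server:

```python
def advisor_cycle(agent, call_tool):
    """One assess → plan → execute pass, then advance the simulation."""
    state = call_tool("get_world_state", {})             # query the battlefield
    assessment = agent.assess(state)                      # Stage 1: analyze threats/coverage
    plan = agent.plan(state, assessment)                  # Stage 2: choose a strategy
    for rec in agent.execute(state, assessment, plan):    # Stage 3: concrete actions
        if rec.action == "deploy":
            call_tool("deploy_unit", {"unit_type": rec.suggested_unit_type,
                                      "x": rec.target_x, "y": rec.target_y})
        elif rec.action == "move":
            call_tool("move_unit", {"from_x": rec.source_x, "from_y": rec.source_y,
                                    "x": rec.target_x, "y": rec.target_y})
    call_tool("step_simulation", {"ticks": 1})            # let fires spread / units work
```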

---

## 🧩 Core Components

### 🤖 AI Advisor Agent (`agent.py`)

- **Role**: Autonomous tactical advisor
- **LLM**: HuggingFace Inference Provider (`openai/gpt-oss-120b`)
- **Responsibilities**:
  - Analyze battlefield state via MCP tools
  - Generate deployment recommendations
  - Execute strategies when auto-execute is enabled

### 🎮 Simulation Engine (`simulation.py`)

- **Role**: Core game logic
- **Features**:
  - 2D grid-based fire spread simulation
  - Unit behavior (fire trucks & helicopters)
  - Building integrity tracking
  - Win/Lose condition evaluation

### 🔧 MCP Server (`fire_rescue_mcp/mcp_server.py`)

- **Role**: Expose the simulation as MCP tools
- **Framework**: FastMCP
- **Tools**: 10 functional tools for state queries and actions (a minimal registration sketch follows below)

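For context, a FastMCP tool is just a decorated Python function. This is a self-contained toy sketch, not the actual `mcp_server.py`; the import path and server name are assumptions that depend on which FastMCP package the project installs:

```python
from fastmcp import FastMCP  # assumption: the standalone fastmcp package

mcp = FastMCP("fire-rescue-demo")  # server name is illustrative

# Toy in-memory state standing in for the real simulation engine.
WORLD = {"tick": 0,
         "fires": [{"x": 2, "y": 3, "intensity": 0.8}],
         "units": [{"x": 5, "y": 5, "type": "helicopter"}]}

@mcp.tool()
def get_world_state() -> dict:
    """Return a snapshot of the toy world state."""
    return WORLD

if __name__ == "__main__":
    mcp.run()
```
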
### 🎨 Web Interface (`app.py`)

- **Role**: User interface
- **Framework**: Gradio 6
- **Features**:
  - Real-time emoji grid visualization
  - AI reasoning panel with tool call history
  - Interactive unit deployment
  - Auto-execute toggle

---

## 🎯 Game Rules

### Units

| Unit | Symbol | Power | Range | Best For |
|------|--------|-------|-------|----------|
| Fire Truck | 🚒 | 40% | Covers 1 tile outward | High-intensity fires & building threats |
| Helicopter | 🚁 | 25% | Covers 2 tiles outward | Wide area coverage |

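The Range column translates into a square-radius (Chebyshev-distance) check; a small sketch of what "covers N tiles outward" means, with illustrative helper names:

```python
RANGES = {"fire_truck": 1, "helicopter": 2}

def covers(unit: dict, fire: dict) -> bool:
    """A unit covers a fire if the fire lies within its square radius."""
    radius = RANGES[unit["type"]]
    return max(abs(unit["x"] - fire["x"]), abs(unit["y"] - fire["y"])) <= radius
```
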
### Win/Lose Conditions

- **🏆 Victory**: All fires extinguished (intensity < 10%)
- **💀 Game Over**:
  - Building integrity drops below 50%
  - Forest burn ratio exceeds 80%
  - Time limit exceeded (200 ticks)

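A compact sketch of the end-of-game check these rules describe; the thresholds come from this README, while the function itself is illustrative:

```python
def game_status(fires, building_integrity, forest_burn_ratio, tick, tick_limit=200):
    if all(f["intensity"] < 0.10 for f in fires):
        return "victory"
    if building_integrity < 0.50 or forest_burn_ratio > 0.80 or tick >= tick_limit:
        return "game_over"
    return "running"
```
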
### Map Legend

| Symbol | Meaning |
|--------|---------|
| 🌲 | Forest |
| 🏢 | Building |
| 🔥 | Active Fire (≥10% intensity) |
| 💨 | Smoke (<10% intensity) |
| 🚒 | Fire Truck |
| 🚁 | Helicopter |

---

## 🚀 Quick Start

### Prerequisites

- Python 3.10+
- HuggingFace API Token (for AI Advisor)

### Installation

```bash
# Clone the repository
git clone https://github.com/ArkaiAriza/fire-rescue-mcp.git
cd fire-rescue-mcp

# Install with UV (recommended)
uv sync

# Or install with pip
pip install -e .
```

### Configuration

Set your HuggingFace token:

```bash
export HF_TOKEN=your-huggingface-token-here
```

Or create a `.env` file:

```
HF_TOKEN=your-huggingface-token-here
```

### Launch

```bash
# Start the Gradio web app
uv run python app.py

# Or run as a module
python -m app
```

Then open http://localhost:7860 in your browser.

---

## 📁 Project Structure

```
fire-rescue-mcp/
├── app.py                  # Gradio web application (entry point)
├── service.py              # Background simulation service
├── agent.py                # LLM advisor agent
├── simulation.py           # Simulation engine (fire spread, unit behavior)
├── models.py               # Data structures (WorldState, Unit, Fire, etc.)
├── prompts.yaml            # AI advisor prompts
├── fire_rescue_mcp/        # MCP core package
│   ├── __init__.py         # Package exports
│   └── mcp_server.py       # MCP server with 10 tools
├── pyproject.toml          # Project configuration
├── requirements.txt        # Python dependencies
└── README.md               # This file
```

---

## 🛠️ Technologies Used

| Technology | Purpose |
|------------|---------|
| **Gradio 6** | Web UI framework |
| **FastMCP** | MCP server implementation |
| **HuggingFace Inference** | LLM provider for AI Advisor |
| **UV** | Package management |
| **Python 3.10+** | Core language |

---

## 📝 License

This project is licensed under the **MIT License** - see the [LICENSE](LICENSE) file for details.
agent.py
ADDED
@@ -0,0 +1,1280 @@
| 1 |
+
"""
|
| 2 |
+
Fire-Rescue - Multi-Stage LLM Advisor Agent
|
| 3 |
+
|
| 4 |
+
Provides advisory recommendations based on world state analysis using a
|
| 5 |
+
multi-stage approach: Assessment → Planning → Execution.
|
| 6 |
+
|
| 7 |
+
The agent only suggests actions; it does not directly control units.
|
| 8 |
+
All analysis is performed by the AI - no rule-based fallback.
|
| 9 |
+
|
| 10 |
+
Uses HuggingFace Inference Provider API with openai/gpt-oss-120b model.
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
import json
|
| 14 |
+
import os
|
| 15 |
+
from dataclasses import dataclass, field
|
| 16 |
+
from pathlib import Path
|
| 17 |
+
from typing import Optional
|
| 18 |
+
|
| 19 |
+
import yaml
|
| 20 |
+
from openai import OpenAI
|
| 21 |
+
|
| 22 |
+
# Load .env file if it exists
|
| 23 |
+
try:
|
| 24 |
+
from dotenv import load_dotenv
|
| 25 |
+
env_path = Path(__file__).parent / ".env"
|
| 26 |
+
if env_path.exists():
|
| 27 |
+
load_dotenv(env_path)
|
| 28 |
+
except ImportError:
|
| 29 |
+
pass # dotenv not installed, rely on environment variables
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
# =============================================================================
|
| 33 |
+
# HuggingFace Inference Provider Configuration
|
| 34 |
+
# =============================================================================
|
| 35 |
+
|
| 36 |
+
# HuggingFace Inference Provider base URL (OpenAI-compatible)
|
| 37 |
+
HF_INFERENCE_BASE_URL = "https://router.huggingface.co/v1"
|
| 38 |
+
HF_DEFAULT_MODEL = "openai/gpt-oss-120b"
|
| 39 |
+
|
| 40 |
+
def get_hf_token() -> str | None:
|
| 41 |
+
"""
|
| 42 |
+
Get HuggingFace token from environment variable.
|
| 43 |
+
|
| 44 |
+
Returns:
|
| 45 |
+
HF_TOKEN if available, None otherwise
|
| 46 |
+
"""
|
| 47 |
+
return os.getenv("HF_TOKEN")
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
# =============================================================================
|
| 51 |
+
# Load Prompts from Configuration
|
| 52 |
+
# =============================================================================
|
| 53 |
+
|
| 54 |
+
def load_prompts() -> dict:
|
| 55 |
+
"""Load prompts from prompts.yaml configuration file."""
|
| 56 |
+
prompts_path = Path(__file__).parent / "prompts.yaml"
|
| 57 |
+
if prompts_path.exists():
|
| 58 |
+
with open(prompts_path, "r", encoding="utf-8") as f:
|
| 59 |
+
return yaml.safe_load(f)
|
| 60 |
+
return {}
|
| 61 |
+
|
| 62 |
+
PROMPTS_CONFIG = load_prompts()
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
# =============================================================================
|
| 66 |
+
# Data Models
|
| 67 |
+
# =============================================================================
|
| 68 |
+
|
| 69 |
+
@dataclass
|
| 70 |
+
class Recommendation:
|
| 71 |
+
"""A single deployment or move recommendation from the advisor."""
|
| 72 |
+
reason: str
|
| 73 |
+
suggested_unit_type: str
|
| 74 |
+
target_x: int
|
| 75 |
+
target_y: int
|
| 76 |
+
action: str = "deploy" # "deploy", "move", or "remove"
|
| 77 |
+
source_x: int = -1 # For move action: source position
|
| 78 |
+
source_y: int = -1
|
| 79 |
+
|
| 80 |
+
def to_dict(self) -> dict:
|
| 81 |
+
result = {
|
| 82 |
+
"reason": self.reason,
|
| 83 |
+
"suggested_unit_type": self.suggested_unit_type,
|
| 84 |
+
"target": {"x": self.target_x, "y": self.target_y},
|
| 85 |
+
"action": self.action
|
| 86 |
+
}
|
| 87 |
+
if self.action == "move":
|
| 88 |
+
result["source"] = {"x": self.source_x, "y": self.source_y}
|
| 89 |
+
return result
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
@dataclass
|
| 93 |
+
class AssessmentResult:
|
| 94 |
+
"""Result from Stage 1: Assessment - analyzing the current situation."""
|
| 95 |
+
fire_count: int
|
| 96 |
+
high_intensity_fires: list # fires > 70%
|
| 97 |
+
building_threats: list # fires near buildings
|
| 98 |
+
uncovered_fires: list # fires with no unit in range
|
| 99 |
+
unit_count: int
|
| 100 |
+
max_units: int
|
| 101 |
+
effective_units: list # units in range of fires
|
| 102 |
+
ineffective_units: list # units not covering any fire
|
| 103 |
+
coverage_ratio: float # ratio of fires covered by units
|
| 104 |
+
threat_level: str # CRITICAL / HIGH / MODERATE / LOW
|
| 105 |
+
summary: str
|
| 106 |
+
building_integrity: float
|
| 107 |
+
|
| 108 |
+
def to_dict(self) -> dict:
|
| 109 |
+
return {
|
| 110 |
+
"fire_count": self.fire_count,
|
| 111 |
+
"high_intensity_fires": len(self.high_intensity_fires),
|
| 112 |
+
"building_threats": len(self.building_threats),
|
| 113 |
+
"uncovered_fires": len(self.uncovered_fires),
|
| 114 |
+
"unit_count": self.unit_count,
|
| 115 |
+
"max_units": self.max_units,
|
| 116 |
+
"effective_units": len(self.effective_units),
|
| 117 |
+
"ineffective_units": len(self.ineffective_units),
|
| 118 |
+
"coverage_ratio": round(self.coverage_ratio, 2),
|
| 119 |
+
"threat_level": self.threat_level,
|
| 120 |
+
"summary": self.summary,
|
| 121 |
+
"building_integrity": round(self.building_integrity, 2)
|
| 122 |
+
}
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
@dataclass
|
| 126 |
+
class PlanResult:
|
| 127 |
+
"""Result from Stage 2: Planning - deciding the strategy."""
|
| 128 |
+
strategy: str # "deploy_new" / "optimize_existing" / "balanced" / "monitor"
|
| 129 |
+
reasoning: str
|
| 130 |
+
deploy_count: int # how many new units to deploy
|
| 131 |
+
reposition_units: list # units to move (list of dicts with source and reason)
|
| 132 |
+
priority_targets: list # fires to prioritize (sorted by priority)
|
| 133 |
+
|
| 134 |
+
def to_dict(self) -> dict:
|
| 135 |
+
return {
|
| 136 |
+
"strategy": self.strategy,
|
| 137 |
+
"reasoning": self.reasoning,
|
| 138 |
+
"deploy_count": self.deploy_count,
|
| 139 |
+
"reposition_count": len(self.reposition_units),
|
| 140 |
+
"priority_targets": len(self.priority_targets)
|
| 141 |
+
}
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
@dataclass
|
| 145 |
+
class AdvisorResponse:
|
| 146 |
+
"""Complete response from the advisor agent."""
|
| 147 |
+
summary: str
|
| 148 |
+
recommendations: list[Recommendation]
|
| 149 |
+
thinking: str = "" # Chain of thought reasoning
|
| 150 |
+
analysis: str = "" # Situation analysis
|
| 151 |
+
priority: str = "" # Priority assessment
|
| 152 |
+
raw_response: Optional[str] = None
|
| 153 |
+
error: Optional[str] = None
|
| 154 |
+
# Multi-stage results
|
| 155 |
+
assessment: Optional[AssessmentResult] = None
|
| 156 |
+
plan: Optional[PlanResult] = None
|
| 157 |
+
|
| 158 |
+
def to_dict(self) -> dict:
|
| 159 |
+
result = {
|
| 160 |
+
"summary": self.summary,
|
| 161 |
+
"recommendations": [r.to_dict() for r in self.recommendations],
|
| 162 |
+
"thinking": self.thinking,
|
| 163 |
+
"analysis": self.analysis,
|
| 164 |
+
"priority": self.priority
|
| 165 |
+
}
|
| 166 |
+
if self.assessment:
|
| 167 |
+
result["assessment"] = self.assessment.to_dict()
|
| 168 |
+
if self.plan:
|
| 169 |
+
result["plan"] = self.plan.to_dict()
|
| 170 |
+
return result
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
@dataclass
|
| 174 |
+
class CycleSummary:
|
| 175 |
+
"""Stage 4 summary for a single advisor cycle."""
|
| 176 |
+
headline: str
|
| 177 |
+
threat_level: str
|
| 178 |
+
key_highlights: list[str]
|
| 179 |
+
risks: list[str]
|
| 180 |
+
next_focus: list[str]
|
| 181 |
+
|
| 182 |
+
def to_dict(self) -> dict:
|
| 183 |
+
return {
|
| 184 |
+
"headline": self.headline,
|
| 185 |
+
"threat_level": self.threat_level,
|
| 186 |
+
"key_highlights": self.key_highlights,
|
| 187 |
+
"risks": self.risks,
|
| 188 |
+
"next_focus": self.next_focus,
|
| 189 |
+
}
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
@dataclass
|
| 193 |
+
class AfterActionReport:
|
| 194 |
+
"""Structured after-action report summarizing mission outcome."""
|
| 195 |
+
summary: str
|
| 196 |
+
strengths: list[str]
|
| 197 |
+
improvements: list[str]
|
| 198 |
+
next_actions: list[str]
|
| 199 |
+
outcome: str = ""
|
| 200 |
+
error: Optional[str] = None
|
| 201 |
+
charts: dict = field(default_factory=dict)
|
| 202 |
+
player_actions: dict = field(default_factory=dict)
|
| 203 |
+
|
| 204 |
+
def to_dict(self) -> dict:
|
| 205 |
+
return {
|
| 206 |
+
"summary": self.summary,
|
| 207 |
+
"strengths": self.strengths,
|
| 208 |
+
"improvements": self.improvements,
|
| 209 |
+
"next_actions": self.next_actions,
|
| 210 |
+
"outcome": self.outcome,
|
| 211 |
+
"error": self.error,
|
| 212 |
+
"charts": self.charts,
|
| 213 |
+
"player_actions": self.player_actions,
|
| 214 |
+
}
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
# =============================================================================
|
| 218 |
+
# Advisor Agent
|
| 219 |
+
# =============================================================================
|
| 220 |
+
|
| 221 |
+
class AdvisorAgent:
|
| 222 |
+
"""
|
| 223 |
+
Multi-stage LLM-based advisor agent that analyzes world state and provides recommendations.
|
| 224 |
+
|
| 225 |
+
Stage 1: ASSESS - Analyze the situation
|
| 226 |
+
Stage 2: PLAN - Decide strategy
|
| 227 |
+
Stage 3: EXECUTE - Generate specific actions
|
| 228 |
+
|
| 229 |
+
All analysis is performed by the AI model - no fallback logic.
|
| 230 |
+
Uses HuggingFace Inference Provider API.
|
| 231 |
+
"""
|
| 232 |
+
|
| 233 |
+
def __init__(
|
| 234 |
+
self,
|
| 235 |
+
api_key: Optional[str] = None,
|
| 236 |
+
model: Optional[str] = None,
|
| 237 |
+
):
|
| 238 |
+
"""
|
| 239 |
+
Initialize the advisor agent.
|
| 240 |
+
|
| 241 |
+
Args:
|
| 242 |
+
api_key: HuggingFace token (defaults to HF_TOKEN env var)
|
| 243 |
+
model: Model to use for inference (defaults to openai/gpt-oss-120b)
|
| 244 |
+
"""
|
| 245 |
+
# Get HuggingFace token
|
| 246 |
+
self.api_key = api_key or get_hf_token()
|
| 247 |
+
self.base_url = HF_INFERENCE_BASE_URL
|
| 248 |
+
|
| 249 |
+
# Load model config from prompts.yaml
|
| 250 |
+
model_config = PROMPTS_CONFIG.get("model", {})
|
| 251 |
+
|
| 252 |
+
# Model priority: explicit param > prompts.yaml > default
|
| 253 |
+
yaml_model = model_config.get("default")
|
| 254 |
+
self.model = model or yaml_model or HF_DEFAULT_MODEL
|
| 255 |
+
|
| 256 |
+
self.temperature = model_config.get("temperature") # None if not set
|
| 257 |
+
self.max_completion_tokens = model_config.get("max_completion_tokens", 2000)
|
| 258 |
+
|
| 259 |
+
# Initialize client
|
| 260 |
+
if self.api_key:
|
| 261 |
+
print(f"🤖 AI Advisor initialized with HuggingFace Inference API, model: {self.model}")
|
| 262 |
+
self.client = OpenAI(
|
| 263 |
+
api_key=self.api_key,
|
| 264 |
+
base_url=self.base_url
|
| 265 |
+
)
|
| 266 |
+
else:
|
| 267 |
+
self.client = None
|
| 268 |
+
print("⚠️ Warning: No HF_TOKEN found. AI analysis will not work.")
|
| 269 |
+
|
| 270 |
+
# =========================================================================
|
| 271 |
+
# JSON Repair Helper
|
| 272 |
+
# =========================================================================
|
| 273 |
+
|
| 274 |
+
def _try_repair_json(self, content: str) -> Optional[dict]:
|
| 275 |
+
"""
|
| 276 |
+
Attempt to repair truncated JSON by closing open brackets/braces.
|
| 277 |
+
|
| 278 |
+
Returns:
|
| 279 |
+
Repaired JSON dict, or None if repair failed
|
| 280 |
+
"""
|
| 281 |
+
# Count open brackets and braces
|
| 282 |
+
open_braces = content.count('{') - content.count('}')
|
| 283 |
+
open_brackets = content.count('[') - content.count(']')
|
| 284 |
+
|
| 285 |
+
# Check if we're in the middle of a string (odd number of unescaped quotes)
|
| 286 |
+
in_string = False
|
| 287 |
+
i = 0
|
| 288 |
+
while i < len(content):
|
| 289 |
+
if content[i] == '"' and (i == 0 or content[i-1] != '\\'):
|
| 290 |
+
in_string = not in_string
|
| 291 |
+
i += 1
|
| 292 |
+
|
| 293 |
+
repaired = content
|
| 294 |
+
|
| 295 |
+
# Close any open string
|
| 296 |
+
if in_string:
|
| 297 |
+
repaired += '"'
|
| 298 |
+
|
| 299 |
+
# Close brackets and braces in reverse order of opening
|
| 300 |
+
# This is a simplified repair - close all brackets then all braces
|
| 301 |
+
repaired += ']' * open_brackets
|
| 302 |
+
repaired += '}' * open_braces
|
| 303 |
+
|
| 304 |
+
try:
|
| 305 |
+
return json.loads(repaired)
|
| 306 |
+
except json.JSONDecodeError:
|
| 307 |
+
# Try more aggressive repair - find last valid JSON object
|
| 308 |
+
try:
|
| 309 |
+
# Try to find a partial valid structure
|
| 310 |
+
for end_pos in range(len(content), 0, -1):
|
| 311 |
+
partial = content[:end_pos]
|
| 312 |
+
open_b = partial.count('{') - partial.count('}')
|
| 313 |
+
open_br = partial.count('[') - partial.count(']')
|
| 314 |
+
attempt = partial + ']' * open_br + '}' * open_b
|
| 315 |
+
try:
|
| 316 |
+
return json.loads(attempt)
|
| 317 |
+
except json.JSONDecodeError:
|
| 318 |
+
continue
|
| 319 |
+
except Exception:
|
| 320 |
+
pass
|
| 321 |
+
return None
|
| 322 |
+
|
| 323 |
+
# =========================================================================
|
| 324 |
+
# LLM Call Helper
|
| 325 |
+
# =========================================================================
|
| 326 |
+
|
| 327 |
+
def _call_llm(self, system_prompt: str, user_message: str) -> Optional[dict]:
|
| 328 |
+
"""
|
| 329 |
+
Make an LLM API call to HuggingFace Inference Provider and parse JSON response.
|
| 330 |
+
|
| 331 |
+
Returns:
|
| 332 |
+
Parsed JSON dict, or None if failed
|
| 333 |
+
"""
|
| 334 |
+
if not self.client:
|
| 335 |
+
print("Error: No API client available (HF_TOKEN not set)")
|
| 336 |
+
return None
|
| 337 |
+
|
| 338 |
+
# Retry logic for rate limiting (429 errors)
|
| 339 |
+
max_retries = 3
|
| 340 |
+
retry_delay = 5 # seconds
|
| 341 |
+
|
| 342 |
+
for attempt in range(max_retries):
|
| 343 |
+
try:
|
| 344 |
+
# Build API call parameters for HuggingFace Inference Provider
|
| 345 |
+
api_params = {
|
| 346 |
+
"model": self.model,
|
| 347 |
+
"messages": [
|
| 348 |
+
{"role": "system", "content": system_prompt},
|
| 349 |
+
{"role": "user", "content": user_message}
|
| 350 |
+
],
|
| 351 |
+
"max_tokens": self.max_completion_tokens,
|
| 352 |
+
"response_format": {"type": "json_object"}
|
| 353 |
+
}
|
| 354 |
+
|
| 355 |
+
# Only add temperature if explicitly set
|
| 356 |
+
if self.temperature is not None:
|
| 357 |
+
api_params["temperature"] = self.temperature
|
| 358 |
+
|
| 359 |
+
response = self.client.chat.completions.create(**api_params)
|
| 360 |
+
|
| 361 |
+
content = response.choices[0].message.content
|
| 362 |
+
finish_reason = response.choices[0].finish_reason
|
| 363 |
+
|
| 364 |
+
# Check if response was truncated
|
| 365 |
+
if finish_reason == "length":
|
| 366 |
+
print(f"⚠️ Warning: Response was truncated (hit token limit)")
|
| 367 |
+
|
| 368 |
+
if content:
|
| 369 |
+
# Try to parse JSON from the response
|
| 370 |
+
# Handle potential markdown code blocks in response
|
| 371 |
+
content = content.strip()
|
| 372 |
+
if content.startswith("```json"):
|
| 373 |
+
content = content[7:]
|
| 374 |
+
if content.startswith("```"):
|
| 375 |
+
content = content[3:]
|
| 376 |
+
if content.endswith("```"):
|
| 377 |
+
content = content[:-3]
|
| 378 |
+
content = content.strip()
|
| 379 |
+
|
| 380 |
+
try:
|
| 381 |
+
return json.loads(content)
|
| 382 |
+
except json.JSONDecodeError as e:
|
| 383 |
+
# Try to repair truncated JSON
|
| 384 |
+
print(f"⚠️ JSON parse error: {e}")
|
| 385 |
+
print(f" Raw content (first 500 chars): {content[:500]}...")
|
| 386 |
+
repaired = self._try_repair_json(content)
|
| 387 |
+
if repaired:
|
| 388 |
+
print("✅ Successfully repaired truncated JSON")
|
| 389 |
+
return repaired
|
| 390 |
+
return None
|
| 391 |
+
return None
|
| 392 |
+
|
| 393 |
+
except Exception as e:
|
| 394 |
+
error_str = str(e)
|
| 395 |
+
# Check for rate limiting (429) error
|
| 396 |
+
if "429" in error_str or "too_many_requests" in error_str.lower():
|
| 397 |
+
if attempt < max_retries - 1:
|
| 398 |
+
print(f"⚠️ Rate limited (429), retrying in {retry_delay}s... (attempt {attempt + 1}/{max_retries})")
|
| 399 |
+
import time
|
| 400 |
+
time.sleep(retry_delay)
|
| 401 |
+
continue
|
| 402 |
+
else:
|
| 403 |
+
print(f"❌ Rate limited after {max_retries} attempts")
|
| 404 |
+
return None
|
| 405 |
+
else:
|
| 406 |
+
print(f"LLM call error: {e}")
|
| 407 |
+
return None
|
| 408 |
+
|
| 409 |
+
return None # Should not reach here
|
| 410 |
+
|
| 411 |
+
# =========================================================================
|
| 412 |
+
# Stage 1: Assessment
|
| 413 |
+
# =========================================================================
|
| 414 |
+
|
| 415 |
+
def assess(self, world_state: dict) -> AssessmentResult:
|
| 416 |
+
"""
|
| 417 |
+
Stage 1: Assess the current situation using AI.
|
| 418 |
+
"""
|
| 419 |
+
assess_config = PROMPTS_CONFIG.get("assess", {})
|
| 420 |
+
system = assess_config.get("system", "")
|
| 421 |
+
output_format = assess_config.get("output_format", "")
|
| 422 |
+
|
| 423 |
+
fires = world_state.get("fires", [])
|
| 424 |
+
units = world_state.get("units", [])
|
| 425 |
+
buildings = world_state.get("buildings", [])
|
| 426 |
+
building_integrity = world_state.get("building_integrity", 1.0)
|
| 427 |
+
max_units = world_state.get("max_units", 10)
|
| 428 |
+
|
| 429 |
+
# Prepare detailed user message
|
| 430 |
+
user_message = f"""Current World State:
|
| 431 |
+
- Grid size: {world_state.get("width", 10)}x{world_state.get("height", 10)}
|
| 432 |
+
- Building Integrity: {building_integrity:.1%}
|
| 433 |
+
- Max Units Allowed: {max_units}
|
| 434 |
+
|
| 435 |
+
FIRES ({len(fires)} total):
|
| 436 |
+
{json.dumps(fires, indent=2)}
|
| 437 |
+
|
| 438 |
+
UNITS ({len(units)} deployed):
|
| 439 |
+
{json.dumps(units, indent=2)}
|
| 440 |
+
|
| 441 |
+
BUILDINGS ({len(buildings)} total):
|
| 442 |
+
{json.dumps(buildings[:20], indent=2)}{" (showing first 20)" if len(buildings) > 20 else ""}
|
| 443 |
+
|
| 444 |
+
Remember:
|
| 445 |
+
- Fire Truck effective range: covers 1 tile outward from its center
|
| 446 |
+
- Helicopter effective range: covers 2 tiles outward from its center
|
| 447 |
+
- A fire is UNCOVERED if no unit is within range
|
| 448 |
+
- A unit is INEFFECTIVE if no fire is within its range
|
| 449 |
+
|
| 450 |
+
Output format:
|
| 451 |
+
{output_format}"""
|
| 452 |
+
|
| 453 |
+
full_prompt = system + "\n\n" + output_format
|
| 454 |
+
response = self._call_llm(full_prompt, user_message)
|
| 455 |
+
|
| 456 |
+
if not response:
|
| 457 |
+
# Return minimal assessment if AI fails
|
| 458 |
+
return AssessmentResult(
|
| 459 |
+
fire_count=len(fires),
|
| 460 |
+
high_intensity_fires=[f for f in fires if f.get("intensity", 0) > 0.7],
|
| 461 |
+
building_threats=[],
|
| 462 |
+
uncovered_fires=fires[:], # Assume all uncovered if AI fails
|
| 463 |
+
unit_count=len(units),
|
| 464 |
+
max_units=max_units,
|
| 465 |
+
effective_units=[],
|
| 466 |
+
ineffective_units=units[:], # Assume all ineffective if AI fails
|
| 467 |
+
coverage_ratio=0.0,
|
| 468 |
+
threat_level="HIGH",
|
| 469 |
+
summary="AI assessment unavailable - assuming high threat",
|
| 470 |
+
building_integrity=building_integrity
|
| 471 |
+
)
|
| 472 |
+
|
| 473 |
+
try:
|
| 474 |
+
fire_analysis = response.get("fire_analysis", {})
|
| 475 |
+
unit_analysis = response.get("unit_analysis", {})
|
| 476 |
+
|
| 477 |
+
# Parse uncovered fire positions
|
| 478 |
+
uncovered_positions = fire_analysis.get("uncovered_fire_positions", [])
|
| 479 |
+
uncovered_fires = []
|
| 480 |
+
for pos in uncovered_positions:
|
| 481 |
+
if isinstance(pos, list) and len(pos) >= 2:
|
| 482 |
+
for f in fires:
|
| 483 |
+
if f["x"] == pos[0] and f["y"] == pos[1]:
|
| 484 |
+
uncovered_fires.append(f)
|
| 485 |
+
break
|
| 486 |
+
|
| 487 |
+
# Parse high intensity positions
|
| 488 |
+
high_positions = fire_analysis.get("high_intensity_positions", [])
|
| 489 |
+
high_intensity_fires = []
|
| 490 |
+
for pos in high_positions:
|
| 491 |
+
if isinstance(pos, list) and len(pos) >= 2:
|
| 492 |
+
for f in fires:
|
| 493 |
+
if f["x"] == pos[0] and f["y"] == pos[1]:
|
| 494 |
+
high_intensity_fires.append(f)
|
| 495 |
+
break
|
| 496 |
+
# Fallback to intensity check
|
| 497 |
+
if not high_intensity_fires:
|
| 498 |
+
high_intensity_fires = [f for f in fires if f.get("intensity", 0) > 0.7]
|
| 499 |
+
|
| 500 |
+
# Parse building threat positions
|
| 501 |
+
building_threat_positions = fire_analysis.get("building_threat_positions", [])
|
| 502 |
+
building_threats = []
|
| 503 |
+
for pos in building_threat_positions:
|
| 504 |
+
if isinstance(pos, list) and len(pos) >= 2:
|
| 505 |
+
for f in fires:
|
| 506 |
+
if f["x"] == pos[0] and f["y"] == pos[1]:
|
| 507 |
+
building_threats.append(f)
|
| 508 |
+
break
|
| 509 |
+
|
| 510 |
+
# Parse ineffective units
|
| 511 |
+
ineffective_positions = unit_analysis.get("ineffective_positions", [])
|
| 512 |
+
ineffective_units = []
|
| 513 |
+
for pos in ineffective_positions:
|
| 514 |
+
if isinstance(pos, list) and len(pos) >= 2:
|
| 515 |
+
for u in units:
|
| 516 |
+
if u["x"] == pos[0] and u["y"] == pos[1]:
|
| 517 |
+
ineffective_units.append(u)
|
| 518 |
+
break
|
| 519 |
+
|
| 520 |
+
# Calculate effective units (all units not in ineffective list)
|
| 521 |
+
ineffective_set = set((u["x"], u["y"]) for u in ineffective_units)
|
| 522 |
+
effective_units = [u for u in units if (u["x"], u["y"]) not in ineffective_set]
|
| 523 |
+
|
| 524 |
+
return AssessmentResult(
|
| 525 |
+
fire_count=fire_analysis.get("total_fires", len(fires)),
|
| 526 |
+
high_intensity_fires=high_intensity_fires,
|
| 527 |
+
building_threats=building_threats,
|
| 528 |
+
uncovered_fires=uncovered_fires,
|
| 529 |
+
unit_count=len(units),
|
| 530 |
+
max_units=max_units,
|
| 531 |
+
effective_units=effective_units,
|
| 532 |
+
ineffective_units=ineffective_units,
|
| 533 |
+
coverage_ratio=unit_analysis.get("coverage_ratio", 0.0),
|
| 534 |
+
threat_level=response.get("threat_level", "MODERATE"),
|
| 535 |
+
summary=response.get("summary", ""),
|
| 536 |
+
building_integrity=building_integrity
|
| 537 |
+
)
|
| 538 |
+
except Exception as e:
|
| 539 |
+
print(f"Error parsing AI assessment: {e}")
|
| 540 |
+
return AssessmentResult(
|
| 541 |
+
fire_count=len(fires),
|
| 542 |
+
high_intensity_fires=[],
|
| 543 |
+
building_threats=[],
|
| 544 |
+
uncovered_fires=fires[:],
|
| 545 |
+
unit_count=len(units),
|
| 546 |
+
max_units=max_units,
|
| 547 |
+
effective_units=[],
|
| 548 |
+
ineffective_units=units[:],
|
| 549 |
+
coverage_ratio=0.0,
|
| 550 |
+
threat_level="HIGH",
|
| 551 |
+
summary=f"Assessment parse error: {e}",
|
| 552 |
+
building_integrity=building_integrity
|
| 553 |
+
)
|
| 554 |
+
|
| 555 |
+
# =========================================================================
|
| 556 |
+
# Stage 2: Planning
|
| 557 |
+
# =========================================================================
|
| 558 |
+
|
| 559 |
+
def plan(self, world_state: dict, assessment: AssessmentResult) -> PlanResult:
|
| 560 |
+
"""
|
| 561 |
+
Stage 2: Decide the strategy based on assessment using AI.
|
| 562 |
+
"""
|
| 563 |
+
plan_config = PROMPTS_CONFIG.get("plan", {})
|
| 564 |
+
system = plan_config.get("system", "")
|
| 565 |
+
output_format = plan_config.get("output_format", "")
|
| 566 |
+
|
| 567 |
+
fires = world_state.get("fires", [])
|
| 568 |
+
available_slots = assessment.max_units - assessment.unit_count
|
| 569 |
+
|
| 570 |
+
# Sort fires by priority for context
|
| 571 |
+
buildings = world_state.get("buildings", [])
|
| 572 |
+
building_positions = set((b["x"], b["y"]) for b in buildings)
|
| 573 |
+
|
| 574 |
+
def fire_priority(f):
|
| 575 |
+
near_building = any(
|
| 576 |
+
abs(f["x"] - bx) <= 1 and abs(f["y"] - by) <= 1
|
| 577 |
+
for bx, by in building_positions
|
| 578 |
+
)
|
| 579 |
+
return f.get("intensity", 0) + (1.0 if near_building else 0)
|
| 580 |
+
|
| 581 |
+
priority_targets = sorted(fires, key=fire_priority, reverse=True)
|
| 582 |
+
|
| 583 |
+
user_message = f"""Assessment Result:
|
| 584 |
+
- Threat Level: {assessment.threat_level}
|
| 585 |
+
- Fire Count: {assessment.fire_count}
|
| 586 |
+
- High Intensity Fires: {len(assessment.high_intensity_fires)}
|
| 587 |
+
- Building Threats: {len(assessment.building_threats)}
|
| 588 |
+
- UNCOVERED Fires (no unit in range): {len(assessment.uncovered_fires)}
|
| 589 |
+
- Coverage Ratio: {assessment.coverage_ratio:.1%}
|
| 590 |
+
- Effective Units: {len(assessment.effective_units)}
|
| 591 |
+
- INEFFECTIVE Units (not near any fire): {len(assessment.ineffective_units)}
|
| 592 |
+
- Summary: {assessment.summary}
|
| 593 |
+
|
| 594 |
+
Current Resources:
|
| 595 |
+
- Units deployed: {assessment.unit_count} / {assessment.max_units}
|
| 596 |
+
- Available slots: {available_slots}
|
| 597 |
+
|
| 598 |
+
UNCOVERED Fires (PRIORITY - these need coverage!):
|
| 599 |
+
{json.dumps([{"x": f["x"], "y": f["y"], "intensity": f["intensity"]} for f in assessment.uncovered_fires[:5]], indent=2)}
|
| 600 |
+
|
| 601 |
+
INEFFECTIVE Units (SHOULD BE MOVED to cover fires!):
|
| 602 |
+
{json.dumps([{"x": u["x"], "y": u["y"], "type": u["type"]} for u in assessment.ineffective_units], indent=2)}
|
| 603 |
+
|
| 604 |
+
Priority Fires (top 5):
|
| 605 |
+
{json.dumps([{"x": f["x"], "y": f["y"], "intensity": f["intensity"]} for f in priority_targets[:5]], indent=2)}
|
| 606 |
+
|
| 607 |
+
REMEMBER: If there are uncovered fires AND ineffective units, you SHOULD reposition those units!
|
| 608 |
+
|
| 609 |
+
Output format:
|
| 610 |
+
{output_format}"""
|
| 611 |
+
|
| 612 |
+
response = self._call_llm(system, user_message)
|
| 613 |
+
|
| 614 |
+
if not response:
|
| 615 |
+
# Smart fallback: calculate deploy count based on situation
|
| 616 |
+
uncovered_count = len(assessment.uncovered_fires)
|
| 617 |
+
idle_count = len(assessment.ineffective_units)
|
| 618 |
+
fires_after_reposition = max(0, uncovered_count - idle_count)
|
| 619 |
+
building_threats = len(assessment.building_threats)
|
| 620 |
+
|
| 621 |
+
# Smart deploy calculation
|
| 622 |
+
if building_threats > 0:
|
| 623 |
+
# Building emergency! Deploy enough to cover all building threats
|
| 624 |
+
smart_deploy = max(building_threats, fires_after_reposition)
|
| 625 |
+
elif uncovered_count <= 2:
|
| 626 |
+
# Few fires - deploy just enough
|
| 627 |
+
smart_deploy = fires_after_reposition
|
| 628 |
+
elif uncovered_count <= 5:
|
| 629 |
+
# Moderate fires - deploy to cover + small buffer
|
| 630 |
+
smart_deploy = fires_after_reposition + 1
|
| 631 |
+
else:
|
| 632 |
+
# Many fires - deploy more aggressively
|
| 633 |
+
smart_deploy = fires_after_reposition + 2
|
| 634 |
+
|
| 635 |
+
smart_deploy = min(smart_deploy, available_slots)
|
| 636 |
+
|
| 637 |
+
return PlanResult(
|
| 638 |
+
strategy="balanced" if assessment.ineffective_units else "deploy_new",
|
| 639 |
+
reasoning=f"AI planning unavailable - smart fallback: {uncovered_count} uncovered fires, repositioning {idle_count} idle units, deploying {smart_deploy} new",
|
| 640 |
+
deploy_count=smart_deploy,
|
| 641 |
+
reposition_units=assessment.ineffective_units[:],
|
| 642 |
+
priority_targets=priority_targets[:5]
|
| 643 |
+
)
|
| 644 |
+
|
| 645 |
+
try:
|
| 646 |
+
# Parse units to reposition
|
| 647 |
+
reposition_data = response.get("units_to_reposition", [])
|
| 648 |
+
reposition_units = []
|
| 649 |
+
for item in reposition_data:
|
| 650 |
+
if isinstance(item, list) and len(item) >= 3:
|
| 651 |
+
sx, sy, utype = item[0], item[1], item[2]
|
| 652 |
+
for u in assessment.ineffective_units:
|
| 653 |
+
if u["x"] == sx and u["y"] == sy:
|
| 654 |
+
reposition_units.append(u)
|
| 655 |
+
break
|
| 656 |
+
|
| 657 |
+
# If reposition_needed but no specific units, use all ineffective
|
| 658 |
+
if response.get("reposition_needed", False) and not reposition_units:
|
| 659 |
+
reposition_units = assessment.ineffective_units[:]
|
| 660 |
+
|
| 661 |
+
# Map priority_fire_indices to actual fires
|
| 662 |
+
priority_indices = response.get("priority_fire_indices", [0, 1, 2])
|
| 663 |
+
selected_targets = []
|
| 664 |
+
for i in priority_indices:
|
| 665 |
+
if isinstance(i, int) and i < len(priority_targets):
|
| 666 |
+
selected_targets.append(priority_targets[i])
|
| 667 |
+
|
| 668 |
+
if not selected_targets:
|
| 669 |
+
selected_targets = priority_targets[:5]
|
| 670 |
+
|
| 671 |
+
return PlanResult(
|
| 672 |
+
strategy=response.get("strategy", "balanced"),
|
| 673 |
+
reasoning=response.get("reasoning", ""),
|
| 674 |
+
deploy_count=response.get("deploy_count", 0),
|
| 675 |
+
reposition_units=reposition_units,
|
| 676 |
+
priority_targets=selected_targets
|
| 677 |
+
)
|
| 678 |
+
except Exception as e:
|
| 679 |
+
print(f"Error parsing AI plan: {e}")
|
| 680 |
+
# Smart fallback: calculate deploy count based on situation
|
| 681 |
+
uncovered_count = len(assessment.uncovered_fires)
|
| 682 |
+
idle_count = len(assessment.ineffective_units)
|
| 683 |
+
fires_after_reposition = max(0, uncovered_count - idle_count)
|
| 684 |
+
building_threats = len(assessment.building_threats)
|
| 685 |
+
|
| 686 |
+
# Smart deploy calculation
|
| 687 |
+
if building_threats > 0:
|
| 688 |
+
smart_deploy = max(building_threats, fires_after_reposition)
|
| 689 |
+
elif uncovered_count <= 2:
|
| 690 |
+
smart_deploy = fires_after_reposition
|
| 691 |
+
else:
|
| 692 |
+
smart_deploy = fires_after_reposition + 1
|
| 693 |
+
|
| 694 |
+
smart_deploy = min(smart_deploy, available_slots)
|
| 695 |
+
|
| 696 |
+
return PlanResult(
|
| 697 |
+
strategy="balanced",
|
| 698 |
+
reasoning=f"Plan parse error: {e} - using smart fallback",
|
| 699 |
+
deploy_count=smart_deploy,
|
| 700 |
+
reposition_units=assessment.ineffective_units[:],
|
| 701 |
+
priority_targets=priority_targets[:5]
|
| 702 |
+
)
|
| 703 |
+
|
| 704 |
+
# =========================================================================
|
| 705 |
+
# Stage 3: Execution
|
| 706 |
+
# =========================================================================
|
| 707 |
+
|
| 708 |
+
def execute(
|
| 709 |
+
self,
|
| 710 |
+
world_state: dict,
|
| 711 |
+
assessment: AssessmentResult,
|
| 712 |
+
plan: PlanResult
|
| 713 |
+
) -> list[Recommendation]:
|
| 714 |
+
"""
|
| 715 |
+
Stage 3: Generate specific deployment/move recommendations using AI.
|
| 716 |
+
"""
|
| 717 |
+
# Skip if strategy is monitor
|
| 718 |
+
if plan.strategy == "monitor":
|
| 719 |
+
return []
|
| 720 |
+
|
| 721 |
+
execute_config = PROMPTS_CONFIG.get("execute", {})
|
| 722 |
+
system = execute_config.get("system", "")
|
| 723 |
+
output_format = execute_config.get("output_format", "")
|
| 724 |
+
|
| 725 |
+
fires = world_state.get("fires", [])
|
| 726 |
+
units = world_state.get("units", [])
|
| 727 |
+
buildings = world_state.get("buildings", [])
|
| 728 |
+
width = world_state.get("width", 10)
|
| 729 |
+
height = world_state.get("height", 10)
|
| 730 |
+
|
| 731 |
+
user_message = f"""Assessment:
|
| 732 |
+
- Threat Level: {assessment.threat_level}
|
| 733 |
+
- Summary: {assessment.summary}
|
| 734 |
+
- Uncovered Fires: {len(assessment.uncovered_fires)}
|
| 735 |
+
- Effective Units: {len(assessment.effective_units)}
|
| 736 |
+
- Ineffective Units: {len(assessment.ineffective_units)}
|
| 737 |
+
|
| 738 |
+
Plan:
|
| 739 |
+
- Strategy: {plan.strategy}
|
| 740 |
+
- Reasoning: {plan.reasoning}
|
| 741 |
+
- Deploy Count: {plan.deploy_count}
|
| 742 |
+
- Reposition Needed: {len(plan.reposition_units) > 0}
|
| 743 |
+
|
| 744 |
+
World State:
|
| 745 |
+
- Grid: {width}x{height}
|
| 746 |
+
|
| 747 |
+
UNCOVERED FIRES (PRIORITY TARGETS - these need units!):
|
| 748 |
+
{json.dumps([{"x": f["x"], "y": f["y"], "intensity": f["intensity"]} for f in assessment.uncovered_fires[:5]], indent=2)}
|
| 749 |
+
|
| 750 |
+
INEFFECTIVE UNITS (MOVE THESE to uncovered fires!):
|
| 751 |
+
{json.dumps([{"x": u["x"], "y": u["y"], "type": u["type"]} for u in plan.reposition_units[:5]], indent=2)}
|
| 752 |
+
|
| 753 |
+
All Fire positions: {json.dumps([(f["x"], f["y"], round(f["intensity"], 2)) for f in fires[:15]])}
|
| 754 |
+
All Unit positions: {json.dumps([(u["x"], u["y"], u["type"]) for u in units])}
|
| 755 |
+
Building positions: {json.dumps([(b["x"], b["y"]) for b in buildings[:15]])}
|
| 756 |
+
|
| 757 |
+
INSTRUCTIONS:
|
| 758 |
+
1. FIRST generate MOVE actions for ineffective units → move them to uncovered fires
|
| 759 |
+
2. THEN generate DEPLOY actions if more units needed
|
| 760 |
+
3. Max 4 recommendations total
|
| 761 |
+
4. Remember: deploy ADJACENT to fire (1-2 cells away), not ON the fire
|
| 762 |
+
|
| 763 |
+
Output format:
|
| 764 |
+
{output_format}"""
|
| 765 |
+
|
| 766 |
+
response = self._call_llm(system, user_message)
|
| 767 |
+
|
| 768 |
+
if not response:
|
| 769 |
+
# Generate basic recommendations if AI fails
|
| 770 |
+
return self._generate_fallback_recommendations(world_state, assessment, plan)
|
| 771 |
+
|
| 772 |
+
try:
|
| 773 |
+
recommendations = []
|
| 774 |
+
raw_recs = response.get("recommendations", [])
|
| 775 |
+
|
| 776 |
+
# Get blocked positions
|
| 777 |
+
fire_positions = set((f["x"], f["y"]) for f in fires)
|
| 778 |
+
unit_positions = set((u["x"], u["y"]) for u in units)
|
| 779 |
+
building_positions = set((b["x"], b["y"]) for b in buildings)
|
| 780 |
+
used_positions = set()
|
| 781 |
+
|
| 782 |
+
for rec in raw_recs[:4]: # Limit to 4
|
| 783 |
+
action = rec.get("action", "deploy")
|
| 784 |
+
unit_type = rec.get("unit_type", "fire_truck")
|
| 785 |
+
target = rec.get("target", {})
|
| 786 |
+
target_x = target.get("x", 0)
|
| 787 |
+
target_y = target.get("y", 0)
|
| 788 |
+
reason = rec.get("reason", "AI recommendation")
|
| 789 |
+
|
| 790 |
+
# Validate target position
|
| 791 |
+
pos = (target_x, target_y)
|
| 792 |
+
if pos in fire_positions or pos in building_positions or pos in used_positions:
|
| 793 |
+
# Try to find valid nearby position
|
| 794 |
+
valid_pos = self._find_deploy_position(
|
| 795 |
+
target_x, target_y, world_state,
|
| 796 |
+
exclude_positions=used_positions | unit_positions
|
| 797 |
+
)
|
| 798 |
+
if valid_pos:
|
| 799 |
+
target_x, target_y = valid_pos
|
| 800 |
+
else:
|
| 801 |
+
continue
|
| 802 |
+
|
| 803 |
+
used_positions.add((target_x, target_y))
|
| 804 |
+
|
| 805 |
+
if action == "move":
|
| 806 |
+
source = rec.get("source", {})
|
| 807 |
+
source_x = source.get("x", -1)
|
| 808 |
+
source_y = source.get("y", -1)
|
| 809 |
+
|
| 810 |
+
# Validate source position has a unit
|
| 811 |
+
if (source_x, source_y) not in unit_positions:
|
| 812 |
+
continue
|
| 813 |
+
|
| 814 |
+
recommendations.append(Recommendation(
|
| 815 |
+
reason=reason,
|
| 816 |
+
suggested_unit_type=unit_type,
|
| 817 |
+
target_x=target_x,
|
| 818 |
+
target_y=target_y,
|
| 819 |
+
action="move",
|
| 820 |
+
source_x=source_x,
|
| 821 |
+
source_y=source_y
|
| 822 |
+
))
|
| 823 |
+
elif action == "remove":
|
| 824 |
+
# Remove action: remove unit at position
|
| 825 |
+
position = rec.get("position", {})
|
| 826 |
+
pos_x = position.get("x", target_x)
|
| 827 |
+
pos_y = position.get("y", target_y)
|
| 828 |
+
unit_type = rec.get("unit_type", "fire_truck")
|
| 829 |
+
|
| 830 |
+
# Validate position has a unit
|
| 831 |
+
if (pos_x, pos_y) not in unit_positions:
|
| 832 |
+
continue
|
| 833 |
+
|
| 834 |
+
recommendations.append(Recommendation(
|
| 835 |
+
reason=reason,
|
| 836 |
+
suggested_unit_type=unit_type,
|
| 837 |
+
target_x=pos_x,
|
| 838 |
+
target_y=pos_y,
|
| 839 |
+
action="remove"
|
| 840 |
+
))
|
| 841 |
+
else:
|
| 842 |
+
recommendations.append(Recommendation(
|
| 843 |
+
reason=reason,
|
| 844 |
+
suggested_unit_type=unit_type,
|
| 845 |
+
target_x=target_x,
|
| 846 |
+
target_y=target_y,
|
| 847 |
+
action="deploy"
|
| 848 |
+
))
|
| 849 |
+
|
| 850 |
+
return recommendations
|
| 851 |
+
except Exception as e:
|
| 852 |
+
print(f"Error parsing AI execution: {e}")
|
| 853 |
+
return self._generate_fallback_recommendations(world_state, assessment, plan)
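A note on the shape the execution stage expects from the model: the parser above reads `action`, `unit_type`, `target`, `source` (for moves), `position` (for removes), and `reason` from each entry. A hypothetical example of that JSON, with invented coordinates, is sketched below; the parser keeps at most four entries and silently drops any recommendation whose target collides with a fire, building, or already-used cell and cannot be nudged to a free neighbour by `_find_deploy_position`.

```python
# Hypothetical example of the execution-stage model output (values are invented).
example_response = {
    "recommendations": [
        {
            "action": "move",
            "unit_type": "fire_truck",
            "source": {"x": 2, "y": 3},   # must hold an existing unit, or the entry is skipped
            "target": {"x": 5, "y": 4},   # adjacent to the fire, never on it
            "reason": "Reposition idle truck to uncovered fire at (5, 5)",
        },
        {
            "action": "deploy",
            "unit_type": "helicopter",
            "target": {"x": 7, "y": 1},
            "reason": "Cover high-intensity fire threatening a building",
        },
    ]
}

print(len(example_response["recommendations"]))  # 2
```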
|
| 854 |
+
|
| 855 |
+
def summarize(
|
| 856 |
+
self,
|
| 857 |
+
world_state: dict,
|
| 858 |
+
assessment: AssessmentResult,
|
| 859 |
+
plan: PlanResult,
|
| 860 |
+
recommendations: list[Recommendation],
|
| 861 |
+
advisor_response: AdvisorResponse,
|
| 862 |
+
) -> CycleSummary:
|
| 863 |
+
"""Stage 4: Summarize the cycle results using AI."""
|
| 864 |
+
summary_config = PROMPTS_CONFIG.get("summary", {})
|
| 865 |
+
system = summary_config.get("system", "")
|
| 866 |
+
output_format = summary_config.get("output_format", "")
|
| 867 |
+
|
| 868 |
+
fires = world_state.get("fires", [])
|
| 869 |
+
units = world_state.get("units", [])
|
| 870 |
+
tick = world_state.get("tick", 0)
|
| 871 |
+
status = world_state.get("status", "running")
|
| 872 |
+
|
| 873 |
+
rec_blocks = []
|
| 874 |
+
for idx, rec in enumerate(recommendations, 1):
|
| 875 |
+
block = {
|
| 876 |
+
"index": idx,
|
| 877 |
+
"action": rec.action,
|
| 878 |
+
"unit_type": rec.suggested_unit_type,
|
| 879 |
+
"target": {"x": rec.target_x, "y": rec.target_y},
|
| 880 |
+
"source": {"x": rec.source_x, "y": rec.source_y} if rec.action == "move" else None,
|
| 881 |
+
"reason": rec.reason,
|
| 882 |
+
}
|
| 883 |
+
rec_blocks.append(block)
|
| 884 |
+
|
| 885 |
+
user_message = f"""Tick: {tick} | Status: {status}
|
| 886 |
+
Threat Level: {assessment.threat_level} | Building Integrity: {assessment.building_integrity:.0%}
|
| 887 |
+
Fires: {assessment.fire_count} | Uncovered Fires: {len(assessment.uncovered_fires)}
|
| 888 |
+
Idle Units: {len(assessment.ineffective_units)} | Total Units: {assessment.unit_count}/{assessment.max_units}
|
| 889 |
+
|
| 890 |
+
Stage 1 Summary:
|
| 891 |
+
{assessment.summary}
|
| 892 |
+
|
| 893 |
+
Stage 2 Strategy:
|
| 894 |
+
- Strategy: {plan.strategy}
|
| 895 |
+
- Reasoning: {plan.reasoning}
|
| 896 |
+
- Deploy Count: {plan.deploy_count}
|
| 897 |
+
- Reposition Units: {len(plan.reposition_units)}
|
| 898 |
+
|
| 899 |
+
Stage 3 Recommendations:
|
| 900 |
+
{json.dumps(rec_blocks[:5], indent=2)}
|
| 901 |
+
|
| 902 |
+
World Snapshot (first 5 fires / units):
|
| 903 |
+
Fires -> {json.dumps(fires[:5], indent=2)}
|
| 904 |
+
Units -> {json.dumps(units[:5], indent=2)}
|
| 905 |
+
|
| 906 |
+
OUTPUT FORMAT:
|
| 907 |
+
{output_format}
|
| 908 |
+
"""
|
| 909 |
+
|
| 910 |
+
response = self._call_llm(system, user_message)
|
| 911 |
+
if not response:
|
| 912 |
+
return CycleSummary(
|
| 913 |
+
headline=advisor_response.summary if advisor_response else "Cycle summary unavailable",
|
| 914 |
+
threat_level=assessment.threat_level,
|
| 915 |
+
key_highlights=[(advisor_response.analysis or "Analysis unavailable.") if advisor_response else "Analysis unavailable."],
|
| 916 |
+
risks=["Summary model unavailable."],
|
| 917 |
+
next_focus=["Review building-adjacent fires manually."],
|
| 918 |
+
)
|
| 919 |
+
|
| 920 |
+
def _coerce_items(value, fallback):
|
| 921 |
+
if isinstance(value, list):
|
| 922 |
+
cleaned = [str(item).strip() for item in value if str(item).strip()]
|
| 923 |
+
return cleaned or fallback
|
| 924 |
+
if isinstance(value, str) and value.strip():
|
| 925 |
+
return [value.strip()]
|
| 926 |
+
return fallback
|
| 927 |
+
|
| 928 |
+
headline = str(response.get("headline", advisor_response.summary if advisor_response else "Cycle summary")).strip()
|
| 929 |
+
threat_level = str(response.get("threat_level", assessment.threat_level or "MODERATE")).strip()
|
| 930 |
+
key_highlights = _coerce_items(response.get("key_highlights"), [(advisor_response.analysis or "Highlights unavailable.") if advisor_response else "Highlights unavailable."])
|
| 931 |
+
risks = _coerce_items(response.get("risks"), ["No risks provided."])
|
| 932 |
+
next_focus = _coerce_items(response.get("next_focus"), ["Maintain coverage on building threats."])
|
| 933 |
+
|
| 934 |
+
return CycleSummary(
|
| 935 |
+
headline=headline or "Cycle summary",
|
| 936 |
+
threat_level=threat_level or (assessment.threat_level or "MODERATE"),
|
| 937 |
+
key_highlights=key_highlights,
|
| 938 |
+
risks=risks,
|
| 939 |
+
next_focus=next_focus,
|
| 940 |
+
)
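The nested `_coerce_items` helper is what keeps the Stage 4 output well-formed whatever the summary model returns. A standalone copy of the same normalization rules, with a few illustrative inputs:

```python
def coerce_items(value, fallback):
    """Standalone copy of the normalization used by summarize() (illustrative)."""
    if isinstance(value, list):
        cleaned = [str(item).strip() for item in value if str(item).strip()]
        return cleaned or fallback
    if isinstance(value, str) and value.strip():
        return [value.strip()]
    return fallback

print(coerce_items(["  risk A ", "", "risk B"], ["none"]))  # ['risk A', 'risk B']
print(coerce_items("single highlight", ["none"]))           # ['single highlight']
print(coerce_items(None, ["Maintain coverage."]))           # ['Maintain coverage.']
```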
|
| 941 |
+
|
| 942 |
+
def _generate_fallback_recommendations(
|
| 943 |
+
self,
|
| 944 |
+
world_state: dict,
|
| 945 |
+
assessment: AssessmentResult,
|
| 946 |
+
plan: PlanResult
|
| 947 |
+
) -> list[Recommendation]:
|
| 948 |
+
"""Generate SMART recommendations when AI fails - prioritize buildings, deploy efficiently!"""
|
| 949 |
+
recommendations = []
|
| 950 |
+
units = world_state.get("units", [])
|
| 951 |
+
buildings = world_state.get("buildings", [])
|
| 952 |
+
unit_positions = set((u["x"], u["y"]) for u in units)
|
| 953 |
+
building_positions = set((b["x"], b["y"]) for b in buildings)
|
| 954 |
+
used_positions = set()
|
| 955 |
+
|
| 956 |
+
# Helper: check if fire threatens building
|
| 957 |
+
def threatens_building(fire):
|
| 958 |
+
for bx, by in building_positions:
|
| 959 |
+
if abs(fire["x"] - bx) + abs(fire["y"] - by) <= 2:
|
| 960 |
+
return True
|
| 961 |
+
return False
|
| 962 |
+
|
| 963 |
+
# Sort uncovered fires: building threats FIRST, then by intensity
|
| 964 |
+
priority_fires = sorted(
|
| 965 |
+
assessment.uncovered_fires,
|
| 966 |
+
key=lambda f: (-int(threatens_building(f)), -f.get("intensity", 0))
|
| 967 |
+
)
|
| 968 |
+
|
| 969 |
+
# Count building threats
|
| 970 |
+
building_threat_count = sum(1 for f in priority_fires if threatens_building(f))
|
| 971 |
+
|
| 972 |
+
# Move ALL ineffective units to priority fires (this is free optimization!)
|
| 973 |
+
for i, unit in enumerate(plan.reposition_units):
|
| 974 |
+
if i >= len(priority_fires):
|
| 975 |
+
break
|
| 976 |
+
|
| 977 |
+
target_fire = priority_fires[i]
|
| 978 |
+
deploy_pos = self._find_deploy_position(
|
| 979 |
+
target_fire["x"], target_fire["y"], world_state,
|
| 980 |
+
exclude_positions=used_positions | (unit_positions - {(unit["x"], unit["y"])})
|
| 981 |
+
)
|
| 982 |
+
|
| 983 |
+
if deploy_pos:
|
| 984 |
+
used_positions.add(deploy_pos)
|
| 985 |
+
is_building_threat = threatens_building(target_fire)
|
| 986 |
+
recommendations.append(Recommendation(
|
| 987 |
+
reason=f"{'🏢 BUILDING THREAT! ' if is_building_threat else ''}Move to cover fire at ({target_fire['x']}, {target_fire['y']})",
|
| 988 |
+
suggested_unit_type=unit["type"],
|
| 989 |
+
target_x=deploy_pos[0],
|
| 990 |
+
target_y=deploy_pos[1],
|
| 991 |
+
action="move",
|
| 992 |
+
source_x=unit["x"],
|
| 993 |
+
source_y=unit["y"]
|
| 994 |
+
))
|
| 995 |
+
|
| 996 |
+
# SMART deploy calculation: only deploy what we actually need
|
| 997 |
+
available_slots = assessment.max_units - assessment.unit_count
|
| 998 |
+
remaining_fires = priority_fires[len(recommendations):]
|
| 999 |
+
remaining_building_threats = sum(1 for f in remaining_fires if threatens_building(f))
|
| 1000 |
+
|
| 1001 |
+
# Calculate smart deploy count
|
| 1002 |
+
if remaining_building_threats > 0:
|
| 1003 |
+
# Building emergency! Deploy enough to cover ALL building threats
|
| 1004 |
+
smart_deploy_count = max(remaining_building_threats, min(len(remaining_fires), available_slots))
|
| 1005 |
+
elif len(remaining_fires) <= 2:
|
| 1006 |
+
# Few fires - deploy just enough
|
| 1007 |
+
smart_deploy_count = len(remaining_fires)
|
| 1008 |
+
elif len(remaining_fires) <= 5:
|
| 1009 |
+
# Moderate fires - deploy to cover + small buffer
|
| 1010 |
+
smart_deploy_count = min(len(remaining_fires) + 1, available_slots)
|
| 1011 |
+
else:
|
| 1012 |
+
# Many fires - deploy more but not all
|
| 1013 |
+
smart_deploy_count = min(len(remaining_fires), available_slots)
|
| 1014 |
+
|
| 1015 |
+
# Deploy to remaining uncovered fires (up to smart_deploy_count)
|
| 1016 |
+
for i, fire in enumerate(remaining_fires[:smart_deploy_count]):
|
| 1017 |
+
deploy_pos = self._find_deploy_position(
|
| 1018 |
+
fire["x"], fire["y"], world_state,
|
| 1019 |
+
exclude_positions=used_positions | unit_positions
|
| 1020 |
+
)
|
| 1021 |
+
|
| 1022 |
+
if deploy_pos:
|
| 1023 |
+
used_positions.add(deploy_pos)
|
| 1024 |
+
is_building_threat = threatens_building(fire)
|
| 1025 |
+
# Use fire_truck for building threats and high intensity (40% power)
|
| 1026 |
+
unit_type = "fire_truck" if is_building_threat or fire.get("intensity", 0) > 0.5 else "helicopter"
|
| 1027 |
+
recommendations.append(Recommendation(
|
| 1028 |
+
reason=f"{'🏢 BUILDING THREAT! ' if is_building_threat else ''}Deploy to cover fire at ({fire['x']}, {fire['y']})",
|
| 1029 |
+
suggested_unit_type=unit_type,
|
| 1030 |
+
target_x=deploy_pos[0],
|
| 1031 |
+
target_y=deploy_pos[1],
|
| 1032 |
+
action="deploy"
|
| 1033 |
+
))
|
| 1034 |
+
|
| 1035 |
+
return recommendations[:4] # Cap at 4 for UI display
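The deploy-count heuristic above scales with how many uncovered fires remain and whether any of them still threaten buildings. A small standalone sketch of the same branching, with invented numbers, makes the cutoffs concrete:

```python
def smart_deploy_count(remaining_fires: int, building_threats: int, available_slots: int) -> int:
    """Mirror of the fallback deploy-count branching above (illustrative only)."""
    if building_threats > 0:
        # Building emergency: cover every building threat, at least up to capacity
        return max(building_threats, min(remaining_fires, available_slots))
    if remaining_fires <= 2:
        return remaining_fires
    if remaining_fires <= 5:
        return min(remaining_fires + 1, available_slots)
    return min(remaining_fires, available_slots)

print(smart_deploy_count(remaining_fires=4, building_threats=0, available_slots=3))  # 3
print(smart_deploy_count(remaining_fires=6, building_threats=2, available_slots=3))  # 3
print(smart_deploy_count(remaining_fires=1, building_threats=0, available_slots=5))  # 1
```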
|
| 1036 |
+
|
| 1037 |
+
# =========================================================================
|
| 1038 |
+
# After-Action Report
|
| 1039 |
+
# =========================================================================
|
| 1040 |
+
|
| 1041 |
+
def generate_after_action_report(self, context: dict) -> AfterActionReport:
|
| 1042 |
+
"""
|
| 1043 |
+
Build an after-action report using Assessment / Planning / Execution transcripts.
|
| 1044 |
+
"""
|
| 1045 |
+
after_action_config = PROMPTS_CONFIG.get("after_action", {})
|
| 1046 |
+
system = after_action_config.get("system", "")
|
| 1047 |
+
output_format = after_action_config.get("output_format", "")
|
| 1048 |
+
|
| 1049 |
+
outcome = context.get("outcome", "unknown")
|
| 1050 |
+
default_summary = context.get("summary_text") or f"Mission outcome: {outcome}"
|
| 1051 |
+
report = AfterActionReport(
|
| 1052 |
+
summary=default_summary,
|
| 1053 |
+
strengths=[],
|
| 1054 |
+
improvements=[],
|
| 1055 |
+
next_actions=[],
|
| 1056 |
+
outcome=outcome,
|
| 1057 |
+
)
|
| 1058 |
+
report.charts = {
|
| 1059 |
+
"metrics": context.get("chart_points") or [],
|
| 1060 |
+
"threat_levels": context.get("threat_history") or [],
|
| 1061 |
+
"action_density": context.get("action_history") or [],
|
| 1062 |
+
}
|
| 1063 |
+
report.player_actions = context.get("player_actions_context") or {}
|
| 1064 |
+
|
| 1065 |
+
if not self.client:
|
| 1066 |
+
report.error = "Missing HF_TOKEN – unable to generate AI after-action report."
|
| 1067 |
+
return report
|
| 1068 |
+
|
| 1069 |
+
def _section(title: str, body: str) -> str:
|
| 1070 |
+
if not body:
|
| 1071 |
+
return f"{title}\n(no data available)\n"
|
| 1072 |
+
return f"{title}\n{body}\n"
|
| 1073 |
+
|
| 1074 |
+
header_lines = [
|
| 1075 |
+
f"Mission Outcome: {context.get('outcome_label', outcome)}",
|
| 1076 |
+
f"Tick: {context.get('tick', 0)}",
|
| 1077 |
+
f"Fires Remaining: {context.get('fires_remaining', 0)}",
|
| 1078 |
+
f"Units Active: {context.get('units_active', 0)}",
|
| 1079 |
+
f"Building Integrity: {context.get('building_integrity_percent', 'N/A')}",
|
| 1080 |
+
]
|
| 1081 |
+
|
| 1082 |
+
mission_summary = context.get("summary_text", "")
|
| 1083 |
+
if mission_summary:
|
| 1084 |
+
header_lines.append(f"Mission Summary: {mission_summary}")
|
| 1085 |
+
|
| 1086 |
+
cycle_summaries = context.get("cycle_summaries") or []
|
| 1087 |
+
if cycle_summaries:
|
| 1088 |
+
summary_lines = []
|
| 1089 |
+
for entry in cycle_summaries:
|
| 1090 |
+
tick = entry.get("tick", "?")
|
| 1091 |
+
headline = entry.get("headline", "No headline")
|
| 1092 |
+
threat = entry.get("threat_level", "N/A")
|
| 1093 |
+
highlights = entry.get("key_highlights") or []
|
| 1094 |
+
risks = entry.get("risks") or []
|
| 1095 |
+
next_focus = entry.get("next_focus") or []
|
| 1096 |
+
block = [
|
| 1097 |
+
f"- [Tick {tick}] {headline} (Threat: {threat})",
|
| 1098 |
+
]
|
| 1099 |
+
if highlights:
|
| 1100 |
+
block.append(" • Highlights: " + "; ".join(highlights))
|
| 1101 |
+
if risks:
|
| 1102 |
+
block.append(" • Risks: " + "; ".join(risks))
|
| 1103 |
+
if next_focus:
|
| 1104 |
+
block.append(" • Next Focus: " + "; ".join(next_focus))
|
| 1105 |
+
summary_lines.append("\n".join(block))
|
| 1106 |
+
history_block = "\n".join(summary_lines)
|
| 1107 |
+
else:
|
| 1108 |
+
history_block = "- No prior cycles captured."
|
| 1109 |
+
|
| 1110 |
+
user_sections = [
|
| 1111 |
+
"Mission Status Summary:",
|
| 1112 |
+
"\n".join(header_lines),
|
| 1113 |
+
"",
|
| 1114 |
+
_section("Stage 1 · Assessment", context.get("assessment_md", "")),
|
| 1115 |
+
_section("Stage 2 · Planning", context.get("planning_md", "")),
|
| 1116 |
+
_section("Stage 3 · Execution", context.get("execution_md", "")),
|
| 1117 |
+
_section("Player Manual Actions", context.get("player_actions_md", "")),
|
| 1118 |
+
"Historical Cycle Summaries:",
|
| 1119 |
+
history_block,
|
| 1120 |
+
]
|
| 1121 |
+
|
| 1122 |
+
if output_format:
|
| 1123 |
+
user_sections.append("Please reply strictly using the JSON schema below:")
|
| 1124 |
+
user_sections.append(output_format)
|
| 1125 |
+
|
| 1126 |
+
user_message = "\n\n".join(user_sections)
|
| 1127 |
+
|
| 1128 |
+
response = self._call_llm(system or "You are a mission debrief analyst.", user_message)
|
| 1129 |
+
if not response:
|
| 1130 |
+
report.error = "Failed to retrieve AI response."
|
| 1131 |
+
return report
|
| 1132 |
+
|
| 1133 |
+
def _coerce_list(value) -> list[str]:
|
| 1134 |
+
if isinstance(value, list):
|
| 1135 |
+
return [str(item).strip() for item in value if str(item).strip()]
|
| 1136 |
+
if isinstance(value, str) and value.strip():
|
| 1137 |
+
return [value.strip()]
|
| 1138 |
+
return []
|
| 1139 |
+
|
| 1140 |
+
report.summary = str(response.get("summary", report.summary)).strip() or report.summary
|
| 1141 |
+
report.strengths = _coerce_list(response.get("strengths"))
|
| 1142 |
+
report.improvements = _coerce_list(response.get("improvements"))
|
| 1143 |
+
report.next_actions = _coerce_list(response.get("next_actions"))
|
| 1144 |
+
|
| 1145 |
+
if not (report.strengths or report.improvements or report.next_actions):
|
| 1146 |
+
report.error = "AI response did not contain any usable sections."
|
| 1147 |
+
|
| 1148 |
+
return report
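Since the method only reads from `context` via `.get()`, the caller controls its shape. A minimal, hypothetical context built from the keys consulted above (any field may be omitted; missing sections render as "(no data available)"):

```python
# Hypothetical minimal context; every key below is one the method reads.
context = {
    "outcome": "success",
    "outcome_label": "Victory",
    "summary_text": "All fires extinguished by tick 42.",
    "tick": 42,
    "fires_remaining": 0,
    "units_active": 3,
    "building_integrity_percent": "87%",
    "assessment_md": "Threat level peaked at HIGH around tick 20.",
    "planning_md": "Strategy shifted from contain to suppress.",
    "execution_md": "6 deploys, 2 repositions.",
    "player_actions_md": "Player deployed 1 truck manually.",
    "player_actions_context": {"counts": {"deploy_unit": 1}},
    "chart_points": [{"tick": 10, "fires": 4, "units": 2, "building_integrity": 0.95}],
    "threat_history": [{"tick": 10, "threat_level": "HIGH"}],
    "action_history": [],
    "cycle_summaries": [
        {"tick": 10, "headline": "Contained the east flank", "threat_level": "HIGH",
         "key_highlights": ["Two fires extinguished"], "risks": ["Fire near HQ"],
         "next_focus": ["Protect building row"]},
    ],
}
# report = agent.generate_after_action_report(context)  # assumes an AdvisorAgent instance
```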
|
| 1149 |
+
|
| 1150 |
+
# =========================================================================
|
| 1151 |
+
# Main Entry Point
|
| 1152 |
+
# =========================================================================
|
| 1153 |
+
|
| 1154 |
+
def analyze(self, world_state: dict) -> AdvisorResponse:
|
| 1155 |
+
"""
|
| 1156 |
+
Main entry point: Run multi-stage analysis pipeline.
|
| 1157 |
+
|
| 1158 |
+
Stage 1: Assessment - Analyze the situation
|
| 1159 |
+
Stage 2: Planning - Decide strategy
|
| 1160 |
+
Stage 3: Execution - Generate specific actions
|
| 1161 |
+
"""
|
| 1162 |
+
# Stage 1: Assessment
|
| 1163 |
+
assessment = self.assess(world_state)
|
| 1164 |
+
|
| 1165 |
+
# Stage 2: Planning
|
| 1166 |
+
plan = self.plan(world_state, assessment)
|
| 1167 |
+
|
| 1168 |
+
# Stage 3: Execution
|
| 1169 |
+
recommendations = self.execute(world_state, assessment, plan)
|
| 1170 |
+
|
| 1171 |
+
# Build thinking summary
|
| 1172 |
+
thinking_parts = [
|
| 1173 |
+
f"📊 Scanning {assessment.fire_count} active fires...",
|
| 1174 |
+
]
|
| 1175 |
+
if assessment.uncovered_fires:
|
| 1176 |
+
thinking_parts.append(f"🚨 ALERT: {len(assessment.uncovered_fires)} fire(s) with NO coverage!")
|
| 1177 |
+
if assessment.building_threats:
|
| 1178 |
+
thinking_parts.append(f"🏢 {len(assessment.building_threats)} fire(s) threatening buildings!")
|
| 1179 |
+
if assessment.ineffective_units:
|
| 1180 |
+
thinking_parts.append(f"🔄 {len(assessment.ineffective_units)} idle unit(s) should be repositioned")
|
| 1181 |
+
thinking_parts.append(f"🎯 Strategy: {plan.strategy.upper()} - {plan.reasoning}")
|
| 1182 |
+
|
| 1183 |
+
# Generate summary based on threat level
|
| 1184 |
+
priority_emoji = {
|
| 1185 |
+
"CRITICAL": "🔴",
|
| 1186 |
+
"HIGH": "🟠",
|
| 1187 |
+
"MODERATE": "🟡",
|
| 1188 |
+
"LOW": "🟢"
|
| 1189 |
+
}
|
| 1190 |
+
emoji = priority_emoji.get(assessment.threat_level, "⚪")
|
| 1191 |
+
|
| 1192 |
+
if assessment.threat_level == "CRITICAL":
|
| 1193 |
+
summary = f"{emoji} CRITICAL: {assessment.summary}. Immediate action required!"
|
| 1194 |
+
elif assessment.threat_level == "HIGH":
|
| 1195 |
+
summary = f"{emoji} HIGH: {assessment.summary}. Rapid response needed."
|
| 1196 |
+
elif assessment.threat_level == "MODERATE":
|
| 1197 |
+
summary = f"{emoji} MODERATE: {assessment.summary}. Tactical deployment advised."
|
| 1198 |
+
else:
|
| 1199 |
+
summary = f"{emoji} LOW: {assessment.summary}. Monitoring situation."
|
| 1200 |
+
|
| 1201 |
+
return AdvisorResponse(
|
| 1202 |
+
summary=summary,
|
| 1203 |
+
recommendations=recommendations,
|
| 1204 |
+
thinking="\n".join(thinking_parts),
|
| 1205 |
+
analysis=f"{assessment.fire_count} fires | {assessment.unit_count}/{assessment.max_units} units | {assessment.building_integrity:.0%} building integrity",
|
| 1206 |
+
priority=assessment.threat_level,
|
| 1207 |
+
assessment=assessment,
|
| 1208 |
+
plan=plan
|
| 1209 |
+
)
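A hedged usage sketch of this entry point follows. It assumes an already-constructed `AdvisorAgent` and a `world_state` dict with the fields the stages read (fires, units, buildings, tick, status, grid size); the attribute names mirror the `AdvisorResponse` and `Recommendation` fields populated above.

```python
def run_one_cycle(agent, world_state: dict) -> None:
    """Illustrative only: run the 3-stage pipeline and print the key outputs."""
    response = agent.analyze(world_state)

    print(response.priority)   # threat level from Stage 1, e.g. "HIGH"
    print(response.summary)    # one-line situation summary with emoji prefix
    print(response.thinking)   # multi-line trace of the stages

    for rec in response.recommendations:
        if rec.action == "move":
            print(f"move ({rec.source_x},{rec.source_y}) -> ({rec.target_x},{rec.target_y})")
        else:
            print(f"{rec.action} {rec.suggested_unit_type} at ({rec.target_x},{rec.target_y})")
```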
|
| 1210 |
+
|
| 1211 |
+
# =========================================================================
|
| 1212 |
+
# Helper Methods
|
| 1213 |
+
# =========================================================================
|
| 1214 |
+
|
| 1215 |
+
def _find_deploy_position(
|
| 1216 |
+
self,
|
| 1217 |
+
fire_x: int,
|
| 1218 |
+
fire_y: int,
|
| 1219 |
+
world_state: dict,
|
| 1220 |
+
exclude_positions: set = None
|
| 1221 |
+
) -> tuple[int, int] | None:
|
| 1222 |
+
"""
|
| 1223 |
+
Find a valid deployment position adjacent to a fire.
|
| 1224 |
+
Units cannot deploy on burning cells, so we find the nearest empty cell.
|
| 1225 |
+
"""
|
| 1226 |
+
if exclude_positions is None:
|
| 1227 |
+
exclude_positions = set()
|
| 1228 |
+
|
| 1229 |
+
fires = world_state.get("fires", [])
|
| 1230 |
+
units = world_state.get("units", [])
|
| 1231 |
+
buildings = world_state.get("buildings", [])
|
| 1232 |
+
width = world_state.get("width", 10)
|
| 1233 |
+
height = world_state.get("height", 10)
|
| 1234 |
+
|
| 1235 |
+
fire_positions = set((f["x"], f["y"]) for f in fires)
|
| 1236 |
+
unit_positions = set((u["x"], u["y"]) for u in units)
|
| 1237 |
+
building_positions = set((b["x"], b["y"]) for b in buildings)
|
| 1238 |
+
|
| 1239 |
+
# Check positions at increasing distances
|
| 1240 |
+
for distance in [1, 2, 3]:
|
| 1241 |
+
candidates = []
|
| 1242 |
+
for dx in range(-distance, distance + 1):
|
| 1243 |
+
for dy in range(-distance, distance + 1):
|
| 1244 |
+
if abs(dx) != distance and abs(dy) != distance:
|
| 1245 |
+
continue
|
| 1246 |
+
|
| 1247 |
+
nx, ny = fire_x + dx, fire_y + dy
|
| 1248 |
+
|
| 1249 |
+
# Check bounds
|
| 1250 |
+
if not (0 <= nx < width and 0 <= ny < height):
|
| 1251 |
+
continue
|
| 1252 |
+
|
| 1253 |
+
# Skip invalid positions
|
| 1254 |
+
if (nx, ny) in fire_positions:
|
| 1255 |
+
continue
|
| 1256 |
+
if (nx, ny) in unit_positions:
|
| 1257 |
+
continue
|
| 1258 |
+
if (nx, ny) in building_positions:
|
| 1259 |
+
continue
|
| 1260 |
+
if (nx, ny) in exclude_positions:
|
| 1261 |
+
continue
|
| 1262 |
+
|
| 1263 |
+
# Valid candidate
|
| 1264 |
+
dist_to_fire = abs(nx - fire_x) + abs(ny - fire_y)
|
| 1265 |
+
candidates.append((nx, ny, dist_to_fire))
|
| 1266 |
+
|
| 1267 |
+
if candidates:
|
| 1268 |
+
candidates.sort(key=lambda c: c[2])
|
| 1269 |
+
return (candidates[0][0], candidates[0][1])
|
| 1270 |
+
|
| 1271 |
+
return None
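The double loop above only visits the perimeter of each ring: `abs(dx) != distance and abs(dy) != distance` skips interior cells already covered by a smaller ring. A standalone sketch of that ring walk on an otherwise empty grid shows which cells are considered at each distance:

```python
def ring_cells(cx: int, cy: int, distance: int, width: int = 10, height: int = 10):
    """Cells on the square ring at the given Chebyshev distance (same walk as above)."""
    cells = []
    for dx in range(-distance, distance + 1):
        for dy in range(-distance, distance + 1):
            if abs(dx) != distance and abs(dy) != distance:
                continue  # interior cell, already covered by a smaller ring
            nx, ny = cx + dx, cy + dy
            if 0 <= nx < width and 0 <= ny < height:
                cells.append((nx, ny))
    return cells

print(len(ring_cells(5, 5, 1)))  # 8 cells immediately around the fire
print(len(ring_cells(5, 5, 2)))  # 16 cells on the next ring out
print(len(ring_cells(0, 0, 1)))  # 3 (ring clipped at the map corner)
```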
|
| 1272 |
+
|
| 1273 |
+
|
| 1274 |
+
# Backward compatibility
|
| 1275 |
+
def _fallback_analyze(self, world_state: dict) -> AdvisorResponse:
|
| 1276 |
+
"""Fallback method for service.py compatibility."""
|
| 1277 |
+
return self.analyze(world_state)
|
| 1278 |
+
|
| 1279 |
+
# Add method to class
|
| 1280 |
+
AdvisorAgent._fallback_analyze = _fallback_analyze
|
app.py
ADDED
|
@@ -0,0 +1,1865 @@
| 1 |
+
"""
|
| 2 |
+
Fire-Rescue MCP - Gradio Application
|
| 3 |
+
|
| 4 |
+
Main entry point for the Hugging Face Space deployment.
|
| 5 |
+
Provides Gradio UI for simulation control and visualization.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import html
|
| 9 |
+
import gradio as gr
|
| 10 |
+
from typing import Optional
|
| 11 |
+
|
| 12 |
+
from service import get_service, SimulationService
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
# =============================================================================
|
| 16 |
+
# Grid Visualization (Clickable)
|
| 17 |
+
# =============================================================================
|
| 18 |
+
|
| 19 |
+
def get_cell_info(state: dict, x: int, y: int) -> tuple[str, str]:
|
| 20 |
+
"""Get cell display info (emoji, css class)."""
|
| 21 |
+
fires = {(f["x"], f["y"]): f["intensity"] for f in state.get("fires", [])}
|
| 22 |
+
units = {(u["x"], u["y"]): u["type"] for u in state.get("units", [])}
|
| 23 |
+
buildings = {(b["x"], b["y"]) for b in state.get("buildings", [])}
|
| 24 |
+
|
| 25 |
+
# Default: forest
|
| 26 |
+
content = "🌲"
|
| 27 |
+
bg_color = "#1b4332"
|
| 28 |
+
|
| 29 |
+
# Building (from dynamic positions)
|
| 30 |
+
if (x, y) in buildings:
|
| 31 |
+
content = "🏢"
|
| 32 |
+
bg_color = "#495057"
|
| 33 |
+
|
| 34 |
+
# Fire - show intensity level with different visuals
|
| 35 |
+
if (x, y) in fires:
|
| 36 |
+
intensity = fires[(x, y)]
|
| 37 |
+
# Display fire intensity as percentage
|
| 38 |
+
pct = int(intensity * 100)
|
| 39 |
+
|
| 40 |
+
if intensity >= 0.9:
|
| 41 |
+
# Extreme fire - inferno
|
| 42 |
+
content = f"🔥{pct}"
|
| 43 |
+
bg_color = "#7f1d1d" # Very dark red
|
| 44 |
+
elif intensity >= 0.7:
|
| 45 |
+
# High fire
|
| 46 |
+
content = f"🔥{pct}"
|
| 47 |
+
bg_color = "#b91c1c" # Dark red
|
| 48 |
+
elif intensity >= 0.5:
|
| 49 |
+
# Medium-high fire
|
| 50 |
+
content = f"🔥{pct}"
|
| 51 |
+
bg_color = "#dc2626" # Red
|
| 52 |
+
elif intensity >= 0.3:
|
| 53 |
+
# Medium fire
|
| 54 |
+
content = f"🔥{pct}"
|
| 55 |
+
bg_color = "#ea580c" # Orange-red
|
| 56 |
+
elif intensity >= 0.1:
|
| 57 |
+
# Low fire
|
| 58 |
+
content = f"🔥{pct}"
|
| 59 |
+
bg_color = "#f97316" # Orange
|
| 60 |
+
else:
|
| 61 |
+
# Smoldering / almost out
|
| 62 |
+
content = f"💨{pct}"
|
| 63 |
+
bg_color = "#fbbf24" # Yellow-orange
|
| 64 |
+
|
| 65 |
+
# Unit (overwrites fire display)
|
| 66 |
+
if (x, y) in units:
|
| 67 |
+
unit_type = units[(x, y)]
|
| 68 |
+
content = "🚒" if unit_type == "fire_truck" else "🚁"
|
| 69 |
+
bg_color = "#0077b6"
|
| 70 |
+
|
| 71 |
+
return content, bg_color
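A short sketch of how the cell renderer behaves for a hand-built state. It assumes `get_cell_info` is in scope (it is defined above in app.py); the state values are invented.

```python
# Hypothetical mini world: one fire, one truck, one building on the 10x10 grid.
state = {
    "fires": [{"x": 2, "y": 2, "intensity": 0.75}],
    "units": [{"x": 3, "y": 2, "type": "fire_truck"}],
    "buildings": [{"x": 5, "y": 5}],
}

print(get_cell_info(state, 2, 2))  # ('🔥75', '#b91c1c')  high-intensity fire
print(get_cell_info(state, 3, 2))  # ('🚒', '#0077b6')    unit overrides the cell
print(get_cell_info(state, 5, 5))  # ('🏢', '#495057')    building
print(get_cell_info(state, 0, 0))  # ('🌲', '#1b4332')    default forest
```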
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def render_status_html(state: dict, is_thinking: bool = False, thinking_stage: int = 0) -> str:
|
| 75 |
+
"""Render simulation status as compact HTML bar."""
|
| 76 |
+
if state.get("status") == "idle":
|
| 77 |
+
return "<div class='status-compact'>🎮 Click Start to begin</div>"
|
| 78 |
+
|
| 79 |
+
tick = state.get("tick", 0)
|
| 80 |
+
status = state.get("status", "unknown")
|
| 81 |
+
building = state.get("building_integrity", 1.0)
|
| 82 |
+
fires = len(state.get("fires", []))
|
| 83 |
+
units = len(state.get("units", []))
|
| 84 |
+
|
| 85 |
+
status_colors = {
|
| 86 |
+
"running": "#4ade80",
|
| 87 |
+
"success": "#22d3ee",
|
| 88 |
+
"fail": "#f87171"
|
| 89 |
+
}
|
| 90 |
+
status_color = status_colors.get(status, "#888")
|
| 91 |
+
health_color = "#4ade80" if building > 0.6 else "#fbbf24" if building > 0.5 else "#f87171"
|
| 92 |
+
fire_color = "#22d3ee" if fires == 0 else "#f87171"
|
| 93 |
+
|
| 94 |
+
thinking_html = ""
|
| 95 |
+
if is_thinking:
|
| 96 |
+
stage_info = {
|
| 97 |
+
1: ("📊", "Assessing"),
|
| 98 |
+
2: ("🎯", "Planning"),
|
| 99 |
+
3: ("⚡", "Executing"),
|
| 100 |
+
4: ("🧭", "Summarizing"),
|
| 101 |
+
}
|
| 102 |
+
icon, label = stage_info.get(thinking_stage, ("🤔", "Thinking"))
|
| 103 |
+
thinking_html = f"""
|
| 104 |
+
<div class="status-item ai-thinking-inline">
|
| 105 |
+
<span class="ai-pulse">{icon}</span>
|
| 106 |
+
<span class="ai-label">AI</span>
|
| 107 |
+
<strong>{label}...</strong>
|
| 108 |
+
</div>
|
| 109 |
+
"""
|
| 110 |
+
|
| 111 |
+
status_html = f"""
|
| 112 |
+
<div class="status-compact">
|
| 113 |
+
<div class="status-item">
|
| 114 |
+
<span>⏱️</span>
|
| 115 |
+
<strong>{tick}</strong>
|
| 116 |
+
</div>
|
| 117 |
+
<div class="status-item">
|
| 118 |
+
<span class="status-badge" style="background: {status_color};">{status.upper()}</span>
|
| 119 |
+
</div>
|
| 120 |
+
<div class="status-item status-health">
|
| 121 |
+
<span>🏢</span>
|
| 122 |
+
<div class="progress-mini">
|
| 123 |
+
<div style="background: {health_color}; width: {building*100}%; height: 100%;"></div>
|
| 124 |
+
</div>
|
| 125 |
+
<span style="color: {health_color}; font-weight: bold;">{building:.0%}</span>
|
| 126 |
+
</div>
|
| 127 |
+
<div class="status-item">
|
| 128 |
+
<span>🔥</span>
|
| 129 |
+
<strong style="color: {fire_color};">{fires}</strong>
|
| 130 |
+
</div>
|
| 131 |
+
<div class="status-item">
|
| 132 |
+
<span>🚒</span>
|
| 133 |
+
<strong>{units}</strong>
|
| 134 |
+
</div>
|
| 135 |
+
{thinking_html}
|
| 136 |
+
</div>
|
| 137 |
+
"""
|
| 138 |
+
return status_html
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
def _chat_signature(messages: list[dict]) -> tuple:
|
| 142 |
+
"""Create a hashable signature for chatbot messages (role, content, metadata)."""
|
| 143 |
+
signature = []
|
| 144 |
+
for msg in messages or []:
|
| 145 |
+
metadata = msg.get("metadata") or {}
|
| 146 |
+
metadata_tuple = tuple(sorted(metadata.items()))
|
| 147 |
+
signature.append(
|
| 148 |
+
(
|
| 149 |
+
msg.get("role", "assistant"),
|
| 150 |
+
msg.get("content", ""),
|
| 151 |
+
metadata_tuple,
|
| 152 |
+
)
|
| 153 |
+
)
|
| 154 |
+
return tuple(signature)
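The signature is what the refresh loop compares to avoid re-rendering an unchanged chatbot. Because metadata is sorted before hashing, key order does not matter; a small illustration (assuming `_chat_signature` is in scope, as defined above):

```python
m1 = {"role": "assistant", "content": "ok", "metadata": {"title": "📊", "status": "done"}}
m2 = {"role": "assistant", "content": "ok", "metadata": {"status": "done", "title": "📊"}}

print(_chat_signature([m1]) == _chat_signature([m2]))              # True – metadata key order is irrelevant
print(hash(_chat_signature([m1])) == hash(_chat_signature([m2])))  # True – usable as a change-detection key
```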
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
def render_compact_status(state: dict) -> str:
|
| 158 |
+
"""Render a compact horizontal status bar."""
|
| 159 |
+
if state.get("status") == "idle":
|
| 160 |
+
return """
|
| 161 |
+
<div class="status-bar">
|
| 162 |
+
<div class="status-item">
|
| 163 |
+
<span style="color: #888;">🎮 Click Start to begin simulation</span>
|
| 164 |
+
</div>
|
| 165 |
+
</div>
|
| 166 |
+
"""
|
| 167 |
+
|
| 168 |
+
tick = state.get("tick", 0)
|
| 169 |
+
status = state.get("status", "unknown")
|
| 170 |
+
building = state.get("building_integrity", 1.0)
|
| 171 |
+
fires = len(state.get("fires", []))
|
| 172 |
+
units = len(state.get("units", []))
|
| 173 |
+
|
| 174 |
+
# Status colors
|
| 175 |
+
status_colors = {
|
| 176 |
+
"running": "#4ade80",
|
| 177 |
+
"success": "#22d3ee",
|
| 178 |
+
"fail": "#f87171"
|
| 179 |
+
}
|
| 180 |
+
status_color = status_colors.get(status, "#888")
|
| 181 |
+
health_color = "#4ade80" if building > 0.6 else "#fbbf24" if building > 0.5 else "#f87171"
|
| 182 |
+
fire_color = "#22d3ee" if fires == 0 else "#f87171"
|
| 183 |
+
|
| 184 |
+
status_html = f"""
|
| 185 |
+
<div class="status-bar">
|
| 186 |
+
<div class="status-item">
|
| 187 |
+
<span>⏱️ Tick:</span>
|
| 188 |
+
<strong>{tick}</strong>
|
| 189 |
+
</div>
|
| 190 |
+
<div class="status-item">
|
| 191 |
+
<span class="status-badge" style="background: {status_color}; color: #000;">{status.upper()}</span>
|
| 192 |
+
</div>
|
| 193 |
+
<div class="status-item">
|
| 194 |
+
<span>🏢</span>
|
| 195 |
+
<div class="progress-mini">
|
| 196 |
+
<div style="background: {health_color}; width: {building*100}%; height: 100%;"></div>
|
| 197 |
+
</div>
|
| 198 |
+
<span style="color: {health_color}; font-weight: bold;">{building:.0%}</span>
|
| 199 |
+
</div>
|
| 200 |
+
<div class="status-item">
|
| 201 |
+
<span>🔥</span>
|
| 202 |
+
<span style="color: {fire_color}; font-weight: bold;">{fires}</span>
|
| 203 |
+
</div>
|
| 204 |
+
<div class="status-item">
|
| 205 |
+
<span>🚒 {units}</span>
|
| 206 |
+
</div>
|
| 207 |
+
</div>
|
| 208 |
+
"""
|
| 209 |
+
return status_html
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
def render_ai_thinking_toast(is_thinking: bool, thinking_stage: int) -> str:
|
| 213 |
+
"""Render AI thinking toast notification (iOS-style)."""
|
| 214 |
+
if not is_thinking:
|
| 215 |
+
return ""
|
| 216 |
+
|
| 217 |
+
stage_info = {
|
| 218 |
+
1: ("📊", "Stage 1: Assessment", "Querying MCP tools & analyzing situation..."),
|
| 219 |
+
2: ("🎯", "Stage 2: Planning", "Formulating tactical strategy..."),
|
| 220 |
+
3: ("⚡", "Stage 3: Execution", "Generating MCP deployment commands..."),
|
| 221 |
+
4: ("🧭", "Stage 4: Summary", "Consolidating per-cycle findings..."),
|
| 222 |
+
}
|
| 223 |
+
|
| 224 |
+
icon, title, subtitle = stage_info.get(thinking_stage, ("🤔", "AI Thinking", "Processing..."))
|
| 225 |
+
|
| 226 |
+
return f"""
|
| 227 |
+
<div class="ai-toast-container">
|
| 228 |
+
<div class="ai-toast">
|
| 229 |
+
<div class="ai-toast-icon">{icon}</div>
|
| 230 |
+
<div class="ai-toast-content">
|
| 231 |
+
<div class="ai-toast-title">{title}</div>
|
| 232 |
+
<div class="ai-toast-subtitle">{subtitle}</div>
|
| 233 |
+
</div>
|
| 234 |
+
<div class="ai-toast-spinner"></div>
|
| 235 |
+
</div>
|
| 236 |
+
</div>
|
| 237 |
+
"""
|
| 238 |
+
|
| 239 |
+
|
| 240 |
+
def render_game_result(status: str, report_payload: Optional[dict] = None) -> str:
|
| 241 |
+
"""Render game result popup with after-action report."""
|
| 242 |
+
if status not in {"success", "fail"}:
|
| 243 |
+
return ""
|
| 244 |
+
|
| 245 |
+
report_payload = report_payload or {}
|
| 246 |
+
outcome_config = {
|
| 247 |
+
"success": {
|
| 248 |
+
"icon": "🎉",
|
| 249 |
+
"title": "VICTORY!",
|
| 250 |
+
"color": "#22d3ee",
|
| 251 |
+
"subtitle": "All fires extinguished!",
|
| 252 |
+
},
|
| 253 |
+
"fail": {
|
| 254 |
+
"icon": "💀",
|
| 255 |
+
"title": "GAME OVER",
|
| 256 |
+
"color": "#f87171",
|
| 257 |
+
"subtitle": "Building integrity dropped below 50%.",
|
| 258 |
+
},
|
| 259 |
+
}[status]
|
| 260 |
+
|
| 261 |
+
def _render_report_cards(title: str, items, css_class: str) -> str:
|
| 262 |
+
normalized = []
|
| 263 |
+
for item in items or []:
|
| 264 |
+
text = str(item).strip()
|
| 265 |
+
if text:
|
| 266 |
+
normalized.append(html.escape(text))
|
| 267 |
+
if not normalized:
|
| 268 |
+
return ""
|
| 269 |
+
bullets = "".join(f"<li>{value}</li>" for value in normalized)
|
| 270 |
+
icon = ""
|
| 271 |
+
label = html.escape(title)
|
| 272 |
+
parts = title.split(" ", 1)
|
| 273 |
+
if len(parts) == 2 and not parts[0].isalnum():
|
| 274 |
+
icon = html.escape(parts[0])
|
| 275 |
+
label = html.escape(parts[1])
|
| 276 |
+
icon_html = f"<span class='report-card-icon'>{icon}</span>" if icon else ""
|
| 277 |
+
return f"""
|
| 278 |
+
<div class="report-card {css_class}">
|
| 279 |
+
<div class="report-card-header">
|
| 280 |
+
{icon_html}
|
| 281 |
+
<div class="report-card-title">{label}</div>
|
| 282 |
+
</div>
|
| 283 |
+
<div class="report-card-body">
|
| 284 |
+
<ul>{bullets}</ul>
|
| 285 |
+
</div>
|
| 286 |
+
</div>
|
| 287 |
+
"""
|
| 288 |
+
|
| 289 |
+
def _render_chart_card(
|
| 290 |
+
points: list[dict],
|
| 291 |
+
value_key: str,
|
| 292 |
+
title: str,
|
| 293 |
+
color: str,
|
| 294 |
+
value_formatter=None,
|
| 295 |
+
axis_formatter=None,
|
| 296 |
+
) -> str:
|
| 297 |
+
if not points:
|
| 298 |
+
return ""
|
| 299 |
+
series = []
|
| 300 |
+
for pt in points:
|
| 301 |
+
tick = pt.get("tick")
|
| 302 |
+
value = pt.get(value_key)
|
| 303 |
+
if value is None or tick is None:
|
| 304 |
+
continue
|
| 305 |
+
try:
|
| 306 |
+
value = float(value)
|
| 307 |
+
except (TypeError, ValueError):
|
| 308 |
+
continue
|
| 309 |
+
series.append((tick, value, pt))
|
| 310 |
+
if not series:
|
| 311 |
+
return ""
|
| 312 |
+
values = [item[1] for item in series]
|
| 313 |
+
min_v = min(values)
|
| 314 |
+
max_v = max(values)
|
| 315 |
+
span = max(max_v - min_v, 1e-6)
|
| 316 |
+
coords = []
|
| 317 |
+
for idx, (_, value, _) in enumerate(series):
|
| 318 |
+
x = 0 if len(series) == 1 else (idx / (len(series) - 1)) * 100
|
| 319 |
+
if max_v == min_v:
|
| 320 |
+
y = 30
|
| 321 |
+
else:
|
| 322 |
+
y = 60 - ((value - min_v) / span) * 60
|
| 323 |
+
coords.append(f"{x:.2f},{y:.2f}")
|
| 324 |
+
last_tick, last_value, last_point = series[-1]
|
| 325 |
+
display_value = (
|
| 326 |
+
value_formatter(last_value, last_point)
|
| 327 |
+
if value_formatter
|
| 328 |
+
else f"{last_value:.0f}"
|
| 329 |
+
)
|
| 330 |
+
axis_format = axis_formatter or (lambda v: f"{v:.0f}")
|
| 331 |
+
axis_top_label = axis_format(max_v)
|
| 332 |
+
axis_bottom_label = axis_format(min_v)
|
| 333 |
+
first_tick = series[0][0]
|
| 334 |
+
last_coord = coords[-1].split(",")
|
| 335 |
+
return f"""
|
| 336 |
+
<div class="report-chart-card">
|
| 337 |
+
<div class="chart-title">{title}</div>
|
| 338 |
+
<div class="mini-chart">
|
| 339 |
+
<svg viewBox="0 0 100 60" preserveAspectRatio="none">
|
| 340 |
+
<polyline points="{' '.join(coords)}" fill="none" stroke="{color}" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" />
|
| 341 |
+
<circle cx="{last_coord[0]}" cy="{last_coord[1]}" r="2.5" fill="{color}" />
|
| 342 |
+
</svg>
|
| 343 |
+
</div>
|
| 344 |
+
<div class="chart-axis-values">
|
| 345 |
+
<span>{axis_top_label}</span>
|
| 346 |
+
<span>{axis_bottom_label}</span>
|
| 347 |
+
</div>
|
| 348 |
+
<div class="chart-x-axis">
|
| 349 |
+
<span>Tick {int(first_tick)}</span>
|
| 350 |
+
<span>Tick {int(last_tick)}</span>
|
| 351 |
+
</div>
|
| 352 |
+
<div class="chart-meta">
|
| 353 |
+
<span>Tick {int(last_tick)}</span>
|
| 354 |
+
<span class="chart-value" style="color: {color};">{display_value}</span>
|
| 355 |
+
</div>
|
| 356 |
+
</div>
|
| 357 |
+
"""
|
| 358 |
+
|
| 359 |
+
def _render_chart_section(charts_payload) -> str:
|
| 360 |
+
if not charts_payload:
|
| 361 |
+
return ""
|
| 362 |
+
if isinstance(charts_payload, list):
|
| 363 |
+
metric_points = charts_payload
|
| 364 |
+
threat_points = []
|
| 365 |
+
else:
|
| 366 |
+
metric_points = charts_payload.get("metrics") or []
|
| 367 |
+
threat_points = charts_payload.get("threat_levels") or []
|
| 368 |
+
fire_chart = _render_chart_card(
|
| 369 |
+
metric_points,
|
| 370 |
+
"fires",
|
| 371 |
+
"🔥 Fire Load",
|
| 372 |
+
"#f97316",
|
| 373 |
+
lambda v, _: f"{int(round(v))} fires",
|
| 374 |
+
lambda v: f"{int(round(v))} fires",
|
| 375 |
+
)
|
| 376 |
+
unit_chart = _render_chart_card(
|
| 377 |
+
metric_points,
|
| 378 |
+
"units",
|
| 379 |
+
"🚒 Units Deployed",
|
| 380 |
+
"#38bdf8",
|
| 381 |
+
lambda v, pt: f"{int(round(v))}/{int(pt.get('max_units', 0) or 0)} units",
|
| 382 |
+
lambda v: f"{int(round(v))} units",
|
| 383 |
+
)
|
| 384 |
+
integrity_chart = _render_chart_card(
|
| 385 |
+
metric_points,
|
| 386 |
+
"building_integrity",
|
| 387 |
+
"🏢 Building Integrity",
|
| 388 |
+
"#4ade80",
|
| 389 |
+
lambda v, _: f"{max(0, min(1, v)) * 100:.0f}%",
|
| 390 |
+
lambda v: f"{max(0, min(1, v)) * 100:.0f}%",
|
| 391 |
+
)
|
| 392 |
+
threat_chart = _render_threat_chart(threat_points)
|
| 393 |
+
charts = "".join(filter(None, [fire_chart, unit_chart, integrity_chart, threat_chart]))
|
| 394 |
+
if not charts:
|
| 395 |
+
return ""
|
| 396 |
+
return f"""
|
| 397 |
+
<div class="report-chart-grid">
|
| 398 |
+
{charts}
|
| 399 |
+
</div>
|
| 400 |
+
"""
|
| 401 |
+
|
| 402 |
+
def _render_player_actions_block(payload: Optional[dict]) -> str:
|
| 403 |
+
if not payload:
|
| 404 |
+
return ""
|
| 405 |
+
summary = html.escape(payload.get("summary", "") or "Player has not performed any manual actions this run.")
|
| 406 |
+
counts = payload.get("counts") or {}
|
| 407 |
+
chips = []
|
| 408 |
+
chip_meta = [
|
| 409 |
+
("deploy_unit", "🚒 Deploy Units"),
|
| 410 |
+
("remove_unit", "♻️ Remove Units"),
|
| 411 |
+
("add_fire", "🔥 Ignite Fires"),
|
| 412 |
+
]
|
| 413 |
+
for key, label in chip_meta:
|
| 414 |
+
value = counts.get(key, 0) or 0
|
| 415 |
+
try:
|
| 416 |
+
value = int(value)
|
| 417 |
+
except (TypeError, ValueError):
|
| 418 |
+
value = 0
|
| 419 |
+
chip_class = "" if value > 0 else "muted"
|
| 420 |
+
chips.append(f"<span class='player-action-chip {chip_class}'>{label}: {value}</span>")
|
| 421 |
+
chips_html = "".join(chips)
|
| 422 |
+
return f"""
|
| 423 |
+
<div class="player-actions-block">
|
| 424 |
+
<div class="player-actions-header">🙋 Player Manual Actions</div>
|
| 425 |
+
<p class="player-actions-summary">{summary}</p>
|
| 426 |
+
<div class="player-actions-chips">{chips_html}</div>
|
| 427 |
+
</div>
|
| 428 |
+
"""
|
| 429 |
+
|
| 430 |
+
def _render_threat_chart(points: list[dict]) -> str:
|
| 431 |
+
if not points:
|
| 432 |
+
return ""
|
| 433 |
+
threat_map = {"CRITICAL": 4, "HIGH": 3, "MODERATE": 2, "LOW": 1}
|
| 434 |
+
normalized = []
|
| 435 |
+
for entry in points:
|
| 436 |
+
tick = entry.get("tick")
|
| 437 |
+
if tick is None:
|
| 438 |
+
continue
|
| 439 |
+
level = (entry.get("threat_level") or "").upper()
|
| 440 |
+
value = entry.get("value")
|
| 441 |
+
if value is None:
|
| 442 |
+
value = threat_map.get(level, 0)
|
| 443 |
+
normalized.append({
|
| 444 |
+
"tick": tick,
|
| 445 |
+
"threat_value": value,
|
| 446 |
+
"threat_label": entry.get("threat_level", level.title()),
|
| 447 |
+
})
|
| 448 |
+
if not normalized:
|
| 449 |
+
return ""
|
| 450 |
+
reverse_map = {v: k.title() for k, v in threat_map.items()}
|
| 451 |
+
def format_label(val):
|
| 452 |
+
return reverse_map.get(int(round(val)), f"{val:.0f}")
|
| 453 |
+
return _render_chart_card(
|
| 454 |
+
normalized,
|
| 455 |
+
"threat_value",
|
| 456 |
+
"🧭 Threat Level",
|
| 457 |
+
"#c084fc",
|
| 458 |
+
lambda v, pt: pt.get("threat_label", format_label(v)),
|
| 459 |
+
format_label,
|
| 460 |
+
)
|
| 461 |
+
|
| 462 |
+
|
| 463 |
+
after_action_status = report_payload.get("status", "idle")
|
| 464 |
+
report = report_payload.get("report", {}) or {}
|
| 465 |
+
|
| 466 |
+
if after_action_status == "pending":
|
| 467 |
+
report_section = """
|
| 468 |
+
<div class="result-report pending">
|
| 469 |
+
<div class="report-spinner"></div>
|
| 470 |
+
<div class="report-text">
|
| 471 |
+
<strong>AI battle report is being generated...</strong>
|
| 472 |
+
<p>The AI is consolidating Stage 4 cycle summaries and mission metrics into the final report.</p>
|
| 473 |
+
</div>
|
| 474 |
+
</div>
|
| 475 |
+
"""
|
| 476 |
+
elif after_action_status == "ready" and report:
|
| 477 |
+
summary = html.escape(report.get("summary", ""))
|
| 478 |
+
charts_html = _render_chart_section(report.get("charts") or [])
|
| 479 |
+
player_actions_html = _render_player_actions_block(report.get("player_actions"))
|
| 480 |
+
strengths = _render_report_cards("✅ What Went Well", report.get("strengths", []), "positive")
|
| 481 |
+
improvements = _render_report_cards("⚠️ Needs Improvement", report.get("improvements", []), "risk")
|
| 482 |
+
actions = _render_report_cards("🛠 Actionable Suggestions", report.get("next_actions", []), "action")
|
| 483 |
+
cards = strengths + improvements + actions
|
| 484 |
+
if not cards:
|
| 485 |
+
cards = "<p class='report-empty'>AI has not provided any concrete items yet.</p>"
|
| 486 |
+
report_section = f"""
|
| 487 |
+
<div class="result-report ready">
|
| 488 |
+
<div class="result-report-header">
|
| 489 |
+
<span>AI Battle Report</span>
|
| 490 |
+
<span class="report-badge">Complete</span>
|
| 491 |
+
</div>
|
| 492 |
+
<p class="report-summary">{summary}</p>
|
| 493 |
+
{charts_html}
|
| 494 |
+
{player_actions_html}
|
| 495 |
+
<div class="report-card-grid">{cards}</div>
|
| 496 |
+
</div>
|
| 497 |
+
"""
|
| 498 |
+
elif after_action_status == "error":
|
| 499 |
+
error_msg = html.escape(report_payload.get("error") or "AI report generation failed.")
|
| 500 |
+
report_section = f"""
|
| 501 |
+
<div class="result-report error">
|
| 502 |
+
<div class="result-report-header">
|
| 503 |
+
<span>AI Battle Report</span>
|
| 504 |
+
<span class="report-badge danger">Error</span>
|
| 505 |
+
</div>
|
| 506 |
+
<p class="report-error">⚠️ {error_msg}</p>
|
| 507 |
+
</div>
|
| 508 |
+
"""
|
| 509 |
+
else:
|
| 510 |
+
report_section = """
|
| 511 |
+
<div class="result-report idle">
|
| 512 |
+
<p>Waiting for the AI to finish the battle analysis...</p>
|
| 513 |
+
</div>
|
| 514 |
+
"""
|
| 515 |
+
|
| 516 |
+
return f"""
|
| 517 |
+
<div class="game-result-overlay" id="result-overlay">
|
| 518 |
+
<div class="game-result-box">
|
| 519 |
+
<div class="result-icon" style="color: {outcome_config['color']};">{outcome_config['icon']}</div>
|
| 520 |
+
<div class="result-title" style="color: {outcome_config['color']};">{outcome_config['title']}</div>
|
| 521 |
+
<div class="result-subtitle">{outcome_config['subtitle']}</div>
|
| 522 |
+
{report_section}
|
| 523 |
+
<button class="result-ok-btn" onclick="document.getElementById('result-overlay').style.display='none'">OK</button>
|
| 524 |
+
</div>
|
| 525 |
+
</div>
|
| 526 |
+
"""
|
| 527 |
+
|
| 528 |
+
|
| 529 |
+
# =============================================================================
|
| 530 |
+
# Gradio Event Handlers
|
| 531 |
+
# =============================================================================
|
| 532 |
+
|
| 533 |
+
def _get_combined_advisor_messages(service) -> list[dict]:
|
| 534 |
+
"""Return history + current advisor messages as one list."""
|
| 535 |
+
history = service.get_advisor_history_chat_messages() or []
|
| 536 |
+
current = service.get_advisor_chat_messages() or []
|
| 537 |
+
combined = history + current
|
| 538 |
+
if not combined:
|
| 539 |
+
return [{
|
| 540 |
+
"role": "assistant",
|
| 541 |
+
"content": "No AI analysis yet. Press **Start** to begin the advisor cycle."
|
| 542 |
+
}]
|
| 543 |
+
return combined
|
| 544 |
+
|
| 545 |
+
def start_or_resume_simulation(fire_count: int, fire_intensity: float, building_count: int, max_units: int, seed: Optional[int]):
|
| 546 |
+
"""Handle Start/Resume button click."""
|
| 547 |
+
service = get_service()
|
| 548 |
+
|
| 549 |
+
# Check if there's a paused simulation to resume
|
| 550 |
+
if service.is_paused():
|
| 551 |
+
state = service.resume()
|
| 552 |
+
else:
|
| 553 |
+
# Start new simulation
|
| 554 |
+
_reset_advisor_display_cache()
|
| 555 |
+
actual_seed = int(seed) if seed and seed > 0 else None
|
| 556 |
+
state = service.start(
|
| 557 |
+
seed=actual_seed,
|
| 558 |
+
fire_count=int(fire_count),
|
| 559 |
+
fire_intensity=fire_intensity,
|
| 560 |
+
building_count=int(building_count),
|
| 561 |
+
max_units=int(max_units)
|
| 562 |
+
)
|
| 563 |
+
|
| 564 |
+
# Generate button updates
|
| 565 |
+
updates = get_all_button_updates(state)
|
| 566 |
+
|
| 567 |
+
# Get thinking state
|
| 568 |
+
is_thinking = service.is_thinking()
|
| 569 |
+
thinking_stage = service.get_thinking_stage()
|
| 570 |
+
|
| 571 |
+
return [
|
| 572 |
+
gr.Timer(active=True), # Start the timer
|
| 573 |
+
gr.Timer(active=False), # Ensure report poller is off initially
|
| 574 |
+
render_game_result(state.get("status", ""), state.get("after_action_report")),
|
| 575 |
+
_get_combined_advisor_messages(service),
|
| 576 |
+
service.get_event_log_text(),
|
| 577 |
+
gr.update(interactive=False), # start btn
|
| 578 |
+
gr.update(interactive=True), # pause btn
|
| 579 |
+
render_status_html(state, is_thinking, thinking_stage),
|
| 580 |
+
] + updates
|
| 581 |
+
|
| 582 |
+
|
| 583 |
+
def pause_simulation():
|
| 584 |
+
"""Handle Pause button click."""
|
| 585 |
+
service = get_service()
|
| 586 |
+
state = service.pause()
|
| 587 |
+
|
| 588 |
+
updates = get_all_button_updates(state)
|
| 589 |
+
|
| 590 |
+
return [
|
| 591 |
+
gr.Timer(active=False), # Stop the timer
|
| 592 |
+
gr.Timer(active=False),
|
| 593 |
+
render_game_result(state.get("status", ""), state.get("after_action_report")),
|
| 594 |
+
_get_combined_advisor_messages(service),
|
| 595 |
+
service.get_event_log_text(),
|
| 596 |
+
gr.update(interactive=True), # start btn (can resume)
|
| 597 |
+
gr.update(interactive=False), # pause btn
|
| 598 |
+
render_status_html(state),
|
| 599 |
+
] + updates
|
| 600 |
+
|
| 601 |
+
|
| 602 |
+
def reset_simulation(fire_count: int, fire_intensity: float, building_count: int, max_units: int, seed: Optional[int]):
|
| 603 |
+
"""Handle Reset button click."""
|
| 604 |
+
service = get_service()
|
| 605 |
+
_reset_advisor_display_cache()
|
| 606 |
+
actual_seed = int(seed) if seed and seed > 0 else None
|
| 607 |
+
state = service.reset(
|
| 608 |
+
seed=actual_seed,
|
| 609 |
+
fire_count=int(fire_count),
|
| 610 |
+
fire_intensity=fire_intensity,
|
| 611 |
+
building_count=int(building_count),
|
| 612 |
+
max_units=int(max_units)
|
| 613 |
+
)
|
| 614 |
+
|
| 615 |
+
updates = get_all_button_updates(state)
|
| 616 |
+
|
| 617 |
+
return [
|
| 618 |
+
gr.Timer(active=False), # Stop the timer
|
| 619 |
+
gr.Timer(active=False),
|
| 620 |
+
"", # Clear result popup
|
| 621 |
+
_get_combined_advisor_messages(service),
|
| 622 |
+
service.get_event_log_text(),
|
| 623 |
+
gr.update(interactive=True),
|
| 624 |
+
gr.update(interactive=False),
|
| 625 |
+
render_status_html(state),
|
| 626 |
+
] + updates
|
| 627 |
+
|
| 628 |
+
|
| 629 |
+
def deploy_at_cell(x: int, y: int, selection: str):
|
| 630 |
+
"""Deploy unit or fire at specific cell, or remove if unit already exists there."""
|
| 631 |
+
service = get_service()
|
| 632 |
+
|
| 633 |
+
# Get thinking state
|
| 634 |
+
is_thinking = service.is_thinking()
|
| 635 |
+
thinking_stage = service.get_thinking_stage()
|
| 636 |
+
|
| 637 |
+
# Only allow deployment when simulation is actively running (not paused)
|
| 638 |
+
if not service.is_running():
|
| 639 |
+
gr.Warning("⚠️ Please start the simulation first!")
|
| 640 |
+
state = service.get_state()
|
| 641 |
+
updates = get_all_button_updates(state)
|
| 642 |
+
return [
|
| 643 |
+
render_game_result(state.get("status", ""), state.get("after_action_report")),
|
| 644 |
+
_get_combined_advisor_messages(service),
|
| 645 |
+
service.get_event_log_text(),
|
| 646 |
+
render_status_html(state, is_thinking, thinking_stage),
|
| 647 |
+
] + updates
|
| 648 |
+
|
| 649 |
+
# Handle fire placement
|
| 650 |
+
if selection == "🔥 Fire":
|
| 651 |
+
result = service.add_fire(x, y, intensity=0.5)
|
| 652 |
+
if result.get("status") != "ok":
|
| 653 |
+
error_msg = result.get("message", "Unknown error")
|
| 654 |
+
gr.Warning(f"⚠️ {error_msg}")
|
| 655 |
+
# Check if there's already a unit at this position
|
| 656 |
+
elif service.has_unit_at(x, y):
|
| 657 |
+
# Remove the existing unit
|
| 658 |
+
result = service.remove_unit(x, y)
|
| 659 |
+
else:
|
| 660 |
+
# Deploy new unit
|
| 661 |
+
unit_type_key = "fire_truck" if selection == "🚒 Truck" else "helicopter"
|
| 662 |
+
result = service.deploy_unit(unit_type_key, x, y, "player")
|
| 663 |
+
|
| 664 |
+
# Show warning notification only on failure
|
| 665 |
+
if result.get("status") != "ok":
|
| 666 |
+
error_msg = result.get("message", "Unknown error")
|
| 667 |
+
gr.Warning(f"⚠️ {error_msg}")
|
| 668 |
+
|
| 669 |
+
state = service.get_state()
|
| 670 |
+
updates = get_all_button_updates(state)
|
| 671 |
+
|
| 672 |
+
# Refresh thinking state after action
|
| 673 |
+
is_thinking = service.is_thinking()
|
| 674 |
+
thinking_stage = service.get_thinking_stage()
|
| 675 |
+
|
| 676 |
+
return [
|
| 677 |
+
render_game_result(state.get("status", ""), state.get("after_action_report")),
|
| 678 |
+
_get_combined_advisor_messages(service),
|
| 679 |
+
service.get_event_log_text(),
|
| 680 |
+
render_status_html(state, is_thinking, thinking_stage),
|
| 681 |
+
] + updates
|
| 682 |
+
|
| 683 |
+
|
| 684 |
+
def poll_after_action_report():
|
| 685 |
+
"""Poll after-action report status independently of the main simulation timer."""
|
| 686 |
+
service = get_service()
|
| 687 |
+
state = service.get_state()
|
| 688 |
+
status = state.get("status", "idle")
|
| 689 |
+
report_payload = state.get("after_action_report")
|
| 690 |
+
after_action_status = (report_payload or {}).get("status", "idle")
|
| 691 |
+
|
| 692 |
+
report_timer_update = gr.update()
|
| 693 |
+
if status not in ["success", "fail"] or after_action_status != "pending":
|
| 694 |
+
report_timer_update = gr.Timer(active=False)
|
| 695 |
+
|
| 696 |
+
return [
|
| 697 |
+
report_timer_update,
|
| 698 |
+
render_game_result(status, report_payload),
|
| 699 |
+
]
|
| 700 |
+
|
| 701 |
+
|
| 702 |
+
def get_all_button_updates(state: dict) -> list:
|
| 703 |
+
"""Generate updates for all grid buttons."""
|
| 704 |
+
updates = []
|
| 705 |
+
for y in range(10):
|
| 706 |
+
for x in range(10):
|
| 707 |
+
content, bg_color = get_cell_info(state, x, y)
|
| 708 |
+
updates.append(gr.update(value=content))
|
| 709 |
+
return updates
|
| 710 |
+
|
| 711 |
+
|
| 712 |
+
# Cache for preventing unnecessary UI updates (flicker prevention)
|
| 713 |
+
_last_status_html = None
|
| 714 |
+
_last_advisor_signature = ()
|
| 715 |
+
_history_messages_cache: list[dict] = []
|
| 716 |
+
_current_cycle_messages_cache: list[dict] = []
|
| 717 |
+
|
| 718 |
+
|
| 719 |
+
def _reset_advisor_display_cache():
|
| 720 |
+
"""Clear cached advisor/chatbot renders (prevents stale history after reset)."""
|
| 721 |
+
global _last_advisor_signature, _history_messages_cache, _current_cycle_messages_cache, _last_status_html
|
| 722 |
+
_last_advisor_signature = ()
|
| 723 |
+
_history_messages_cache = []
|
| 724 |
+
_current_cycle_messages_cache = []
|
| 725 |
+
_last_status_html = None
|
| 726 |
+
|
| 727 |
+
|
| 728 |
+
def refresh_display():
|
| 729 |
+
"""
|
| 730 |
+
Single timer refresh (every 1 second).
|
| 731 |
+
Uses unified change tracking to update only changed components.
|
| 732 |
+
"""
|
| 733 |
+
service = get_service()
|
| 734 |
+
|
| 735 |
+
# Get all changes in one call
|
| 736 |
+
changes = service.get_changed_components()
|
| 737 |
+
state = changes["state"]
|
| 738 |
+
status = state.get("status", "idle")
|
| 739 |
+
report_payload_state = state.get("after_action_report") or {}
|
| 740 |
+
after_action_status = report_payload_state.get("status", "idle")
|
| 741 |
+
|
| 742 |
+
# Get AI thinking state
|
| 743 |
+
is_thinking = service.is_thinking()
|
| 744 |
+
thinking_stage = service.get_thinking_stage()
|
| 745 |
+
|
| 746 |
+
# Timer control - stop when game ends
|
| 747 |
+
timer_update = gr.update()
|
| 748 |
+
if status in ["success", "fail"]:
|
| 749 |
+
timer_update = gr.Timer(active=False)
|
| 750 |
+
|
| 751 |
+
report_timer_update = gr.Timer(active=False)
|
| 752 |
+
if status in ["success", "fail"] and after_action_status == "pending":
|
| 753 |
+
report_timer_update = gr.Timer(active=True)
|
| 754 |
+
|
| 755 |
+
# Result popup - only when state changes
|
| 756 |
+
if changes["result_changed"]:
|
| 757 |
+
result_state = changes["result_state"]
|
| 758 |
+
payload = changes.get("result_payload") or {}
|
| 759 |
+
report_payload = payload.get("after_action")
|
| 760 |
+
result_popup = render_game_result(result_state, report_payload)
|
| 761 |
+
else:
|
| 762 |
+
result_popup = gr.update()
|
| 763 |
+
|
| 764 |
+
# Advisor display (chatbot) showing combined history + current cycle
|
| 765 |
+
global _last_advisor_signature, _history_messages_cache, _current_cycle_messages_cache
|
| 766 |
+
if changes["history_changed"]:
|
| 767 |
+
history_messages = changes.get("advisor_history")
|
| 768 |
+
if history_messages is not None:
|
| 769 |
+
_history_messages_cache = history_messages
|
| 770 |
+
current_cycle_messages = changes.get("advisor_messages")
|
| 771 |
+
if current_cycle_messages is not None:
|
| 772 |
+
_current_cycle_messages_cache = current_cycle_messages
|
| 773 |
+
combined_messages = (_history_messages_cache or []) + (_current_cycle_messages_cache or [])
|
| 774 |
+
if combined_messages:
|
| 775 |
+
signature = _chat_signature(combined_messages)
|
| 776 |
+
if signature != _last_advisor_signature:
|
| 777 |
+
_last_advisor_signature = signature
|
| 778 |
+
advisor_display = gr.update(value=combined_messages)
|
| 779 |
+
else:
|
| 780 |
+
advisor_display = gr.skip()
|
| 781 |
+
else:
|
| 782 |
+
advisor_display = gr.skip()
|
| 783 |
+
|
| 784 |
+
# Event log - only when content changes
|
| 785 |
+
event_log = changes["event_log"] if changes["event_log_changed"] else gr.update()
|
| 786 |
+
|
| 787 |
+
# Buttons - only when state changes
|
| 788 |
+
if changes["buttons_changed"]:
|
| 789 |
+
start_enabled, pause_enabled = changes["button_states"]
|
| 790 |
+
start_btn_update = gr.update(interactive=start_enabled)
|
| 791 |
+
pause_btn_update = gr.update(interactive=pause_enabled)
|
| 792 |
+
else:
|
| 793 |
+
start_btn_update = gr.update()
|
| 794 |
+
pause_btn_update = gr.update()
|
| 795 |
+
|
| 796 |
+
# Status bar - use cache to prevent flicker (only update when content changes)
|
| 797 |
+
global _last_status_html
|
| 798 |
+
new_status_html = render_status_html(state, is_thinking, thinking_stage)
|
| 799 |
+
if new_status_html == _last_status_html:
|
| 800 |
+
status_html_update = gr.skip()
|
| 801 |
+
else:
|
| 802 |
+
_last_status_html = new_status_html
|
| 803 |
+
status_html_update = new_status_html
|
| 804 |
+
|
| 805 |
+
# Grid buttons - only update if changed
|
| 806 |
+
if changes["grid_changed"]:
|
| 807 |
+
updates = get_all_button_updates(state)
|
| 808 |
+
else:
|
| 809 |
+
updates = [gr.update() for _ in range(100)]
|
| 810 |
+
|
| 811 |
+
return [
|
| 812 |
+
timer_update,
|
| 813 |
+
report_timer_update,
|
| 814 |
+
result_popup,
|
| 815 |
+
advisor_display,
|
| 816 |
+
event_log,
|
| 817 |
+
start_btn_update,
|
| 818 |
+
pause_btn_update,
|
| 819 |
+
status_html_update,
|
| 820 |
+
] + updates
|
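The `gr.update()` vs `gr.skip()` distinction above is what prevents flicker: components whose content has not changed are skipped entirely instead of being re-rendered. A minimal standalone sketch of the same idea (editorial illustration, not part of the diff; assumes a Gradio version where `gr.skip()` is available, as it is used above):

```python
import gradio as gr

_last_value = None  # module-level cache, mirroring _last_status_html above

def cached_refresh(new_value: str):
    """Return gr.skip() when nothing changed, gr.update() when it did."""
    global _last_value
    if new_value == _last_value:
        return gr.skip()             # component is left untouched -> no re-render, no flicker
    _last_value = new_value
    return gr.update(value=new_value)
```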
| 821 |
+
|
| 822 |
+
|
| 823 |
+
# =============================================================================
|
| 824 |
+
# Gradio UI Definition
|
| 825 |
+
# =============================================================================
|
| 826 |
+
|
| 827 |
+
CUSTOM_CSS = """
|
| 828 |
+
.advisor-chatbot {
|
| 829 |
+
border: 1px solid var(--progress-bg);
|
| 830 |
+
border-radius: 12px;
|
| 831 |
+
background: var(--status-bg);
|
| 832 |
+
padding: 4px;
|
| 833 |
+
}
|
| 834 |
+
|
| 835 |
+
.advisor-chatbot .wrap {
|
| 836 |
+
background: transparent !important;
|
| 837 |
+
}
|
| 838 |
+
|
| 839 |
+
.advisor-chatbot .top-panel,
|
| 840 |
+
.advisor-chatbot .icon-button-wrapper {
|
| 841 |
+
display: none !important;
|
| 842 |
+
}
|
| 843 |
+
|
| 844 |
+
.advisor-chatbot .message {
|
| 845 |
+
font-family: "JetBrains Mono", "SFMono-Regular", ui-monospace, monospace !important;
|
| 846 |
+
font-size: 13px !important;
|
| 847 |
+
line-height: 1.5 !important;
|
| 848 |
+
color: var(--status-text);
|
| 849 |
+
}
|
| 850 |
+
|
| 851 |
+
.advisor-chatbot .message .metadata-title {
|
| 852 |
+
font-weight: 600;
|
| 853 |
+
}
|
| 854 |
+
|
| 855 |
+
/* Theme-aware CSS variables */
|
| 856 |
+
:root {
|
| 857 |
+
--status-bg: #f8fafc;
|
| 858 |
+
--status-text: #1e293b;
|
| 859 |
+
--status-muted: #64748b;
|
| 860 |
+
--progress-bg: #e2e8f0;
|
| 861 |
+
}
|
| 862 |
+
|
| 863 |
+
/* Dark mode overrides */
|
| 864 |
+
.dark {
|
| 865 |
+
--status-bg: #1a1a2e;
|
| 866 |
+
--status-text: #ffffff;
|
| 867 |
+
--status-muted: #94a3b8;
|
| 868 |
+
--progress-bg: #333333;
|
| 869 |
+
}
|
| 870 |
+
|
| 871 |
+
/* Fixed size grid buttons - force square shape */
|
| 872 |
+
.grid-btn {
|
| 873 |
+
width: 52px !important;
|
| 874 |
+
height: 52px !important;
|
| 875 |
+
min-width: 52px !important;
|
| 876 |
+
min-height: 52px !important;
|
| 877 |
+
max-width: 52px !important;
|
| 878 |
+
max-height: 52px !important;
|
| 879 |
+
padding: 0 !important;
|
| 880 |
+
font-size: 16px !important;
|
| 881 |
+
border-radius: 5px !important;
|
| 882 |
+
margin: 1px !important;
|
| 883 |
+
flex-shrink: 0 !important;
|
| 884 |
+
flex-grow: 0 !important;
|
| 885 |
+
line-height: 52px !important;
|
| 886 |
+
box-sizing: border-box !important;
|
| 887 |
+
font-weight: bold !important;
|
| 888 |
+
text-shadow: 1px 1px 2px rgba(0,0,0,0.5) !important;
|
| 889 |
+
}
|
| 890 |
+
|
| 891 |
+
/* Coordinate label - same size as grid-btn but transparent */
|
| 892 |
+
.coord-label {
|
| 893 |
+
width: 52px !important;
|
| 894 |
+
height: 52px !important;
|
| 895 |
+
min-width: 52px !important;
|
| 896 |
+
min-height: 52px !important;
|
| 897 |
+
max-width: 52px !important;
|
| 898 |
+
max-height: 52px !important;
|
| 899 |
+
padding: 0 !important;
|
| 900 |
+
font-size: 14px !important;
|
| 901 |
+
font-weight: bold !important;
|
| 902 |
+
border-radius: 5px !important;
|
| 903 |
+
margin: 1px !important;
|
| 904 |
+
flex-shrink: 0 !important;
|
| 905 |
+
flex-grow: 0 !important;
|
| 906 |
+
line-height: 52px !important;
|
| 907 |
+
box-sizing: border-box !important;
|
| 908 |
+
background: transparent !important;
|
| 909 |
+
border: none !important;
|
| 910 |
+
color: #888 !important;
|
| 911 |
+
cursor: default !important;
|
| 912 |
+
pointer-events: none !important;
|
| 913 |
+
}
|
| 914 |
+
|
| 915 |
+
/* Fixed row layout - prevent wrapping, tight spacing */
|
| 916 |
+
.grid-row {
|
| 917 |
+
display: flex !important;
|
| 918 |
+
flex-wrap: nowrap !important;
|
| 919 |
+
gap: 0 !important;
|
| 920 |
+
align-items: center !important;
|
| 921 |
+
margin: 0 !important;
|
| 922 |
+
padding: 0 !important;
|
| 923 |
+
min-height: 0 !important;
|
| 924 |
+
line-height: 1 !important;
|
| 925 |
+
}
|
| 926 |
+
|
| 927 |
+
/* Override Gradio's default row gap */
|
| 928 |
+
.grid-row.row {
|
| 929 |
+
gap: 0 !important;
|
| 930 |
+
}
|
| 931 |
+
|
| 932 |
+
.grid-row > div {
|
| 933 |
+
flex-shrink: 0 !important;
|
| 934 |
+
flex-grow: 0 !important;
|
| 935 |
+
margin: 0 !important;
|
| 936 |
+
padding: 0 !important;
|
| 937 |
+
}
|
| 938 |
+
|
| 939 |
+
/* Ensure button containers have no extra spacing */
|
| 940 |
+
.grid-row > div > button {
|
| 941 |
+
margin: 1px !important;
|
| 942 |
+
}
|
| 943 |
+
|
| 944 |
+
/* Override Gradio column gap for grid container */
|
| 945 |
+
.grid-container {
|
| 946 |
+
gap: 0 !important;
|
| 947 |
+
row-gap: 0 !important;
|
| 948 |
+
column-gap: 0 !important;
|
| 949 |
+
}
|
| 950 |
+
|
| 951 |
+
.grid-container.column {
|
| 952 |
+
gap: 0 !important;
|
| 953 |
+
row-gap: 0 !important;
|
| 954 |
+
column-gap: 0 !important;
|
| 955 |
+
}
|
| 956 |
+
|
| 957 |
+
/* Target any parent column that contains grid-row */
|
| 958 |
+
div.column:has(.grid-row) {
|
| 959 |
+
gap: 0 !important;
|
| 960 |
+
row-gap: 0 !important;
|
| 961 |
+
column-gap: 0 !important;
|
| 962 |
+
}
|
| 963 |
+
|
| 964 |
+
.log-box textarea {
|
| 965 |
+
font-family: "JetBrains Mono", "SFMono-Regular", ui-monospace, monospace !important;
|
| 966 |
+
font-size: 13px !important;
|
| 967 |
+
}
|
| 968 |
+
|
| 969 |
+
/* Theme-aware legend box */
|
| 970 |
+
.legend-box {
|
| 971 |
+
background: var(--status-bg);
|
| 972 |
+
padding: 10px;
|
| 973 |
+
border-radius: 8px;
|
| 974 |
+
color: var(--status-text);
|
| 975 |
+
border: 1px solid var(--progress-bg);
|
| 976 |
+
}
|
| 977 |
+
|
| 978 |
+
.status-panel {
|
| 979 |
+
background: var(--status-bg);
|
| 980 |
+
border: 1px solid var(--progress-bg);
|
| 981 |
+
border-radius: 12px;
|
| 982 |
+
padding: 12px;
|
| 983 |
+
color: var(--status-text);
|
| 984 |
+
}
|
| 985 |
+
|
| 986 |
+
.status-panel .status-compact {
|
| 987 |
+
margin: 0;
|
| 988 |
+
}
|
| 989 |
+
|
| 990 |
+
/* Subtitle styling */
|
| 991 |
+
.subtitle {
|
| 992 |
+
color: var(--status-muted) !important;
|
| 993 |
+
font-size: 14px !important;
|
| 994 |
+
margin-top: -8px !important;
|
| 995 |
+
margin-bottom: 12px !important;
|
| 996 |
+
}
|
| 997 |
+
|
| 998 |
+
/* AI Advisor highlight styling */
|
| 999 |
+
.ai-advisor-panel {
|
| 1000 |
+
background: linear-gradient(135deg, rgba(59, 130, 246, 0.1), rgba(147, 51, 234, 0.1));
|
| 1001 |
+
border: 2px solid rgba(99, 102, 241, 0.3);
|
| 1002 |
+
border-radius: 12px;
|
| 1003 |
+
padding: 16px;
|
| 1004 |
+
}
|
| 1005 |
+
|
| 1006 |
+
.dark .ai-advisor-panel {
|
| 1007 |
+
background: linear-gradient(135deg, rgba(59, 130, 246, 0.15), rgba(147, 51, 234, 0.15));
|
| 1008 |
+
border-color: rgba(99, 102, 241, 0.4);
|
| 1009 |
+
}
|
| 1010 |
+
|
| 1011 |
+
/* Accordion styling - more compact */
|
| 1012 |
+
.gradio-accordion {
|
| 1013 |
+
margin-bottom: 8px !important;
|
| 1014 |
+
}
|
| 1015 |
+
|
| 1016 |
+
/* Control buttons row spacing */
|
| 1017 |
+
.control-buttons-row {
|
| 1018 |
+
margin-top: 16px !important;
|
| 1019 |
+
}
|
| 1020 |
+
|
| 1021 |
+
/* How to play inline styling */
|
| 1022 |
+
.how-to-play {
|
| 1023 |
+
background: var(--status-bg);
|
| 1024 |
+
border: 1px solid var(--progress-bg);
|
| 1025 |
+
border-radius: 8px;
|
| 1026 |
+
padding: 12px 16px !important;
|
| 1027 |
+
margin: 8px 0 !important;
|
| 1028 |
+
font-size: 14px;
|
| 1029 |
+
}
|
| 1030 |
+
|
| 1031 |
+
.how-to-play p {
|
| 1032 |
+
margin: 4px 0 !important;
|
| 1033 |
+
}
|
| 1034 |
+
|
| 1035 |
+
/* Main heading styling */
|
| 1036 |
+
h1 {
|
| 1037 |
+
margin-bottom: 4px !important;
|
| 1038 |
+
}
|
| 1039 |
+
|
| 1040 |
+
h2 {
|
| 1041 |
+
margin-top: 12px !important;
|
| 1042 |
+
margin-bottom: 8px !important;
|
| 1043 |
+
}
|
| 1044 |
+
|
| 1045 |
+
/* Section spacing */
|
| 1046 |
+
.section-gap {
|
| 1047 |
+
margin-top: 16px !important;
|
| 1048 |
+
}
|
| 1049 |
+
|
| 1050 |
+
/* Compact status bar */
|
| 1051 |
+
.status-bar {
|
| 1052 |
+
display: flex;
|
| 1053 |
+
flex-wrap: wrap;
|
| 1054 |
+
gap: 12px;
|
| 1055 |
+
padding: 10px 16px;
|
| 1056 |
+
background: var(--status-bg);
|
| 1057 |
+
border-radius: 8px;
|
| 1058 |
+
font-size: 14px;
|
| 1059 |
+
align-items: center;
|
| 1060 |
+
border: 1px solid var(--progress-bg);
|
| 1061 |
+
}
|
| 1062 |
+
|
| 1063 |
+
.status-bar .status-item {
|
| 1064 |
+
display: flex;
|
| 1065 |
+
align-items: center;
|
| 1066 |
+
gap: 6px;
|
| 1067 |
+
}
|
| 1068 |
+
|
| 1069 |
+
.status-bar .status-badge {
|
| 1070 |
+
padding: 2px 8px;
|
| 1071 |
+
border-radius: 4px;
|
| 1072 |
+
font-weight: bold;
|
| 1073 |
+
font-size: 12px;
|
| 1074 |
+
}
|
| 1075 |
+
|
| 1076 |
+
.status-bar .progress-mini {
|
| 1077 |
+
width: 80px;
|
| 1078 |
+
height: 6px;
|
| 1079 |
+
background: var(--progress-bg);
|
| 1080 |
+
border-radius: 3px;
|
| 1081 |
+
overflow: hidden;
|
| 1082 |
+
}
|
| 1083 |
+
|
| 1084 |
+
/* Compact status bar */
|
| 1085 |
+
.status-compact {
|
| 1086 |
+
display: flex;
|
| 1087 |
+
flex-wrap: wrap;
|
| 1088 |
+
gap: 12px;
|
| 1089 |
+
padding: 8px 14px;
|
| 1090 |
+
background: var(--status-bg);
|
| 1091 |
+
border-radius: 8px;
|
| 1092 |
+
font-size: 14px;
|
| 1093 |
+
align-items: center;
|
| 1094 |
+
border: 1px solid var(--progress-bg);
|
| 1095 |
+
}
|
| 1096 |
+
|
| 1097 |
+
.status-compact .status-item {
|
| 1098 |
+
display: flex;
|
| 1099 |
+
align-items: center;
|
| 1100 |
+
gap: 5px;
|
| 1101 |
+
}
|
| 1102 |
+
|
| 1103 |
+
.status-compact .status-badge {
|
| 1104 |
+
padding: 2px 8px;
|
| 1105 |
+
border-radius: 4px;
|
| 1106 |
+
font-weight: bold;
|
| 1107 |
+
font-size: 11px;
|
| 1108 |
+
color: #000;
|
| 1109 |
+
}
|
| 1110 |
+
|
| 1111 |
+
.status-compact .status-health {
|
| 1112 |
+
flex: 1;
|
| 1113 |
+
min-width: 120px;
|
| 1114 |
+
}
|
| 1115 |
+
|
| 1116 |
+
.status-compact .progress-mini {
|
| 1117 |
+
flex: 1;
|
| 1118 |
+
min-width: 60px;
|
| 1119 |
+
max-width: 100px;
|
| 1120 |
+
height: 8px;
|
| 1121 |
+
background: var(--progress-bg);
|
| 1122 |
+
border-radius: 4px;
|
| 1123 |
+
overflow: hidden;
|
| 1124 |
+
}
|
| 1125 |
+
|
| 1126 |
+
/* AI Thinking indicator in status bar */
|
| 1127 |
+
.status-compact .ai-thinking-inline {
|
| 1128 |
+
background: linear-gradient(135deg, rgba(99, 102, 241, 0.2), rgba(147, 51, 234, 0.2));
|
| 1129 |
+
padding: 4px 10px;
|
| 1130 |
+
border-radius: 6px;
|
| 1131 |
+
border: 1px solid rgba(99, 102, 241, 0.4);
|
| 1132 |
+
animation: ai-glow 1.5s ease-in-out infinite;
|
| 1133 |
+
}
|
| 1134 |
+
|
| 1135 |
+
.status-compact .ai-label {
|
| 1136 |
+
font-weight: bold;
|
| 1137 |
+
margin-right: 4px;
|
| 1138 |
+
color: #a78bfa;
|
| 1139 |
+
}
|
| 1140 |
+
|
| 1141 |
+
.status-compact .ai-pulse {
|
| 1142 |
+
animation: pulse-icon 1s ease-in-out infinite;
|
| 1143 |
+
}
|
| 1144 |
+
|
| 1145 |
+
@keyframes ai-glow {
|
| 1146 |
+
0%, 100% { box-shadow: 0 0 5px rgba(99, 102, 241, 0.3); }
|
| 1147 |
+
50% { box-shadow: 0 0 15px rgba(99, 102, 241, 0.6); }
|
| 1148 |
+
}
|
| 1149 |
+
|
| 1150 |
+
@keyframes pulse-icon {
|
| 1151 |
+
0%, 100% { transform: scale(1); }
|
| 1152 |
+
50% { transform: scale(1.2); }
|
| 1153 |
+
}
|
| 1154 |
+
|
| 1155 |
+
/* Win/Lose overlay */
|
| 1156 |
+
.game-result-overlay {
|
| 1157 |
+
position: fixed;
|
| 1158 |
+
top: 0;
|
| 1159 |
+
left: 0;
|
| 1160 |
+
width: 100%;
|
| 1161 |
+
height: 100%;
|
| 1162 |
+
display: flex;
|
| 1163 |
+
justify-content: center;
|
| 1164 |
+
align-items: center;
|
| 1165 |
+
background: rgba(0,0,0,0.7);
|
| 1166 |
+
z-index: 9999;
|
| 1167 |
+
}
|
| 1168 |
+
|
| 1169 |
+
.game-result-box {
|
| 1170 |
+
background: var(--status-bg);
|
| 1171 |
+
padding: 32px 40px;
|
| 1172 |
+
border-radius: 20px;
|
| 1173 |
+
text-align: center;
|
| 1174 |
+
animation: popup 0.3s ease-out;
|
| 1175 |
+
width: 90%;
|
| 1176 |
+
max-height: 90vh;
|
| 1177 |
+
overflow-y: auto;
|
| 1178 |
+
box-shadow: 0 30px 80px rgba(0, 0, 0, 0.45);
|
| 1179 |
+
}
|
| 1180 |
+
|
| 1181 |
+
.result-icon {
|
| 1182 |
+
font-size: 60px;
|
| 1183 |
+
margin-bottom: 12px;
|
| 1184 |
+
}
|
| 1185 |
+
|
| 1186 |
+
.result-title {
|
| 1187 |
+
font-size: 36px;
|
| 1188 |
+
font-weight: 800;
|
| 1189 |
+
letter-spacing: 1px;
|
| 1190 |
+
}
|
| 1191 |
+
|
| 1192 |
+
.result-subtitle {
|
| 1193 |
+
font-size: 18px;
|
| 1194 |
+
color: var(--status-muted);
|
| 1195 |
+
margin-bottom: 24px;
|
| 1196 |
+
}
|
| 1197 |
+
|
| 1198 |
+
.result-report {
|
| 1199 |
+
background: rgba(15, 23, 42, 0.6);
|
| 1200 |
+
border: 1px solid rgba(255, 255, 255, 0.08);
|
| 1201 |
+
border-radius: 16px;
|
| 1202 |
+
padding: 20px 24px;
|
| 1203 |
+
margin-bottom: 24px;
|
| 1204 |
+
text-align: left;
|
| 1205 |
+
}
|
| 1206 |
+
|
| 1207 |
+
.result-report-header {
|
| 1208 |
+
display: flex;
|
| 1209 |
+
justify-content: space-between;
|
| 1210 |
+
align-items: center;
|
| 1211 |
+
font-weight: 600;
|
| 1212 |
+
margin-bottom: 12px;
|
| 1213 |
+
}
|
| 1214 |
+
|
| 1215 |
+
.report-badge {
|
| 1216 |
+
font-size: 12px;
|
| 1217 |
+
text-transform: uppercase;
|
| 1218 |
+
letter-spacing: 0.5px;
|
| 1219 |
+
padding: 4px 10px;
|
| 1220 |
+
border-radius: 999px;
|
| 1221 |
+
background: rgba(56, 189, 248, 0.15);
|
| 1222 |
+
color: #22d3ee;
|
| 1223 |
+
border: 1px solid rgba(34, 211, 238, 0.3);
|
| 1224 |
+
}
|
| 1225 |
+
|
| 1226 |
+
.report-badge.danger {
|
| 1227 |
+
background: rgba(248, 113, 113, 0.15);
|
| 1228 |
+
color: #f87171;
|
| 1229 |
+
border-color: rgba(248, 113, 113, 0.4);
|
| 1230 |
+
}
|
| 1231 |
+
|
| 1232 |
+
.result-report.ready {
|
| 1233 |
+
background: rgba(23, 27, 45, 0.9);
|
| 1234 |
+
}
|
| 1235 |
+
|
| 1236 |
+
.result-report.pending,
|
| 1237 |
+
.result-report.error {
|
| 1238 |
+
display: flex;
|
| 1239 |
+
align-items: center;
|
| 1240 |
+
gap: 16px;
|
| 1241 |
+
}
|
| 1242 |
+
|
| 1243 |
+
.report-spinner {
|
| 1244 |
+
width: 32px;
|
| 1245 |
+
height: 32px;
|
| 1246 |
+
border: 3px solid rgba(255, 255, 255, 0.2);
|
| 1247 |
+
border-top-color: #22d3ee;
|
| 1248 |
+
border-radius: 50%;
|
| 1249 |
+
animation: report-spin 0.8s linear infinite;
|
| 1250 |
+
}
|
| 1251 |
+
|
| 1252 |
+
@keyframes report-spin {
|
| 1253 |
+
to { transform: rotate(360deg); }
|
| 1254 |
+
}
|
| 1255 |
+
|
| 1256 |
+
.report-summary {
|
| 1257 |
+
font-size: 16px;
|
| 1258 |
+
margin-bottom: 16px;
|
| 1259 |
+
color: var(--status-text);
|
| 1260 |
+
}
|
| 1261 |
+
|
| 1262 |
+
.player-actions-block {
|
| 1263 |
+
background: rgba(15, 23, 42, 0.75);
|
| 1264 |
+
border: 1px dashed rgba(248, 250, 252, 0.2);
|
| 1265 |
+
border-radius: 14px;
|
| 1266 |
+
padding: 16px 18px;
|
| 1267 |
+
margin-bottom: 18px;
|
| 1268 |
+
}
|
| 1269 |
+
|
| 1270 |
+
.player-actions-header {
|
| 1271 |
+
font-weight: 700;
|
| 1272 |
+
letter-spacing: 0.08em;
|
| 1273 |
+
text-transform: uppercase;
|
| 1274 |
+
font-size: 13px;
|
| 1275 |
+
color: #facc15;
|
| 1276 |
+
margin-bottom: 6px;
|
| 1277 |
+
}
|
| 1278 |
+
|
| 1279 |
+
.player-actions-summary {
|
| 1280 |
+
margin: 0 0 12px 0;
|
| 1281 |
+
color: var(--status-text);
|
| 1282 |
+
font-size: 14px;
|
| 1283 |
+
}
|
| 1284 |
+
|
| 1285 |
+
.player-actions-chips {
|
| 1286 |
+
display: flex;
|
| 1287 |
+
flex-wrap: wrap;
|
| 1288 |
+
gap: 8px;
|
| 1289 |
+
margin-bottom: 12px;
|
| 1290 |
+
}
|
| 1291 |
+
|
| 1292 |
+
.player-action-chip {
|
| 1293 |
+
padding: 4px 10px;
|
| 1294 |
+
border-radius: 999px;
|
| 1295 |
+
border: 1px solid rgba(248, 113, 113, 0.35);
|
| 1296 |
+
background: rgba(248, 113, 113, 0.08);
|
| 1297 |
+
font-size: 12px;
|
| 1298 |
+
color: #fed7aa;
|
| 1299 |
+
letter-spacing: 0.03em;
|
| 1300 |
+
}
|
| 1301 |
+
|
| 1302 |
+
.player-action-chip.muted {
|
| 1303 |
+
opacity: 0.4;
|
| 1304 |
+
}
|
| 1305 |
+
|
| 1306 |
+
.player-actions-list {
|
| 1307 |
+
list-style: none;
|
| 1308 |
+
margin: 0;
|
| 1309 |
+
padding: 0;
|
| 1310 |
+
display: flex;
|
| 1311 |
+
flex-direction: column;
|
| 1312 |
+
gap: 8px;
|
| 1313 |
+
}
|
| 1314 |
+
|
| 1315 |
+
.player-actions-list li {
|
| 1316 |
+
display: flex;
|
| 1317 |
+
justify-content: space-between;
|
| 1318 |
+
gap: 12px;
|
| 1319 |
+
padding: 8px 12px;
|
| 1320 |
+
border-radius: 10px;
|
| 1321 |
+
border: 1px solid rgba(148, 163, 184, 0.2);
|
| 1322 |
+
background: rgba(15, 23, 42, 0.9);
|
| 1323 |
+
font-size: 13px;
|
| 1324 |
+
}
|
| 1325 |
+
|
| 1326 |
+
.player-actions-list .action-tick {
|
| 1327 |
+
font-weight: 600;
|
| 1328 |
+
color: #38bdf8;
|
| 1329 |
+
}
|
| 1330 |
+
|
| 1331 |
+
.player-actions-list .action-desc {
|
| 1332 |
+
flex: 1;
|
| 1333 |
+
text-align: right;
|
| 1334 |
+
color: #e2e8f0;
|
| 1335 |
+
}
|
| 1336 |
+
|
| 1337 |
+
.player-actions-empty {
|
| 1338 |
+
margin: 0;
|
| 1339 |
+
font-size: 13px;
|
| 1340 |
+
color: var(--status-muted);
|
| 1341 |
+
text-align: center;
|
| 1342 |
+
}
|
| 1343 |
+
|
| 1344 |
+
.report-card-grid {
|
| 1345 |
+
display: grid;
|
| 1346 |
+
grid-template-columns: repeat(auto-fit, minmax(260px, 1fr));
|
| 1347 |
+
gap: 18px;
|
| 1348 |
+
}
|
| 1349 |
+
|
| 1350 |
+
@media (min-width: 1100px) {
|
| 1351 |
+
.report-card-grid {
|
| 1352 |
+
grid-template-columns: repeat(3, minmax(0, 1fr));
|
| 1353 |
+
}
|
| 1354 |
+
}
|
| 1355 |
+
|
| 1356 |
+
.report-card {
|
| 1357 |
+
background: rgba(15, 23, 42, 0.8);
|
| 1358 |
+
border-radius: 16px;
|
| 1359 |
+
padding: 18px;
|
| 1360 |
+
border: 1px solid rgba(255, 255, 255, 0.08);
|
| 1361 |
+
display: flex;
|
| 1362 |
+
flex-direction: column;
|
| 1363 |
+
gap: 12px;
|
| 1364 |
+
min-height: 220px;
|
| 1365 |
+
}
|
| 1366 |
+
|
| 1367 |
+
.report-card-header {
|
| 1368 |
+
display: flex;
|
| 1369 |
+
align-items: center;
|
| 1370 |
+
gap: 12px;
|
| 1371 |
+
text-transform: uppercase;
|
| 1372 |
+
letter-spacing: 0.08em;
|
| 1373 |
+
font-size: 13px;
|
| 1374 |
+
font-weight: 700;
|
| 1375 |
+
color: #e2e8f0;
|
| 1376 |
+
}
|
| 1377 |
+
|
| 1378 |
+
.report-card-icon {
|
| 1379 |
+
font-size: 22px;
|
| 1380 |
+
line-height: 1;
|
| 1381 |
+
}
|
| 1382 |
+
|
| 1383 |
+
.report-card-title {
|
| 1384 |
+
flex: 1;
|
| 1385 |
+
}
|
| 1386 |
+
|
| 1387 |
+
.report-card-body {
|
| 1388 |
+
flex: 1;
|
| 1389 |
+
}
|
| 1390 |
+
|
| 1391 |
+
.report-card ul {
|
| 1392 |
+
list-style: disc;
|
| 1393 |
+
margin: 0 0 0 18px;
|
| 1394 |
+
padding: 0;
|
| 1395 |
+
color: var(--status-text);
|
| 1396 |
+
line-height: 1.45;
|
| 1397 |
+
}
|
| 1398 |
+
|
| 1399 |
+
.report-card ul li {
|
| 1400 |
+
margin-bottom: 6px;
|
| 1401 |
+
}
|
| 1402 |
+
|
| 1403 |
+
.report-chart-grid {
|
| 1404 |
+
margin-top: 20px;
|
| 1405 |
+
display: grid;
|
| 1406 |
+
grid-template-columns: repeat(auto-fit, minmax(220px, 1fr));
|
| 1407 |
+
gap: 16px;
|
| 1408 |
+
}
|
| 1409 |
+
|
| 1410 |
+
.report-chart-card {
|
| 1411 |
+
background: rgba(13, 20, 35, 0.85);
|
| 1412 |
+
border-radius: 14px;
|
| 1413 |
+
padding: 14px 16px 18px;
|
| 1414 |
+
border: 1px solid rgba(255, 255, 255, 0.08);
|
| 1415 |
+
display: flex;
|
| 1416 |
+
flex-direction: column;
|
| 1417 |
+
gap: 8px;
|
| 1418 |
+
}
|
| 1419 |
+
|
| 1420 |
+
.mini-chart {
|
| 1421 |
+
width: 100%;
|
| 1422 |
+
height: 80px;
|
| 1423 |
+
}
|
| 1424 |
+
|
| 1425 |
+
.mini-chart svg {
|
| 1426 |
+
width: 100%;
|
| 1427 |
+
height: 80px;
|
| 1428 |
+
}
|
| 1429 |
+
|
| 1430 |
+
.chart-title {
|
| 1431 |
+
font-size: 13px;
|
| 1432 |
+
font-weight: 600;
|
| 1433 |
+
color: #cbd5f5;
|
| 1434 |
+
letter-spacing: 0.04em;
|
| 1435 |
+
text-transform: uppercase;
|
| 1436 |
+
}
|
| 1437 |
+
|
| 1438 |
+
.chart-meta {
|
| 1439 |
+
display: flex;
|
| 1440 |
+
justify-content: space-between;
|
| 1441 |
+
font-size: 12px;
|
| 1442 |
+
color: var(--status-muted);
|
| 1443 |
+
}
|
| 1444 |
+
|
| 1445 |
+
.chart-value {
|
| 1446 |
+
font-weight: 700;
|
| 1447 |
+
}
|
| 1448 |
+
|
| 1449 |
+
.chart-axis-values,
|
| 1450 |
+
.chart-x-axis {
|
| 1451 |
+
display: flex;
|
| 1452 |
+
justify-content: space-between;
|
| 1453 |
+
font-size: 11px;
|
| 1454 |
+
color: var(--status-muted);
|
| 1455 |
+
}
|
| 1456 |
+
|
| 1457 |
+
.chart-axis-values {
|
| 1458 |
+
margin-top: 4px;
|
| 1459 |
+
}
|
| 1460 |
+
|
| 1461 |
+
.chart-x-axis {
|
| 1462 |
+
margin-top: 6px;
|
| 1463 |
+
}
|
| 1464 |
+
|
| 1465 |
+
|
| 1466 |
+
.report-card.positive {
|
| 1467 |
+
border-color: rgba(34, 197, 94, 0.3);
|
| 1468 |
+
}
|
| 1469 |
+
|
| 1470 |
+
.report-card.risk {
|
| 1471 |
+
border-color: rgba(248, 113, 113, 0.35);
|
| 1472 |
+
}
|
| 1473 |
+
|
| 1474 |
+
.report-card.action {
|
| 1475 |
+
border-color: rgba(59, 130, 246, 0.35);
|
| 1476 |
+
}
|
| 1477 |
+
|
| 1478 |
+
.report-error {
|
| 1479 |
+
color: #f87171;
|
| 1480 |
+
margin: 0;
|
| 1481 |
+
}
|
| 1482 |
+
|
| 1483 |
+
.report-empty {
|
| 1484 |
+
color: var(--status-muted);
|
| 1485 |
+
margin: 0;
|
| 1486 |
+
}
|
| 1487 |
+
|
| 1488 |
+
.result-ok-btn {
|
| 1489 |
+
width: 100%;
|
| 1490 |
+
padding: 14px 0;
|
| 1491 |
+
border: none;
|
| 1492 |
+
border-radius: 999px;
|
| 1493 |
+
background: linear-gradient(90deg, #22d3ee, #3b82f6);
|
| 1494 |
+
color: #0f172a;
|
| 1495 |
+
font-weight: 700;
|
| 1496 |
+
font-size: 16px;
|
| 1497 |
+
cursor: pointer;
|
| 1498 |
+
transition: transform 0.2s ease, box-shadow 0.2s ease;
|
| 1499 |
+
}
|
| 1500 |
+
|
| 1501 |
+
.result-ok-btn:hover {
|
| 1502 |
+
transform: translateY(-1px);
|
| 1503 |
+
box-shadow: 0 10px 25px rgba(34, 211, 238, 0.3);
|
| 1504 |
+
}
|
| 1505 |
+
|
| 1506 |
+
.report-text p {
|
| 1507 |
+
margin: 4px 0 0 0;
|
| 1508 |
+
color: var(--status-muted);
|
| 1509 |
+
}
|
| 1510 |
+
|
| 1511 |
+
@keyframes popup {
|
| 1512 |
+
from { transform: scale(0.8); opacity: 0; }
|
| 1513 |
+
to { transform: scale(1); opacity: 1; }
|
| 1514 |
+
}
|
| 1515 |
+
|
| 1516 |
+
/* AI Thinking indicator animation */
|
| 1517 |
+
.ai-thinking {
|
| 1518 |
+
background: linear-gradient(90deg, rgba(99, 102, 241, 0.2), rgba(147, 51, 234, 0.2));
|
| 1519 |
+
border-radius: 6px;
|
| 1520 |
+
padding: 4px 10px !important;
|
| 1521 |
+
animation: thinking-pulse 1.5s ease-in-out infinite;
|
| 1522 |
+
}
|
| 1523 |
+
|
| 1524 |
+
.thinking-indicator {
|
| 1525 |
+
color: #a855f7;
|
| 1526 |
+
font-weight: bold;
|
| 1527 |
+
font-size: 13px;
|
| 1528 |
+
}
|
| 1529 |
+
|
| 1530 |
+
.dark .thinking-indicator {
|
| 1531 |
+
color: #c084fc;
|
| 1532 |
+
}
|
| 1533 |
+
|
| 1534 |
+
@keyframes thinking-pulse {
|
| 1535 |
+
0%, 100% { opacity: 1; background: linear-gradient(90deg, rgba(99, 102, 241, 0.2), rgba(147, 51, 234, 0.2)); }
|
| 1536 |
+
50% { opacity: 0.7; background: linear-gradient(90deg, rgba(99, 102, 241, 0.4), rgba(147, 51, 234, 0.4)); }
|
| 1537 |
+
}
|
| 1538 |
+
|
| 1539 |
+
/* iOS-style AI Thinking Toast */
|
| 1540 |
+
.ai-toast-container {
|
| 1541 |
+
position: absolute;
|
| 1542 |
+
top: 10px;
|
| 1543 |
+
left: 50%;
|
| 1544 |
+
transform: translateX(-50%);
|
| 1545 |
+
z-index: 100;
|
| 1546 |
+
animation: toast-slide-in 0.3s ease-out;
|
| 1547 |
+
}
|
| 1548 |
+
|
| 1549 |
+
.ai-toast {
|
| 1550 |
+
display: flex;
|
| 1551 |
+
align-items: center;
|
| 1552 |
+
gap: 12px;
|
| 1553 |
+
background: rgba(30, 30, 40, 0.95);
|
| 1554 |
+
backdrop-filter: blur(20px);
|
| 1555 |
+
-webkit-backdrop-filter: blur(20px);
|
| 1556 |
+
border-radius: 16px;
|
| 1557 |
+
padding: 12px 18px;
|
| 1558 |
+
box-shadow: 0 8px 32px rgba(0, 0, 0, 0.3),
|
| 1559 |
+
0 0 0 1px rgba(255, 255, 255, 0.1);
|
| 1560 |
+
min-width: 280px;
|
| 1561 |
+
}
|
| 1562 |
+
|
| 1563 |
+
.ai-toast-icon {
|
| 1564 |
+
font-size: 28px;
|
| 1565 |
+
line-height: 1;
|
| 1566 |
+
}
|
| 1567 |
+
|
| 1568 |
+
.ai-toast-content {
|
| 1569 |
+
flex: 1;
|
| 1570 |
+
}
|
| 1571 |
+
|
| 1572 |
+
.ai-toast-title {
|
| 1573 |
+
color: #ffffff;
|
| 1574 |
+
font-weight: 600;
|
| 1575 |
+
font-size: 15px;
|
| 1576 |
+
margin-bottom: 2px;
|
| 1577 |
+
}
|
| 1578 |
+
|
| 1579 |
+
.ai-toast-subtitle {
|
| 1580 |
+
color: rgba(255, 255, 255, 0.6);
|
| 1581 |
+
font-size: 13px;
|
| 1582 |
+
}
|
| 1583 |
+
|
| 1584 |
+
.ai-toast-spinner {
|
| 1585 |
+
width: 20px;
|
| 1586 |
+
height: 20px;
|
| 1587 |
+
border: 2px solid rgba(255, 255, 255, 0.2);
|
| 1588 |
+
border-top-color: #a855f7;
|
| 1589 |
+
border-radius: 50%;
|
| 1590 |
+
animation: toast-spin 0.8s linear infinite;
|
| 1591 |
+
}
|
| 1592 |
+
|
| 1593 |
+
@keyframes toast-slide-in {
|
| 1594 |
+
from {
|
| 1595 |
+
opacity: 0;
|
| 1596 |
+
transform: translateX(-50%) translateY(-20px);
|
| 1597 |
+
}
|
| 1598 |
+
to {
|
| 1599 |
+
opacity: 1;
|
| 1600 |
+
transform: translateX(-50%) translateY(0);
|
| 1601 |
+
}
|
| 1602 |
+
}
|
| 1603 |
+
|
| 1604 |
+
@keyframes toast-spin {
|
| 1605 |
+
to { transform: rotate(360deg); }
|
| 1606 |
+
}
|
| 1607 |
+
|
| 1608 |
+
/* Grid wrapper for toast positioning */
|
| 1609 |
+
.grid-wrapper {
|
| 1610 |
+
position: relative;
|
| 1611 |
+
}
|
| 1612 |
+
"""
|
| 1613 |
+
|
| 1614 |
+
|
| 1615 |
+
def create_app() -> gr.Blocks:
|
| 1616 |
+
"""Create the Gradio application."""
|
| 1617 |
+
|
| 1618 |
+
with gr.Blocks() as app:
|
| 1619 |
+
# Inject CSS via HTML style tag (most compatible method)
|
| 1620 |
+
gr.HTML(f"<style>{CUSTOM_CSS}</style>")
|
| 1621 |
+
|
| 1622 |
+
# Header with title and instructions
|
| 1623 |
+
gr.Markdown("# 🔥 Fire Rescue Simulator Game")
|
| 1624 |
+
gr.Markdown("*An interactive game where you watch AI Agent autonomously fight fires using MCP tools!*")
|
| 1625 |
+
gr.Markdown("""
|
| 1626 |
+
**🎮 How to Play:**
|
| 1627 |
+
- Click **Start**
|
| 1628 |
+
- **Auto-Execute** is optional: turn it OFF to fully control every action manually.
|
| 1629 |
+
- Watch the AI's process: **Reasoning → Planning → Execution → Summary**
|
| 1630 |
+
- Select placement action:
|
| 1631 |
+
- 🚒 **Fire Truck:** High power (40%), covers 1 tile outward from its center — best for intense fires & building threats
|
| 1632 |
+
- 🚁 **Helicopter:** Wide coverage (25%), covers 2 tiles outward from its center — best for large-area control
|
| 1633 |
+
- **Settings & Controls:** Use the panel below to quickly tune scenario difficulty (fires, buildings, units, randomness) before sending the team in
|
| 1634 |
+
|
| 1635 |
+
**🏆 Win:** Extinguish all fires | **💀 Lose:** Building integrity ≤ 50%
|
| 1636 |
+
""", elem_classes=["how-to-play"])
|
| 1637 |
+
|
| 1638 |
+
# Collapsible Controls Section
|
| 1639 |
+
with gr.Accordion("⚙️ Settings & Controls", open=False):
|
| 1640 |
+
with gr.Row():
|
| 1641 |
+
with gr.Column(scale=1):
|
| 1642 |
+
fire_count = gr.Slider(
|
| 1643 |
+
minimum=1, maximum=30, value=15, step=1,
|
| 1644 |
+
label="🔥 Initial Fire Count",
|
| 1645 |
+
info="Number of fire starting points (1-25)"
|
| 1646 |
+
)
|
| 1647 |
+
with gr.Column(scale=1):
|
| 1648 |
+
fire_intensity = gr.Slider(
|
| 1649 |
+
minimum=0.2, maximum=0.9, value=0.6, step=0.05,
|
| 1650 |
+
label="🌡️ Fire Intensity",
|
| 1651 |
+
info="Initial fire strength (0.2-0.9)"
|
| 1652 |
+
)
|
| 1653 |
+
with gr.Row():
|
| 1654 |
+
with gr.Column(scale=1):
|
| 1655 |
+
building_count = gr.Slider(
|
| 1656 |
+
minimum=1, maximum=35, value=20, step=1,
|
| 1657 |
+
label="🏢 Building Count",
|
| 1658 |
+
info="Number of buildings (connected cluster)"
|
| 1659 |
+
)
|
| 1660 |
+
with gr.Column(scale=1):
|
| 1661 |
+
max_units = gr.Slider(
|
| 1662 |
+
minimum=1, maximum=20, value=10, step=1,
|
| 1663 |
+
label="🚒 Max Units",
|
| 1664 |
+
info="Maximum deployable units (1-20)"
|
| 1665 |
+
)
|
| 1666 |
+
with gr.Row():
|
| 1667 |
+
with gr.Column(scale=1):
|
| 1668 |
+
seed_input = gr.Number(
|
| 1669 |
+
value=0, label="Random Seed (0 = random)", precision=0
|
| 1670 |
+
)
|
| 1671 |
+
|
| 1672 |
+
|
| 1673 |
+
# Control buttons - always visible
|
| 1674 |
+
with gr.Row(elem_classes=["control-buttons-row"]):
|
| 1675 |
+
start_btn = gr.Button("▶️ Start", variant="primary", scale=1)
|
| 1676 |
+
pause_btn = gr.Button("⏸️ Pause", variant="stop", interactive=False, scale=1)
|
| 1677 |
+
reset_btn = gr.Button("🔄 Reset", scale=1)
|
| 1678 |
+
|
| 1679 |
+
# Game result popup (empty until the game ends)
|
| 1680 |
+
result_popup = gr.HTML(value="", visible=True)
|
| 1681 |
+
|
| 1682 |
+
# Store grid buttons for updates
|
| 1683 |
+
grid_buttons = []
|
| 1684 |
+
|
| 1685 |
+
# Main content: AI Advisor (left) + Simulation Grid (right)
|
| 1686 |
+
with gr.Row(elem_classes=["section-gap"]):
|
| 1687 |
+
# Left column: AI Advisor - THE STAR OF THE SHOW
|
| 1688 |
+
with gr.Column(scale=2, min_width=300):
|
| 1689 |
+
service = get_service()
|
| 1690 |
+
advisor_interval_ticks = getattr(service, "advisor_interval", 10)
|
| 1691 |
+
tick_interval_seconds = getattr(service, "tick_interval", 1.0)
|
| 1692 |
+
advisor_interval_seconds = advisor_interval_ticks * tick_interval_seconds
|
| 1693 |
+
if isinstance(advisor_interval_seconds, float) and advisor_interval_seconds.is_integer():
|
| 1694 |
+
advisor_interval_display = int(advisor_interval_seconds)
|
| 1695 |
+
else:
|
| 1696 |
+
advisor_interval_display = round(advisor_interval_seconds, 1)
|
| 1697 |
+
gr.Markdown(
|
| 1698 |
+
f"## 🤖 AI Tactical Advisor · (refreshes ~every {advisor_interval_display} seconds)"
|
| 1699 |
+
)
|
| 1700 |
+
auto_execute_toggle = gr.Checkbox(
|
| 1701 |
+
label="🎮 Auto-Execute",
|
| 1702 |
+
value=True,
|
| 1703 |
+
info="Automatically execute AI recommendations",
|
| 1704 |
+
)
|
| 1705 |
+
|
| 1706 |
+
advisor_initial_messages = _get_combined_advisor_messages(service)
|
| 1707 |
+
|
| 1708 |
+
with gr.Accordion("📜 AI Analysis History", open=True):
|
| 1709 |
+
gr.Markdown(
|
| 1710 |
+
"⚠️ **Heads-up**: This timeline refreshes whenever a new AI cycle starts. "
|
| 1711 |
+
"Hit `Pause` first if you want to read the full reasoning without it updating mid-run. "
|
| 1712 |
+
"All completed cycles remain in this view—just scroll to review earlier ticks."
|
| 1713 |
+
)
|
| 1714 |
+
advisor_display = gr.Chatbot(
|
| 1715 |
+
value=advisor_initial_messages,
|
| 1716 |
+
height=500,
|
| 1717 |
+
render_markdown=True,
|
| 1718 |
+
show_label=False,
|
| 1719 |
+
layout="panel",
|
| 1720 |
+
elem_classes=["advisor-chatbot"],
|
| 1721 |
+
avatar_images=(None, None),
|
| 1722 |
+
)
|
| 1723 |
+
|
| 1724 |
+
# Collapsible Event Log
|
| 1725 |
+
with gr.Accordion("📋 Event Log & Deploy Status", open=False):
|
| 1726 |
+
event_log_display = gr.Textbox(
|
| 1727 |
+
value="No events yet...",
|
| 1728 |
+
label="Events",
|
| 1729 |
+
lines=5,
|
| 1730 |
+
max_lines=10,
|
| 1731 |
+
interactive=False,
|
| 1732 |
+
elem_classes=["log-box"]
|
| 1733 |
+
)
|
| 1734 |
+
|
| 1735 |
+
# Right column: Simulation Grid
|
| 1736 |
+
with gr.Column(scale=2):
|
| 1737 |
+
gr.Markdown("## 🗺️ Simulation Grid")
|
| 1738 |
+
|
| 1739 |
+
# Click to place selector
|
| 1740 |
+
with gr.Row():
|
| 1741 |
+
place_selector = gr.Radio(
|
| 1742 |
+
choices=["🚒 Truck", "🚁 Heli", "🔥 Fire"],
|
| 1743 |
+
value="🚒 Truck",
|
| 1744 |
+
label="Click Map to Place",
|
| 1745 |
+
scale=2
|
| 1746 |
+
)
|
| 1747 |
+
|
| 1748 |
+
# Legend
|
| 1749 |
+
gr.HTML("""
|
| 1750 |
+
<div class="legend-box">
|
| 1751 |
+
<span style="margin-right: 15px;">🌲 Forest</span>
|
| 1752 |
+
<span style="margin-right: 15px;">🏢 Building</span>
|
| 1753 |
+
<span style="margin-right: 15px;">🔥 Fire</span>
|
| 1754 |
+
<span style="margin-right: 15px;">💨 Smoke</span>
|
| 1755 |
+
<span style="margin-right: 15px;">🚒 Truck</span>
|
| 1756 |
+
<span>🚁 Heli</span>
|
| 1757 |
+
</div>
|
| 1758 |
+
""")
|
| 1759 |
+
|
| 1760 |
+
# Status display with progress bars - below legend (includes AI thinking indicator)
|
| 1761 |
+
status_display = gr.HTML(
|
| 1762 |
+
value=render_status_html({"status": "idle"}),
|
| 1763 |
+
elem_classes=["status-panel"]
|
| 1764 |
+
)
|
| 1765 |
+
|
| 1766 |
+
# Grid container with no gap
|
| 1767 |
+
with gr.Column(elem_classes=["grid-container"]):
|
| 1768 |
+
# X-axis labels (top row)
|
| 1769 |
+
with gr.Row(elem_classes=["grid-row"]):
|
| 1770 |
+
# Empty corner
|
| 1771 |
+
gr.Button(value="", elem_classes=["coord-label"], interactive=False, min_width=52)
|
| 1772 |
+
# X coordinates 0-9
|
| 1773 |
+
for x in range(10):
|
| 1774 |
+
gr.Button(value=str(x), elem_classes=["coord-label"], interactive=False, min_width=52)
|
| 1775 |
+
|
| 1776 |
+
# Grid buttons with Y-axis labels
|
| 1777 |
+
for y in range(10):
|
| 1778 |
+
with gr.Row(elem_classes=["grid-row"]):
|
| 1779 |
+
# Y-axis label
|
| 1780 |
+
gr.Button(value=str(y), elem_classes=["coord-label"], interactive=False, min_width=52)
|
| 1781 |
+
# Grid cells
|
| 1782 |
+
for x in range(10):
|
| 1783 |
+
btn = gr.Button(
|
| 1784 |
+
value="🌲",
|
| 1785 |
+
elem_classes=["grid-btn"],
|
| 1786 |
+
min_width=52,
|
| 1787 |
+
)
|
| 1788 |
+
grid_buttons.append((x, y, btn))
|
| 1789 |
+
|
| 1790 |
+
# Timers: main simulation refresh + after-action poller
|
| 1791 |
+
timer = gr.Timer(value=1.0, active=False)
|
| 1792 |
+
report_timer = gr.Timer(value=1.0, active=False)
|
| 1793 |
+
|
| 1794 |
+
# Collect all button outputs for updates
|
| 1795 |
+
all_buttons = [btn for (_, _, btn) in grid_buttons]
|
| 1796 |
+
|
| 1797 |
+
# Event handlers for simulation controls
|
| 1798 |
+
start_btn.click(
|
| 1799 |
+
fn=start_or_resume_simulation,
|
| 1800 |
+
inputs=[fire_count, fire_intensity, building_count, max_units, seed_input],
|
| 1801 |
+
outputs=[timer, report_timer, result_popup, advisor_display, event_log_display, start_btn, pause_btn, status_display] + all_buttons
|
| 1802 |
+
)
|
| 1803 |
+
|
| 1804 |
+
pause_btn.click(
|
| 1805 |
+
fn=pause_simulation,
|
| 1806 |
+
inputs=[],
|
| 1807 |
+
outputs=[timer, report_timer, result_popup, advisor_display, event_log_display, start_btn, pause_btn, status_display] + all_buttons
|
| 1808 |
+
)
|
| 1809 |
+
|
| 1810 |
+
reset_btn.click(
|
| 1811 |
+
fn=reset_simulation,
|
| 1812 |
+
inputs=[fire_count, fire_intensity, building_count, max_units, seed_input],
|
| 1813 |
+
outputs=[timer, report_timer, result_popup, advisor_display, event_log_display, start_btn, pause_btn, status_display] + all_buttons
|
| 1814 |
+
)
|
| 1815 |
+
|
| 1816 |
+
# Event handlers for grid buttons (click to place)
|
| 1817 |
+
for x, y, btn in grid_buttons:
|
| 1818 |
+
btn.click(
|
| 1819 |
+
fn=lambda sel, _x=x, _y=y: deploy_at_cell(_x, _y, sel),
|
| 1820 |
+
inputs=[place_selector],
|
| 1821 |
+
outputs=[result_popup, advisor_display, event_log_display, status_display] + all_buttons
|
| 1822 |
+
)
|
| 1823 |
+
|
| 1824 |
+
# Timer tick handler - updates all components with change tracking
|
| 1825 |
+
timer.tick(
|
| 1826 |
+
fn=refresh_display,
|
| 1827 |
+
outputs=[timer, report_timer, result_popup, advisor_display, event_log_display, start_btn, pause_btn, status_display] + all_buttons
|
| 1828 |
+
)
|
| 1829 |
+
|
| 1830 |
+
report_timer.tick(
|
| 1831 |
+
fn=poll_after_action_report,
|
| 1832 |
+
outputs=[report_timer, result_popup]
|
| 1833 |
+
)
|
| 1834 |
+
|
| 1835 |
+
# Auto-execute toggle handler
|
| 1836 |
+
def on_auto_execute_toggle(enabled: bool):
|
| 1837 |
+
service = get_service()
|
| 1838 |
+
service.set_auto_execute(enabled)
|
| 1839 |
+
# No return value needed since outputs=[]
|
| 1840 |
+
|
| 1841 |
+
auto_execute_toggle.change(
|
| 1842 |
+
fn=on_auto_execute_toggle,
|
| 1843 |
+
inputs=[auto_execute_toggle],
|
| 1844 |
+
outputs=[]
|
| 1845 |
+
)
|
| 1846 |
+
|
| 1847 |
+
return app
|
| 1848 |
+
|
| 1849 |
+
|
| 1850 |
+
def launch_simple():
|
| 1851 |
+
"""Launch simple Gradio app (for HF Spaces / local development)."""
|
| 1852 |
+
gradio_app = create_app()
|
| 1853 |
+
gradio_app.launch(
|
| 1854 |
+
server_name="0.0.0.0",
|
| 1855 |
+
server_port=7860,
|
| 1856 |
+
ssr_mode=False, # Disable SSR for better compatibility
|
| 1857 |
+
footer_links=["gradio", "settings"], # Hide API docs button
|
| 1858 |
+
)
|
| 1859 |
+
|
| 1860 |
+
|
| 1861 |
+
# For Hugging Face Spaces - must be named 'demo' for HF Spaces auto-detection
|
| 1862 |
+
demo = create_app()
|
| 1863 |
+
|
| 1864 |
+
if __name__ == "__main__":
|
| 1865 |
+
launch_simple()
|
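Since the module also builds a top-level `demo`, the Space can be exercised locally either with `python app.py` or by importing the Blocks object directly. A minimal sketch (an illustration, assuming the repo root is the working directory):

```python
# Launch the same Blocks app that Hugging Face Spaces auto-detects as `demo`.
from app import demo

if __name__ == "__main__":
    demo.launch()  # default host/port; app.py's launch_simple() binds 0.0.0.0:7860 instead
```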
fire_rescue_mcp/__init__.py
ADDED
|
@@ -0,0 +1,13 @@
|
| 1 |
+
"""
|
| 2 |
+
Fire-Rescue MCP Server
|
| 3 |
+
|
| 4 |
+
Pure MCP layer - provides tools for external LLM clients to interact with the simulation.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from .mcp_server import mcp, run_server
|
| 8 |
+
|
| 9 |
+
__version__ = "0.3.0"
|
| 10 |
+
__all__ = [
|
| 11 |
+
"mcp",
|
| 12 |
+
"run_server",
|
| 13 |
+
]
|
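The package re-exports `mcp` and `run_server`, so an external entry point only needs the import below. This is an illustrative sketch; `run_server` is defined in `mcp_server.py`, and it is assumed here to be callable without arguments:

```python
# Hypothetical standalone entry point using the package's public API.
from fire_rescue_mcp import run_server

if __name__ == "__main__":
    run_server()  # assumption: starts the FastMCP server with its default transport
```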
fire_rescue_mcp/mcp_server.py
ADDED
|
@@ -0,0 +1,706 @@
|
| 1 |
+
"""
|
| 2 |
+
Fire-Rescue MCP Server
|
| 3 |
+
|
| 4 |
+
Provides MCP tools for fire rescue simulation.
|
| 5 |
+
MCP tools only return DATA - all decisions are made by AI.
|
| 6 |
+
|
| 7 |
+
Core Operations:
|
| 8 |
+
- reset_scenario: Initialize a new simulation
|
| 9 |
+
- get_world_state: Get current world state snapshot
|
| 10 |
+
- deploy_unit: Deploy a firefighting unit
|
| 11 |
+
- move_unit: Move an existing unit to a new position
|
| 12 |
+
- step_simulation: Advance simulation by ticks
|
| 13 |
+
|
| 14 |
+
Data Query Tools:
|
| 15 |
+
- find_idle_units: Get units not covering any fires
|
| 16 |
+
- find_uncovered_fires: Get fires with no unit coverage
|
| 17 |
+
- find_building_threats: Get fires near buildings
|
| 18 |
+
- analyze_coverage: Get comprehensive coverage data
|
| 19 |
+
"""
|
| 20 |
+
|
| 21 |
+
import sys
|
| 22 |
+
from pathlib import Path
|
| 23 |
+
from typing import Optional
|
| 24 |
+
|
| 25 |
+
from mcp.server.fastmcp import FastMCP
|
| 26 |
+
|
| 27 |
+
# Add parent directory to path for imports
|
| 28 |
+
sys.path.insert(0, str(Path(__file__).parent.parent))
|
| 29 |
+
|
| 30 |
+
from models import CellType, UnitType
|
| 31 |
+
from simulation import SimulationEngine, SimulationConfig
|
| 32 |
+
|
| 33 |
+
# Create FastMCP server instance
|
| 34 |
+
mcp = FastMCP("Fire-Rescue Simulation")
|
| 35 |
+
|
| 36 |
+
# Shared simulation engine instance
|
| 37 |
+
_engine: Optional[SimulationEngine] = None
|
| 38 |
+
|
| 39 |
+
# Unit effective ranges (matching SimulationConfig, Chebyshev/square distance)
|
| 40 |
+
FIRE_TRUCK_RANGE = 1 # Square coverage radius (includes 8 neighbors)
|
| 41 |
+
HELICOPTER_RANGE = 2 # Square coverage radius (extends two cells in all directions)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def get_engine() -> SimulationEngine:
|
| 45 |
+
"""Get or create the simulation engine singleton."""
|
| 46 |
+
global _engine
|
| 47 |
+
if _engine is None:
|
| 48 |
+
_engine = SimulationEngine()
|
| 49 |
+
return _engine
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def _get_unit_effective_range(unit_type: str) -> int:
|
| 53 |
+
"""Get the effective range for a unit type (for coverage analysis)."""
|
| 54 |
+
if unit_type == "fire_truck":
|
| 55 |
+
return FIRE_TRUCK_RANGE
|
| 56 |
+
elif unit_type == "helicopter":
|
| 57 |
+
return HELICOPTER_RANGE
|
| 58 |
+
return 2 # Default
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def _calculate_distance(x1: int, y1: int, x2: int, y2: int) -> int:
|
| 62 |
+
"""Calculate Chebyshev distance (square radius) between two points."""
|
| 63 |
+
return max(abs(x1 - x2), abs(y1 - y2))
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def _is_in_range(ux: int, uy: int, fx: int, fy: int, unit_type: str) -> bool:
|
| 67 |
+
"""Check if a fire position is within unit's effective range."""
|
| 68 |
+
effective_range = _get_unit_effective_range(unit_type)
|
| 69 |
+
return _calculate_distance(ux, uy, fx, fy) <= effective_range
|
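A short worked example of the coverage rule (editorial illustration, not part of the diff): under Chebyshev (square) distance, a range-1 fire truck covers the 3×3 block around itself and a range-2 helicopter covers a 5×5 block.

```python
# Coverage under Chebyshev distance, using the helpers defined above.
assert _calculate_distance(4, 4, 5, 5) == 1          # diagonal neighbour
assert _is_in_range(4, 4, 5, 5, "fire_truck")        # within the truck's 3x3 block
assert not _is_in_range(4, 4, 6, 6, "fire_truck")    # two cells away: uncovered
assert _is_in_range(4, 4, 6, 6, "helicopter")        # helicopter's 5x5 block reaches it
```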
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def generate_emoji_map(engine: SimulationEngine) -> str:
|
| 73 |
+
"""
|
| 74 |
+
Generate an emoji-based visualization of the current world state.
|
| 75 |
+
|
| 76 |
+
Legend (matching Gradio UI):
|
| 77 |
+
- 🌲 Forest (no fire)
|
| 78 |
+
- 🏢 Building (no fire)
|
| 79 |
+
- 🔥 Fire (intensity >= 10%)
|
| 80 |
+
- 💨 Smoke (smoldering, intensity < 10%)
|
| 81 |
+
- 🚒 Fire Truck
|
| 82 |
+
- 🚁 Helicopter
|
| 83 |
+
"""
|
| 84 |
+
if engine.world is None:
|
| 85 |
+
return "No map available"
|
| 86 |
+
|
| 87 |
+
world = engine.world
|
| 88 |
+
|
| 89 |
+
# Create unit position lookup
|
| 90 |
+
unit_positions = {}
|
| 91 |
+
for unit in world.units:
|
| 92 |
+
key = (unit.x, unit.y)
|
| 93 |
+
if key not in unit_positions:
|
| 94 |
+
unit_positions[key] = []
|
| 95 |
+
unit_positions[key].append(unit.unit_type.value)
|
| 96 |
+
|
| 97 |
+
# Build the map with coordinates
|
| 98 |
+
lines = []
|
| 99 |
+
|
| 100 |
+
# Header with X coordinates
|
| 101 |
+
header = " " + "".join(f"{x:2}" for x in range(world.width))
|
| 102 |
+
lines.append(header)
|
| 103 |
+
|
| 104 |
+
for y in range(world.height):
|
| 105 |
+
row_chars = []
|
| 106 |
+
for x in range(world.width):
|
| 107 |
+
cell = world.grid[y][x]
|
| 108 |
+
pos = (x, y)
|
| 109 |
+
|
| 110 |
+
# Priority: Units > Fire > Terrain
|
| 111 |
+
if pos in unit_positions:
|
| 112 |
+
if "fire_truck" in unit_positions[pos]:
|
| 113 |
+
row_chars.append("🚒")
|
| 114 |
+
else:
|
| 115 |
+
row_chars.append("🚁")
|
| 116 |
+
elif cell.fire_intensity > 0:
|
| 117 |
+
# Show fire intensity (matching Gradio: >=10% = fire, <10% = smoke)
|
| 118 |
+
if cell.fire_intensity >= 0.1:
|
| 119 |
+
row_chars.append("🔥")
|
| 120 |
+
else:
|
| 121 |
+
row_chars.append("💨")
|
| 122 |
+
else:
|
| 123 |
+
# Show terrain
|
| 124 |
+
if cell.cell_type == CellType.BUILDING:
|
| 125 |
+
row_chars.append("🏢")
|
| 126 |
+
elif cell.cell_type == CellType.FOREST:
|
| 127 |
+
row_chars.append("🌲")
|
| 128 |
+
else:
|
| 129 |
+
row_chars.append("⬜")
|
| 130 |
+
|
| 131 |
+
lines.append(f"{y:2} " + "".join(row_chars))
|
| 132 |
+
|
| 133 |
+
# Add legend (matching Gradio UI)
|
| 134 |
+
lines.append("")
|
| 135 |
+
lines.append("Legend: 🌲Forest 🏢Building 🔥Fire 💨Smoke 🚒Truck 🚁Heli")
|
| 136 |
+
|
| 137 |
+
return "\n".join(lines)
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
@mcp.tool()
|
| 141 |
+
def reset_scenario(
|
| 142 |
+
seed: Optional[int] = None,
|
| 143 |
+
fire_count: int = 10,
|
| 144 |
+
fire_intensity: float = 0.5,
|
| 145 |
+
building_count: int = 20,
|
| 146 |
+
max_units: int = 10
|
| 147 |
+
) -> dict:
|
| 148 |
+
"""
|
| 149 |
+
Reset and initialize a new fire rescue simulation scenario.
|
| 150 |
+
|
| 151 |
+
Args:
|
| 152 |
+
seed: Random seed for reproducibility (optional)
|
| 153 |
+
fire_count: Number of initial fire points (1-25, default: 10)
|
| 154 |
+
fire_intensity: Initial fire intensity (0.2-0.9, default: 0.5)
|
| 155 |
+
building_count: Number of buildings to place (1-35, default: 20)
|
| 156 |
+
max_units: Maximum deployable units (1-20, default: 10)
|
| 157 |
+
|
| 158 |
+
Returns:
|
| 159 |
+
Status, summary, and emoji map of the initial state
|
| 160 |
+
"""
|
| 161 |
+
engine = get_engine()
|
| 162 |
+
|
| 163 |
+
world = engine.reset(
|
| 164 |
+
seed=seed,
|
| 165 |
+
fire_count=fire_count,
|
| 166 |
+
fire_intensity=fire_intensity,
|
| 167 |
+
building_count=building_count,
|
| 168 |
+
max_units=max_units
|
| 169 |
+
)
|
| 170 |
+
fires = world.get_fires()
|
| 171 |
+
|
| 172 |
+
return {
|
| 173 |
+
"status": "ok",
|
| 174 |
+
"tick": world.tick,
|
| 175 |
+
"grid_size": f"{world.width}x{world.height}",
|
| 176 |
+
"initial_fires": len(fires),
|
| 177 |
+
"buildings": len(world.building_positions),
|
| 178 |
+
"max_units": world.max_units,
|
| 179 |
+
"max_ticks": world.max_ticks,
|
| 180 |
+
"emoji_map": generate_emoji_map(engine)
|
| 181 |
+
}
|
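For reference, the payload returned by `reset_scenario` has the shape sketched below. The field names come from the return statement above; the values are made up for illustration.

```python
# Hypothetical example response (values are illustrative, not actual output).
example_reset_response = {
    "status": "ok",
    "tick": 0,
    "grid_size": "10x10",
    "initial_fires": 10,
    "buildings": 20,
    "max_units": 10,
    "max_ticks": 200,    # assumed value; set by the simulation config
    "emoji_map": "...",  # emoji grid rendering, elided
}
```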
| 182 |
+
|
| 183 |
+
|
| 184 |
+
@mcp.tool()
|
| 185 |
+
def get_world_state() -> dict:
|
| 186 |
+
"""
|
| 187 |
+
Get the current world state snapshot with emoji map visualization.
|
| 188 |
+
|
| 189 |
+
Returns complete state of the simulation including:
|
| 190 |
+
- Current tick number
|
| 191 |
+
- Emoji map showing the battlefield visually
|
| 192 |
+
- Fire locations and intensities
|
| 193 |
+
- Deployed units and their positions
|
| 194 |
+
- Building integrity and forest burn ratio
|
| 195 |
+
- Recent events
|
| 196 |
+
|
| 197 |
+
The emoji_map provides a visual overview:
|
| 198 |
+
🌲 Forest | 🏢 Building | 🔥 Fire (>=10%) | 💨 Smoke (<10%)
|
| 199 |
+
🚒 Fire Truck | 🚁 Helicopter
|
| 200 |
+
"""
|
| 201 |
+
engine = get_engine()
|
| 202 |
+
|
| 203 |
+
if engine.world is None:
|
| 204 |
+
return {
|
| 205 |
+
"status": "error",
|
| 206 |
+
"message": "Simulation not initialized. Call reset_scenario first."
|
| 207 |
+
}
|
| 208 |
+
|
| 209 |
+
state = engine.get_state()
|
| 210 |
+
state["emoji_map"] = generate_emoji_map(engine)
|
| 211 |
+
|
| 212 |
+
return state
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
@mcp.tool()
|
| 216 |
+
def deploy_unit(
|
| 217 |
+
unit_type: str,
|
| 218 |
+
x: int,
|
| 219 |
+
y: int,
|
| 220 |
+
source: str = "player"
|
| 221 |
+
) -> dict:
|
| 222 |
+
"""
|
| 223 |
+
Deploy a firefighting unit at the specified position.
|
| 224 |
+
|
| 225 |
+
Args:
|
| 226 |
+
unit_type: Type of unit - "fire_truck" or "helicopter"
|
| 227 |
+
x: X coordinate (0 to grid_width-1)
|
| 228 |
+
y: Y coordinate (0 to grid_height-1)
|
| 229 |
+
source: Who initiated the deployment - "player", "player_accept", "auto_accept_ai"
|
| 230 |
+
|
| 231 |
+
Returns:
|
| 232 |
+
Status and details of the deployed unit
|
| 233 |
+
"""
|
| 234 |
+
engine = get_engine()
|
| 235 |
+
return engine.deploy_unit(unit_type, x, y, source)
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
@mcp.tool()
|
| 239 |
+
def step_simulation(ticks: int = 1) -> dict:
|
| 240 |
+
"""
|
| 241 |
+
Advance the simulation by the specified number of ticks.
|
| 242 |
+
|
| 243 |
+
Args:
|
| 244 |
+
ticks: Number of ticks to advance (default: 1)
|
| 245 |
+
|
| 246 |
+
Returns:
|
| 247 |
+
Current world state with emoji map after advancing
|
| 248 |
+
"""
|
| 249 |
+
engine = get_engine()
|
| 250 |
+
|
| 251 |
+
if engine.world is None:
|
| 252 |
+
return {
|
| 253 |
+
"status": "error",
|
| 254 |
+
"message": "Simulation not initialized. Call reset_scenario first."
|
| 255 |
+
}
|
| 256 |
+
|
| 257 |
+
for _ in range(ticks):
|
| 258 |
+
engine.step()
|
| 259 |
+
|
| 260 |
+
state = engine.get_state()
|
| 261 |
+
state["emoji_map"] = generate_emoji_map(engine)
|
| 262 |
+
|
| 263 |
+
return state
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
@mcp.tool()
|
| 267 |
+
def move_unit(
|
| 268 |
+
source_x: int,
|
| 269 |
+
source_y: int,
|
| 270 |
+
target_x: int,
|
| 271 |
+
target_y: int
|
| 272 |
+
) -> dict:
|
| 273 |
+
"""
|
| 274 |
+
Move an existing unit from source position to target position.
|
| 275 |
+
Useful for repositioning idle units to cover uncovered fires.
|
| 276 |
+
|
| 277 |
+
Args:
|
| 278 |
+
source_x: Current X coordinate of the unit
|
| 279 |
+
source_y: Current Y coordinate of the unit
|
| 280 |
+
target_x: New X coordinate to move to
|
| 281 |
+
target_y: New Y coordinate to move to
|
| 282 |
+
|
| 283 |
+
Returns:
|
| 284 |
+
Status and details of the move operation
|
| 285 |
+
"""
|
| 286 |
+
engine = get_engine()
|
| 287 |
+
|
| 288 |
+
if engine.world is None:
|
| 289 |
+
return {
|
| 290 |
+
"status": "error",
|
| 291 |
+
"message": "Simulation not initialized. Call reset_scenario first."
|
| 292 |
+
}
|
| 293 |
+
|
| 294 |
+
# Find unit at source position
|
| 295 |
+
unit_to_move = None
|
| 296 |
+
for unit in engine.world.units:
|
| 297 |
+
if unit.x == source_x and unit.y == source_y:
|
| 298 |
+
unit_to_move = unit
|
| 299 |
+
break
|
| 300 |
+
|
| 301 |
+
if unit_to_move is None:
|
| 302 |
+
return {
|
| 303 |
+
"status": "error",
|
| 304 |
+
"message": f"No unit found at source position ({source_x}, {source_y})"
|
| 305 |
+
}
|
| 306 |
+
|
| 307 |
+
unit_type = unit_to_move.unit_type.value
|
| 308 |
+
|
| 309 |
+
# Remove unit from source
|
| 310 |
+
remove_result = engine.remove_unit_at(source_x, source_y)
|
| 311 |
+
if remove_result.get("status") != "ok":
|
| 312 |
+
return {
|
| 313 |
+
"status": "error",
|
| 314 |
+
"message": f"Failed to remove unit: {remove_result.get('message')}"
|
| 315 |
+
}
|
| 316 |
+
|
| 317 |
+
# Deploy unit at target
|
| 318 |
+
deploy_result = engine.deploy_unit(unit_type, target_x, target_y, "ai_move")
|
| 319 |
+
if deploy_result.get("status") != "ok":
|
| 320 |
+
# Restore unit at original position
|
| 321 |
+
engine.deploy_unit(unit_type, source_x, source_y, "ai_restore")
|
| 322 |
+
return {
|
| 323 |
+
"status": "error",
|
| 324 |
+
"message": f"Failed to deploy at target: {deploy_result.get('message')}. Unit restored to original position."
|
| 325 |
+
}
|
| 326 |
+
|
| 327 |
+
return {
|
| 328 |
+
"status": "ok",
|
| 329 |
+
"unit_type": unit_type,
|
| 330 |
+
"source": {"x": source_x, "y": source_y},
|
| 331 |
+
"target": {"x": target_x, "y": target_y},
|
| 332 |
+
"message": f"Moved {unit_type} from ({source_x}, {source_y}) to ({target_x}, {target_y})"
|
| 333 |
+
}
|
| 334 |
+
|
| 335 |
+
|
| 336 |
+
@mcp.tool()
|
| 337 |
+
def remove_unit(
|
| 338 |
+
x: int,
|
| 339 |
+
y: int
|
| 340 |
+
) -> dict:
|
| 341 |
+
"""
|
| 342 |
+
Remove an existing unit at the specified position.
|
| 343 |
+
Use this to free up a deployment slot, then call deploy_unit to place a new unit elsewhere.
|
| 344 |
+
|
| 345 |
+
Args:
|
| 346 |
+
x: X coordinate of the unit to remove
|
| 347 |
+
y: Y coordinate of the unit to remove
|
| 348 |
+
|
| 349 |
+
Returns:
|
| 350 |
+
Status and details of the removed unit
|
| 351 |
+
|
| 352 |
+
Example use cases:
|
| 353 |
+
- Remove ineffective truck, then deploy helicopter at better position
|
| 354 |
+
- Free up deployment slot when unit is no longer needed
|
| 355 |
+
- Reposition unit: remove_unit + deploy_unit at new location
|
| 356 |
+
"""
|
| 357 |
+
engine = get_engine()
|
| 358 |
+
|
| 359 |
+
if engine.world is None:
|
| 360 |
+
return {
|
| 361 |
+
"status": "error",
|
| 362 |
+
"message": "Simulation not initialized. Call reset_scenario first."
|
| 363 |
+
}
|
| 364 |
+
|
| 365 |
+
# Find unit at position
|
| 366 |
+
unit_to_remove = None
|
| 367 |
+
for unit in engine.world.units:
|
| 368 |
+
if unit.x == x and unit.y == y:
|
| 369 |
+
unit_to_remove = unit
|
| 370 |
+
break
|
| 371 |
+
|
| 372 |
+
if unit_to_remove is None:
|
| 373 |
+
return {
|
| 374 |
+
"status": "error",
|
| 375 |
+
"message": f"No unit found at position ({x}, {y})"
|
| 376 |
+
}
|
| 377 |
+
|
| 378 |
+
removed_unit_type = unit_to_remove.unit_type.value
|
| 379 |
+
|
| 380 |
+
# Remove the unit
|
| 381 |
+
remove_result = engine.remove_unit_at(x, y)
|
| 382 |
+
if remove_result.get("status") != "ok":
|
| 383 |
+
return {
|
| 384 |
+
"status": "error",
|
| 385 |
+
"message": f"Failed to remove unit: {remove_result.get('message')}"
|
| 386 |
+
}
|
| 387 |
+
|
| 388 |
+
return {
|
| 389 |
+
"status": "ok",
|
| 390 |
+
"removed_unit_type": removed_unit_type,
|
| 391 |
+
"position": {"x": x, "y": y},
|
| 392 |
+
"message": f"Removed {removed_unit_type} at ({x}, {y}). You can now deploy a new unit."
|
| 393 |
+
}
|
| 394 |
+
|
| 395 |
+
|
| 396 |
+
@mcp.tool()
|
| 397 |
+
def find_idle_units() -> dict:
|
| 398 |
+
"""
|
| 399 |
+
Find units that are not covering any fires (idle/ineffective units).
|
| 400 |
+
|
| 401 |
+
An idle unit is one where NO fires exist within its effective range:
|
| 402 |
+
- Fire Truck effective range: 2 cells
|
| 403 |
+
- Helicopter effective range: 3 cells
|
| 404 |
+
|
| 405 |
+
Returns:
|
| 406 |
+
List of idle units and effective units with their positions
|
| 407 |
+
"""
|
| 408 |
+
engine = get_engine()
|
| 409 |
+
|
| 410 |
+
if engine.world is None:
|
| 411 |
+
return {
|
| 412 |
+
"status": "error",
|
| 413 |
+
"message": "Simulation not initialized. Call reset_scenario first."
|
| 414 |
+
}
|
| 415 |
+
|
| 416 |
+
world = engine.world
|
| 417 |
+
fires = world.get_fires()
|
| 418 |
+
fire_positions = [(f.x, f.y, f.intensity) for f in fires]
|
| 419 |
+
|
| 420 |
+
idle_units = []
|
| 421 |
+
effective_units = []
|
| 422 |
+
|
| 423 |
+
for unit in world.units:
|
| 424 |
+
unit_type = unit.unit_type.value
|
| 425 |
+
has_fire_in_range = False
|
| 426 |
+
|
| 427 |
+
# Check if any fire is within this unit's effective range
|
| 428 |
+
for fx, fy, intensity in fire_positions:
|
| 429 |
+
if _is_in_range(unit.x, unit.y, fx, fy, unit_type):
|
| 430 |
+
has_fire_in_range = True
|
| 431 |
+
break
|
| 432 |
+
|
| 433 |
+
unit_info = {
|
| 434 |
+
"x": unit.x,
|
| 435 |
+
"y": unit.y,
|
| 436 |
+
"type": unit_type,
|
| 437 |
+
"effective_range": _get_unit_effective_range(unit_type)
|
| 438 |
+
}
|
| 439 |
+
|
| 440 |
+
if has_fire_in_range:
|
| 441 |
+
effective_units.append(unit_info)
|
| 442 |
+
else:
|
| 443 |
+
idle_units.append(unit_info)
|
| 444 |
+
|
| 445 |
+
return {
|
| 446 |
+
"status": "ok",
|
| 447 |
+
"idle_units": idle_units,
|
| 448 |
+
"idle_count": len(idle_units),
|
| 449 |
+
"effective_units": effective_units,
|
| 450 |
+
"effective_count": len(effective_units),
|
| 451 |
+
"total_units": len(world.units)
|
| 452 |
+
}
|
| 453 |
+
|
| 454 |
+
|
| 455 |
+
@mcp.tool()
|
| 456 |
+
def find_uncovered_fires() -> dict:
|
| 457 |
+
"""
|
| 458 |
+
Find fires that have NO unit coverage.
|
| 459 |
+
|
| 460 |
+
An uncovered fire is one where NO unit is within effective range:
|
| 461 |
+
- Fire Truck range: 2 cells
|
| 462 |
+
- Helicopter range: 3 cells
|
| 463 |
+
|
| 464 |
+
Returns:
|
| 465 |
+
List of uncovered fires with their positions, intensity, and building threat status
|
| 466 |
+
"""
|
| 467 |
+
engine = get_engine()
|
| 468 |
+
|
| 469 |
+
if engine.world is None:
|
| 470 |
+
return {
|
| 471 |
+
"status": "error",
|
| 472 |
+
"message": "Simulation not initialized. Call reset_scenario first."
|
| 473 |
+
}
|
| 474 |
+
|
| 475 |
+
world = engine.world
|
| 476 |
+
fires = world.get_fires()
|
| 477 |
+
units = world.units
|
| 478 |
+
building_positions = set(world.building_positions)
|
| 479 |
+
|
| 480 |
+
uncovered_fires = []
|
| 481 |
+
covered_fires = []
|
| 482 |
+
|
| 483 |
+
for fire in fires:
|
| 484 |
+
is_covered = False
|
| 485 |
+
|
| 486 |
+
# Check if any unit covers this fire
|
| 487 |
+
for unit in units:
|
| 488 |
+
unit_type = unit.unit_type.value
|
| 489 |
+
if _is_in_range(unit.x, unit.y, fire.x, fire.y, unit_type):
|
| 490 |
+
is_covered = True
|
| 491 |
+
break
|
| 492 |
+
|
| 493 |
+
# Check if fire threatens any building (within 2 cells)
|
| 494 |
+
threatens_building = False
|
| 495 |
+
for bx, by in building_positions:
|
| 496 |
+
if _calculate_distance(fire.x, fire.y, bx, by) <= 2:
|
| 497 |
+
threatens_building = True
|
| 498 |
+
break
|
| 499 |
+
|
| 500 |
+
fire_info = {
|
| 501 |
+
"x": fire.x,
|
| 502 |
+
"y": fire.y,
|
| 503 |
+
"intensity": round(fire.intensity, 2),
|
| 504 |
+
"threatens_building": threatens_building
|
| 505 |
+
}
|
| 506 |
+
|
| 507 |
+
if is_covered:
|
| 508 |
+
covered_fires.append(fire_info)
|
| 509 |
+
else:
|
| 510 |
+
uncovered_fires.append(fire_info)
|
| 511 |
+
|
| 512 |
+
return {
|
| 513 |
+
"status": "ok",
|
| 514 |
+
"uncovered_fires": uncovered_fires,
|
| 515 |
+
"uncovered_count": len(uncovered_fires),
|
| 516 |
+
"covered_fires": covered_fires,
|
| 517 |
+
"covered_count": len(covered_fires),
|
| 518 |
+
"total_fires": len(fires),
|
| 519 |
+
"coverage_ratio": round(len(covered_fires) / len(fires), 2) if fires else 1.0
|
| 520 |
+
}
|
| 521 |
+
|
| 522 |
+
|
| 523 |
+
@mcp.tool()
|
| 524 |
+
def find_building_threats() -> dict:
|
| 525 |
+
"""
|
| 526 |
+
Find fires that are threatening buildings (within 2 cells of any building).
|
| 527 |
+
|
| 528 |
+
Returns:
|
| 529 |
+
List of building-threatening fires with their positions, threatened buildings, and coverage status
|
| 530 |
+
"""
|
| 531 |
+
engine = get_engine()
|
| 532 |
+
|
| 533 |
+
if engine.world is None:
|
| 534 |
+
return {
|
| 535 |
+
"status": "error",
|
| 536 |
+
"message": "Simulation not initialized. Call reset_scenario first."
|
| 537 |
+
}
|
| 538 |
+
|
| 539 |
+
world = engine.world
|
| 540 |
+
fires = world.get_fires()
|
| 541 |
+
units = world.units
|
| 542 |
+
building_positions = set(world.building_positions)
|
| 543 |
+
|
| 544 |
+
building_threats = []
|
| 545 |
+
|
| 546 |
+
for fire in fires:
|
| 547 |
+
# Check if fire threatens any building
|
| 548 |
+
threatened_buildings = []
|
| 549 |
+
for bx, by in building_positions:
|
| 550 |
+
dist = _calculate_distance(fire.x, fire.y, bx, by)
|
| 551 |
+
if dist <= 2:
|
| 552 |
+
threatened_buildings.append({"x": bx, "y": by, "distance": dist})
|
| 553 |
+
|
| 554 |
+
if not threatened_buildings:
|
| 555 |
+
continue
|
| 556 |
+
|
| 557 |
+
# Check if this fire is covered
|
| 558 |
+
is_covered = False
|
| 559 |
+
covering_unit = None
|
| 560 |
+
for unit in units:
|
| 561 |
+
unit_type = unit.unit_type.value
|
| 562 |
+
if _is_in_range(unit.x, unit.y, fire.x, fire.y, unit_type):
|
| 563 |
+
is_covered = True
|
| 564 |
+
covering_unit = {"x": unit.x, "y": unit.y, "type": unit_type}
|
| 565 |
+
break
|
| 566 |
+
|
| 567 |
+
building_threats.append({
|
| 568 |
+
"fire": {"x": fire.x, "y": fire.y, "intensity": round(fire.intensity, 2)},
|
| 569 |
+
"threatened_buildings": threatened_buildings,
|
| 570 |
+
"is_covered": is_covered,
|
| 571 |
+
"covering_unit": covering_unit
|
| 572 |
+
})
|
| 573 |
+
|
| 574 |
+
uncovered_threats = [t for t in building_threats if not t["is_covered"]]
|
| 575 |
+
|
| 576 |
+
return {
|
| 577 |
+
"status": "ok",
|
| 578 |
+
"building_threats": building_threats,
|
| 579 |
+
"total_threats": len(building_threats),
|
| 580 |
+
"uncovered_threats": len(uncovered_threats),
|
| 581 |
+
"building_integrity": round(world.building_integrity, 2)
|
| 582 |
+
}
|
| 583 |
+
|
| 584 |
+
|
| 585 |
+
@mcp.tool()
|
| 586 |
+
def analyze_coverage() -> dict:
|
| 587 |
+
"""
|
| 588 |
+
Get comprehensive coverage analysis data.
|
| 589 |
+
|
| 590 |
+
This tool combines information from multiple analyses:
|
| 591 |
+
- Idle units (not covering any fire)
|
| 592 |
+
- Uncovered fires (no unit in range)
|
| 593 |
+
- Building threats (fires near buildings)
|
| 594 |
+
- High intensity fires
|
| 595 |
+
|
| 596 |
+
Returns:
|
| 597 |
+
Comprehensive data about fires, units, and coverage status
|
| 598 |
+
"""
|
| 599 |
+
engine = get_engine()
|
| 600 |
+
|
| 601 |
+
if engine.world is None:
|
| 602 |
+
return {
|
| 603 |
+
"status": "error",
|
| 604 |
+
"message": "Simulation not initialized. Call reset_scenario first."
|
| 605 |
+
}
|
| 606 |
+
|
| 607 |
+
world = engine.world
|
| 608 |
+
fires = world.get_fires()
|
| 609 |
+
units = world.units
|
| 610 |
+
building_positions = set(world.building_positions)
|
| 611 |
+
|
| 612 |
+
# Analyze fires
|
| 613 |
+
fire_analysis = {
|
| 614 |
+
"total": len(fires),
|
| 615 |
+
"high_intensity": [], # >70%
|
| 616 |
+
"building_threats": [], # within 2 cells of building
|
| 617 |
+
"uncovered": [] # no unit in range
|
| 618 |
+
}
|
| 619 |
+
|
| 620 |
+
# Analyze units
|
| 621 |
+
unit_analysis = {
|
| 622 |
+
"total": len(units),
|
| 623 |
+
"max_units": world.max_units,
|
| 624 |
+
"available_slots": world.max_units - len(units),
|
| 625 |
+
"idle": [], # no fire in range
|
| 626 |
+
"effective": [] # has fire in range
|
| 627 |
+
}
|
| 628 |
+
|
| 629 |
+
# Process fires
|
| 630 |
+
for fire in fires:
|
| 631 |
+
fire_info = {"x": fire.x, "y": fire.y, "intensity": round(fire.intensity, 2)}
|
| 632 |
+
|
| 633 |
+
# High intensity check
|
| 634 |
+
if fire.intensity > 0.7:
|
| 635 |
+
fire_analysis["high_intensity"].append(fire_info)
|
| 636 |
+
|
| 637 |
+
# Building threat check
|
| 638 |
+
for bx, by in building_positions:
|
| 639 |
+
if _calculate_distance(fire.x, fire.y, bx, by) <= 2:
|
| 640 |
+
fire_analysis["building_threats"].append(fire_info)
|
| 641 |
+
break
|
| 642 |
+
|
| 643 |
+
# Coverage check
|
| 644 |
+
is_covered = False
|
| 645 |
+
for unit in units:
|
| 646 |
+
if _is_in_range(unit.x, unit.y, fire.x, fire.y, unit.unit_type.value):
|
| 647 |
+
is_covered = True
|
| 648 |
+
break
|
| 649 |
+
if not is_covered:
|
| 650 |
+
fire_analysis["uncovered"].append(fire_info)
|
| 651 |
+
|
| 652 |
+
# Process units
|
| 653 |
+
fire_positions = [(f.x, f.y) for f in fires]
|
| 654 |
+
for unit in units:
|
| 655 |
+
unit_info = {"x": unit.x, "y": unit.y, "type": unit.unit_type.value}
|
| 656 |
+
|
| 657 |
+
has_fire = False
|
| 658 |
+
for fx, fy in fire_positions:
|
| 659 |
+
if _is_in_range(unit.x, unit.y, fx, fy, unit.unit_type.value):
|
| 660 |
+
has_fire = True
|
| 661 |
+
break
|
| 662 |
+
|
| 663 |
+
if has_fire:
|
| 664 |
+
unit_analysis["effective"].append(unit_info)
|
| 665 |
+
else:
|
| 666 |
+
unit_analysis["idle"].append(unit_info)
|
| 667 |
+
|
| 668 |
+
# Calculate coverage ratio
|
| 669 |
+
coverage_ratio = 1.0
|
| 670 |
+
if fires:
|
| 671 |
+
covered_count = len(fires) - len(fire_analysis["uncovered"])
|
| 672 |
+
coverage_ratio = covered_count / len(fires)
|
| 673 |
+
|
| 674 |
+
return {
|
| 675 |
+
"status": "ok",
|
| 676 |
+
"building_integrity": round(world.building_integrity, 2),
|
| 677 |
+
"coverage_ratio": round(coverage_ratio, 2),
|
| 678 |
+
"fire_analysis": {
|
| 679 |
+
"total_fires": fire_analysis["total"],
|
| 680 |
+
"high_intensity_count": len(fire_analysis["high_intensity"]),
|
| 681 |
+
"high_intensity_fires": fire_analysis["high_intensity"],
|
| 682 |
+
"building_threat_count": len(fire_analysis["building_threats"]),
|
| 683 |
+
"building_threat_fires": fire_analysis["building_threats"],
|
| 684 |
+
"uncovered_count": len(fire_analysis["uncovered"]),
|
| 685 |
+
"uncovered_fires": fire_analysis["uncovered"]
|
| 686 |
+
},
|
| 687 |
+
"unit_analysis": {
|
| 688 |
+
"deployed": unit_analysis["total"],
|
| 689 |
+
"max_units": unit_analysis["max_units"],
|
| 690 |
+
"available_slots": unit_analysis["available_slots"],
|
| 691 |
+
"idle_count": len(unit_analysis["idle"]),
|
| 692 |
+
"idle_units": unit_analysis["idle"],
|
| 693 |
+
"effective_count": len(unit_analysis["effective"]),
|
| 694 |
+
"effective_units": unit_analysis["effective"]
|
| 695 |
+
},
|
| 696 |
+
"emoji_map": generate_emoji_map(engine)
|
| 697 |
+
}
|
| 698 |
+
|
| 699 |
+
|
| 700 |
+
def run_server():
|
| 701 |
+
"""Run the MCP server with stdio transport."""
|
| 702 |
+
mcp.run()
|
| 703 |
+
|
| 704 |
+
|
| 705 |
+
if __name__ == "__main__":
|
| 706 |
+
run_server()
|
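
The tools above (`reset_scenario`, `step_simulation`, `deploy_unit`, `move_unit`, `remove_unit`, and the analysis tools) are exposed through the FastMCP server started by `run_server()`. Below is a minimal sketch of driving them from the official `mcp` Python client over stdio. The tool names and argument shapes come from the file above; the launch command and the `fire_rescue_mcp.mcp_server` module path are assumptions about how this package is installed, so adjust them to your environment.

```python
import asyncio

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client


async def main() -> None:
    # Assumed launch command: run the server module over stdio.
    params = StdioServerParameters(
        command="python", args=["-m", "fire_rescue_mcp.mcp_server"]
    )
    async with stdio_client(params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()

            # Start a reproducible scenario, deploy one truck, then advance time.
            await session.call_tool(
                "reset_scenario", arguments={"seed": 42, "fire_count": 5}
            )
            await session.call_tool(
                "deploy_unit", arguments={"unit_type": "fire_truck", "x": 4, "y": 6}
            )
            state = await session.call_tool("step_simulation", arguments={"ticks": 3})
            print(state.content)  # serialized world state, including the emoji_map


asyncio.run(main())
```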
models.py
ADDED
|
@@ -0,0 +1,356 @@
|
| 1 |
+
"""
|
| 2 |
+
Fire-Rescue - Data Models
|
| 3 |
+
|
| 4 |
+
Defines core data structures for the fire rescue simulation.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from dataclasses import dataclass, field
|
| 8 |
+
from enum import Enum
|
| 9 |
+
from typing import Optional
|
| 10 |
+
import random
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class UnitType(str, Enum):
|
| 14 |
+
"""Types of firefighting units available."""
|
| 15 |
+
FIRE_TRUCK = "fire_truck"
|
| 16 |
+
HELICOPTER = "helicopter"
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class CellType(str, Enum):
|
| 20 |
+
"""Types of terrain cells in the grid."""
|
| 21 |
+
EMPTY = "empty"
|
| 22 |
+
BUILDING = "building"
|
| 23 |
+
FOREST = "forest"
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class SimulationStatus(str, Enum):
|
| 27 |
+
"""Status of the simulation."""
|
| 28 |
+
IDLE = "idle"
|
| 29 |
+
RUNNING = "running"
|
| 30 |
+
SUCCESS = "success"
|
| 31 |
+
FAIL = "fail"
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class FireLevel(str, Enum):
|
| 35 |
+
"""Initial fire intensity levels."""
|
| 36 |
+
LOW = "low"
|
| 37 |
+
MEDIUM = "medium"
|
| 38 |
+
HIGH = "high"
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
@dataclass
|
| 42 |
+
class Fire:
|
| 43 |
+
"""Represents a fire cell in the grid."""
|
| 44 |
+
x: int
|
| 45 |
+
y: int
|
| 46 |
+
intensity: float # 0.0 to 1.0
|
| 47 |
+
|
| 48 |
+
def to_dict(self) -> dict:
|
| 49 |
+
return {
|
| 50 |
+
"x": self.x,
|
| 51 |
+
"y": self.y,
|
| 52 |
+
"intensity": round(self.intensity, 2)
|
| 53 |
+
}
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
@dataclass
|
| 57 |
+
class Unit:
|
| 58 |
+
"""Represents a firefighting unit."""
|
| 59 |
+
id: str
|
| 60 |
+
unit_type: UnitType
|
| 61 |
+
owner: str # "player" or "ai"
|
| 62 |
+
x: int
|
| 63 |
+
y: int
|
| 64 |
+
cooldown: int = 0 # Ticks until next action
|
| 65 |
+
|
| 66 |
+
def to_dict(self) -> dict:
|
| 67 |
+
return {
|
| 68 |
+
"id": self.id,
|
| 69 |
+
"type": self.unit_type.value,
|
| 70 |
+
"owner": self.owner,
|
| 71 |
+
"x": self.x,
|
| 72 |
+
"y": self.y
|
| 73 |
+
}
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
@dataclass
|
| 77 |
+
class Event:
|
| 78 |
+
"""Represents a simulation event."""
|
| 79 |
+
tick: int
|
| 80 |
+
event_type: str
|
| 81 |
+
details: dict = field(default_factory=dict)
|
| 82 |
+
|
| 83 |
+
def to_dict(self) -> dict:
|
| 84 |
+
return {
|
| 85 |
+
"tick": self.tick,
|
| 86 |
+
"type": self.event_type,
|
| 87 |
+
**self.details
|
| 88 |
+
}
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
@dataclass
|
| 92 |
+
class Cell:
|
| 93 |
+
"""Represents a cell in the grid."""
|
| 94 |
+
x: int
|
| 95 |
+
y: int
|
| 96 |
+
cell_type: CellType
|
| 97 |
+
fire_intensity: float = 0.0 # 0.0 = no fire, 1.0 = max fire
|
| 98 |
+
damage: float = 0.0 # Accumulated damage (0.0 to 1.0)
|
| 99 |
+
|
| 100 |
+
def is_on_fire(self) -> bool:
|
| 101 |
+
return self.fire_intensity > 0.0
|
| 102 |
+
|
| 103 |
+
def is_destroyed(self) -> bool:
|
| 104 |
+
return self.damage >= 1.0
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
@dataclass
|
| 108 |
+
class WorldState:
|
| 109 |
+
"""
|
| 110 |
+
Represents the complete state of the simulation world.
|
| 111 |
+
Uses a 2D grid system.
|
| 112 |
+
"""
|
| 113 |
+
width: int
|
| 114 |
+
height: int
|
| 115 |
+
tick: int = 0
|
| 116 |
+
status: SimulationStatus = SimulationStatus.IDLE
|
| 117 |
+
|
| 118 |
+
# Grid cells
|
| 119 |
+
grid: list[list[Cell]] = field(default_factory=list)
|
| 120 |
+
|
| 121 |
+
# Units on the field
|
| 122 |
+
units: list[Unit] = field(default_factory=list)
|
| 123 |
+
|
| 124 |
+
# Recent events for logging
|
| 125 |
+
recent_events: list[Event] = field(default_factory=list)
|
| 126 |
+
|
| 127 |
+
# Global metrics
|
| 128 |
+
building_integrity: float = 1.0 # Average building health (0.0 to 1.0)
|
| 129 |
+
forest_burn_ratio: float = 0.0 # Percentage of forest burned (0.0 to 1.0)
|
| 130 |
+
|
| 131 |
+
# Configuration
|
| 132 |
+
max_ticks: int = 200
|
| 133 |
+
max_units: int = 10
|
| 134 |
+
seed: Optional[int] = None
|
| 135 |
+
|
| 136 |
+
# Building positions (for dynamic placement)
|
| 137 |
+
building_positions: list[tuple[int, int]] = field(default_factory=list)
|
| 138 |
+
|
| 139 |
+
# Unit ID counter
|
| 140 |
+
_unit_counter: int = 0
|
| 141 |
+
|
| 142 |
+
def initialize_grid(
|
| 143 |
+
self,
|
| 144 |
+
seed: Optional[int] = None,
|
| 145 |
+
fire_count: int = 4,
|
| 146 |
+
fire_intensity: float = 0.6,
|
| 147 |
+
building_count: int = 16
|
| 148 |
+
):
|
| 149 |
+
"""
|
| 150 |
+
Initialize the grid with terrain and initial fires.
|
| 151 |
+
|
| 152 |
+
Args:
|
| 153 |
+
seed: Random seed for reproducibility
|
| 154 |
+
fire_count: Number of initial fire points (1-25)
|
| 155 |
+
fire_intensity: Initial fire intensity (0.0-1.0)
|
| 156 |
+
building_count: Number of buildings to place (1-25)
|
| 157 |
+
"""
|
| 158 |
+
if seed is not None:
|
| 159 |
+
random.seed(seed)
|
| 160 |
+
self.seed = seed
|
| 161 |
+
|
| 162 |
+
# Clamp values to valid ranges
|
| 163 |
+
fire_count = max(1, min(25, fire_count))
|
| 164 |
+
fire_intensity = max(0.0, min(1.0, fire_intensity))
|
| 165 |
+
building_count = max(1, min(25, building_count))
|
| 166 |
+
|
| 167 |
+
# Generate random building positions (connected cluster)
|
| 168 |
+
self.building_positions = self._generate_building_positions(building_count)
|
| 169 |
+
building_set = set(self.building_positions)
|
| 170 |
+
|
| 171 |
+
self.grid = []
|
| 172 |
+
for y in range(self.height):
|
| 173 |
+
row = []
|
| 174 |
+
for x in range(self.width):
|
| 175 |
+
# Default to forest
|
| 176 |
+
cell_type = CellType.FOREST
|
| 177 |
+
|
| 178 |
+
# Place buildings at generated positions
|
| 179 |
+
if (x, y) in building_set:
|
| 180 |
+
cell_type = CellType.BUILDING
|
| 181 |
+
|
| 182 |
+
row.append(Cell(x=x, y=y, cell_type=cell_type))
|
| 183 |
+
self.grid.append(row)
|
| 184 |
+
|
| 185 |
+
# Place initial fires
|
| 186 |
+
fires_placed = 0
|
| 187 |
+
attempts = 0
|
| 188 |
+
max_attempts = 100
|
| 189 |
+
|
| 190 |
+
while fires_placed < fire_count and attempts < max_attempts:
|
| 191 |
+
x = random.randint(0, self.width - 1)
|
| 192 |
+
y = random.randint(0, self.height - 1)
|
| 193 |
+
|
| 194 |
+
# Only place fire on forest initially (not buildings)
|
| 195 |
+
cell = self.grid[y][x]
|
| 196 |
+
if cell.cell_type == CellType.FOREST and cell.fire_intensity == 0:
|
| 197 |
+
cell.fire_intensity = fire_intensity
|
| 198 |
+
fires_placed += 1
|
| 199 |
+
|
| 200 |
+
attempts += 1
|
| 201 |
+
|
| 202 |
+
def _generate_building_positions(self, count: int) -> list[tuple[int, int]]:
|
| 203 |
+
"""
|
| 204 |
+
Generate random building positions ensuring connectivity.
|
| 205 |
+
|
| 206 |
+
Buildings grow as a connected cluster from a random starting point.
|
| 207 |
+
At least 2 buildings will be adjacent (if count >= 2).
|
| 208 |
+
"""
|
| 209 |
+
if count <= 0:
|
| 210 |
+
return []
|
| 211 |
+
|
| 212 |
+
positions = []
|
| 213 |
+
|
| 214 |
+
# Start with a random position (avoid edges for better growth)
|
| 215 |
+
start_x = random.randint(2, self.width - 3)
|
| 216 |
+
start_y = random.randint(2, self.height - 3)
|
| 217 |
+
positions.append((start_x, start_y))
|
| 218 |
+
|
| 219 |
+
if count == 1:
|
| 220 |
+
return positions
|
| 221 |
+
|
| 222 |
+
# Grow the cluster by adding adjacent cells
|
| 223 |
+
directions = [(-1, 0), (1, 0), (0, -1), (0, 1)] # 4-directional adjacency
|
| 224 |
+
|
| 225 |
+
while len(positions) < count:
|
| 226 |
+
# Get all possible adjacent positions to existing buildings
|
| 227 |
+
candidates = set()
|
| 228 |
+
for (px, py) in positions:
|
| 229 |
+
for dx, dy in directions:
|
| 230 |
+
nx, ny = px + dx, py + dy
|
| 231 |
+
# Check bounds and not already a building
|
| 232 |
+
if 0 <= nx < self.width and 0 <= ny < self.height:
|
| 233 |
+
if (nx, ny) not in positions:
|
| 234 |
+
candidates.add((nx, ny))
|
| 235 |
+
|
| 236 |
+
if not candidates:
|
| 237 |
+
# No more valid positions (unlikely but handle it)
|
| 238 |
+
break
|
| 239 |
+
|
| 240 |
+
# Randomly select one candidate
|
| 241 |
+
new_pos = random.choice(list(candidates))
|
| 242 |
+
positions.append(new_pos)
|
| 243 |
+
|
| 244 |
+
return positions
|
| 245 |
+
|
| 246 |
+
def get_cell(self, x: int, y: int) -> Optional[Cell]:
|
| 247 |
+
"""Get cell at position, returns None if out of bounds."""
|
| 248 |
+
if 0 <= x < self.width and 0 <= y < self.height:
|
| 249 |
+
return self.grid[y][x]
|
| 250 |
+
return None
|
| 251 |
+
|
| 252 |
+
def get_fires(self) -> list[Fire]:
|
| 253 |
+
"""Get list of all active fires."""
|
| 254 |
+
fires = []
|
| 255 |
+
for row in self.grid:
|
| 256 |
+
for cell in row:
|
| 257 |
+
if cell.is_on_fire():
|
| 258 |
+
fires.append(Fire(x=cell.x, y=cell.y, intensity=cell.fire_intensity))
|
| 259 |
+
return fires
|
| 260 |
+
|
| 261 |
+
def generate_unit_id(self, unit_type: UnitType) -> str:
|
| 262 |
+
"""Generate a unique unit ID."""
|
| 263 |
+
self._unit_counter += 1
|
| 264 |
+
prefix = "truck" if unit_type == UnitType.FIRE_TRUCK else "heli"
|
| 265 |
+
return f"{prefix}_{self._unit_counter}"
|
| 266 |
+
|
| 267 |
+
def add_unit(self, unit_type: UnitType, x: int, y: int, source: str) -> Optional[Unit]:
|
| 268 |
+
"""Add a new unit to the world. Returns None if limit reached or position invalid."""
|
| 269 |
+
if len(self.units) >= self.max_units:
|
| 270 |
+
return None
|
| 271 |
+
|
| 272 |
+
if not (0 <= x < self.width and 0 <= y < self.height):
|
| 273 |
+
return None
|
| 274 |
+
|
| 275 |
+
# Check cell conditions - cannot deploy on fire or buildings
|
| 276 |
+
cell = self.get_cell(x, y)
|
| 277 |
+
if cell is None:
|
| 278 |
+
return None
|
| 279 |
+
|
| 280 |
+
# Cannot deploy on burning cells
|
| 281 |
+
if cell.fire_intensity > 0:
|
| 282 |
+
return None
|
| 283 |
+
|
| 284 |
+
# Cannot deploy on buildings
|
| 285 |
+
if cell.cell_type == CellType.BUILDING:
|
| 286 |
+
return None
|
| 287 |
+
|
| 288 |
+
unit = Unit(
|
| 289 |
+
id=self.generate_unit_id(unit_type),
|
| 290 |
+
unit_type=unit_type,
|
| 291 |
+
owner="player",
|
| 292 |
+
x=x,
|
| 293 |
+
y=y
|
| 294 |
+
)
|
| 295 |
+
self.units.append(unit)
|
| 296 |
+
|
| 297 |
+
# Record event
|
| 298 |
+
self.recent_events.append(Event(
|
| 299 |
+
tick=self.tick,
|
| 300 |
+
event_type="deploy_unit",
|
| 301 |
+
details={
|
| 302 |
+
"by": source,
|
| 303 |
+
"unit_type": unit_type.value,
|
| 304 |
+
"x": x,
|
| 305 |
+
"y": y
|
| 306 |
+
}
|
| 307 |
+
))
|
| 308 |
+
|
| 309 |
+
# Keep only recent events (last 20)
|
| 310 |
+
if len(self.recent_events) > 20:
|
| 311 |
+
self.recent_events = self.recent_events[-20:]
|
| 312 |
+
|
| 313 |
+
return unit
|
| 314 |
+
|
| 315 |
+
def calculate_metrics(self):
|
| 316 |
+
"""Recalculate global metrics (building damage ratio)."""
|
| 317 |
+
total_buildings = 0
|
| 318 |
+
burning_buildings = 0
|
| 319 |
+
|
| 320 |
+
for row in self.grid:
|
| 321 |
+
for cell in row:
|
| 322 |
+
if cell.cell_type == CellType.BUILDING:
|
| 323 |
+
total_buildings += 1
|
| 324 |
+
# Building is burning if it has fire on it
|
| 325 |
+
if cell.fire_intensity > 0:
|
| 326 |
+
burning_buildings += 1
|
| 327 |
+
|
| 328 |
+
# Building integrity: ratio of non-burning buildings
|
| 329 |
+
if total_buildings > 0:
|
| 330 |
+
self.building_integrity = (total_buildings - burning_buildings) / total_buildings
|
| 331 |
+
else:
|
| 332 |
+
self.building_integrity = 1.0
|
| 333 |
+
|
| 334 |
+
# Store total buildings for reference
|
| 335 |
+
self._total_buildings = total_buildings
|
| 336 |
+
self._burning_buildings = burning_buildings
|
| 337 |
+
|
| 338 |
+
# Forest burn ratio is no longer used (replaced by active fires count)
|
| 339 |
+
self.forest_burn_ratio = 0.0
|
| 340 |
+
|
| 341 |
+
def to_dict(self) -> dict:
|
| 342 |
+
"""Serialize world state to dictionary."""
|
| 343 |
+
return {
|
| 344 |
+
"tick": self.tick,
|
| 345 |
+
"status": self.status.value,
|
| 346 |
+
"width": self.width,
|
| 347 |
+
"height": self.height,
|
| 348 |
+
"fires": [f.to_dict() for f in self.get_fires()],
|
| 349 |
+
"units": [u.to_dict() for u in self.units],
|
| 350 |
+
"building_integrity": round(self.building_integrity, 2),
|
| 351 |
+
"forest_burn_ratio": round(self.forest_burn_ratio, 2),
|
| 352 |
+
"recent_events": [e.to_dict() for e in self.recent_events[-5:]],
|
| 353 |
+
"buildings": [{"x": x, "y": y} for x, y in self.building_positions],
|
| 354 |
+
"max_units": self.max_units,
|
| 355 |
+
}
|
| 356 |
+
|
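
For quick experiments, `models.py` can also be used directly without the MCP layer. The sketch below only calls functions defined in the file above (`initialize_grid`, `get_fires`, `add_unit`, `calculate_metrics`); the grid size and coordinates are illustrative, and `add_unit` may legitimately return `None` if the chosen tile is burning or occupied by a building.

```python
from models import UnitType, WorldState

# Build a small world and seed it for reproducibility.
world = WorldState(width=12, height=12)
world.initialize_grid(seed=7, fire_count=4, fire_intensity=0.6, building_count=8)

fires = world.get_fires()
print(f"{len(fires)} fires, {len(world.building_positions)} buildings")

# Deployment is refused on burning cells and buildings, so check the result.
unit = world.add_unit(UnitType.FIRE_TRUCK, x=0, y=0, source="player")
if unit is not None:
    print("deployed", unit.id, "at", (unit.x, unit.y))

world.calculate_metrics()
print("building integrity:", world.building_integrity)
```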
prompts.yaml
ADDED
|
@@ -0,0 +1,378 @@
|
| 1 |
+
# Fire-Rescue AI Agent Prompts Configuration
|
| 2 |
+
# Multi-stage analysis: ASSESS → PLAN → EXECUTE
|
| 3 |
+
#
|
| 4 |
+
# MCP Tools Available for Analysis:
|
| 5 |
+
# - analyze_coverage(): Get comprehensive tactical analysis
|
| 6 |
+
# - find_idle_units(): Find units not covering any fires
|
| 7 |
+
# - find_uncovered_fires(): Find fires with no unit coverage
|
| 8 |
+
# - find_building_threats(): Find fires threatening buildings (within 2 cells)
|
| 9 |
+
# - move_unit(source_x, source_y, target_x, target_y): Reposition a unit
|
| 10 |
+
# - deploy_unit(unit_type, x, y): Deploy new unit
|
| 11 |
+
# - remove_unit(x, y): Remove a unit (then deploy_unit elsewhere)
|
| 12 |
+
|
| 13 |
+
assess:
|
| 14 |
+
system: |
|
| 15 |
+
You are a SMART fire rescue tactical analyst. ASSESS the battlefield CAREFULLY!
|
| 16 |
+
|
| 17 |
+
🎯 WIN/LOSE CONDITIONS (CRITICAL):
|
| 18 |
+
- ❌ LOSE: Building integrity drops below 50%
|
| 19 |
+
- ✅ WIN: ALL fires extinguished
|
| 20 |
+
|
| 21 |
+
⚠️ KEY PRINCIPLE: SMART DEPLOYMENT!
|
| 22 |
+
- Analyze the situation FIRST before deciding how many units to deploy
|
| 23 |
+
- Don't deploy more than necessary - be efficient!
|
| 24 |
+
- BUT ensure all building threats and critical fires ARE covered!
|
| 25 |
+
- Balance: enough units to control the situation, not wasteful overkill
|
| 26 |
+
|
| 27 |
+
📡 MCP TOOLS AVAILABLE:
|
| 28 |
+
- `find_building_threats()` → 🏢 HIGHEST PRIORITY! Fires within 2 cells of buildings
|
| 29 |
+
- `find_uncovered_fires()` → Fires with NO unit coverage
|
| 30 |
+
- `find_idle_units()` → Units NOT covering fires (REPOSITION these first!)
|
| 31 |
+
- `analyze_coverage()` → Comprehensive tactical analysis
|
| 32 |
+
|
| 33 |
+
CRITICAL ANALYSIS PRIORITIES (IN ORDER):
|
| 34 |
+
|
| 35 |
+
1. **🏢 BUILDINGS FIRST - ALWAYS! (HIGHEST PRIORITY)**
|
| 36 |
+
- Use `find_building_threats()` to identify fires near buildings
|
| 37 |
+
- Building-adjacent fires cause DIRECT damage → triggers GAME OVER!
|
| 38 |
+
- ANY fire within 2 cells of a building = EMERGENCY
|
| 39 |
+
- These MUST be covered - non-negotiable!
|
| 40 |
+
|
| 41 |
+
2. **🔥 FIRE COVERAGE - SMART ANALYSIS**
|
| 42 |
+
- Use `find_uncovered_fires()` to find fires needing units
|
| 43 |
+
- Count how many fires are uncovered vs how many units we have
|
| 44 |
+
- Consider: Can we reposition idle units FIRST before deploying new?
|
| 45 |
+
- Fire Truck: Covers 1 tile outward from its center (all 8 neighbors), power 40% — BEST for building threats & high intensity!
|
| 46 |
+
- Helicopter: Covers 2 tiles outward from its center, power 25% — great for area coverage & HARD-TO-REACH fires!
|
| 47 |
+
|
| 48 |
+
🚁 **HELICOPTER ADVANTAGE - USE WHEN:**
|
| 49 |
+
- Fire is SURROUNDED by buildings/obstacles (trucks can't reach center)
|
| 50 |
+
- Fire persists despite multiple trucks nearby (need longer range)
|
| 51 |
+
- Fire is in a confined area where trucks can't get close enough
|
| 52 |
+
- A helicopter covering 2 tiles outward can reach fires that trucks (1-tile reach) cannot!
|
| 53 |
+
|
| 54 |
+
3. **🔄 UNIT OPTIMIZATION (DO THIS FIRST!)**
|
| 55 |
+
- Use `find_idle_units()` to find wasted units
|
| 56 |
+
- IDLE units should be repositioned BEFORE deploying new units
|
| 57 |
+
- This is FREE optimization - doesn't use deployment slots!
|
| 58 |
+
|
| 59 |
+
4. **Threat Level Determination**
|
| 60 |
+
- CRITICAL: ANY uncovered building threat OR building integrity <60%
|
| 61 |
+
- HIGH: 2+ building threats OR building integrity <80%
|
| 62 |
+
- MODERATE: 3+ uncovered fires OR any high-intensity fire
|
| 63 |
+
- LOW: All fires covered AND buildings safe
|
| 64 |
+
|
| 65 |
+
You MUST respond in valid JSON format only.
|
| 66 |
+
|
| 67 |
+
output_format: |
|
| 68 |
+
{
|
| 69 |
+
"fire_analysis": {
|
| 70 |
+
"total_fires": <number>,
|
| 71 |
+
"high_intensity_count": <number>,
|
| 72 |
+
"high_intensity_positions": [[x,y], ...],
|
| 73 |
+
"building_threat_count": <number>,
|
| 74 |
+
"building_threat_positions": [[x,y], ...],
|
| 75 |
+
"uncovered_fire_count": <number>,
|
| 76 |
+
"uncovered_fire_positions": [[x,y], ...]
|
| 77 |
+
},
|
| 78 |
+
"unit_analysis": {
|
| 79 |
+
"total_units": <number>,
|
| 80 |
+
"effective_count": <number>,
|
| 81 |
+
"ineffective_count": <number>,
|
| 82 |
+
"ineffective_positions": [[x,y,type], ...],
|
| 83 |
+
"coverage_ratio": <0.0-1.0>
|
| 84 |
+
},
|
| 85 |
+
"threat_level": "CRITICAL" | "HIGH" | "MODERATE" | "LOW",
|
| 86 |
+
"summary": "Brief 1-2 sentence assessment"
|
| 87 |
+
}
|
| 88 |
+
|
| 89 |
+
plan:
|
| 90 |
+
system: |
|
| 91 |
+
You are a SMART fire rescue tactical planner. Analyze the situation and deploy EFFICIENTLY!
|
| 92 |
+
|
| 93 |
+
🎯 WIN/LOSE CONDITIONS:
|
| 94 |
+
- ❌ LOSE: Building integrity drops below 50%
|
| 95 |
+
- ✅ WIN: ALL fires extinguished
|
| 96 |
+
|
| 97 |
+
⚠️ SMART DEPLOYMENT PRINCIPLE:
|
| 98 |
+
- Analyze the battlefield FIRST, then decide how many units to deploy
|
| 99 |
+
- DON'T deploy more units than necessary - be efficient!
|
| 100 |
+
- BUT don't be too conservative either - cover all threats!
|
| 101 |
+
- Key question: "How many units do I NEED to control this situation?"
|
| 102 |
+
- MASS FIRE RULE: If uncovered fires >=4 or the board is lighting up fast, DEFAULT to deploying extra units immediately (fill available slots if needed!) before the fire spreads further.
|
| 103 |
+
|
| 104 |
+
📡 MCP TOOLS:
|
| 105 |
+
- `find_building_threats()` → 🏢 HIGHEST PRIORITY! Must cover these!
|
| 106 |
+
- `find_uncovered_fires()` → Fires needing coverage
|
| 107 |
+
- `find_idle_units()` → Units to reposition first
|
| 108 |
+
- `analyze_coverage()` → Full tactical view
|
| 109 |
+
|
| 110 |
+
STRATEGIC PRIORITIES (STRICT ORDER):
|
| 111 |
+
|
| 112 |
+
1. **🏢 BUILDINGS = SURVIVAL (ALWAYS FIRST!)**
|
| 113 |
+
- Building threats = EXISTENTIAL DANGER
|
| 114 |
+
- Cover ALL building-adjacent fires BEFORE anything else!
|
| 115 |
+
- Always deploy or reposition units hugging these burning buildings (adjacent or the closest legal tile) before touching any other fire.
|
| 116 |
+
- Use Fire Trucks for building threats (40% power = faster extinguish)
|
| 117 |
+
|
| 118 |
+
2. **🔄 REPOSITION IDLE UNITS FIRST**
|
| 119 |
+
- Idle units are WASTED resources - use them!
|
| 120 |
+
- Move EVERY idle unit to stand 1 cell away from an active fire (prioritize building threats!)
|
| 121 |
+
- Never leave a unit sitting on an empty cell with no nearby fire.
|
| 122 |
+
- If idle_units > 0 AND uncovered_fires > 0 → REPOSITION!
|
| 123 |
+
- If idle_units > uncovered_fires, immediately REMOVE the surplus units (`remove_unit`) so their slots can be redeployed efficiently.
|
| 124 |
+
- If a unit truly has no reachable fire, REMOVE it (`remove_unit`) so the slot can be redeployed immediately.
|
| 125 |
+
|
| 126 |
+
3. **🚀 DEPLOY NEW UNITS - SMART CALCULATION**
|
| 127 |
+
- DON'T just deploy all available slots blindly!
|
| 128 |
+
- Calculate: how many NEW units do we ACTUALLY need?
|
| 129 |
+
- Consider: fires will spread, so plan ahead a bit
|
| 130 |
+
- If there are MANY fires (>=6) or multiple clusters, PRIORITIZE rapid deployments over slow repositioning—getting more units on the board fast is the safest play.
|
| 131 |
+
|
| 132 |
+
🔧 ADDITIONAL STRATEGIC GUIDELINES:
|
| 133 |
+
- Maintain a balanced distribution of fire trucks across the grid. Do NOT cluster everything in one sector; ensure each quadrant can cover new hotspots quickly.
|
| 134 |
+
- Re-evaluate the need for helicopters each cycle. Keep at least one deployment slot available for rapid aerial response when intensity spikes.
|
| 135 |
+
- Preserve a small reserve of idle (but ready) units so you can react instantly to unexpected ignitions. Never exhaust every slot unless absolutely necessary.
|
| 136 |
+
- Set up continuous monitoring for fires emerging outside current coverage. The moment a new fire appears beyond your protected zone, queue a reposition or deployment to cover it immediately.
|
| 137 |
+
|
| 138 |
+
SMART DEPLOY COUNT CALCULATION:
|
| 139 |
+
```
|
| 140 |
+
# Step 1: Calculate base need
|
| 141 |
+
uncovered_count = find_uncovered_fires().uncovered_count
|
| 142 |
+
building_threats = find_building_threats().uncovered_threats
|
| 143 |
+
|
| 144 |
+
# Step 2: Account for idle units we can reposition
|
| 145 |
+
idle_count = find_idle_units().idle_count
|
| 146 |
+
fires_after_reposition = max(0, uncovered_count - idle_count)
|
| 147 |
+
|
| 148 |
+
# Step 3: Calculate deploy count based on situation
|
| 149 |
+
IF building_threats > 0:
|
| 150 |
+
# Building emergency! Deploy enough to cover ALL building threats
|
| 151 |
+
deploy_count = max(building_threats, fires_after_reposition)
|
| 152 |
+
ELIF uncovered_count <= 2:
|
| 153 |
+
# Few fires - deploy just enough to cover them
|
| 154 |
+
deploy_count = fires_after_reposition
|
| 155 |
+
ELIF uncovered_count <= 5:
|
| 156 |
+
# Moderate fires - deploy to cover + 1 buffer
|
| 157 |
+
deploy_count = min(fires_after_reposition + 1, available_slots)
|
| 158 |
+
ELSE:
|
| 159 |
+
# Many fires - go aggressive and add extra units
|
| 160 |
+
deploy_count = fires_after_reposition + 2 # base coverage
|
| 161 |
+
if uncovered_count >= 6:
|
| 162 |
+
deploy_count += 1 # extra buffer for surging fires
|
| 163 |
+
if uncovered_count >= 8:
|
| 164 |
+
deploy_count = available_slots # fill every slot for maximum suppression
|
| 165 |
+
|
| 166 |
+
# Final: never exceed available slots
|
| 167 |
+
deploy_count = min(deploy_count, available_slots)
|
| 168 |
+
```
|
| 169 |
+
|
| 170 |
+
Strategy Selection:
|
| 171 |
+
- "balanced": Both reposition idle AND deploy new units
|
| 172 |
+
- "optimize_existing": Have idle units, reposition them first
|
| 173 |
+
- "deploy_new": No idle units, need to deploy new ones
|
| 174 |
+
- "monitor": ALL fires are covered - situation under control
|
| 175 |
+
- "helicopter_fallback": Fire persists despite truck coverage → USE HELICOPTER!
|
| 176 |
+
|
| 177 |
+
🚁 **HELICOPTER FALLBACK STRATEGY:**
|
| 178 |
+
- If a fire has 2+ trucks nearby but is STILL not being extinguished → USE HELICOPTER!
|
| 179 |
+
- If fire is SURROUNDED by buildings/obstacles → Trucks can't reach center → USE HELICOPTER!
|
| 180 |
+
- A helicopter covering 2 tiles outward can reach fires in confined areas that trucks (1-tile reach) cannot
|
| 181 |
+
- Don't keep deploying trucks if they're not working - REMOVE ineffective trucks, DEPLOY helicopters!
|
| 182 |
+
- Use remove_unit(x, y) to remove ineffective truck, then deploy_unit("helicopter", x, y) at better position
|
| 183 |
+
- Free every idle unit slot ASAP: REMOVE anything that cannot be placed adjacent to an active fire.
|
| 184 |
+
|
| 185 |
+
POSITIONING MANDATE (APPLIES TO ALL ACTIONS):
|
| 186 |
+
- Always choose the closest possible legal tile to the target fire, ideally immediately adjacent.
|
| 187 |
+
- Fires threatening buildings must have units hugging them before any non-building fires receive coverage.
|
| 188 |
+
|
| 189 |
+
You MUST respond in valid JSON format only.
|
| 190 |
+
|
| 191 |
+
output_format: |
|
| 192 |
+
{
|
| 193 |
+
"strategy": "optimize_existing" | "balanced" | "deploy_new" | "monitor" | "helicopter_fallback",
|
| 194 |
+
"reasoning": "Explain why this strategy - mention MCP tool results (idle units, uncovered fires)!",
|
| 195 |
+
"deploy_count": <number>,
|
| 196 |
+
"reposition_needed": true | false,
|
| 197 |
+
"units_to_reposition": [[source_x, source_y, unit_type], ...],
|
| 198 |
+
"priority_fire_indices": [0, 1, 2],
|
| 199 |
+
"use_helicopter": true | false,
|
| 200 |
+
"helicopter_reason": "Why helicopter is needed (e.g., fire surrounded, trucks ineffective)"
|
| 201 |
+
}
|
| 202 |
+
|
| 203 |
+
execute:
|
| 204 |
+
system: |
|
| 205 |
+
You are a SMART fire rescue executor. Generate EFFICIENT deployment actions!
|
| 206 |
+
|
| 207 |
+
🎯 WIN/LOSE CONDITIONS:
|
| 208 |
+
- ❌ LOSE: Building integrity drops below 50%
|
| 209 |
+
- ✅ WIN: ALL fires extinguished
|
| 210 |
+
|
| 211 |
+
⚠️ SMART EXECUTION PRINCIPLE:
|
| 212 |
+
- Follow the plan's deploy_count - don't exceed it unless building emergency!
|
| 213 |
+
- Prioritize building threats ALWAYS - these are non-negotiable
|
| 214 |
+
- Generate actions based on ACTUAL NEED, not maximum possible
|
| 215 |
+
- Quality over quantity - each action should have clear purpose
|
| 216 |
+
|
| 217 |
+
📡 MCP TOOLS:
|
| 218 |
+
- `move_unit(source_x, source_y, target_x, target_y)` → Reposition unit
|
| 219 |
+
- `deploy_unit(unit_type, x, y)` → Deploy new unit
|
| 220 |
+
- `remove_unit(x, y)` → Remove a unit (frees deployment slot)
|
| 221 |
+
- `find_idle_units()` → Get idle unit positions
|
| 222 |
+
- `find_uncovered_fires()` → Get fires needing coverage
|
| 223 |
+
- `find_building_threats()` → Get building-adjacent fires (HIGHEST PRIORITY!)
|
| 224 |
+
|
| 225 |
+
EXECUTION PRIORITIES (STRICT ORDER):
|
| 226 |
+
|
| 227 |
+
1. **🏢 BUILDINGS FIRST - ALWAYS! (NON-NEGOTIABLE)**
|
| 228 |
+
- Use `find_building_threats()` to get fires near buildings
|
| 229 |
+
- These fires cause DIRECT building damage → GAME OVER risk!
|
| 230 |
+
- ALWAYS address building threats BEFORE other fires
|
| 231 |
+
- Use fire_truck for building threats (40% power = fastest!) and park them as close as legally possible (adjacent preferred)
|
| 232 |
+
|
| 233 |
+
2. **🔄 REPOSITION IDLE UNITS FIRST**
|
| 234 |
+
- Use `find_idle_units()` to find wasted units
|
| 235 |
+
- Move idle units to building threats FIRST, then uncovered fires, always ending adjacent (1 cell away) to the target fire
|
| 236 |
+
- NEVER park a unit on a cell without a nearby fire; either move it or remove it
|
| 237 |
+
- If idle_units > uncovered_fires, immediately remove the excess idle units so the board only holds active responders
|
| 238 |
+
- This is FREE optimization - doesn't use deployment slots!
|
| 239 |
+
|
| 240 |
+
3. **🚀 DEPLOY NEW UNITS (FOLLOW THE PLAN)**
|
| 241 |
+
- Deploy according to plan's deploy_count
|
| 242 |
+
- Prioritize: building threats > high intensity > uncovered fires
|
| 243 |
+
- Only exceed deploy_count if there are UNCOVERED building threats!
|
| 244 |
+
- When picking target tiles, always choose the closest available cell hugging the fire; do not leave gaps if adjacency is possible
|
| 245 |
+
|
| 246 |
+
🔧 EXECUTION SAFETY CHECKS:
|
| 247 |
+
- After every action, verify trucks remain spread across the map; if a region is underserved, schedule a reposition to rebalance coverage.
|
| 248 |
+
- Keep at least one deployment slot or removable unit available for helicopter insertion. Before issuing the last deployment, confirm no aerial response will be needed in the next tick.
|
| 249 |
+
- Maintain a micro-reserve of idle-but-ready units; mark them in reasoning so the planner knows they are intentional rapid-response assets.
|
| 250 |
+
- Continuously poll for fires outside the current coverage bubble. If any appear, immediately include a move or deploy action in the recommendation list to contain it.
|
| 251 |
+
|
| 252 |
+
UNIT SELECTION:
|
| 253 |
+
- Fire Truck 🚒: Covers 1 tile outward from its center, Power 40% - USE FOR BUILDING THREATS & high intensity!
|
| 254 |
+
- Helicopter 🚁: Covers 2 tiles outward from its center, Power 25% - good for area coverage & distant fires
|
| 255 |
+
|
| 256 |
+
🚁 **CRITICAL: WHEN TO USE HELICOPTER INSTEAD OF TRUCK:**
|
| 257 |
+
- Fire SURROUNDED by buildings/units → Trucks can't reach center! USE HELICOPTER!
|
| 258 |
+
- Fire PERSISTS despite multiple nearby trucks → Need helicopter's longer range!
|
| 259 |
+
- Fire in CONFINED area where trucks are blocked → Helicopter can reach from farther
|
| 260 |
+
- If same fire remains uncovered after 2+ ticks with trucks nearby → SWITCH TO HELICOPTER!
|
| 261 |
+
- Covering 2 tiles outward (helicopter) beats 1 tile (truck) = use heli when trucks can't reach!
|
| 262 |
+
- If a truck can't reach any fire, remove it immediately instead of leaving it idle on the board
|
| 263 |
+
|
| 264 |
+
RECOMMENDATION COUNT LOGIC:
|
| 265 |
+
- recommendations = reposition_count + deploy_count
|
| 266 |
+
- If uncovered building threats exist: add extra for EACH uncovered threat
|
| 267 |
+
- Cap at 4 for UI display
|
| 268 |
+
|
| 269 |
+
CRITICAL RULES:
|
| 270 |
+
1. Deploy 1-2 cells ADJACENT to fire (not ON the fire)
|
| 271 |
+
2. Cannot deploy ON buildings
|
| 272 |
+
3. Grid boundaries: x=[0, width-1], y=[0, height-1]
|
| 273 |
+
4. Each action should have a clear reason tied to a specific fire
|
| 274 |
+
5. Any idle unit left on an empty cell must be removed so the slot can be redeployed near fire, especially when idle > fires
|
| 275 |
+
|
| 276 |
+
MCP TOOL USAGE:
|
| 277 |
+
```python
|
| 278 |
+
# Move idle unit to cover building threat
|
| 279 |
+
move_unit(source_x=2, source_y=3, target_x=6, target_y=5)
|
| 280 |
+
|
| 281 |
+
# Deploy fire truck near building threat (PRIORITY!)
|
| 282 |
+
deploy_unit(unit_type="fire_truck", x=5, y=3)
|
| 283 |
+
|
| 284 |
+
# Remove ineffective truck, then deploy helicopter at better position
|
| 285 |
+
remove_unit(x=4, y=5) # Remove truck that can't reach fire
|
| 286 |
+
deploy_unit(unit_type="helicopter", x=6, y=7) # Deploy helicopter with better range
|
| 287 |
+
```
|
| 288 |
+
|
| 289 |
+
You MUST respond in valid JSON format only.
|
| 290 |
+
|
| 291 |
+
output_format: |
|
| 292 |
+
{
|
| 293 |
+
"recommendations": [
|
| 294 |
+
{
|
| 295 |
+
"action": "move",
|
| 296 |
+
"unit_type": "fire_truck" | "helicopter",
|
| 297 |
+
"source": {"x": <current_x>, "y": <current_y>},
|
| 298 |
+
"target": {"x": <new_x>, "y": <new_y>},
|
| 299 |
+
"reason": "Moving idle unit to cover uncovered fire at (fx, fy)",
|
| 300 |
+
"mcp_call": "move_unit(source_x=<x>, source_y=<y>, target_x=<x>, target_y=<y>)"
|
| 301 |
+
},
|
| 302 |
+
{
|
| 303 |
+
"action": "deploy",
|
| 304 |
+
"unit_type": "fire_truck" | "helicopter",
|
| 305 |
+
"target": {"x": <number>, "y": <number>},
|
| 306 |
+
"reason": "Deploying new unit to cover fire at (fx, fy)",
|
| 307 |
+
"mcp_call": "deploy_unit(unit_type='<type>', x=<x>, y=<y>)"
|
| 308 |
+
},
|
| 309 |
+
{
|
| 310 |
+
"action": "remove",
|
| 311 |
+
"position": {"x": <number>, "y": <number>},
|
| 312 |
+
"unit_type": "fire_truck" | "helicopter",
|
| 313 |
+
"reason": "Removing ineffective truck to redeploy as helicopter elsewhere",
|
| 314 |
+
"mcp_call": "remove_unit(x=<x>, y=<y>)"
|
| 315 |
+
}
|
| 316 |
+
],
|
| 317 |
+
"summary": "Brief tactical summary"
|
| 318 |
+
}
|
| 319 |
+
|
| 320 |
+
summary:
|
| 321 |
+
system: |
|
| 322 |
+
You are a tactical mission summarizer. After the AI completes Assessment → Planning → Execution, create a concise Stage 4 summary for this cycle.
|
| 323 |
+
|
| 324 |
+
Use the provided data:
|
| 325 |
+
- Tick number and mission status
|
| 326 |
+
- Stage 1 assessment metrics (fires, building threats, integrity, idle units)
|
| 327 |
+
- Stage 2 plan strategy and reasoning
|
| 328 |
+
- Stage 3 execution recommendations (moves/deployments/removals)
|
| 329 |
+
|
| 330 |
+
Requirements:
|
| 331 |
+
1. Produce a single headline that captures the cycle outcome and threat posture.
|
| 332 |
+
2. List 2-4 key highlights (what worked, positive outcomes, metrics that improved).
|
| 333 |
+
3. List 1-3 risks or issues that still need attention.
|
| 334 |
+
4. Suggest 1-3 next-focus items for the upcoming cycle (actionable guidance).
|
| 335 |
+
5. Keep bullet text concise (max 18 words each).
|
| 336 |
+
6. Output strictly in English JSON.
|
| 337 |
+
output_format: |
|
| 338 |
+
{
|
| 339 |
+
"headline": "Concise summary of the cycle outcome",
|
| 340 |
+
"threat_level": "CRITICAL | HIGH | MODERATE | LOW",
|
| 341 |
+
"key_highlights": ["Highlight 1", "Highlight 2"],
|
| 342 |
+
"risks": ["Risk 1", "Risk 2"],
|
| 343 |
+
"next_focus": ["Action item 1", "Action item 2"]
|
| 344 |
+
}
|
| 345 |
+
|
| 346 |
+
after_action:
|
| 347 |
+
system: |
|
| 348 |
+
You are the mission debrief analyst for the Fire-Rescue simulator.
|
| 349 |
+
Produce an AFTER-ACTION REPORT in English when the mission ends (victory or failure).
|
| 350 |
+
|
| 351 |
+
Context provided:
|
| 352 |
+
- Outcome summary (success/fail, integrity, remaining fires, ticks)
|
| 353 |
+
- Stage 4 cycle summaries (headlines, highlights, risks, next focus)
|
| 354 |
+
- Mission status snapshot (units deployed, tick count, integrity metrics)
|
| 355 |
+
|
| 356 |
+
Requirements:
|
| 357 |
+
1. Cite concrete evidence from the provided summaries and mission metrics for every bullet.
|
| 358 |
+
2. Summarize what went WELL (Strengths), what needs IMPROVEMENT, and ACTIONABLE follow-ups (Next Actions).
|
| 359 |
+
3. Each bullet should reference specific fires, units, or buildings when possible.
|
| 360 |
+
4. If any summary data is missing, acknowledge the gap but still deliver useful insights based on available data.
|
| 361 |
+
5. Respond entirely in English.
|
| 362 |
+
output_format: |
|
| 363 |
+
{
|
| 364 |
+
"summary": "One-sentence conclusion highlighting the decisive factor.",
|
| 365 |
+
"strengths": ["Strength 1", "Strength 2"],
|
| 366 |
+
"improvements": ["Improvement 1", "Improvement 2"],
|
| 367 |
+
"next_actions": ["Actionable recommendation 1", "Actionable recommendation 2"]
|
| 368 |
+
}
|
| 369 |
+
|
| 370 |
+
# Model configuration
|
| 371 |
+
# Using HuggingFace Inference Provider with openai/gpt-oss-120b
|
| 372 |
+
# This model has top-notch tool calling capabilities
|
| 373 |
+
model:
|
| 374 |
+
default: "openai/gpt-oss-120b"
|
| 375 |
+
# max_tokens for HuggingFace API (automatically converted from max_completion_tokens)
|
| 376 |
+
# Increased to 10000 to avoid truncation of complex JSON responses
|
| 377 |
+
max_completion_tokens: 10000
|
| 378 |
+
# Note: temperature not set - using model default
|
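
`prompts.yaml` is consumed by the agent (`agent.py`, not shown in this excerpt), but the structure is plain YAML, so it can be loaded and turned into chat messages with `pyyaml`. The sketch below is one possible way to assemble the ASSESS-stage prompt; the user-message placeholder and the exact message layout are assumptions, not the agent's actual implementation.

```python
import yaml

# Load the prompt configuration shipped with the repo.
with open("prompts.yaml", "r", encoding="utf-8") as f:
    prompts = yaml.safe_load(f)

assess = prompts["assess"]
system_prompt = (
    assess["system"]
    + "\n\nRespond using this JSON schema:\n"
    + assess["output_format"]
)

messages = [
    {"role": "system", "content": system_prompt},
    # Hypothetical user turn: feed in the snapshot from get_world_state().
    {"role": "user", "content": "Current world state: <output of get_world_state()>"},
]

model_name = prompts["model"]["default"]                 # "openai/gpt-oss-120b"
max_tokens = prompts["model"]["max_completion_tokens"]   # 10000
print(model_name, max_tokens, len(messages))
```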
pyproject.toml
ADDED
|
@@ -0,0 +1,31 @@
|
| 1 |
+
[project]
|
| 2 |
+
name = "fire-rescue-mcp"
|
| 3 |
+
version = "0.2.0"
|
| 4 |
+
description = "Fire rescue simulation with MCP integration and LLM advisor"
|
| 5 |
+
readme = "README.md"
|
| 6 |
+
requires-python = ">=3.10"
|
| 7 |
+
dependencies = [
|
| 8 |
+
"mcp>=1.0.0",
|
| 9 |
+
"openai>=1.0.0",
|
| 10 |
+
"pydantic>=2.0.0",
|
| 11 |
+
"python-dotenv>=1.0.0",
|
| 12 |
+
"gradio>=6.0.1",
|
| 13 |
+
"pyyaml>=6.0.0",
|
| 14 |
+
]
|
| 15 |
+
|
| 16 |
+
[project.scripts]
|
| 17 |
+
fire-rescue-mcp = "fire_rescue_mcp.mcp_server:run_server"
|
| 18 |
+
|
| 19 |
+
[build-system]
|
| 20 |
+
requires = ["hatchling"]
|
| 21 |
+
build-backend = "hatchling.build"
|
| 22 |
+
|
| 23 |
+
[tool.hatch.build.targets.wheel]
|
| 24 |
+
packages = ["fire_rescue_mcp"]
|
| 25 |
+
|
| 26 |
+
[tool.uv]
|
| 27 |
+
dev-dependencies = [
|
| 28 |
+
"pytest>=8.0.0",
|
| 29 |
+
"pytest-asyncio>=0.23.0",
|
| 30 |
+
]
|
| 31 |
+
|
requirements.txt
ADDED
|
@@ -0,0 +1,8 @@
|
| 1 |
+
# Fire-Rescue MCP Dependencies
|
| 2 |
+
mcp>=1.0.0
|
| 3 |
+
openai>=1.0.0
|
| 4 |
+
pydantic>=2.0.0
|
| 5 |
+
python-dotenv>=1.0.0
|
| 6 |
+
gradio>=6.0.1
|
| 7 |
+
pyyaml>=6.0.0
|
| 8 |
+
|
restart.sh
ADDED
|
@@ -0,0 +1,12 @@
|
#!/bin/bash
# Fire-Rescue App Restart Script

echo "🔄 Stopping existing app..."
pkill -f "python app.py" 2>/dev/null

# Wait a moment for the process to terminate
sleep 1

echo "🚀 Starting Fire-Rescue App..."
cd "$(dirname "$0")" && uv run --active python app.py
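For completeness, the same stop-wait-relaunch flow the shell script implements can be expressed in Python; this sketch is illustrative only (the repo ships the shell script above) and assumes `pkill` and `uv` are on the PATH, just as the script does.

```python
# Python equivalent of restart.sh (illustrative only).
import subprocess
import time
from pathlib import Path

# Stop any running "python app.py" process; ignore the "no process matched" case.
subprocess.run(["pkill", "-f", "python app.py"], check=False)
time.sleep(1)  # give the old process a moment to exit
# Relaunch the app from the project directory via uv, mirroring the script.
subprocess.run(["uv", "run", "--active", "python", "app.py"], cwd=Path(__file__).parent)
```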
service.py
ADDED
|
@@ -0,0 +1,1995 @@
| 1 |
+
"""
|
| 2 |
+
Fire-Rescue MCP - Simulation Service
|
| 3 |
+
|
| 4 |
+
Background service that manages simulation loop and LLM advisor evaluations.
|
| 5 |
+
Designed for integration with Gradio and HTTP API endpoints.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import asyncio
|
| 9 |
+
import concurrent.futures
|
| 10 |
+
import html
|
| 11 |
+
import json
|
| 12 |
+
import threading
|
| 13 |
+
import time
|
| 14 |
+
from dataclasses import dataclass, field
|
| 15 |
+
from datetime import datetime
|
| 16 |
+
from typing import Callable, Optional
|
| 17 |
+
|
| 18 |
+
from agent import (
|
| 19 |
+
AdvisorAgent,
|
| 20 |
+
AdvisorResponse,
|
| 21 |
+
AfterActionReport,
|
| 22 |
+
AssessmentResult,
|
| 23 |
+
PlanResult,
|
| 24 |
+
CycleSummary,
|
| 25 |
+
)
|
| 26 |
+
from models import SimulationStatus, CellType
|
| 27 |
+
from simulation import SimulationEngine
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def generate_emoji_map(engine: SimulationEngine) -> str:
|
| 31 |
+
"""
|
| 32 |
+
Generate an emoji-based visualization of the current world state.
|
| 33 |
+
Matches Gradio UI: 🌲Forest 🏢Building 🔥Fire 💨Smoke 🚒Truck 🚁Heli
|
| 34 |
+
"""
|
| 35 |
+
if engine.world is None:
|
| 36 |
+
return "No map available"
|
| 37 |
+
|
| 38 |
+
world = engine.world
|
| 39 |
+
|
| 40 |
+
# Create unit position lookup
|
| 41 |
+
unit_positions = {}
|
| 42 |
+
for unit in world.units:
|
| 43 |
+
key = (unit.x, unit.y)
|
| 44 |
+
if key not in unit_positions:
|
| 45 |
+
unit_positions[key] = []
|
| 46 |
+
unit_positions[key].append(unit.unit_type.value)
|
| 47 |
+
|
| 48 |
+
# Build the map with coordinates
|
| 49 |
+
lines = []
|
| 50 |
+
|
| 51 |
+
# Header with X coordinates
|
| 52 |
+
header = " " + "".join(f"{x:2}" for x in range(world.width))
|
| 53 |
+
lines.append(header)
|
| 54 |
+
|
| 55 |
+
for y in range(world.height):
|
| 56 |
+
row_chars = []
|
| 57 |
+
for x in range(world.width):
|
| 58 |
+
cell = world.grid[y][x]
|
| 59 |
+
pos = (x, y)
|
| 60 |
+
|
| 61 |
+
# Priority: Units > Fire > Terrain
|
| 62 |
+
if pos in unit_positions:
|
| 63 |
+
if "fire_truck" in unit_positions[pos]:
|
| 64 |
+
row_chars.append("🚒")
|
| 65 |
+
else:
|
| 66 |
+
row_chars.append("🚁")
|
| 67 |
+
elif cell.fire_intensity > 0:
|
| 68 |
+
if cell.fire_intensity >= 0.1:
|
| 69 |
+
row_chars.append("🔥")
|
| 70 |
+
else:
|
| 71 |
+
row_chars.append("💨")
|
| 72 |
+
else:
|
| 73 |
+
if cell.cell_type == CellType.BUILDING:
|
| 74 |
+
row_chars.append("🏢")
|
| 75 |
+
elif cell.cell_type == CellType.FOREST:
|
| 76 |
+
row_chars.append("🌲")
|
| 77 |
+
else:
|
| 78 |
+
row_chars.append("⬜")
|
| 79 |
+
|
| 80 |
+
lines.append(f"{y:2} " + "".join(row_chars))
|
| 81 |
+
|
| 82 |
+
return "\n".join(lines)
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
@dataclass
|
| 86 |
+
class LogEntry:
|
| 87 |
+
"""A single log entry for the simulation."""
|
| 88 |
+
timestamp: str
|
| 89 |
+
tick: int
|
| 90 |
+
event_type: str # "advisor", "deploy", "status", "error"
|
| 91 |
+
message: str
|
| 92 |
+
details: Optional[dict] = None
|
| 93 |
+
|
| 94 |
+
def to_dict(self) -> dict:
|
| 95 |
+
return {
|
| 96 |
+
"timestamp": self.timestamp,
|
| 97 |
+
"tick": self.tick,
|
| 98 |
+
"event_type": self.event_type,
|
| 99 |
+
"message": self.message,
|
| 100 |
+
"details": self.details
|
| 101 |
+
}
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
@dataclass
|
| 105 |
+
class SimulationService:
|
| 106 |
+
"""
|
| 107 |
+
Service that manages the simulation lifecycle and LLM advisor.
|
| 108 |
+
|
| 109 |
+
Provides:
|
| 110 |
+
- Background simulation loop
|
| 111 |
+
- Periodic LLM advisor evaluations
|
| 112 |
+
- Thread-safe state access
|
| 113 |
+
- Event logging
|
| 114 |
+
"""
|
| 115 |
+
|
| 116 |
+
# Configuration
|
| 117 |
+
tick_interval: float = 1.0 # Seconds between simulation ticks (slower pace)
|
| 118 |
+
advisor_interval: int = 10 # Ticks between advisor evaluations
|
| 119 |
+
|
| 120 |
+
# Internal state
|
| 121 |
+
engine: SimulationEngine = field(default_factory=SimulationEngine)
|
| 122 |
+
advisor: AdvisorAgent = field(default_factory=AdvisorAgent)
|
| 123 |
+
|
| 124 |
+
# Runtime state
|
| 125 |
+
_running: bool = False
|
| 126 |
+
_thread: Optional[threading.Thread] = None
|
| 127 |
+
_lock: threading.Lock = field(default_factory=threading.Lock)
|
| 128 |
+
|
| 129 |
+
# Logs and recommendations
|
| 130 |
+
_logs: list[LogEntry] = field(default_factory=list)
|
| 131 |
+
_latest_recommendations: Optional[AdvisorResponse] = None
|
| 132 |
+
_on_update: Optional[Callable] = None # Callback for UI updates
|
| 133 |
+
|
| 134 |
+
# Thinking state for UI display - Progressive stage rendering
|
| 135 |
+
_is_thinking: bool = False
|
| 136 |
+
_thinking_start_tick: int = 0
|
| 137 |
+
_current_stage: int = 0 # 0=idle, 1=assess, 2=plan, 3=execute, 4=summary, 5=complete
|
| 138 |
+
_current_cycle_messages: list = field(default_factory=list) # Messages for current cycle
|
| 139 |
+
|
| 140 |
+
# Advisor message history for chatbot display
|
| 141 |
+
_advisor_history: list[dict] = field(default_factory=list)
|
| 142 |
+
_cycle_summaries: list[dict] = field(default_factory=list)
|
| 143 |
+
_metrics_history: list[dict] = field(default_factory=list)
|
| 144 |
+
_threat_history: list[dict] = field(default_factory=list)
|
| 145 |
+
_action_history: list[dict] = field(default_factory=list)
|
| 146 |
+
_player_actions: list[dict] = field(default_factory=list)
|
| 147 |
+
|
| 148 |
+
# Advisor call control
|
| 149 |
+
_advisor_running: bool = False # Prevent concurrent advisor calls
|
| 150 |
+
advisor_timeout: float = 30.0 # Timeout for AI calls (seconds)
|
| 151 |
+
advisor_max_retries: int = 3 # Max retry attempts
|
| 152 |
+
|
| 153 |
+
# Game result popup control
|
| 154 |
+
_result_shown: bool = False # Track if game result popup has been shown
|
| 155 |
+
_result_dismissed: bool = False # Track if player dismissed the result popup
|
| 156 |
+
_result_report: Optional[AfterActionReport] = None
|
| 157 |
+
_result_report_status: str = "idle"
|
| 158 |
+
_result_report_error: str = ""
|
| 159 |
+
_last_result_signature: str = ""
|
| 160 |
+
_last_result_payload_signature: str = ""
|
| 161 |
+
|
| 162 |
+
# Auto-execute AI recommendations
|
| 163 |
+
_auto_execute: bool = True # Whether to automatically execute AI recommendations
|
| 164 |
+
_executed_recommendations: set = field(default_factory=set) # Track executed recommendations
|
| 165 |
+
|
| 166 |
+
# Simulation loop state (preserved across pause/resume)
|
| 167 |
+
_tick_count: int = 0 # Current tick count in simulation loop
|
| 168 |
+
_advisor_first_run: bool = True # Whether first advisor run has happened
|
| 169 |
+
|
| 170 |
+
# Change tracking for UI optimization (prevents unnecessary re-renders)
|
| 171 |
+
_last_grid_hash: str = "" # Hash of grid state (fires, units, buildings)
|
| 172 |
+
_last_advisor_signature: tuple = field(default_factory=tuple) # Last advisor chat signature
|
| 173 |
+
_last_history_signature: tuple = field(default_factory=tuple) # Last history chat signature
|
| 174 |
+
_last_event_log: str = "" # Last event log text
|
| 175 |
+
_last_button_states: tuple = () # Last (start_enabled, pause_enabled)
|
| 176 |
+
_last_result_state: str = "" # Last result popup state ("", "success", "fail")
|
| 177 |
+
|
| 178 |
+
def __post_init__(self):
|
| 179 |
+
self._lock = threading.Lock()
|
| 180 |
+
self._logs = []
|
| 181 |
+
self._result_shown = False
|
| 182 |
+
self._result_dismissed = False
|
| 183 |
+
self._reset_after_action_report_locked()
|
| 184 |
+
self._is_thinking = False
|
| 185 |
+
self._current_stage = 0 # Progressive stage tracking
|
| 186 |
+
self._current_cycle_messages = [] # Current cycle messages
|
| 187 |
+
self._advisor_history = []
|
| 188 |
+
self._cycle_summaries = []
|
| 189 |
+
self._metrics_history = []
|
| 190 |
+
self._threat_history = []
|
| 191 |
+
self._action_history = []
|
| 192 |
+
self._player_actions = []
|
| 193 |
+
self._advisor_running = False
|
| 194 |
+
self._auto_execute = True
|
| 195 |
+
self._executed_recommendations = set()
|
| 196 |
+
self._tick_count = 0
|
| 197 |
+
self._advisor_first_run = True
|
| 198 |
+
# Change tracking initialization
|
| 199 |
+
self._last_grid_hash = ""
|
| 200 |
+
self._last_advisor_signature = ()
|
| 201 |
+
self._last_history_signature = ()
|
| 202 |
+
self._last_event_log = ""
|
| 203 |
+
self._last_button_states = (True, False)
|
| 204 |
+
self._last_result_state = ""
|
| 205 |
+
|
| 206 |
+
def start(
|
| 207 |
+
self,
|
| 208 |
+
seed: Optional[int] = None,
|
| 209 |
+
fire_count: int = 4,
|
| 210 |
+
fire_intensity: float = 0.6,
|
| 211 |
+
building_count: int = 16,
|
| 212 |
+
max_units: int = 10,
|
| 213 |
+
on_update: Optional[Callable] = None
|
| 214 |
+
) -> dict:
|
| 215 |
+
"""
|
| 216 |
+
Start a new simulation.
|
| 217 |
+
|
| 218 |
+
Args:
|
| 219 |
+
seed: Random seed for reproducibility
|
| 220 |
+
fire_count: Number of initial fire points (1-25)
|
| 221 |
+
fire_intensity: Initial fire intensity (0.0-1.0)
|
| 222 |
+
building_count: Number of buildings to place (1-25)
|
| 223 |
+
on_update: Callback function called on state changes
|
| 224 |
+
|
| 225 |
+
Returns:
|
| 226 |
+
Initial world state
|
| 227 |
+
"""
|
| 228 |
+
# First stop any existing simulation
|
| 229 |
+
thread = None
|
| 230 |
+
with self._lock:
|
| 231 |
+
if self._running:
|
| 232 |
+
self._running = False
|
| 233 |
+
thread = self._thread
|
| 234 |
+
self._thread = None
|
| 235 |
+
|
| 236 |
+
# Wait for thread outside lock
|
| 237 |
+
if thread and thread.is_alive():
|
| 238 |
+
thread.join(timeout=2.0)
|
| 239 |
+
|
| 240 |
+
# Now start fresh with lock
|
| 241 |
+
with self._lock:
|
| 242 |
+
# Reset state
|
| 243 |
+
self._logs = []
|
| 244 |
+
self._latest_recommendations = None
|
| 245 |
+
self._advisor_history = [] # Clear advisor history
|
| 246 |
+
self._cycle_summaries = [] # Clear summaries
|
| 247 |
+
self._current_cycle_messages = [] # Clear current cycle
|
| 248 |
+
self._current_stage = 0 # Reset stage
|
| 249 |
+
self._is_thinking = False
|
| 250 |
+
self._result_shown = False # Reset result popup flag
|
| 251 |
+
self._result_dismissed = False # Reset dismissed flag
|
| 252 |
+
self._reset_after_action_report_locked()
|
| 253 |
+
self._executed_recommendations = set() # Clear executed recommendations
|
| 254 |
+
self._tick_count = 0 # Reset tick count
|
| 255 |
+
self._advisor_first_run = True # Reset first run flag
|
| 256 |
+
self._on_update = on_update
|
| 257 |
+
self._metrics_history = []
|
| 258 |
+
self._threat_history = []
|
| 259 |
+
self._action_history = []
|
| 260 |
+
self._player_actions = []
|
| 261 |
+
# Reset change tracking
|
| 262 |
+
self._last_grid_hash = ""
|
| 263 |
+
self._last_advisor_signature = ()
|
| 264 |
+
self._last_history_signature = ()
|
| 265 |
+
self._last_event_log = ""
|
| 266 |
+
self._last_button_states = (True, False)
|
| 267 |
+
self._last_result_state = ""
|
| 268 |
+
|
| 269 |
+
# Initialize simulation
|
| 270 |
+
self.engine.reset(
|
| 271 |
+
seed=seed,
|
| 272 |
+
fire_count=fire_count,
|
| 273 |
+
fire_intensity=fire_intensity,
|
| 274 |
+
building_count=building_count,
|
| 275 |
+
max_units=max_units
|
| 276 |
+
)
|
| 277 |
+
self._record_tick_metrics_locked(self.engine.get_state())
|
| 278 |
+
|
| 279 |
+
# Log start event
|
| 280 |
+
self._add_log("status", f"Simulation started: {fire_count} fires, {building_count} buildings, max {max_units} units")
|
| 281 |
+
|
| 282 |
+
# Start background loop
|
| 283 |
+
self._running = True
|
| 284 |
+
self._thread = threading.Thread(target=self._simulation_loop, daemon=True)
|
| 285 |
+
self._thread.start()
|
| 286 |
+
|
| 287 |
+
return self._compose_state_locked()
|
| 288 |
+
|
| 289 |
+
def resume(self, on_update: Optional[Callable] = None) -> dict:
|
| 290 |
+
"""
|
| 291 |
+
Resume a paused simulation.
|
| 292 |
+
|
| 293 |
+
Returns:
|
| 294 |
+
Current world state, or error if no paused simulation exists
|
| 295 |
+
"""
|
| 296 |
+
with self._lock:
|
| 297 |
+
# Check if there's a paused simulation to resume
|
| 298 |
+
if self.engine.world is None:
|
| 299 |
+
return {"status": "error", "message": "No simulation to resume"}
|
| 300 |
+
|
| 301 |
+
# Check if already running
|
| 302 |
+
if self._running:
|
| 303 |
+
return {"status": "error", "message": "Simulation is already running"}
|
| 304 |
+
|
| 305 |
+
# Check if simulation has ended
|
| 306 |
+
current_status = self.engine.world.status
|
| 307 |
+
if current_status in [SimulationStatus.SUCCESS, SimulationStatus.FAIL]:
|
| 308 |
+
return {"status": "error", "message": f"Simulation has ended ({current_status.value})"}
|
| 309 |
+
|
| 310 |
+
self._on_update = on_update
|
| 311 |
+
|
| 312 |
+
# Set status back to running
|
| 313 |
+
self.engine.world.status = SimulationStatus.RUNNING
|
| 314 |
+
|
| 315 |
+
# Log resume event
|
| 316 |
+
self._add_log("status", "Simulation resumed")
|
| 317 |
+
|
| 318 |
+
# Start background loop
|
| 319 |
+
self._running = True
|
| 320 |
+
self._thread = threading.Thread(target=self._simulation_loop, daemon=True)
|
| 321 |
+
self._thread.start()
|
| 322 |
+
|
| 323 |
+
return self._compose_state_locked()
|
| 324 |
+
|
| 325 |
+
def pause(self) -> dict:
|
| 326 |
+
"""Pause the simulation (can be resumed later)."""
|
| 327 |
+
# First set flag and get thread reference (with lock)
|
| 328 |
+
with self._lock:
|
| 329 |
+
self._running = False
|
| 330 |
+
thread = self._thread
|
| 331 |
+
self._thread = None
|
| 332 |
+
|
| 333 |
+
# Wait for thread outside lock to avoid deadlock
|
| 334 |
+
if thread and thread.is_alive():
|
| 335 |
+
thread.join(timeout=2.0)
|
| 336 |
+
|
| 337 |
+
with self._lock:
|
| 338 |
+
self._add_log("status", "Simulation paused")
|
| 339 |
+
# Keep status as RUNNING so we know it can be resumed
|
| 340 |
+
# (IDLE means no game, SUCCESS/FAIL means game ended)
|
| 341 |
+
if self.engine.world:
|
| 342 |
+
return self._compose_state_locked()
|
| 343 |
+
return {"status": "idle", "after_action_report": self._get_after_action_report_payload_locked()}
|
| 344 |
+
|
| 345 |
+
def is_paused(self) -> bool:
|
| 346 |
+
"""Check if simulation is paused (has world but not running)."""
|
| 347 |
+
with self._lock:
|
| 348 |
+
if self.engine.world is None:
|
| 349 |
+
return False
|
| 350 |
+
# Paused = has world, not running, and status is still RUNNING
|
| 351 |
+
return (
|
| 352 |
+
not self._running
|
| 353 |
+
and self.engine.world.status == SimulationStatus.RUNNING
|
| 354 |
+
)
|
| 355 |
+
|
| 356 |
+
def _stop_internal(self):
|
| 357 |
+
"""Internal stop - sets flag only (must be called with lock held)."""
|
| 358 |
+
self._running = False
|
| 359 |
+
|
| 360 |
+
def reset(
|
| 361 |
+
self,
|
| 362 |
+
seed: Optional[int] = None,
|
| 363 |
+
fire_count: int = 4,
|
| 364 |
+
fire_intensity: float = 0.6,
|
| 365 |
+
building_count: int = 16,
|
| 366 |
+
max_units: int = 10
|
| 367 |
+
) -> dict:
|
| 368 |
+
"""Reset simulation without starting the loop."""
|
| 369 |
+
# First stop any running simulation
|
| 370 |
+
thread = None
|
| 371 |
+
with self._lock:
|
| 372 |
+
if self._running:
|
| 373 |
+
self._running = False
|
| 374 |
+
thread = self._thread
|
| 375 |
+
self._thread = None
|
| 376 |
+
|
| 377 |
+
# Wait for thread outside lock to avoid deadlock
|
| 378 |
+
if thread and thread.is_alive():
|
| 379 |
+
thread.join(timeout=2.0)
|
| 380 |
+
|
| 381 |
+
# Now reset with lock
|
| 382 |
+
with self._lock:
|
| 383 |
+
self._logs = []
|
| 384 |
+
self._latest_recommendations = None
|
| 385 |
+
self._advisor_history = [] # Clear advisor history
|
| 386 |
+
self._cycle_summaries = [] # Clear summaries
|
| 387 |
+
self._current_cycle_messages = [] # Clear current cycle
|
| 388 |
+
self._current_stage = 0 # Reset stage
|
| 389 |
+
self._is_thinking = False
|
| 390 |
+
self._result_shown = False # Reset result popup flag
|
| 391 |
+
self._result_dismissed = False # Reset dismissed flag
|
| 392 |
+
self._reset_after_action_report_locked()
|
| 393 |
+
self._executed_recommendations = set() # Clear executed recommendations
|
| 394 |
+
self._tick_count = 0 # Reset tick count
|
| 395 |
+
self._advisor_first_run = True # Reset first run flag
|
| 396 |
+
self._metrics_history = []
|
| 397 |
+
self._threat_history = []
|
| 398 |
+
self._action_history = []
|
| 399 |
+
# Reset change tracking
|
| 400 |
+
self._last_grid_hash = ""
|
| 401 |
+
self._last_advisor_signature = ()
|
| 402 |
+
self._last_history_signature = ()
|
| 403 |
+
self._last_event_log = ""
|
| 404 |
+
self._last_button_states = (True, False)
|
| 405 |
+
self._last_result_state = ""
|
| 406 |
+
|
| 407 |
+
self.engine.reset(
|
| 408 |
+
seed=seed,
|
| 409 |
+
fire_count=fire_count,
|
| 410 |
+
fire_intensity=fire_intensity,
|
| 411 |
+
building_count=building_count,
|
| 412 |
+
max_units=max_units
|
| 413 |
+
)
|
| 414 |
+
self._record_tick_metrics_locked(self.engine.get_state())
|
| 415 |
+
self._add_log("status", f"Simulation reset: {fire_count} fires, {building_count} buildings, max {max_units} units")
|
| 416 |
+
|
| 417 |
+
return self._compose_state_locked()
|
| 418 |
+
|
| 419 |
+
def get_state(self) -> dict:
|
| 420 |
+
"""Get current world state (thread-safe)."""
|
| 421 |
+
with self._lock:
|
| 422 |
+
if self.engine.world is None:
|
| 423 |
+
return {
|
| 424 |
+
"status": "idle",
|
| 425 |
+
"message": "No simulation running",
|
| 426 |
+
"after_action_report": self._get_after_action_report_payload_locked(),
|
| 427 |
+
}
|
| 428 |
+
return self._compose_state_locked()
|
| 429 |
+
|
| 430 |
+
def _compose_state_locked(self) -> dict:
|
| 431 |
+
"""Attach after-action report payload to the current engine state."""
|
| 432 |
+
state = self.engine.get_state()
|
| 433 |
+
state["after_action_report"] = self._get_after_action_report_payload_locked()
|
| 434 |
+
return state
|
| 435 |
+
|
| 436 |
+
def _reset_after_action_report_locked(self):
|
| 437 |
+
"""Clear cached after-action report data."""
|
| 438 |
+
self._result_report = None
|
| 439 |
+
self._result_report_status = "idle"
|
| 440 |
+
self._result_report_error = ""
|
| 441 |
+
self._last_result_signature = ""
|
| 442 |
+
self._last_result_payload_signature = ""
|
| 443 |
+
|
| 444 |
+
def _record_cycle_summary(self, tick: int, cycle_summary: CycleSummary, state_snapshot: Optional[dict] = None):
|
| 445 |
+
"""Store Stage 4 summary for each advisor cycle, including metrics for charts."""
|
| 446 |
+
state_snapshot = state_snapshot or {}
|
| 447 |
+
metrics = {
|
| 448 |
+
"tick": tick,
|
| 449 |
+
"fires": len(state_snapshot.get("fires", [])),
|
| 450 |
+
"units": len(state_snapshot.get("units", [])),
|
| 451 |
+
"max_units": state_snapshot.get("max_units", 0),
|
| 452 |
+
"building_integrity": state_snapshot.get("building_integrity", 1.0),
|
| 453 |
+
}
|
| 454 |
+
entry = {
|
| 455 |
+
"tick": tick,
|
| 456 |
+
"headline": cycle_summary.headline,
|
| 457 |
+
"threat_level": cycle_summary.threat_level,
|
| 458 |
+
"key_highlights": cycle_summary.key_highlights,
|
| 459 |
+
"risks": cycle_summary.risks,
|
| 460 |
+
"next_focus": cycle_summary.next_focus,
|
| 461 |
+
"metrics": metrics,
|
| 462 |
+
}
|
| 463 |
+
self._cycle_summaries.append(entry)
|
| 464 |
+
if len(self._cycle_summaries) > 30:
|
| 465 |
+
self._cycle_summaries = self._cycle_summaries[-30:]
|
| 466 |
+
threat_value_map = {"CRITICAL": 4, "HIGH": 3, "MODERATE": 2, "LOW": 1}
|
| 467 |
+
value = threat_value_map.get(cycle_summary.threat_level.upper(), 0) if cycle_summary.threat_level else 0
|
| 468 |
+
self._threat_history.append({
|
| 469 |
+
"tick": tick,
|
| 470 |
+
"threat_level": cycle_summary.threat_level,
|
| 471 |
+
"value": value,
|
| 472 |
+
})
|
| 473 |
+
if len(self._threat_history) > 120:
|
| 474 |
+
self._threat_history = self._threat_history[-120:]
|
| 475 |
+
|
| 476 |
+
def _record_tick_metrics_locked(self, state: Optional[dict]):
|
| 477 |
+
"""Capture per-tick metrics for after-action chart visualization."""
|
| 478 |
+
if not state:
|
| 479 |
+
return
|
| 480 |
+
entry = {
|
| 481 |
+
"tick": state.get("tick", 0),
|
| 482 |
+
"fires": len(state.get("fires", [])),
|
| 483 |
+
"units": len(state.get("units", [])),
|
| 484 |
+
"max_units": state.get("max_units") or getattr(self.engine.world, "max_units", 0) if self.engine.world else 0,
|
| 485 |
+
"building_integrity": state.get("building_integrity", 1.0),
|
| 486 |
+
}
|
| 487 |
+
self._metrics_history.append(entry)
|
| 488 |
+
if len(self._metrics_history) > 600:
|
| 489 |
+
self._metrics_history = self._metrics_history[-600:]
|
| 490 |
+
|
| 491 |
+
def _record_action_breakdown(self, tick: int, deploy: int, move: int, replace: int):
|
| 492 |
+
"""Track how many actions the AI recommended per tick."""
|
| 493 |
+
self._action_history.append({
|
| 494 |
+
"tick": tick,
|
| 495 |
+
"deploy": deploy,
|
| 496 |
+
"move": move,
|
| 497 |
+
"replace": replace,
|
| 498 |
+
})
|
| 499 |
+
if len(self._action_history) > 200:
|
| 500 |
+
self._action_history = self._action_history[-200:]
|
| 501 |
+
|
| 502 |
+
def _record_player_action(self, action: str, description: str, metadata: Optional[dict] = None):
|
| 503 |
+
"""Track player-driven interventions (manual deploy/remove/fire)."""
|
| 504 |
+
if metadata is None:
|
| 505 |
+
metadata = {}
|
| 506 |
+
tick = 0
|
| 507 |
+
if self.engine and self.engine.world:
|
| 508 |
+
tick = getattr(self.engine.world, "tick", 0)
|
| 509 |
+
else:
|
| 510 |
+
tick = self._tick_count
|
| 511 |
+
entry = {
|
| 512 |
+
"tick": tick,
|
| 513 |
+
"timestamp": datetime.utcnow().isoformat(),
|
| 514 |
+
"action": action,
|
| 515 |
+
"description": description,
|
| 516 |
+
"details": dict(metadata),
|
| 517 |
+
}
|
| 518 |
+
self._player_actions.append(entry)
|
| 519 |
+
if len(self._player_actions) > 200:
|
| 520 |
+
self._player_actions = self._player_actions[-200:]
|
| 521 |
+
|
| 522 |
+
def _build_player_action_context(self) -> dict:
|
| 523 |
+
"""Summarize player-driven interventions for after-action reporting."""
|
| 524 |
+
actions = list(self._player_actions)
|
| 525 |
+
counts = {"deploy_unit": 0, "remove_unit": 0, "add_fire": 0}
|
| 526 |
+
action_meta = {
|
| 527 |
+
"deploy_unit": ("🚒", "Deployed units"),
|
| 528 |
+
"remove_unit": ("♻️", "Removed units"),
|
| 529 |
+
"add_fire": ("🔥", "Ignited fires"),
|
| 530 |
+
}
|
| 531 |
+
for entry in actions:
|
| 532 |
+
action_type = entry.get("action")
|
| 533 |
+
if action_type in counts:
|
| 534 |
+
counts[action_type] += 1
|
| 535 |
+
total = sum(counts.values())
|
| 536 |
+
if total:
|
| 537 |
+
parts = [f"Player executed {total} manual action(s)."]
|
| 538 |
+
if counts["deploy_unit"]:
|
| 539 |
+
parts.append(f"🚒 Deploy: {counts['deploy_unit']} time(s)")
|
| 540 |
+
if counts["remove_unit"]:
|
| 541 |
+
parts.append(f"♻️ Remove: {counts['remove_unit']} time(s)")
|
| 542 |
+
if counts["add_fire"]:
|
| 543 |
+
parts.append(f"🔥 Ignite: {counts['add_fire']} time(s)")
|
| 544 |
+
summary = " ".join(parts)
|
| 545 |
+
else:
|
| 546 |
+
summary = "Player has not manually deployed, removed, or ignited anything this run."
|
| 547 |
+
recent_entries = list(reversed(actions[-6:]))
|
| 548 |
+
recent = [
|
| 549 |
+
{
|
| 550 |
+
"tick": entry.get("tick", 0),
|
| 551 |
+
"description": entry.get("description", ""),
|
| 552 |
+
"action": entry.get("action"),
|
| 553 |
+
"timestamp": entry.get("timestamp"),
|
| 554 |
+
}
|
| 555 |
+
for entry in recent_entries
|
| 556 |
+
]
|
| 557 |
+
markdown_lines = [summary]
|
| 558 |
+
if recent:
|
| 559 |
+
markdown_lines.append("")
|
| 560 |
+
markdown_lines.append("**Recent player actions**")
|
| 561 |
+
for entry in recent:
|
| 562 |
+
icon = action_meta.get(entry["action"], ("📍", ""))[0]
|
| 563 |
+
markdown_lines.append(f"- Tick {entry['tick']} · {icon} {entry['description']}")
|
| 564 |
+
markdown = "\n".join(markdown_lines).strip()
|
| 565 |
+
return {
|
| 566 |
+
"total": total,
|
| 567 |
+
"counts": counts,
|
| 568 |
+
"recent": recent,
|
| 569 |
+
"summary": summary,
|
| 570 |
+
"markdown": markdown,
|
| 571 |
+
}
|
| 572 |
+
|
| 573 |
+
def _get_after_action_report_payload_locked(self) -> dict:
|
| 574 |
+
"""Serialize after-action report state for UI consumption."""
|
| 575 |
+
payload: dict = {"status": self._result_report_status}
|
| 576 |
+
if self._result_report_status == "ready" and self._result_report:
|
| 577 |
+
payload["report"] = self._result_report.to_dict()
|
| 578 |
+
elif self._result_report_status == "error":
|
| 579 |
+
payload["error"] = self._result_report_error or "Unknown error"
|
| 580 |
+
return payload
|
| 581 |
+
|
| 582 |
+
def should_show_result(self) -> tuple[bool, bool]:
|
| 583 |
+
"""
|
| 584 |
+
Check if game result should be shown.
|
| 585 |
+
|
| 586 |
+
Returns:
|
| 587 |
+
tuple of (should_show, is_first_time)
|
| 588 |
+
- should_show: True if popup should be displayed
|
| 589 |
+
- is_first_time: True if this is the first time showing (needs render)
|
| 590 |
+
"""
|
| 591 |
+
with self._lock:
|
| 592 |
+
# Don't show if player already dismissed it
|
| 593 |
+
if self._result_dismissed:
|
| 594 |
+
return (False, False)
|
| 595 |
+
|
| 596 |
+
state = self.engine.get_state() if self.engine.world else {"status": "idle"}
|
| 597 |
+
status = state.get("status", "idle")
|
| 598 |
+
|
| 599 |
+
if status in ["success", "fail"]:
|
| 600 |
+
# Check if this is the first time showing
|
| 601 |
+
is_first_time = not self._result_shown
|
| 602 |
+
if is_first_time:
|
| 603 |
+
self._result_shown = True
|
| 604 |
+
return (True, is_first_time)
|
| 605 |
+
|
| 606 |
+
return (False, False)
|
| 607 |
+
|
| 608 |
+
def dismiss_result(self):
|
| 609 |
+
"""Dismiss the game result popup (called when player clicks it)."""
|
| 610 |
+
with self._lock:
|
| 611 |
+
self._result_dismissed = True
|
| 612 |
+
|
| 613 |
+
def get_result_status(self) -> str:
|
| 614 |
+
"""Get current result status."""
|
| 615 |
+
with self._lock:
|
| 616 |
+
if self.engine.world is None:
|
| 617 |
+
return "idle"
|
| 618 |
+
state = self.engine.get_state()
|
| 619 |
+
return state.get("status", "idle")
|
| 620 |
+
|
| 621 |
+
def _prepare_after_action_context_locked(self, outcome: str, state: dict) -> Optional[dict]:
|
| 622 |
+
"""Build context for after-action report generation."""
|
| 623 |
+
signature = f"{outcome}_{state.get('tick', 0)}_{len(self._logs)}"
|
| 624 |
+
if signature == self._last_result_signature:
|
| 625 |
+
return None
|
| 626 |
+
context = self._build_after_action_context_locked(outcome, state)
|
| 627 |
+
self._result_report_status = "pending"
|
| 628 |
+
self._result_report = None
|
| 629 |
+
self._result_report_error = ""
|
| 630 |
+
self._last_result_signature = signature
|
| 631 |
+
self._last_result_payload_signature = ""
|
| 632 |
+
return context
|
| 633 |
+
|
| 634 |
+
def _build_after_action_context_locked(self, outcome: str, state: dict) -> dict:
|
| 635 |
+
"""Assemble transcripts and metrics for the after-action LLM call."""
|
| 636 |
+
outcome_label = "Victory" if outcome == "success" else "Defeat"
|
| 637 |
+
fires_remaining = len(state.get("fires", []))
|
| 638 |
+
units_active = len(state.get("units", []))
|
| 639 |
+
building_integrity = state.get("building_integrity", 1.0)
|
| 640 |
+
integrity_percent = f"{building_integrity:.0%}" if isinstance(building_integrity, (int, float)) else "N/A"
|
| 641 |
+
|
| 642 |
+
stage_messages = self._current_cycle_messages[:] if self._current_cycle_messages else self._advisor_history[-3:]
|
| 643 |
+
transcripts = {"assessment_md": "", "planning_md": "", "execution_md": ""}
|
| 644 |
+
for msg in stage_messages:
|
| 645 |
+
content = msg.get("content", "")
|
| 646 |
+
if not content:
|
| 647 |
+
continue
|
| 648 |
+
lowered = content.lower()
|
| 649 |
+
if "stage 1" in lowered and not transcripts["assessment_md"]:
|
| 650 |
+
transcripts["assessment_md"] = content
|
| 651 |
+
elif "stage 2" in lowered and not transcripts["planning_md"]:
|
| 652 |
+
transcripts["planning_md"] = content
|
| 653 |
+
elif "stage 3" in lowered and not transcripts["execution_md"]:
|
| 654 |
+
transcripts["execution_md"] = content
|
| 655 |
+
|
| 656 |
+
summary_text = (
|
| 657 |
+
f"Tick {state.get('tick', 0)} · Fires {fires_remaining} · "
|
| 658 |
+
f"Units {units_active}/{state.get('max_units', 0)} · Building Integrity {integrity_percent}"
|
| 659 |
+
)
|
| 660 |
+
|
| 661 |
+
if self._metrics_history:
|
| 662 |
+
chart_points = [dict(point) for point in self._metrics_history]
|
| 663 |
+
else:
|
| 664 |
+
chart_points = []
|
| 665 |
+
for entry in self._cycle_summaries:
|
| 666 |
+
metrics = entry.get("metrics") or {}
|
| 667 |
+
if not metrics:
|
| 668 |
+
continue
|
| 669 |
+
chart_points.append({
|
| 670 |
+
"tick": metrics.get("tick", entry.get("tick")),
|
| 671 |
+
"fires": metrics.get("fires", 0),
|
| 672 |
+
"units": metrics.get("units", 0),
|
| 673 |
+
"max_units": metrics.get("max_units", state.get("max_units", 0)),
|
| 674 |
+
"building_integrity": metrics.get("building_integrity", building_integrity),
|
| 675 |
+
})
|
| 676 |
+
|
| 677 |
+
context = {
|
| 678 |
+
"outcome": outcome,
|
| 679 |
+
"outcome_label": outcome_label,
|
| 680 |
+
"tick": state.get("tick", 0),
|
| 681 |
+
"fires_remaining": fires_remaining,
|
| 682 |
+
"units_active": units_active,
|
| 683 |
+
"building_integrity_percent": integrity_percent,
|
| 684 |
+
"summary_text": summary_text,
|
| 685 |
+
"state_snapshot": {
|
| 686 |
+
"tick": state.get("tick", 0),
|
| 687 |
+
"status": state.get("status", ""),
|
| 688 |
+
"building_integrity": building_integrity,
|
| 689 |
+
"max_units": state.get("max_units", 0),
|
| 690 |
+
},
|
| 691 |
+
"cycle_summaries": list(self._cycle_summaries),
|
| 692 |
+
"chart_points": chart_points,
|
| 693 |
+
"threat_history": list(self._threat_history),
|
| 694 |
+
"action_history": list(self._action_history),
|
| 695 |
+
}
|
| 696 |
+
player_actions_context = self._build_player_action_context()
|
| 697 |
+
context.update(transcripts)
|
| 698 |
+
context["player_actions_context"] = player_actions_context
|
| 699 |
+
context["player_actions_md"] = player_actions_context.get("markdown", "")
|
| 700 |
+
return context
|
| 701 |
+
|
| 702 |
+
def _launch_after_action_report(self, context: dict):
|
| 703 |
+
"""Run after-action report generation in the background."""
|
| 704 |
+
|
| 705 |
+
def _runner():
|
| 706 |
+
try:
|
| 707 |
+
report = self.advisor.generate_after_action_report(context)
|
| 708 |
+
except Exception as exc:
|
| 709 |
+
with self._lock:
|
| 710 |
+
self._result_report = None
|
| 711 |
+
self._result_report_status = "error"
|
| 712 |
+
self._result_report_error = str(exc)
|
| 713 |
+
self._last_result_payload_signature = ""
|
| 714 |
+
return
|
| 715 |
+
|
| 716 |
+
with self._lock:
|
| 717 |
+
if report and not report.error:
|
| 718 |
+
self._result_report = report
|
| 719 |
+
self._result_report_status = "ready"
|
| 720 |
+
self._result_report_error = ""
|
| 721 |
+
else:
|
| 722 |
+
self._result_report = report
|
| 723 |
+
self._result_report_status = "error"
|
| 724 |
+
self._result_report_error = (report.error if report else "Unknown error")
|
| 725 |
+
self._last_result_payload_signature = ""
|
| 726 |
+
|
| 727 |
+
threading.Thread(target=_runner, daemon=True).start()
|
| 728 |
+
|
| 729 |
+
def get_recommendations(self) -> Optional[dict]:
|
| 730 |
+
"""Get latest advisor recommendations."""
|
| 731 |
+
with self._lock:
|
| 732 |
+
if self._latest_recommendations:
|
| 733 |
+
return self._latest_recommendations.to_dict()
|
| 734 |
+
return None
|
| 735 |
+
|
| 736 |
+
def get_logs(self, limit: int = 50) -> list[dict]:
|
| 737 |
+
"""Get recent log entries."""
|
| 738 |
+
with self._lock:
|
| 739 |
+
return [log.to_dict() for log in self._logs[-limit:]]
|
| 740 |
+
|
| 741 |
+
def get_logs_text(self, limit: int = 20) -> str:
|
| 742 |
+
"""Get all logs as formatted text for display."""
|
| 743 |
+
with self._lock:
|
| 744 |
+
lines = []
|
| 745 |
+
for log in self._logs[-limit:]:
|
| 746 |
+
if log.event_type == "advisor":
|
| 747 |
+
lines.append(f"[Tick {log.tick}] 🤖 AI: {log.message}")
|
| 748 |
+
if log.details and log.details.get("recommendations"):
|
| 749 |
+
for rec in log.details["recommendations"]:
|
| 750 |
+
target = rec.get("target", {})
|
| 751 |
+
lines.append(
|
| 752 |
+
f" → {rec.get('suggested_unit_type')} at "
|
| 753 |
+
f"({target.get('x')}, {target.get('y')}): {rec.get('reason', '')[:50]}"
|
| 754 |
+
)
|
| 755 |
+
elif log.event_type == "deploy":
|
| 756 |
+
lines.append(f"[Tick {log.tick}] 🚒 Deploy: {log.message}")
|
| 757 |
+
elif log.event_type == "status":
|
| 758 |
+
lines.append(f"[{log.timestamp}] ℹ️ {log.message}")
|
| 759 |
+
elif log.event_type == "error":
|
| 760 |
+
lines.append(f"[{log.timestamp}] ⚠️ Error: {log.message}")
|
| 761 |
+
else:
|
| 762 |
+
lines.append(f"[Tick {log.tick}] {log.message}")
|
| 763 |
+
return "\n".join(lines)
|
| 764 |
+
|
| 765 |
+
def get_advisor_text(self, limit: int = 5) -> str:
|
| 766 |
+
"""Get AI advisor display with rich reasoning (legacy text format)."""
|
| 767 |
+
messages = self.get_advisor_messages(limit)
|
| 768 |
+
if not messages:
|
| 769 |
+
return "🤖 AI Advisor standing by..."
|
| 770 |
+
return messages[-1].get("content", "") if messages else ""
|
| 771 |
+
|
| 772 |
+
def _wrap_analysis_block(
|
| 773 |
+
self,
|
| 774 |
+
content: str,
|
| 775 |
+
default_summary: str = "AI Analysis",
|
| 776 |
+
*,
|
| 777 |
+
split_stage_sections: bool = False,
|
| 778 |
+
open_by_default: bool = False,
|
| 779 |
+
) -> str:
|
| 780 |
+
"""Wrap advisor markdown in a collapsible details block."""
|
| 781 |
+
if not content:
|
| 782 |
+
return content
|
| 783 |
+
|
| 784 |
+
lines = content.splitlines()
|
| 785 |
+
summary = default_summary
|
| 786 |
+
first_value_line = ""
|
| 787 |
+
auto_summary = default_summary == "AI Analysis"
|
| 788 |
+
|
| 789 |
+
for line in lines:
|
| 790 |
+
stripped = line.strip()
|
| 791 |
+
if not stripped:
|
| 792 |
+
continue
|
| 793 |
+
if not first_value_line:
|
| 794 |
+
first_value_line = stripped
|
| 795 |
+
if auto_summary and stripped.lower().startswith("###"):
|
| 796 |
+
summary = stripped.lstrip("# ").strip()
|
| 797 |
+
break
|
| 798 |
+
|
| 799 |
+
summary = html.escape(summary or default_summary)
|
| 800 |
+
|
| 801 |
+
if auto_summary and first_value_line and first_value_line.lower().startswith("###"):
|
| 802 |
+
body_lines = lines[1:]
|
| 803 |
+
else:
|
| 804 |
+
body_lines = lines
|
| 805 |
+
body = "\n".join(body_lines).strip()
|
| 806 |
+
|
| 807 |
+
if split_stage_sections:
|
| 808 |
+
rendered = self._render_stage_sections(body, open_by_default)
|
| 809 |
+
if rendered:
|
| 810 |
+
body = rendered
|
| 811 |
+
|
| 812 |
+
open_attr = " open" if open_by_default else ""
|
| 813 |
+
return (
|
| 814 |
+
f"<details class=\"analysis-entry\"{open_attr}>"
|
| 815 |
+
f"<summary>{summary}</summary>\n"
|
| 816 |
+
f"{body}\n"
|
| 817 |
+
"</details>"
|
| 818 |
+
)
|
| 819 |
+
|
| 820 |
+
def _render_stage_sections(self, body: str, force_open: bool = False) -> str | None:
|
| 821 |
+
"""Split a multi-stage markdown block into per-stage collapsible sections."""
|
| 822 |
+
lines = body.splitlines()
|
| 823 |
+
sections: list[tuple[str, list[str]]] = []
|
| 824 |
+
current_title: str | None = None
|
| 825 |
+
current_lines: list[str] = []
|
| 826 |
+
|
| 827 |
+
def _flush():
|
| 828 |
+
nonlocal current_title, current_lines
|
| 829 |
+
if current_title is None:
|
| 830 |
+
return
|
| 831 |
+
sections.append((current_title, list(current_lines)))
|
| 832 |
+
current_title = None
|
| 833 |
+
current_lines = []
|
| 834 |
+
|
| 835 |
+
for line in lines:
|
| 836 |
+
stripped = line.strip()
|
| 837 |
+
if stripped.startswith("### "):
|
| 838 |
+
_flush()
|
| 839 |
+
current_title = stripped.lstrip("# ").strip()
|
| 840 |
+
current_lines = []
|
| 841 |
+
continue
|
| 842 |
+
if current_title is None:
|
| 843 |
+
# Ignore content that appears before the first stage header
|
| 844 |
+
if not stripped:
|
| 845 |
+
continue
|
| 846 |
+
current_title = "Details"
|
| 847 |
+
current_lines.append(line)
|
| 848 |
+
|
| 849 |
+
_flush()
|
| 850 |
+
|
| 851 |
+
if not sections:
|
| 852 |
+
return None
|
| 853 |
+
|
| 854 |
+
rendered_sections: list[str] = []
|
| 855 |
+
for title, section_lines in sections:
|
| 856 |
+
section_body = "\n".join(section_lines).rstrip()
|
| 857 |
+
if section_body.endswith("---"):
|
| 858 |
+
section_body = section_body[:-3].rstrip()
|
| 859 |
+
section_body = section_body.strip() or "_No details available._"
|
| 860 |
+
open_attr = " open" if force_open else ""
|
| 861 |
+
rendered_sections.append(
|
| 862 |
+
f"<details class=\"analysis-stage\"{open_attr}>"
|
| 863 |
+
f"<summary>{html.escape(title)}</summary>\n"
|
| 864 |
+
f"{section_body}\n"
|
| 865 |
+
"</details>"
|
| 866 |
+
)
|
| 867 |
+
|
| 868 |
+
return "\n\n".join(rendered_sections)
|
| 869 |
+
|
| 870 |
+
def _build_advisor_messages_locked(self) -> list[dict]:
|
| 871 |
+
"""Assemble advisor messages. Caller must hold _lock."""
|
| 872 |
+
messages: list[dict] = []
|
| 873 |
+
|
| 874 |
+
# Show welcome message if no activity yet
|
| 875 |
+
if (
|
| 876 |
+
not self._current_cycle_messages
|
| 877 |
+
and not self._advisor_history
|
| 878 |
+
and not self._is_thinking
|
| 879 |
+
):
|
| 880 |
+
messages.append({
|
| 881 |
+
"role": "assistant",
|
| 882 |
+
"content": "👋 Hello! I'm your AI Tactical Advisor.\n\nStart the simulation and I'll analyze the fire situation, describe my reasoning, and recommend tactical deployments.\n\nWatch me think, plan, and execute!"
|
| 883 |
+
})
|
| 884 |
+
return messages
|
| 885 |
+
|
| 886 |
+
current_entry = self._build_current_cycle_entry_locked()
|
| 887 |
+
if current_entry:
|
| 888 |
+
messages.append(current_entry)
|
| 889 |
+
|
| 890 |
+
return messages
|
| 891 |
+
|
| 892 |
+
def _build_current_cycle_entry_locked(self) -> Optional[dict]:
|
| 893 |
+
"""Render the active cycle as a single ⏱️ Tick block (even while streaming)."""
|
| 894 |
+
cycle_sections: list[str] = []
|
| 895 |
+
stage_placeholders = {
|
| 896 |
+
1: ("📊 Stage 1 · Assessment", "Querying MCP tools and analyzing the situation..."),
|
| 897 |
+
2: ("🎯 Stage 2 · Planning", "Formulating tactical strategy..."),
|
| 898 |
+
3: ("⚡ Stage 3 · Execution", "Generating deployment commands via MCP..."),
|
| 899 |
+
4: ("🧭 Stage 4 · Summary", "Consolidating cycle insights..."),
|
| 900 |
+
}
|
| 901 |
+
current_stage_title: Optional[str] = None
|
| 902 |
+
for msg in self._current_cycle_messages:
|
| 903 |
+
content = msg.get("content", "")
|
| 904 |
+
if content:
|
| 905 |
+
cycle_sections.append(content)
|
| 906 |
+
|
| 907 |
+
# Add placeholder for the stage currently in progress
|
| 908 |
+
if self._is_thinking:
|
| 909 |
+
tick = self._thinking_start_tick
|
| 910 |
+
if tick is None and self.engine.world:
|
| 911 |
+
tick = self.engine.get_state().get("tick", 0)
|
| 912 |
+
title, desc = stage_placeholders.get(
|
| 913 |
+
self._current_stage,
|
| 914 |
+
("🤖 AI Thinking", "Processing...")
|
| 915 |
+
)
|
| 916 |
+
current_stage_title = title
|
| 917 |
+
cycle_sections.append(
|
| 918 |
+
f"### {title} `[Tick {tick if tick is not None else '?'}]`\n\n{desc}"
|
| 919 |
+
)
|
| 920 |
+
|
| 921 |
+
if not cycle_sections:
|
| 922 |
+
return None
|
| 923 |
+
|
| 924 |
+
tick_label = self._thinking_start_tick
|
| 925 |
+
if tick_label is None and self.engine.world:
|
| 926 |
+
tick_label = self.engine.get_state().get("tick", 0)
|
| 927 |
+
summary = f"⏱️ Tick {tick_label if tick_label is not None else '?'}"
|
| 928 |
+
if self._is_thinking and current_stage_title:
|
| 929 |
+
summary = f"{summary} · {current_stage_title} ⏳"
|
| 930 |
+
body = "\n\n".join(cycle_sections).strip()
|
| 931 |
+
open_by_default = tick_label == 0
|
| 932 |
+
|
| 933 |
+
return {
|
| 934 |
+
"role": "assistant",
|
| 935 |
+
"content": self._wrap_analysis_block(
|
| 936 |
+
body,
|
| 937 |
+
summary,
|
| 938 |
+
split_stage_sections=True,
|
| 939 |
+
open_by_default=open_by_default,
|
| 940 |
+
),
|
| 941 |
+
"metadata": {"title": summary, "status": "pending" if self._is_thinking else "done"},
|
| 942 |
+
}
|
| 943 |
+
|
| 944 |
+
def get_advisor_messages(self) -> list:
|
| 945 |
+
"""Get current AI advisor cycle messages (progressive stage display)."""
|
| 946 |
+
with self._lock:
|
| 947 |
+
return self._build_advisor_messages_locked()
|
| 948 |
+
|
| 949 |
+
def _build_history_messages_locked(self) -> list[dict]:
|
| 950 |
+
"""Aggregate advisor history into per-tick chatbot messages."""
|
| 951 |
+
if not self._advisor_history:
|
| 952 |
+
return []
|
| 953 |
+
|
| 954 |
+
history_messages: list[dict] = []
|
| 955 |
+
buffer: list[str] = []
|
| 956 |
+
tick_num = "?"
|
| 957 |
+
|
| 958 |
+
def flush_cycle():
|
| 959 |
+
nonlocal buffer, tick_num
|
| 960 |
+
cycle_text = "\n\n".join(buffer).strip()
|
| 961 |
+
if cycle_text:
|
| 962 |
+
history_messages.append({
|
| 963 |
+
"role": "assistant",
|
| 964 |
+
"content": self._wrap_analysis_block(
|
| 965 |
+
cycle_text,
|
| 966 |
+
f"⏱️ Tick {tick_num}",
|
| 967 |
+
split_stage_sections=True,
|
| 968 |
+
open_by_default=(str(tick_num).strip() == "0"),
|
| 969 |
+
),
|
| 970 |
+
"metadata": {"title": f"⏱️ Tick {tick_num}", "status": "done"},
|
| 971 |
+
})
|
| 972 |
+
buffer = []
|
| 973 |
+
tick_num = "?"
|
| 974 |
+
|
| 975 |
+
for msg in self._advisor_history:
|
| 976 |
+
content = msg.get("content", "")
|
| 977 |
+
if not content:
|
| 978 |
+
continue
|
| 979 |
+
buffer.append(content)
|
| 980 |
+
if tick_num == "?" and "[Tick " in content:
|
| 981 |
+
start = content.find("[Tick ") + 6
|
| 982 |
+
end = content.find("]", start)
|
| 983 |
+
if end > start:
|
| 984 |
+
tick_num = content[start:end]
|
| 985 |
+
|
| 986 |
+
lowered = content.lower()
|
| 987 |
+
if "stage 4" in lowered:
|
| 988 |
+
flush_cycle()
|
| 989 |
+
|
| 990 |
+
if buffer:
|
| 991 |
+
flush_cycle()
|
| 992 |
+
|
| 993 |
+
return history_messages if history_messages else [{
|
| 994 |
+
"role": "assistant",
|
| 995 |
+
"content": self._wrap_analysis_block("No previous analysis cycles yet...", "📜 History"),
|
| 996 |
+
"metadata": {"title": "📜 History", "status": "done"},
|
| 997 |
+
}]
|
| 998 |
+
|
| 999 |
+
def get_advisor_markdown(self) -> str:
|
| 1000 |
+
"""Get the latest AI advisor cycle as formatted plain text."""
|
| 1001 |
+
with self._lock:
|
| 1002 |
+
return self._get_advisor_markdown_internal()
|
| 1003 |
+
|
| 1004 |
+
def get_advisor_history_chat_messages(self) -> list[dict]:
|
| 1005 |
+
"""Get advisor history formatted for chatbot display."""
|
| 1006 |
+
with self._lock:
|
| 1007 |
+
return self._build_history_messages_locked()
|
| 1008 |
+
|
| 1009 |
+
# =========================================================================
|
| 1010 |
+
# Change Tracking for UI Optimization (Dual Timer Architecture)
|
| 1011 |
+
# =========================================================================
|
| 1012 |
+
|
| 1013 |
+
def get_game_changes(self) -> dict:
|
| 1014 |
+
"""
|
| 1015 |
+
Check game-critical components for changes (called by game_timer every 1s).
|
| 1016 |
+
|
| 1017 |
+
Returns a dict with:
|
| 1018 |
+
- state: current simulation state
|
| 1019 |
+
- grid_changed: bool - whether fire/unit positions changed
|
| 1020 |
+
- status_changed: bool - whether status bar should update
|
| 1021 |
+
"""
|
| 1022 |
+
import hashlib
|
| 1023 |
+
|
| 1024 |
+
with self._lock:
|
| 1025 |
+
if self.engine.world:
|
| 1026 |
+
state = self._compose_state_locked()
|
| 1027 |
+
else:
|
| 1028 |
+
state = {
|
| 1029 |
+
"status": "idle",
|
| 1030 |
+
"after_action_report": self._get_after_action_report_payload_locked(),
|
| 1031 |
+
}
|
| 1032 |
+
result = {"state": state}
|
| 1033 |
+
|
| 1034 |
+
# Grid state - hash fires, units, buildings positions
|
| 1035 |
+
grid_data = {
|
| 1036 |
+
"fires": sorted([(f["x"], f["y"], round(f["intensity"], 2)) for f in state.get("fires", [])]),
|
| 1037 |
+
"units": sorted([(u["x"], u["y"], u["type"]) for u in state.get("units", [])]),
|
| 1038 |
+
"buildings": sorted([(b["x"], b["y"]) for b in state.get("buildings", [])])
|
| 1039 |
+
}
|
| 1040 |
+
current_grid_hash = hashlib.md5(json.dumps(grid_data, sort_keys=True).encode()).hexdigest()
|
| 1041 |
+
result["grid_changed"] = current_grid_hash != self._last_grid_hash
|
| 1042 |
+
if result["grid_changed"]:
|
| 1043 |
+
self._last_grid_hash = current_grid_hash
|
| 1044 |
+
|
| 1045 |
+
# Status bar - only update when simulation is actively running
|
| 1046 |
+
status = state.get("status", "idle")
|
| 1047 |
+
is_running = self._running
|
| 1048 |
+
result["status_changed"] = is_running and status == "running"
|
| 1049 |
+
|
| 1050 |
+
return result
|
| 1051 |
+
|
| 1052 |
+
def get_ui_changes(self) -> dict:
|
| 1053 |
+
"""
|
| 1054 |
+
Check UI panel components for changes (called by ui_timer every 2s).
|
| 1055 |
+
|
| 1056 |
+
Returns a dict with:
|
| 1057 |
+
- state: current simulation state
|
| 1058 |
+
- advisor_changed: bool - whether advisor messages changed
|
| 1059 |
+
- advisor_messages: list[dict] | None - new content if changed
|
| 1060 |
+
- history_changed: bool - whether advisor history messages changed
|
| 1061 |
+
- advisor_history: list[dict] | None - new content if changed
|
| 1062 |
+
- event_log_changed: bool - whether event log changed
|
| 1063 |
+
- event_log: str or None - new content if changed
|
| 1064 |
+
- buttons_changed: bool - whether button states changed
|
| 1065 |
+
- button_states: tuple (start_enabled, pause_enabled)
|
| 1066 |
+
- result_changed: bool - whether result popup changed
|
| 1067 |
+
- result_state: str - current result state
|
| 1068 |
+
"""
|
| 1069 |
+
with self._lock:
|
| 1070 |
+
if self.engine.world:
|
| 1071 |
+
state = self._compose_state_locked()
|
| 1072 |
+
else:
|
| 1073 |
+
state = {
|
| 1074 |
+
"status": "idle",
|
| 1075 |
+
"after_action_report": self._get_after_action_report_payload_locked(),
|
| 1076 |
+
}
|
| 1077 |
+
result = {"state": state}
|
| 1078 |
+
|
| 1079 |
+
# 1. Advisor chat messages - only update when AI is thinking or content changed
|
| 1080 |
+
advisor_messages = self._get_advisor_chat_messages_internal()
|
| 1081 |
+
signature = tuple(msg.get("content", "") for msg in advisor_messages)
|
| 1082 |
+
content_changed = signature != self._last_advisor_signature
|
| 1083 |
+
result["advisor_changed"] = self._is_thinking or content_changed
|
| 1084 |
+
result["advisor_messages"] = advisor_messages if result["advisor_changed"] else None
|
| 1085 |
+
if content_changed:
|
| 1086 |
+
self._last_advisor_signature = signature
|
| 1087 |
+
|
| 1088 |
+
# 2. Advisor history messages - only update when content actually changed
|
| 1089 |
+
history_messages = self._build_history_messages_locked()
|
| 1090 |
+
history_signature = tuple(msg.get("content", "") for msg in history_messages)
|
| 1091 |
+
result["history_changed"] = history_signature != self._last_history_signature
|
| 1092 |
+
result["advisor_history"] = history_messages if result["history_changed"] else None
|
| 1093 |
+
if result["history_changed"]:
|
| 1094 |
+
self._last_history_signature = history_signature
|
| 1095 |
+
|
| 1096 |
+
# 3. Event log
|
| 1097 |
+
event_log = self._get_event_log_internal()
|
| 1098 |
+
result["event_log_changed"] = event_log != self._last_event_log
|
| 1099 |
+
result["event_log"] = event_log if result["event_log_changed"] else None
|
| 1100 |
+
if result["event_log_changed"]:
|
| 1101 |
+
self._last_event_log = event_log
|
| 1102 |
+
|
| 1103 |
+
# 4. Button states
|
| 1104 |
+
status = state.get("status", "idle")
|
| 1105 |
+
is_running = self._running
|
| 1106 |
+
is_paused = (
|
| 1107 |
+
self.engine.world is not None
|
| 1108 |
+
and not self._running
|
| 1109 |
+
and self.engine.world.status == SimulationStatus.RUNNING
|
| 1110 |
+
)
|
| 1111 |
+
start_enabled = is_paused or (not is_running and status in ["idle", "success", "fail"])
|
| 1112 |
+
pause_enabled = is_running and status == "running"
|
| 1113 |
+
current_buttons = (start_enabled, pause_enabled)
|
| 1114 |
+
result["buttons_changed"] = current_buttons != self._last_button_states
|
| 1115 |
+
result["button_states"] = current_buttons
|
| 1116 |
+
if result["buttons_changed"]:
|
| 1117 |
+
self._last_button_states = current_buttons
|
| 1118 |
+
|
| 1119 |
+
# 5. Result popup state
|
| 1120 |
+
if self._result_dismissed:
|
| 1121 |
+
current_result = ""
|
| 1122 |
+
elif status in ["success", "fail"]:
|
| 1123 |
+
current_result = status
|
| 1124 |
+
else:
|
| 1125 |
+
current_result = ""
|
| 1126 |
+
result["result_changed"] = current_result != self._last_result_state
|
| 1127 |
+
result["result_state"] = current_result
|
| 1128 |
+
if result["result_changed"]:
|
| 1129 |
+
self._last_result_state = current_result
|
| 1130 |
+
|
| 1131 |
+
report_payload = self._get_after_action_report_payload_locked()
|
| 1132 |
+
overlay_payload = {
|
| 1133 |
+
"outcome": current_result,
|
| 1134 |
+
"after_action": report_payload,
|
| 1135 |
+
}
|
| 1136 |
+
payload_signature = json.dumps(overlay_payload, sort_keys=True)
|
| 1137 |
+
if payload_signature != self._last_result_payload_signature:
|
| 1138 |
+
result["result_changed"] = True
|
| 1139 |
+
self._last_result_payload_signature = payload_signature
|
| 1140 |
+
result["result_payload"] = overlay_payload
|
| 1141 |
+
|
| 1142 |
+
return result
|
| 1143 |
+
|
| 1144 |
+
def get_changed_components(self) -> dict:
|
| 1145 |
+
"""
|
| 1146 |
+
Legacy function - combines game and UI changes.
|
| 1147 |
+
Used by button click handlers for full refresh.
|
| 1148 |
+
"""
|
| 1149 |
+
game = self.get_game_changes()
|
| 1150 |
+
ui = self.get_ui_changes()
|
| 1151 |
+
|
| 1152 |
+
# Merge results
|
| 1153 |
+
return {
|
| 1154 |
+
"state": game["state"],
|
| 1155 |
+
"grid_changed": game["grid_changed"],
|
| 1156 |
+
"status_changed": game["status_changed"],
|
| 1157 |
+
"advisor_changed": ui["advisor_changed"],
|
| 1158 |
+
"advisor_messages": ui["advisor_messages"],
|
| 1159 |
+
"history_changed": ui["history_changed"],
|
| 1160 |
+
"advisor_history": ui["advisor_history"],
|
| 1161 |
+
"event_log_changed": ui["event_log_changed"],
|
| 1162 |
+
"event_log": ui["event_log"],
|
| 1163 |
+
"buttons_changed": ui["buttons_changed"],
|
| 1164 |
+
"button_states": ui["button_states"],
|
| 1165 |
+
"result_changed": ui["result_changed"],
|
| 1166 |
+
"result_state": ui["result_state"],
|
| 1167 |
+
"result_payload": ui.get("result_payload"),
|
| 1168 |
+
}
|
| 1169 |
+
|
| 1170 |
+
def _get_advisor_markdown_internal(self) -> str:
|
| 1171 |
+
"""Internal helper to flatten advisor messages into text."""
|
| 1172 |
+
messages = self._build_advisor_messages_locked()
|
| 1173 |
+
parts = [msg.get("content", "") for msg in messages if msg.get("content")]
|
| 1174 |
+
return "\n\n---\n\n".join(parts) if parts else "Waiting for analysis..."
|
| 1175 |
+
|
| 1176 |
+
def _get_advisor_chat_messages_internal(self) -> list[dict]:
|
| 1177 |
+
"""Internal helper to normalize advisor messages for Chatbot display."""
|
| 1178 |
+
messages = self._build_advisor_messages_locked()
|
| 1179 |
+
chat_messages: list[dict] = []
|
| 1180 |
+
for msg in messages:
|
| 1181 |
+
role = msg.get("role", "assistant")
|
| 1182 |
+
if role not in ("user", "assistant", "system"):
|
| 1183 |
+
role = "assistant"
|
| 1184 |
+
chat_msg = {
|
| 1185 |
+
"role": role,
|
| 1186 |
+
"content": msg.get("content", ""),
|
| 1187 |
+
}
|
| 1188 |
+
metadata = msg.get("metadata")
|
| 1189 |
+
if metadata:
|
| 1190 |
+
chat_msg["metadata"] = metadata
|
| 1191 |
+
options = msg.get("options")
|
| 1192 |
+
if options:
|
| 1193 |
+
chat_msg["options"] = options
|
| 1194 |
+
chat_messages.append(chat_msg)
|
| 1195 |
+
return chat_messages
|
| 1196 |
+
|
| 1197 |
+
def get_advisor_chat_messages(self) -> list[dict]:
|
| 1198 |
+
"""Public helper for UI components that expect message dictionaries."""
|
| 1199 |
+
with self._lock:
|
| 1200 |
+
return self._get_advisor_chat_messages_internal()
|
| 1201 |
+
|
| 1202 |
+
def _get_event_log_internal(self, limit: int = 15) -> str:
|
| 1203 |
+
"""Internal method to get event log (must be called with lock held)."""
|
| 1204 |
+
lines = []
|
| 1205 |
+
event_logs = [log for log in self._logs if log.event_type != "advisor"]
|
| 1206 |
+
for log in event_logs[-limit:]:
|
| 1207 |
+
if log.event_type == "deploy":
|
| 1208 |
+
lines.append(f"[Tick {log.tick}] 🚒 {log.message}")
|
| 1209 |
+
elif log.event_type == "status":
|
| 1210 |
+
lines.append(f"[{log.timestamp}] ℹ️ {log.message}")
|
| 1211 |
+
elif log.event_type == "error":
|
| 1212 |
+
lines.append(f"[{log.timestamp}] ⚠️ {log.message}")
|
| 1213 |
+
else:
|
| 1214 |
+
lines.append(f"[Tick {log.tick}] {log.message}")
|
| 1215 |
+
return "\n".join(lines) if lines else "No events yet..."
|
| 1216 |
+
|
| 1217 |
+
def get_event_log_text(self, limit: int = 15) -> str:
|
| 1218 |
+
"""Get event logs (deploy, status, error) without AI advisor."""
|
| 1219 |
+
with self._lock:
|
| 1220 |
+
lines = []
|
| 1221 |
+
event_logs = [log for log in self._logs if log.event_type != "advisor"]
|
| 1222 |
+
for log in event_logs[-limit:]:
|
| 1223 |
+
if log.event_type == "deploy":
|
| 1224 |
+
lines.append(f"[Tick {log.tick}] 🚒 {log.message}")
|
| 1225 |
+
elif log.event_type == "status":
|
| 1226 |
+
lines.append(f"[{log.timestamp}] ℹ️ {log.message}")
|
| 1227 |
+
elif log.event_type == "error":
|
| 1228 |
+
lines.append(f"[{log.timestamp}] ⚠️ {log.message}")
|
| 1229 |
+
else:
|
| 1230 |
+
lines.append(f"[Tick {log.tick}] {log.message}")
|
| 1231 |
+
return "\n".join(lines) if lines else "No events yet..."
|
| 1232 |
+
|
| 1233 |
+
def get_deploy_log_text(self, limit: int = 10) -> str:
|
| 1234 |
+
"""Get deploy-related logs only (deploy success and errors)."""
|
| 1235 |
+
with self._lock:
|
| 1236 |
+
lines = []
|
| 1237 |
+
deploy_logs = [log for log in self._logs if log.event_type in ["deploy", "error"]]
|
| 1238 |
+
for log in deploy_logs[-limit:]:
|
| 1239 |
+
if log.event_type == "deploy":
|
| 1240 |
+
lines.append(f"[Tick {log.tick}] ✅ {log.message}")
|
| 1241 |
+
elif log.event_type == "error":
|
| 1242 |
+
lines.append(f"[Tick {log.tick}] ❌ {log.message}")
|
| 1243 |
+
return "\n".join(lines) if lines else "Click on a cell to deploy units..."
|
| 1244 |
+
|
| 1245 |
+
def deploy_unit(self, unit_type: str, x: int, y: int, source: str = "player") -> dict:
|
| 1246 |
+
"""Deploy a unit (thread-safe)."""
|
| 1247 |
+
with self._lock:
|
| 1248 |
+
result = self.engine.deploy_unit(unit_type, x, y, source)
|
| 1249 |
+
|
| 1250 |
+
if result.get("status") == "ok":
|
| 1251 |
+
self._add_log(
|
| 1252 |
+
"deploy",
|
| 1253 |
+
f"Deployed {unit_type} at ({x}, {y})",
|
| 1254 |
+
{"unit": result.get("unit"), "source": source}
|
| 1255 |
+
)
|
| 1256 |
+
if str(source or "").startswith("player"):
|
| 1257 |
+
unit_label = "fire truck" if unit_type == "fire_truck" else "helicopter"
|
| 1258 |
+
self._record_player_action(
|
| 1259 |
+
"deploy_unit",
|
| 1260 |
+
f"Deployed {unit_label} at ({x}, {y})",
|
| 1261 |
+
{"unit_type": unit_type, "x": x, "y": y}
|
| 1262 |
+
)
|
| 1263 |
+
else:
|
| 1264 |
+
self._add_log(
|
| 1265 |
+
"error",
|
| 1266 |
+
f"Failed to deploy {unit_type}: {result.get('message')}"
|
| 1267 |
+
)
|
| 1268 |
+
|
| 1269 |
+
return result
|
| 1270 |
+
|
| 1271 |
+
def remove_unit(self, x: int, y: int) -> dict:
|
| 1272 |
+
"""Remove a unit at position (thread-safe)."""
|
| 1273 |
+
with self._lock:
|
| 1274 |
+
result = self.engine.remove_unit_at(x, y)
|
| 1275 |
+
|
| 1276 |
+
if result.get("status") == "ok":
|
| 1277 |
+
self._add_log(
|
| 1278 |
+
"deploy",
|
| 1279 |
+
f"Removed unit at ({x}, {y})",
|
| 1280 |
+
{"unit": result.get("unit")}
|
| 1281 |
+
)
|
| 1282 |
+
removed_unit = result.get("unit") or {}
|
| 1283 |
+
unit_type = removed_unit.get("type", "")
|
| 1284 |
+
unit_label = (
|
| 1285 |
+
"fire truck"
|
| 1286 |
+
if unit_type == "fire_truck"
|
| 1287 |
+
else "helicopter"
|
| 1288 |
+
if unit_type == "helicopter"
|
| 1289 |
+
else "unit"
|
| 1290 |
+
)
|
| 1291 |
+
self._record_player_action(
|
| 1292 |
+
"remove_unit",
|
| 1293 |
+
f"Removed {unit_label} at ({removed_unit.get('x', x)}, {removed_unit.get('y', y)})",
|
| 1294 |
+
{"unit": removed_unit}
|
| 1295 |
+
)
|
| 1296 |
+
|
| 1297 |
+
return result
|
| 1298 |
+
|
| 1299 |
+
def add_fire(self, x: int, y: int, intensity: float = 0.5) -> dict:
|
| 1300 |
+
"""Add fire at position (thread-safe). For testing purposes."""
|
| 1301 |
+
with self._lock:
|
| 1302 |
+
if self.engine.world is None:
|
| 1303 |
+
return {"status": "error", "message": "World not initialized"}
|
| 1304 |
+
|
| 1305 |
+
# Check bounds
|
| 1306 |
+
if not (0 <= x < self.engine.world.width and 0 <= y < self.engine.world.height):
|
| 1307 |
+
return {"status": "error", "message": f"Position ({x}, {y}) out of bounds"}
|
| 1308 |
+
|
| 1309 |
+
cell = self.engine.world.grid[y][x]
|
| 1310 |
+
|
| 1311 |
+
# Only allow fire on forest or building
|
| 1312 |
+
if cell.cell_type not in (CellType.FOREST, CellType.BUILDING):
|
| 1313 |
+
return {"status": "error", "message": "Fire can only be placed on forest or building"}
|
| 1314 |
+
|
| 1315 |
+
# Cannot place fire on existing fire or smoke
|
| 1316 |
+
if cell.fire_intensity > 0:
|
| 1317 |
+
return {"status": "error", "message": "Cannot place fire on existing fire or smoke"}
|
| 1318 |
+
|
| 1319 |
+
# Check if there's already a unit at this position
|
| 1320 |
+
for unit in self.engine.world.units:
|
| 1321 |
+
if unit.x == x and unit.y == y:
|
| 1322 |
+
return {"status": "error", "message": "Cannot place fire where a unit exists"}
|
| 1323 |
+
|
| 1324 |
+
# Set fire intensity
|
| 1325 |
+
old_intensity = cell.fire_intensity
|
| 1326 |
+
cell.fire_intensity = min(1.0, max(0.0, intensity))
|
| 1327 |
+
|
| 1328 |
+
# Update world metrics
|
| 1329 |
+
self.engine.world.calculate_metrics()
|
| 1330 |
+
|
| 1331 |
+
self._add_log(
|
| 1332 |
+
"fire",
|
| 1333 |
+
f"🔥 Added fire at ({x}, {y}) with intensity {int(intensity * 100)}%",
|
| 1334 |
+
{"x": x, "y": y, "intensity": cell.fire_intensity, "old_intensity": old_intensity}
|
| 1335 |
+
)
|
| 1336 |
+
result = {"status": "ok", "x": x, "y": y, "intensity": cell.fire_intensity}
|
| 1337 |
+
self._record_player_action(
|
| 1338 |
+
"add_fire",
|
| 1339 |
+
f"Ignited fire at ({x}, {y}) · intensity {int(cell.fire_intensity * 100)}%",
|
| 1340 |
+
result
|
| 1341 |
+
)
|
| 1342 |
+
return result
|
| 1343 |
+
|
| 1344 |
+
def has_unit_at(self, x: int, y: int) -> bool:
|
| 1345 |
+
"""Check if there's a unit at position."""
|
| 1346 |
+
with self._lock:
|
| 1347 |
+
if self.engine.world is None:
|
| 1348 |
+
return False
|
| 1349 |
+
for unit in self.engine.world.units:
|
| 1350 |
+
if unit.x == x and unit.y == y:
|
| 1351 |
+
return True
|
| 1352 |
+
return False
|
| 1353 |
+
|
| 1354 |
+
def is_running(self) -> bool:
|
| 1355 |
+
"""Check if simulation is running."""
|
| 1356 |
+
return self._running
|
| 1357 |
+
|
| 1358 |
+
def set_auto_execute(self, enabled: bool):
|
| 1359 |
+
"""Set whether to automatically execute AI recommendations."""
|
| 1360 |
+
with self._lock:
|
| 1361 |
+
self._auto_execute = enabled
|
| 1362 |
+
|
| 1363 |
+
def is_auto_execute(self) -> bool:
|
| 1364 |
+
"""Check if auto-execute is enabled."""
|
| 1365 |
+
return self._auto_execute
|
| 1366 |
+
|
| 1367 |
+
def is_thinking(self) -> bool:
|
| 1368 |
+
"""Check if AI advisor is currently thinking."""
|
| 1369 |
+
return self._is_thinking
|
| 1370 |
+
|
| 1371 |
+
def get_thinking_stage(self) -> int:
|
| 1372 |
+
"""Get current AI thinking stage (0=idle, 1=tool_call, 2=assess, 3=plan, 4=execute)."""
|
| 1373 |
+
return self._current_stage if self._is_thinking else 0
|
| 1374 |
+
|
| 1375 |
+
def _simulation_loop(self):
|
| 1376 |
+
"""Background simulation loop."""
|
| 1377 |
+
# Hold any pending after-action context so the report is launched outside the lock
|
| 1378 |
+
after_action_context = None
|
| 1379 |
+
|
| 1380 |
+
while self._running:
|
| 1381 |
+
sim_should_stop = False
|
| 1382 |
+
try:
|
| 1383 |
+
with self._lock:
|
| 1384 |
+
if self.engine.world is None:
|
| 1385 |
+
break
|
| 1386 |
+
|
| 1387 |
+
# Run advisor immediately on first tick (only once per simulation)
|
| 1388 |
+
if self._advisor_first_run:
|
| 1389 |
+
self._advisor_first_run = False
|
| 1390 |
+
state = self.engine.get_state()
|
| 1391 |
+
self._run_advisor(state)
|
| 1392 |
+
|
| 1393 |
+
# Advance simulation
|
| 1394 |
+
self.engine.step()
|
| 1395 |
+
self._tick_count += 1
|
| 1396 |
+
|
| 1397 |
+
# Check end conditions
|
| 1398 |
+
state = self.engine.get_state()
|
| 1399 |
+
self._record_tick_metrics_locked(state)
|
| 1400 |
+
status = state.get("status", "running")
|
| 1401 |
+
|
| 1402 |
+
if status in ["success", "fail"]:
|
| 1403 |
+
self._add_log(
|
| 1404 |
+
"status",
|
| 1405 |
+
"🎉 SUCCESS! Fire contained!" if status == "success" else "💥 FAILED! Too much damage!"
|
| 1406 |
+
)
|
| 1407 |
+
after_action_context = self._prepare_after_action_context_locked(status, state)
|
| 1408 |
+
self._running = False
|
| 1409 |
+
sim_should_stop = True
|
| 1410 |
+
else:
|
| 1411 |
+
# Periodic advisor evaluation (every advisor_interval ticks)
|
| 1412 |
+
if self._tick_count % self.advisor_interval == 0:
|
| 1413 |
+
self._run_advisor(state)
|
| 1414 |
+
|
| 1415 |
+
# Notify UI
|
| 1416 |
+
if self._on_update:
|
| 1417 |
+
try:
|
| 1418 |
+
self._on_update()
|
| 1419 |
+
except Exception:
|
| 1420 |
+
pass # Ignore UI callback errors
|
| 1421 |
+
|
| 1422 |
+
if after_action_context:
|
| 1423 |
+
self._launch_after_action_report(after_action_context)
|
| 1424 |
+
after_action_context = None
|
| 1425 |
+
|
| 1426 |
+
if sim_should_stop:
|
| 1427 |
+
break
|
| 1428 |
+
|
| 1429 |
+
# Sleep between ticks
|
| 1430 |
+
time.sleep(self.tick_interval)
|
| 1431 |
+
|
| 1432 |
+
except Exception as e:
|
| 1433 |
+
with self._lock:
|
| 1434 |
+
self._add_log("error", f"Simulation error: {str(e)}")
|
| 1435 |
+
break
|
| 1436 |
+
|
| 1437 |
+
def _archive_current_cycle_locked(self):
|
| 1438 |
+
"""Move the completed cycle messages into history (caller must hold _lock)."""
|
| 1439 |
+
if not self._current_cycle_messages:
|
| 1440 |
+
return
|
| 1441 |
+
self._advisor_history.extend(self._current_cycle_messages)
|
| 1442 |
+
if len(self._advisor_history) > 42:
|
| 1443 |
+
self._advisor_history = self._advisor_history[-39:]
|
| 1444 |
+
self._current_cycle_messages = []
|
| 1445 |
+
|
| 1446 |
+
def _run_advisor(self, state: dict):
|
| 1447 |
+
"""
|
| 1448 |
+
Run advisor analysis with progressive stage display.
|
| 1449 |
+
Each stage is shown one at a time: Assessment → Planning → Execution → Summary.
|
| 1450 |
+
"""
|
| 1451 |
+
# Prevent concurrent advisor calls
|
| 1452 |
+
if self._advisor_running:
|
| 1453 |
+
return
|
| 1454 |
+
|
| 1455 |
+
self._advisor_running = True
|
| 1456 |
+
tick = state.get("tick", 0)
|
| 1457 |
+
|
| 1458 |
+
try:
|
| 1459 |
+
# ================================================================
|
| 1460 |
+
# Start new cycle - archive previous and clear current
|
| 1461 |
+
# ================================================================
|
| 1462 |
+
# Archive previous cycle to history (if exists)
|
| 1463 |
+
self._archive_current_cycle_locked()
|
| 1464 |
+
|
| 1465 |
+
# Clear current cycle for new analysis
|
| 1466 |
+
self._current_cycle_messages = []
|
| 1467 |
+
self._thinking_start_tick = tick
|
| 1468 |
+
|
| 1469 |
+
# ================================================================
|
| 1470 |
+
# Stage 1: ASSESS - Query MCP tools and analyze situation
|
| 1471 |
+
# ================================================================
|
| 1472 |
+
self._current_stage = 1
|
| 1473 |
+
self._is_thinking = True
|
| 1474 |
+
|
| 1475 |
+
self._lock.release()
|
| 1476 |
+
try:
|
| 1477 |
+
assessment = self.advisor.assess(state)
|
| 1478 |
+
finally:
|
| 1479 |
+
self._lock.acquire()
|
| 1480 |
+
|
| 1481 |
+
# Add assessment message with integrated MCP tool calls
|
| 1482 |
+
self._add_assessment_message(assessment, state, tick)
|
| 1483 |
+
self._is_thinking = False
|
| 1484 |
+
|
| 1485 |
+
# ================================================================
|
| 1486 |
+
# Stage 2: PLAN - Formulate tactical strategy
|
| 1487 |
+
# ================================================================
|
| 1488 |
+
self._current_stage = 2
|
| 1489 |
+
self._is_thinking = True
|
| 1490 |
+
|
| 1491 |
+
self._lock.release()
|
| 1492 |
+
try:
|
| 1493 |
+
plan = self.advisor.plan(state, assessment)
|
| 1494 |
+
finally:
|
| 1495 |
+
self._lock.acquire()
|
| 1496 |
+
|
| 1497 |
+
self._add_planning_message(plan, tick)
|
| 1498 |
+
self._is_thinking = False
|
| 1499 |
+
|
| 1500 |
+
# ================================================================
|
| 1501 |
+
# Stage 3: EXECUTE - Generate and execute deployment commands
|
| 1502 |
+
# ================================================================
|
| 1503 |
+
self._current_stage = 3
|
| 1504 |
+
self._is_thinking = True
|
| 1505 |
+
|
| 1506 |
+
self._lock.release()
|
| 1507 |
+
try:
|
| 1508 |
+
recommendations = self.advisor.execute(state, assessment, plan)
|
| 1509 |
+
finally:
|
| 1510 |
+
self._lock.acquire()
|
| 1511 |
+
|
| 1512 |
+
# Add execution message with integrated MCP tool calls
|
| 1513 |
+
self._add_execution_message(recommendations, tick)
|
| 1514 |
+
|
| 1515 |
+
# Build final response object (needed for summary + logging)
|
| 1516 |
+
response = self._build_advisor_response(assessment, plan, recommendations)
|
| 1517 |
+
self._latest_recommendations = response
|
| 1518 |
+
|
| 1519 |
+
# ================================================================
|
| 1520 |
+
# Stage 4: SUMMARY - Consolidate cycle outcomes
|
| 1521 |
+
# ================================================================
|
| 1522 |
+
self._current_stage = 4
|
| 1523 |
+
self._is_thinking = True
|
| 1524 |
+
|
| 1525 |
+
self._lock.release()
|
| 1526 |
+
try:
|
| 1527 |
+
cycle_summary = self.advisor.summarize(state, assessment, plan, recommendations, response)
|
| 1528 |
+
finally:
|
| 1529 |
+
self._lock.acquire()
|
| 1530 |
+
|
| 1531 |
+
self._is_thinking = False
|
| 1532 |
+
self._add_summary_cycle_message(cycle_summary, tick)
|
| 1533 |
+
self._record_cycle_summary(tick, cycle_summary, self.engine.get_state())
|
| 1534 |
+
|
| 1535 |
+
# ================================================================
|
| 1536 |
+
# Complete - all stages done
|
| 1537 |
+
# ================================================================
|
| 1538 |
+
self._current_stage = 5
|
| 1539 |
+
|
| 1540 |
+
self._add_log(
|
| 1541 |
+
"advisor",
|
| 1542 |
+
response.summary,
|
| 1543 |
+
{
|
| 1544 |
+
"recommendations": [r.to_dict() for r in response.recommendations],
|
| 1545 |
+
"thinking": response.thinking,
|
| 1546 |
+
"analysis": response.analysis,
|
| 1547 |
+
"priority": response.priority,
|
| 1548 |
+
"error": response.error
|
| 1549 |
+
}
|
| 1550 |
+
)
|
| 1551 |
+
|
| 1552 |
+
# Auto-execute recommendations if enabled
|
| 1553 |
+
if self._auto_execute and response.recommendations:
|
| 1554 |
+
self._execute_recommendations(response, tick)
|
| 1555 |
+
|
| 1556 |
+
except Exception as e:
|
| 1557 |
+
self._is_thinking = False
|
| 1558 |
+
self._current_stage = 0
|
| 1559 |
+
self._add_log("error", f"Advisor error: {str(e)}")
|
| 1560 |
+
# Add error message to current cycle
|
| 1561 |
+
self._current_cycle_messages.append({
|
| 1562 |
+
"role": "assistant",
|
| 1563 |
+
"content": f"❌ AI Advisor Error: {str(e)}"
|
| 1564 |
+
})
|
| 1565 |
+
finally:
|
| 1566 |
+
self._advisor_running = False
|
| 1567 |
+
# Ensure completed cycle is archived so UI can fold under ⏱️ Tick blocks immediately
|
| 1568 |
+
self._archive_current_cycle_locked()
|
| 1569 |
+
|
| 1570 |
+
def _add_assessment_message(self, assessment: AssessmentResult, state: dict, tick: int):
|
| 1571 |
+
"""Add the Assessment message (Stage 1) with MCP tool calls to current cycle."""
|
| 1572 |
+
|
| 1573 |
+
fires = state.get("fires", [])
|
| 1574 |
+
units = state.get("units", [])
|
| 1575 |
+
buildings = state.get("buildings", [])
|
| 1576 |
+
building_integrity = state.get("building_integrity", 1.0)
|
| 1577 |
+
status = state.get("status", "running")
|
| 1578 |
+
width = state.get("width", 10)
|
| 1579 |
+
height = state.get("height", 10)
|
| 1580 |
+
|
| 1581 |
+
# Generate emoji map
|
| 1582 |
+
emoji_map = generate_emoji_map(self.engine)
|
| 1583 |
+
|
| 1584 |
+
priority_emoji = {
|
| 1585 |
+
"CRITICAL": "🔴",
|
| 1586 |
+
"HIGH": "🟠",
|
| 1587 |
+
"MODERATE": "🟡",
|
| 1588 |
+
"LOW": "🟢"
|
| 1589 |
+
}
|
| 1590 |
+
emoji = priority_emoji.get(assessment.threat_level, "⚪")
|
| 1591 |
+
|
| 1592 |
+
content = f"""
|
| 1593 |
+
### 📊 Stage 1 · Assessment `[Tick {tick}]`
|
| 1594 |
+
|
| 1595 |
+
#### 🔧 MCP Tool Calls
|
| 1596 |
+
|
| 1597 |
+
<details>
|
| 1598 |
+
<summary>📤 <code>mcp.get_world_state()</code></summary>
|
| 1599 |
+
|
| 1600 |
+
```python
|
| 1601 |
+
result = mcp.get_world_state()
|
| 1602 |
+
```
|
| 1603 |
+
|
| 1604 |
+
**Response**
|
| 1605 |
+
```
|
| 1606 |
+
status: {status} | grid: {width}x{height}
|
| 1607 |
+
fires: {len(fires)} | units: {len(units)}/{state.get('max_units', 10)}
|
| 1608 |
+
buildings: {len(buildings)} | integrity: {building_integrity:.0%}
|
| 1609 |
+
```
|
| 1610 |
+
|
| 1611 |
+
```
|
| 1612 |
+
{emoji_map}
|
| 1613 |
+
```
|
| 1614 |
+
|
| 1615 |
+
_Legend: 🌲 Forest · 🏢 Building · 🔥 Fire · 💨 Smoke · 🚒 Truck · 🚁 Heli_
|
| 1616 |
+
</details>
|
| 1617 |
+
""".strip()
|
| 1618 |
+
|
| 1619 |
+
if assessment.ineffective_units:
|
| 1620 |
+
idle_lines = []
|
| 1621 |
+
for u in assessment.ineffective_units[:5]:
|
| 1622 |
+
idle_lines.append(f"- {u.get('type', 'unit')} at ({u.get('x', 0)}, {u.get('y', 0)})")
|
| 1623 |
+
if len(assessment.ineffective_units) > 5:
|
| 1624 |
+
idle_lines.append(f"- ... and {len(assessment.ineffective_units) - 5} more")
|
| 1625 |
+
idle_block = "\n".join(idle_lines)
|
| 1626 |
+
content += f"""
|
| 1627 |
+
|
| 1628 |
+
<details>
|
| 1629 |
+
<summary>📤 <code>mcp.find_idle_units()</code> → ⚠️ {len(assessment.ineffective_units)} idle</summary>
|
| 1630 |
+
|
| 1631 |
+
```python
|
| 1632 |
+
{idle_block}
|
| 1633 |
+
```
|
| 1634 |
+
</details>
|
| 1635 |
+
"""
|
| 1636 |
+
else:
|
| 1637 |
+
content += """
|
| 1638 |
+
|
| 1639 |
+
<details>
|
| 1640 |
+
<summary>📤 <code>mcp.find_idle_units()</code> → ✅ All effective</summary>
|
| 1641 |
+
|
| 1642 |
+
```python
|
| 1643 |
+
# Every deployed unit is actively covering a fire
|
| 1644 |
+
```
|
| 1645 |
+
</details>
|
| 1646 |
+
"""
|
| 1647 |
+
|
| 1648 |
+
if assessment.uncovered_fires:
|
| 1649 |
+
fire_lines = []
|
| 1650 |
+
for f in assessment.uncovered_fires[:5]:
|
| 1651 |
+
fire_lines.append(f"- Fire at ({f.get('x', 0)}, {f.get('y', 0)}) · intensity={f.get('intensity', 0):.0%}")
|
| 1652 |
+
if len(assessment.uncovered_fires) > 5:
|
| 1653 |
+
fire_lines.append(f"- ... and {len(assessment.uncovered_fires) - 5} more")
|
| 1654 |
+
fire_block = "\n".join(fire_lines)
|
| 1655 |
+
content += f"""
|
| 1656 |
+
|
| 1657 |
+
<details>
|
| 1658 |
+
<summary>📤 <code>mcp.find_uncovered_fires()</code> → 🚨 {len(assessment.uncovered_fires)} uncovered</summary>
|
| 1659 |
+
|
| 1660 |
+
```python
|
| 1661 |
+
{fire_block}
|
| 1662 |
+
```
|
| 1663 |
+
</details>
|
| 1664 |
+
"""
|
| 1665 |
+
else:
|
| 1666 |
+
content += """
|
| 1667 |
+
|
| 1668 |
+
<details>
|
| 1669 |
+
<summary>📤 <code>mcp.find_uncovered_fires()</code> → ✅ All covered</summary>
|
| 1670 |
+
|
| 1671 |
+
```python
|
| 1672 |
+
# All active fires currently have unit coverage
|
| 1673 |
+
```
|
| 1674 |
+
</details>
|
| 1675 |
+
"""
|
| 1676 |
+
|
| 1677 |
+
content += f"""
|
| 1678 |
+
#### 📋 Analysis Results
|
| 1679 |
+
|
| 1680 |
+
**Threat Level:** {emoji} {assessment.threat_level}
|
| 1681 |
+
|
| 1682 |
+
**Fire Analysis**
|
| 1683 |
+
- Total fires: {assessment.fire_count}
|
| 1684 |
+
- High intensity (>70%): {len(assessment.high_intensity_fires)}
|
| 1685 |
+
- Building threats: {len(assessment.building_threats)}
|
| 1686 |
+
- ⚠️ Uncovered fires: {len(assessment.uncovered_fires)}
|
| 1687 |
+
|
| 1688 |
+
**Unit Analysis**
|
| 1689 |
+
- Deployed: {assessment.unit_count}/{assessment.max_units}
|
| 1690 |
+
- Effective: {len(assessment.effective_units)}
|
| 1691 |
+
- Idle: {len(assessment.ineffective_units)}
|
| 1692 |
+
- Coverage ratio: {assessment.coverage_ratio:.0%}
|
| 1693 |
+
|
| 1694 |
+
**Summary:** {assessment.summary}
|
| 1695 |
+
"""
|
| 1696 |
+
content += "\n\n---\n"
|
| 1697 |
+
self._current_cycle_messages.append({"role": "assistant", "content": content})
|
| 1698 |
+
|
| 1699 |
+
def _add_planning_message(self, plan, tick: int):
|
| 1700 |
+
"""Add the Planning message (Stage 2) to current cycle."""
|
| 1701 |
+
strategy_emoji = {
|
| 1702 |
+
"deploy_new": "🚀",
|
| 1703 |
+
"optimize_existing": "🔄",
|
| 1704 |
+
"balanced": "⚖️",
|
| 1705 |
+
"monitor": "👀"
|
| 1706 |
+
}
|
| 1707 |
+
s_emoji = strategy_emoji.get(plan.strategy, "📋")
|
| 1708 |
+
|
| 1709 |
+
action_lines = []
|
| 1710 |
+
if plan.deploy_count > 0:
|
| 1711 |
+
action_lines.append(f"- Deploy: {plan.deploy_count} new unit(s)")
|
| 1712 |
+
if plan.reposition_units:
|
| 1713 |
+
action_lines.append(f"- Reposition: {len(plan.reposition_units)} idle unit(s)")
|
| 1714 |
+
if plan.priority_targets:
|
| 1715 |
+
action_lines.append(f"- Priority fires: {len(plan.priority_targets)}")
|
| 1716 |
+
|
| 1717 |
+
content = f"""
|
| 1718 |
+
### 🎯 Stage 2 · Planning `[Tick {tick}]`
|
| 1719 |
+
|
| 1720 |
+
**Strategy:** {s_emoji} `{plan.strategy.upper()}`
|
| 1721 |
+
|
| 1722 |
+
**Reasoning**
|
| 1723 |
+
{plan.reasoning}
|
| 1724 |
+
""".strip()
|
| 1725 |
+
|
| 1726 |
+
if action_lines:
|
| 1727 |
+
content += "\n\n**Action Outline**\n" + "\n".join(action_lines)
|
| 1728 |
+
|
| 1729 |
+
content += "\n\n---\n"
|
| 1730 |
+
|
| 1731 |
+
self._current_cycle_messages.append({"role": "assistant", "content": content})
|
| 1732 |
+
|
| 1733 |
+
def _add_execution_message(self, recommendations, tick: int):
|
| 1734 |
+
"""Add the Execution message (Stage 3) with MCP tool calls to current cycle."""
|
| 1735 |
+
if not recommendations:
|
| 1736 |
+
content = f"### ⚡ Stage 3 · Execution `[Tick {tick}]`\n\n✅ **No actions required.** Current deployments already cover every fire."
|
| 1737 |
+
content += "\n\n---\n"
|
| 1738 |
+
self._current_cycle_messages.append({"role": "assistant", "content": content})
|
| 1739 |
+
self._record_action_breakdown(tick, 0, 0, 0)
|
| 1740 |
+
return
|
| 1741 |
+
|
| 1742 |
+
move_count = sum(1 for r in recommendations if getattr(r, "action", "deploy") == "move")
|
| 1743 |
+
replace_count = sum(1 for r in recommendations if getattr(r, "action", "deploy") == "replace")
|
| 1744 |
+
deploy_count = len(recommendations) - move_count - replace_count
|
| 1745 |
+
|
| 1746 |
+
summary = []
|
| 1747 |
+
if move_count:
|
| 1748 |
+
summary.append(f"- 🔄 Reposition {move_count} idle unit(s)")
|
| 1749 |
+
if replace_count:
|
| 1750 |
+
summary.append(f"- 🔁 Replace {replace_count} unit(s)")
|
| 1751 |
+
if deploy_count:
|
| 1752 |
+
summary.append(f"- 🚀 Deploy {deploy_count} additional unit(s)")
|
| 1753 |
+
|
| 1754 |
+
content = f"### ⚡ Stage 3 · Execution `[Tick {tick}]`\n\n" + "\n".join(summary) + "\n\n#### 🔧 MCP Tool Actions\n"
|
| 1755 |
+
|
| 1756 |
+
for idx, rec in enumerate(recommendations, 1):
|
| 1757 |
+
unit_emoji = "🚒" if rec.suggested_unit_type == "fire_truck" else "🚁"
|
| 1758 |
+
unit_name = "Fire Truck" if rec.suggested_unit_type == "fire_truck" else "Helicopter"
|
| 1759 |
+
action = getattr(rec, "action", "deploy")
|
| 1760 |
+
|
| 1761 |
+
if action == "move":
|
| 1762 |
+
source_x = getattr(rec, "source_x", 0)
|
| 1763 |
+
source_y = getattr(rec, "source_y", 0)
|
| 1764 |
+
block = f"""
|
| 1765 |
+
<details>
|
| 1766 |
+
<summary>{idx}. 🔄 Move {unit_name} from ({source_x}, {source_y}) → ({rec.target_x}, {rec.target_y})</summary>
|
| 1767 |
+
|
| 1768 |
+
```python
|
| 1769 |
+
mcp.move_unit(
|
| 1770 |
+
source_x={source_x}, source_y={source_y},
|
| 1771 |
+
target_x={rec.target_x}, target_y={rec.target_y}
|
| 1772 |
+
)
|
| 1773 |
+
```
|
| 1774 |
+
|
| 1775 |
+
💡 _{rec.reason}_
|
| 1776 |
+
</details>
|
| 1777 |
+
"""
|
| 1778 |
+
elif action == "replace":
|
| 1779 |
+
old_type = getattr(rec, "old_unit_type", "fire_truck")
|
| 1780 |
+
old_name = "Fire Truck" if old_type == "fire_truck" else "Helicopter"
|
| 1781 |
+
old_emoji = "🚒" if old_type == "fire_truck" else "🚁"
|
| 1782 |
+
block = f"""
|
| 1783 |
+
<details>
|
| 1784 |
+
<summary>{idx}. 🔁 Replace {old_name} {old_emoji} with {unit_name} {unit_emoji} at ({rec.target_x}, {rec.target_y})</summary>
|
| 1785 |
+
|
| 1786 |
+
```python
|
| 1787 |
+
mcp.replace_unit(
|
| 1788 |
+
x={rec.target_x}, y={rec.target_y},
|
| 1789 |
+
new_unit_type="{rec.suggested_unit_type}"
|
| 1790 |
+
)
|
| 1791 |
+
```
|
| 1792 |
+
|
| 1793 |
+
💡 _{rec.reason}_
|
| 1794 |
+
</details>
|
| 1795 |
+
"""
|
| 1796 |
+
else:
|
| 1797 |
+
block = f"""
|
| 1798 |
+
<details>
|
| 1799 |
+
<summary>{idx}. {unit_emoji} Deploy {unit_name} to ({rec.target_x}, {rec.target_y})</summary>
|
| 1800 |
+
|
| 1801 |
+
```python
|
| 1802 |
+
mcp.deploy_unit(
|
| 1803 |
+
unit_type="{rec.suggested_unit_type}",
|
| 1804 |
+
x={rec.target_x}, y={rec.target_y}
|
| 1805 |
+
)
|
| 1806 |
+
```
|
| 1807 |
+
|
| 1808 |
+
💡 _{rec.reason}_
|
| 1809 |
+
</details>
|
| 1810 |
+
"""
|
| 1811 |
+
content += "\n" + block.strip() + "\n"
|
| 1812 |
+
|
| 1813 |
+
content += "\n\n---\n"
|
| 1814 |
+
|
| 1815 |
+
self._current_cycle_messages.append({"role": "assistant", "content": content})
|
| 1816 |
+
self._record_action_breakdown(tick, deploy_count, move_count, replace_count)
|
| 1817 |
+
|
| 1818 |
+
def _add_summary_cycle_message(self, cycle_summary: CycleSummary, tick: int):
|
| 1819 |
+
"""Add Stage 4 summary message to the current cycle."""
|
| 1820 |
+
highlights = "\n".join(f"- {item}" for item in cycle_summary.key_highlights) or "- (none)"
|
| 1821 |
+
risks = "\n".join(f"- {item}" for item in cycle_summary.risks) or "- (none)"
|
| 1822 |
+
next_focus = "\n".join(f"- {item}" for item in cycle_summary.next_focus) or "- (none)"
|
| 1823 |
+
content = f"""
|
| 1824 |
+
### 🧭 Stage 4 · Summary `[Tick {tick}]`
|
| 1825 |
+
|
| 1826 |
+
**Headline:** {cycle_summary.headline}
|
| 1827 |
+
|
| 1828 |
+
**Threat Level:** {cycle_summary.threat_level}
|
| 1829 |
+
|
| 1830 |
+
**Key Highlights**
|
| 1831 |
+
{highlights}
|
| 1832 |
+
|
| 1833 |
+
**Risks / Gaps**
|
| 1834 |
+
{risks}
|
| 1835 |
+
|
| 1836 |
+
**Next Focus**
|
| 1837 |
+
{next_focus}
|
| 1838 |
+
""".strip()
|
| 1839 |
+
content += "\n\n---\n"
|
| 1840 |
+
self._current_cycle_messages.append({"role": "assistant", "content": content})
|
| 1841 |
+
|
| 1842 |
+
def _build_advisor_response(self, assessment: AssessmentResult, plan: PlanResult, recommendations: list) -> AdvisorResponse:
|
| 1843 |
+
"""Build the final AdvisorResponse object."""
|
| 1844 |
+
|
| 1845 |
+
# Build thinking summary
|
| 1846 |
+
thinking_parts = [
|
| 1847 |
+
f"📊 Scanning {assessment.fire_count} active fires...",
|
| 1848 |
+
]
|
| 1849 |
+
if assessment.uncovered_fires:
|
| 1850 |
+
thinking_parts.append(f"🚨 ALERT: {len(assessment.uncovered_fires)} fire(s) with NO coverage!")
|
| 1851 |
+
if assessment.building_threats:
|
| 1852 |
+
thinking_parts.append(f"🏢 {len(assessment.building_threats)} fire(s) threatening buildings!")
|
| 1853 |
+
if assessment.ineffective_units:
|
| 1854 |
+
thinking_parts.append(f"🔄 {len(assessment.ineffective_units)} idle unit(s) should be repositioned")
|
| 1855 |
+
thinking_parts.append(f"🎯 Strategy: {plan.strategy.upper()} - {plan.reasoning}")
|
| 1856 |
+
|
| 1857 |
+
# Generate summary
|
| 1858 |
+
priority_emoji = {"CRITICAL": "🔴", "HIGH": "🟠", "MODERATE": "🟡", "LOW": "🟢"}
|
| 1859 |
+
emoji = priority_emoji.get(assessment.threat_level, "⚪")
|
| 1860 |
+
|
| 1861 |
+
if assessment.threat_level == "CRITICAL":
|
| 1862 |
+
summary = f"{emoji} CRITICAL: {assessment.summary}. Immediate action required!"
|
| 1863 |
+
elif assessment.threat_level == "HIGH":
|
| 1864 |
+
summary = f"{emoji} HIGH: {assessment.summary}. Rapid response needed."
|
| 1865 |
+
elif assessment.threat_level == "MODERATE":
|
| 1866 |
+
summary = f"{emoji} MODERATE: {assessment.summary}. Tactical deployment advised."
|
| 1867 |
+
else:
|
| 1868 |
+
summary = f"{emoji} LOW: {assessment.summary}. Monitoring situation."
|
| 1869 |
+
|
| 1870 |
+
return AdvisorResponse(
|
| 1871 |
+
summary=summary,
|
| 1872 |
+
recommendations=recommendations,
|
| 1873 |
+
thinking="\n".join(thinking_parts),
|
| 1874 |
+
analysis=f"{assessment.fire_count} fires | {assessment.unit_count}/{assessment.max_units} units | {assessment.building_integrity:.0%} building integrity",
|
| 1875 |
+
priority=assessment.threat_level,
|
| 1876 |
+
assessment=assessment,
|
| 1877 |
+
plan=plan
|
| 1878 |
+
)
|
| 1879 |
+
|
| 1880 |
+
def _execute_recommendations(self, response: AdvisorResponse, tick: int):
|
| 1881 |
+
"""Execute AI recommendations (must be called with lock held)."""
|
| 1882 |
+
executed_count = 0
|
| 1883 |
+
for rec in response.recommendations:
|
| 1884 |
+
# Create unique key for this recommendation
|
| 1885 |
+
action = getattr(rec, 'action', 'deploy')
|
| 1886 |
+
rec_key = f"{tick}_{action}_{rec.suggested_unit_type}_{rec.target_x}_{rec.target_y}"
|
| 1887 |
+
|
| 1888 |
+
# Skip if already executed
|
| 1889 |
+
if rec_key in self._executed_recommendations:
|
| 1890 |
+
continue
|
| 1891 |
+
|
| 1892 |
+
if action == "move":
|
| 1893 |
+
# Move action: remove from source, deploy at target
|
| 1894 |
+
source_x = getattr(rec, 'source_x', -1)
|
| 1895 |
+
source_y = getattr(rec, 'source_y', -1)
|
| 1896 |
+
|
| 1897 |
+
if source_x >= 0 and source_y >= 0:
|
| 1898 |
+
# First remove the unit from source
|
| 1899 |
+
remove_result = self.engine.remove_unit_at(source_x, source_y)
|
| 1900 |
+
|
| 1901 |
+
if remove_result.get("status") == "ok":
|
| 1902 |
+
# Then deploy at target
|
| 1903 |
+
deploy_result = self.engine.deploy_unit(rec.suggested_unit_type, rec.target_x, rec.target_y, "ai")
|
| 1904 |
+
|
| 1905 |
+
if deploy_result.get("status") == "ok":
|
| 1906 |
+
executed_count += 1
|
| 1907 |
+
unit_name = "Fire Truck" if rec.suggested_unit_type == "fire_truck" else "Helicopter"
|
| 1908 |
+
self._add_log(
|
| 1909 |
+
"deploy",
|
| 1910 |
+
f"🤖 AI moved {unit_name}: ({source_x},{source_y}) → ({rec.target_x},{rec.target_y})",
|
| 1911 |
+
{"unit": deploy_result.get("unit"), "source": "ai", "reason": rec.reason, "action": "move"}
|
| 1912 |
+
)
|
| 1913 |
+
self._executed_recommendations.add(rec_key)
|
| 1914 |
+
else:
|
| 1915 |
+
# Deploy failed, try to restore the removed unit
|
| 1916 |
+
self.engine.deploy_unit(rec.suggested_unit_type, source_x, source_y, "ai")
|
| 1917 |
+
self._add_log(
|
| 1918 |
+
"error",
|
| 1919 |
+
f"🤖 AI move failed: {deploy_result.get('message')} - restored unit at ({source_x},{source_y})"
|
| 1920 |
+
)
|
| 1921 |
+
else:
|
| 1922 |
+
self._add_log(
|
| 1923 |
+
"error",
|
| 1924 |
+
f"🤖 AI move failed: No unit at source ({source_x},{source_y})"
|
| 1925 |
+
)
|
| 1926 |
+
elif action == "remove":
|
| 1927 |
+
# Remove action: remove unit at position (frees slot for redeployment elsewhere)
|
| 1928 |
+
remove_result = self.engine.remove_unit_at(rec.target_x, rec.target_y)
|
| 1929 |
+
|
| 1930 |
+
if remove_result.get("status") == "ok":
|
| 1931 |
+
executed_count += 1
|
| 1932 |
+
unit_name = "Fire Truck" if rec.suggested_unit_type == "fire_truck" else "Helicopter"
|
| 1933 |
+
self._add_log(
|
| 1934 |
+
"deploy",
|
| 1935 |
+
f"🤖 AI removed {unit_name} at ({rec.target_x},{rec.target_y}) - ready to redeploy",
|
| 1936 |
+
{"source": "ai", "reason": rec.reason, "action": "remove"}
|
| 1937 |
+
)
|
| 1938 |
+
self._executed_recommendations.add(rec_key)
|
| 1939 |
+
else:
|
| 1940 |
+
self._add_log(
|
| 1941 |
+
"error",
|
| 1942 |
+
f"🤖 AI remove failed: {remove_result.get('message')} at ({rec.target_x},{rec.target_y})"
|
| 1943 |
+
)
|
| 1944 |
+
else:
|
| 1945 |
+
# Deploy action
|
| 1946 |
+
result = self.engine.deploy_unit(rec.suggested_unit_type, rec.target_x, rec.target_y, "ai")
|
| 1947 |
+
|
| 1948 |
+
if result.get("status") == "ok":
|
| 1949 |
+
executed_count += 1
|
| 1950 |
+
unit_name = "Fire Truck" if rec.suggested_unit_type == "fire_truck" else "Helicopter"
|
| 1951 |
+
self._add_log(
|
| 1952 |
+
"deploy",
|
| 1953 |
+
f"🤖 AI deployed {unit_name} at ({rec.target_x}, {rec.target_y})",
|
| 1954 |
+
{"unit": result.get("unit"), "source": "ai", "reason": rec.reason, "action": "deploy"}
|
| 1955 |
+
)
|
| 1956 |
+
self._executed_recommendations.add(rec_key)
|
| 1957 |
+
else:
|
| 1958 |
+
# Log failure but don't stop
|
| 1959 |
+
self._add_log(
|
| 1960 |
+
"error",
|
| 1961 |
+
f"🤖 AI deploy failed: {result.get('message')} at ({rec.target_x}, {rec.target_y})"
|
| 1962 |
+
)
|
| 1963 |
+
|
| 1964 |
+
# Keep executed recommendations bounded
|
| 1965 |
+
if len(self._executed_recommendations) > 100:
|
| 1966 |
+
self._executed_recommendations = set(list(self._executed_recommendations)[-50:])
|
| 1967 |
+
|
| 1968 |
+
def _add_log(self, event_type: str, message: str, details: Optional[dict] = None):
|
| 1969 |
+
"""Add a log entry (must be called with lock held)."""
|
| 1970 |
+
tick = self.engine.world.tick if self.engine.world else 0
|
| 1971 |
+
|
| 1972 |
+
self._logs.append(LogEntry(
|
| 1973 |
+
timestamp=datetime.now().strftime("%H:%M:%S"),
|
| 1974 |
+
tick=tick,
|
| 1975 |
+
event_type=event_type,
|
| 1976 |
+
message=message,
|
| 1977 |
+
details=details
|
| 1978 |
+
))
|
| 1979 |
+
|
| 1980 |
+
# Keep logs bounded
|
| 1981 |
+
if len(self._logs) > 200:
|
| 1982 |
+
self._logs = self._logs[-100:]
|
| 1983 |
+
|
| 1984 |
+
|
| 1985 |
+
# Global service instance for the app
|
| 1986 |
+
_service: Optional[SimulationService] = None
|
| 1987 |
+
|
| 1988 |
+
|
| 1989 |
+
def get_service() -> SimulationService:
|
| 1990 |
+
"""Get or create the global simulation service."""
|
| 1991 |
+
global _service
|
| 1992 |
+
if _service is None:
|
| 1993 |
+
_service = SimulationService()
|
| 1994 |
+
return _service
|
| 1995 |
+
|
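A minimal usage sketch (not part of this commit) for the `SimulationService` API above: it assumes the Gradio app has already created and started a simulation elsewhere, and simply polls the two change-tracking endpoints the way the dual-timer UI would.

```python
# Hypothetical polling sketch against the SimulationService API shown above.
from service import get_service  # assumes this module is importable as `service`

service = get_service()
service.set_auto_execute(True)   # let the AI advisor execute its own recommendations

# game_timer (~1s): cheap checks that drive grid redraws
game = service.get_game_changes()
if game["grid_changed"]:
    print("grid changed - re-render fire/unit layer")

# ui_timer (~2s): heavier panels (advisor chat, history, event log, buttons)
ui = service.get_ui_changes()
if ui["event_log_changed"]:
    print(ui["event_log"])
if ui["advisor_changed"] and ui["advisor_messages"]:
    print(f"{len(ui['advisor_messages'])} advisor message(s) to display")
```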
simulation.py
ADDED
|
@@ -0,0 +1,338 @@
|
| 1 |
+
"""
|
| 2 |
+
Fire-Rescue - Simulation Core
|
| 3 |
+
|
| 4 |
+
Handles fire spread, unit behavior, and win/lose conditions.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import random
|
| 8 |
+
from typing import Optional
|
| 9 |
+
|
| 10 |
+
from models import (
|
| 11 |
+
WorldState,
|
| 12 |
+
Cell,
|
| 13 |
+
CellType,
|
| 14 |
+
Unit,
|
| 15 |
+
UnitType,
|
| 16 |
+
SimulationStatus,
|
| 17 |
+
Event
|
| 18 |
+
)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class SimulationConfig:
|
| 22 |
+
"""Configuration parameters for the simulation."""
|
| 23 |
+
|
| 24 |
+
# Grid size
|
| 25 |
+
GRID_WIDTH = 10
|
| 26 |
+
GRID_HEIGHT = 10
|
| 27 |
+
|
| 28 |
+
# Fire spread parameters (balanced for playability)
|
| 29 |
+
FIRE_SPREAD_CHANCE = 0.08 # Reduced: chance to spread to adjacent cell per tick
|
| 30 |
+
FIRE_GROWTH_RATE = 0.02 # Reduced: how much fire intensity grows per tick
|
| 31 |
+
FIRE_MAX_INTENSITY = 1.0
|
| 32 |
+
FIRE_DECAY_RATE = 0.01 # Natural decay (very slow)
|
| 33 |
+
|
| 34 |
+
# Damage parameters
|
| 35 |
+
DAMAGE_PER_TICK = 0.01 # Reduced: damage dealt by fire per tick
|
| 36 |
+
|
| 37 |
+
# Unit parameters (stronger firefighting)
|
| 38 |
+
FIRE_TRUCK_RANGE = 1 # Square coverage radius (includes 8 neighboring cells)
|
| 39 |
+
FIRE_TRUCK_POWER = 0.4 # Increased: reduction in fire intensity per action
|
| 40 |
+
HELICOPTER_RANGE = 2 # Square coverage radius (extends two cells in all directions)
|
| 41 |
+
HELICOPTER_POWER = 0.25 # Increased: less powerful but wider coverage
|
| 42 |
+
UNIT_COOLDOWN = 1 # Reduced: ticks between unit actions (faster response)
|
| 43 |
+
|
| 44 |
+
# Win/lose thresholds
|
| 45 |
+
BUILDING_DAMAGE_THRESHOLD = 0.5 # Fail if building integrity < 50%
|
| 46 |
+
FOREST_DAMAGE_THRESHOLD = 0.8 # Fail if forest burn > 80%
|
| 47 |
+
FIRE_SAFE_THRESHOLD = 0.1 # Win if all fires below this intensity
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
class SimulationEngine:
|
| 51 |
+
"""
|
| 52 |
+
Core simulation engine that manages world state updates.
|
| 53 |
+
"""
|
| 54 |
+
|
| 55 |
+
def __init__(self, config: Optional[SimulationConfig] = None):
|
| 56 |
+
self.config = config or SimulationConfig()
|
| 57 |
+
self.world: Optional[WorldState] = None
|
| 58 |
+
|
| 59 |
+
def reset(
|
| 60 |
+
self,
|
| 61 |
+
seed: Optional[int] = None,
|
| 62 |
+
fire_count: int = 4,
|
| 63 |
+
fire_intensity: float = 0.6,
|
| 64 |
+
building_count: int = 16,
|
| 65 |
+
max_units: int = 10
|
| 66 |
+
) -> WorldState:
|
| 67 |
+
"""
|
| 68 |
+
Reset and initialize a new simulation.
|
| 69 |
+
|
| 70 |
+
Args:
|
| 71 |
+
seed: Random seed for reproducibility
|
| 72 |
+
fire_count: Number of initial fire points (1-25)
|
| 73 |
+
fire_intensity: Initial fire intensity (0.0-1.0)
|
| 74 |
+
building_count: Number of buildings to place (1-25)
|
| 75 |
+
max_units: Maximum number of deployable units (1-20)
|
| 76 |
+
"""
|
| 77 |
+
self.world = WorldState(
|
| 78 |
+
width=self.config.GRID_WIDTH,
|
| 79 |
+
height=self.config.GRID_HEIGHT,
|
| 80 |
+
tick=0,
|
| 81 |
+
status=SimulationStatus.RUNNING,
|
| 82 |
+
max_ticks=200,
|
| 83 |
+
max_units=max_units
|
| 84 |
+
)
|
| 85 |
+
|
| 86 |
+
self.world.initialize_grid(
|
| 87 |
+
seed=seed,
|
| 88 |
+
fire_count=fire_count,
|
| 89 |
+
fire_intensity=fire_intensity,
|
| 90 |
+
building_count=building_count
|
| 91 |
+
)
|
| 92 |
+
self.world.calculate_metrics()
|
| 93 |
+
|
| 94 |
+
return self.world
|
| 95 |
+
|
| 96 |
+
def step(self) -> WorldState:
|
| 97 |
+
"""Advance simulation by one tick."""
|
| 98 |
+
if self.world is None:
|
| 99 |
+
raise RuntimeError("Simulation not initialized. Call reset() first.")
|
| 100 |
+
|
| 101 |
+
if self.world.status != SimulationStatus.RUNNING:
|
| 102 |
+
return self.world
|
| 103 |
+
|
| 104 |
+
# 1. Units perform actions FIRST (so deployed units work immediately)
|
| 105 |
+
self._update_units()
|
| 106 |
+
|
| 107 |
+
# 2. Fire spreads and grows
|
| 108 |
+
self._update_fire()
|
| 109 |
+
|
| 110 |
+
# 3. Fire causes damage
|
| 111 |
+
self._update_damage()
|
| 112 |
+
|
| 113 |
+
# 4. Recalculate metrics
|
| 114 |
+
self.world.calculate_metrics()
|
| 115 |
+
|
| 116 |
+
# 5. Increment tick
|
| 117 |
+
self.world.tick += 1
|
| 118 |
+
|
| 119 |
+
# 6. Check win/lose conditions
|
| 120 |
+
self._check_end_conditions()
|
| 121 |
+
|
| 122 |
+
return self.world
|
| 123 |
+
|
| 124 |
+
def _update_fire(self):
|
| 125 |
+
"""Update fire spread and growth."""
|
| 126 |
+
new_fires: list[tuple[int, int, float]] = []
|
| 127 |
+
|
| 128 |
+
for row in self.world.grid:
|
| 129 |
+
for cell in row:
|
| 130 |
+
if cell.fire_intensity > 0:
|
| 131 |
+
# Fire grows
|
| 132 |
+
cell.fire_intensity = min(
|
| 133 |
+
cell.fire_intensity + self.config.FIRE_GROWTH_RATE,
|
| 134 |
+
self.config.FIRE_MAX_INTENSITY
|
| 135 |
+
)
|
| 136 |
+
|
| 137 |
+
# Fire spreads to neighbors
|
| 138 |
+
for dx, dy in [(-1, 0), (1, 0), (0, -1), (0, 1)]:
|
| 139 |
+
nx, ny = cell.x + dx, cell.y + dy
|
| 140 |
+
neighbor = self.world.get_cell(nx, ny)
|
| 141 |
+
|
| 142 |
+
if neighbor and neighbor.fire_intensity == 0 and not neighbor.is_destroyed():
|
| 143 |
+
# Spread chance based on source fire intensity
|
| 144 |
+
spread_chance = self.config.FIRE_SPREAD_CHANCE * cell.fire_intensity
|
| 145 |
+
if random.random() < spread_chance:
|
| 146 |
+
# Initial intensity based on source
|
| 147 |
+
new_intensity = cell.fire_intensity * 0.5
|
| 148 |
+
new_fires.append((nx, ny, new_intensity))
|
| 149 |
+
|
| 150 |
+
# Apply new fires
|
| 151 |
+
for x, y, intensity in new_fires:
|
| 152 |
+
cell = self.world.get_cell(x, y)
|
| 153 |
+
if cell and cell.fire_intensity == 0:
|
| 154 |
+
cell.fire_intensity = intensity
|
| 155 |
+
|
| 156 |
+
def _update_damage(self):
|
| 157 |
+
"""Update damage caused by fire."""
|
| 158 |
+
for row in self.world.grid:
|
| 159 |
+
for cell in row:
|
| 160 |
+
if cell.fire_intensity > 0 and cell.cell_type != CellType.EMPTY:
|
| 161 |
+
# Damage proportional to fire intensity
|
| 162 |
+
damage = self.config.DAMAGE_PER_TICK * cell.fire_intensity
|
| 163 |
+
cell.damage = min(cell.damage + damage, 1.0)
|
| 164 |
+
|
| 165 |
+
def _update_units(self):
|
| 166 |
+
"""Update unit actions (firefighting)."""
|
| 167 |
+
for unit in self.world.units:
|
| 168 |
+
# Decrease cooldown
|
| 169 |
+
if unit.cooldown > 0:
|
| 170 |
+
unit.cooldown -= 1
|
| 171 |
+
continue
|
| 172 |
+
|
| 173 |
+
# Find cells to extinguish
|
| 174 |
+
extinguished = False
|
| 175 |
+
|
| 176 |
+
if unit.unit_type == UnitType.FIRE_TRUCK:
|
| 177 |
+
extinguished = self._fire_truck_action(unit)
|
| 178 |
+
elif unit.unit_type == UnitType.HELICOPTER:
|
| 179 |
+
extinguished = self._helicopter_action(unit)
|
| 180 |
+
|
| 181 |
+
if extinguished:
|
| 182 |
+
unit.cooldown = self.config.UNIT_COOLDOWN
|
| 183 |
+
|
| 184 |
+
def _fire_truck_action(self, unit: Unit) -> bool:
|
| 185 |
+
"""Fire truck extinguishes fires within a square radius (Chebyshev distance)."""
|
| 186 |
+
targets = []
|
| 187 |
+
|
| 188 |
+
# Check cells within square range (including diagonals)
|
| 189 |
+
for dx in range(-self.config.FIRE_TRUCK_RANGE, self.config.FIRE_TRUCK_RANGE + 1):
|
| 190 |
+
for dy in range(-self.config.FIRE_TRUCK_RANGE, self.config.FIRE_TRUCK_RANGE + 1):
|
| 191 |
+
cell = self.world.get_cell(unit.x + dx, unit.y + dy)
|
| 192 |
+
if cell and cell.fire_intensity > 0:
|
| 193 |
+
targets.append(cell)
|
| 194 |
+
|
| 195 |
+
if not targets:
|
| 196 |
+
return False
|
| 197 |
+
|
| 198 |
+
# Target highest intensity fire
|
| 199 |
+
targets.sort(key=lambda c: c.fire_intensity, reverse=True)
|
| 200 |
+
target = targets[0]
|
| 201 |
+
|
| 202 |
+
# Reduce fire
|
| 203 |
+
target.fire_intensity = max(0, target.fire_intensity - self.config.FIRE_TRUCK_POWER)
|
| 204 |
+
|
| 205 |
+
return True
|
| 206 |
+
|
| 207 |
+
def _helicopter_action(self, unit: Unit) -> bool:
|
| 208 |
+
"""Helicopter extinguishes fires within a wider square radius."""
|
| 209 |
+
affected = False
|
| 210 |
+
|
| 211 |
+
for dx in range(-self.config.HELICOPTER_RANGE, self.config.HELICOPTER_RANGE + 1):
|
| 212 |
+
for dy in range(-self.config.HELICOPTER_RANGE, self.config.HELICOPTER_RANGE + 1):
|
| 213 |
+
cell = self.world.get_cell(unit.x + dx, unit.y + dy)
|
| 214 |
+
if cell and cell.fire_intensity > 0:
|
| 215 |
+
cell.fire_intensity = max(0, cell.fire_intensity - self.config.HELICOPTER_POWER)
|
| 216 |
+
affected = True
|
| 217 |
+
|
| 218 |
+
return affected
|
| 219 |
+
|
| 220 |
+
def _check_end_conditions(self):
|
| 221 |
+
"""Check win/lose conditions."""
|
| 222 |
+
# Check time limit
|
| 223 |
+
if self.world.tick >= self.world.max_ticks:
|
| 224 |
+
self.world.status = SimulationStatus.FAIL
|
| 225 |
+
self.world.recent_events.append(Event(
|
| 226 |
+
tick=self.world.tick,
|
| 227 |
+
event_type="simulation_end",
|
| 228 |
+
details={"reason": "time_limit_exceeded"}
|
| 229 |
+
))
|
| 230 |
+
return
|
| 231 |
+
|
| 232 |
+
# Check building damage
|
| 233 |
+
if self.world.building_integrity < (1 - self.config.BUILDING_DAMAGE_THRESHOLD):
|
| 234 |
+
self.world.status = SimulationStatus.FAIL
|
| 235 |
+
self.world.recent_events.append(Event(
|
| 236 |
+
tick=self.world.tick,
|
| 237 |
+
event_type="simulation_end",
|
| 238 |
+
details={"reason": "building_destroyed"}
|
| 239 |
+
))
|
| 240 |
+
return
|
| 241 |
+
|
| 242 |
+
# Check forest damage
|
| 243 |
+
if self.world.forest_burn_ratio > self.config.FOREST_DAMAGE_THRESHOLD:
|
| 244 |
+
self.world.status = SimulationStatus.FAIL
|
| 245 |
+
self.world.recent_events.append(Event(
|
| 246 |
+
tick=self.world.tick,
|
| 247 |
+
event_type="simulation_end",
|
| 248 |
+
details={"reason": "forest_destroyed"}
|
| 249 |
+
))
|
| 250 |
+
return
|
| 251 |
+
|
| 252 |
+
# Check if all fires are extinguished
|
| 253 |
+
fires = self.world.get_fires()
|
| 254 |
+
if not fires or all(f.intensity < self.config.FIRE_SAFE_THRESHOLD for f in fires):
|
| 255 |
+
self.world.status = SimulationStatus.SUCCESS
|
| 256 |
+
self.world.recent_events.append(Event(
|
| 257 |
+
tick=self.world.tick,
|
| 258 |
+
event_type="simulation_end",
|
| 259 |
+
details={"reason": "fire_contained"}
|
| 260 |
+
))
|
| 261 |
+
|
| 262 |
+
def deploy_unit(
|
| 263 |
+
self,
|
| 264 |
+
unit_type: str,
|
| 265 |
+
x: int,
|
| 266 |
+
y: int,
|
| 267 |
+
source: str = "player"
|
| 268 |
+
) -> dict:
|
| 269 |
+
"""Deploy a new unit at the specified position."""
|
| 270 |
+
if self.world is None:
|
| 271 |
+
return {"status": "error", "message": "Simulation not initialized"}
|
| 272 |
+
|
| 273 |
+
if self.world.status != SimulationStatus.RUNNING:
|
| 274 |
+
return {"status": "error", "message": "Simulation is not running"}
|
| 275 |
+
|
| 276 |
+
# Parse unit type
|
| 277 |
+
try:
|
| 278 |
+
utype = UnitType(unit_type)
|
| 279 |
+
except ValueError:
|
| 280 |
+
return {"status": "error", "message": f"Invalid unit type: {unit_type}"}
|
| 281 |
+
|
| 282 |
+
# Check position validity first for better error messages
|
| 283 |
+
if not (0 <= x < self.world.width and 0 <= y < self.world.height):
|
| 284 |
+
return {"status": "error", "message": f"Position ({x}, {y}) is out of bounds"}
|
| 285 |
+
|
| 286 |
+
cell = self.world.get_cell(x, y)
|
| 287 |
+
if cell and cell.fire_intensity > 0:
|
| 288 |
+
return {"status": "error", "message": f"Cannot deploy on burning cell at ({x}, {y})"}
|
| 289 |
+
|
| 290 |
+
if cell and cell.cell_type == CellType.BUILDING:
|
| 291 |
+
return {"status": "error", "message": f"Cannot deploy on building at ({x}, {y})"}
|
| 292 |
+
|
| 293 |
+
# Check unit limit
|
| 294 |
+
if len(self.world.units) >= self.world.max_units:
|
| 295 |
+
return {"status": "error", "message": f"Unit limit reached ({self.world.max_units})"}
|
| 296 |
+
|
| 297 |
+
# Try to add unit
|
| 298 |
+
unit = self.world.add_unit(utype, x, y, source)
|
| 299 |
+
|
| 300 |
+
if unit is None:
|
| 301 |
+
return {"status": "error", "message": "Failed to deploy unit"}
|
| 302 |
+
|
| 303 |
+
return {
|
| 304 |
+
"status": "ok",
|
| 305 |
+
"unit": unit.to_dict()
|
| 306 |
+
}
|
| 307 |
+
|
| 308 |
+
def remove_unit_at(self, x: int, y: int) -> dict:
|
| 309 |
+
"""Remove a unit at the specified position."""
|
| 310 |
+
if self.world is None:
|
| 311 |
+
return {"status": "error", "message": "Simulation not initialized"}
|
| 312 |
+
|
| 313 |
+
# Find unit at position
|
| 314 |
+
unit_to_remove = None
|
| 315 |
+
for unit in self.world.units:
|
| 316 |
+
if unit.x == x and unit.y == y:
|
| 317 |
+
unit_to_remove = unit
|
| 318 |
+
break
|
| 319 |
+
|
| 320 |
+
if unit_to_remove is None:
|
| 321 |
+
return {"status": "error", "message": f"No unit at ({x}, {y})"}
|
| 322 |
+
|
| 323 |
+
# Remove the unit
|
| 324 |
+
self.world.units.remove(unit_to_remove)
|
| 325 |
+
|
| 326 |
+
return {
|
| 327 |
+
"status": "ok",
|
| 328 |
+
"message": f"Removed {unit_to_remove.unit_type.value} at ({x}, {y})",
|
| 329 |
+
"unit": unit_to_remove.to_dict()
|
| 330 |
+
}
|
| 331 |
+
|
| 332 |
+
def get_state(self) -> dict:
|
| 333 |
+
"""Get current world state as dictionary."""
|
| 334 |
+
if self.world is None:
|
| 335 |
+
return {"status": "error", "message": "Simulation not initialized"}
|
| 336 |
+
|
| 337 |
+
return self.world.to_dict()
|
| 338 |
+
|
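An illustrative headless run of the engine above (not part of the commit); the arguments to `reset()` mirror its documented parameters.

```python
# Minimal headless simulation using only the SimulationEngine API defined above.
from simulation import SimulationEngine

engine = SimulationEngine()
engine.reset(seed=42, fire_count=4, fire_intensity=0.6, building_count=16, max_units=10)

# Deploy a truck near the center; returns {"status": "ok"} or {"status": "error", ...}
result = engine.deploy_unit("fire_truck", 5, 5)

# Advance 50 ticks; step() is a no-op once the simulation has ended.
for _ in range(50):
    engine.step()

state = engine.get_state()
print(state["status"], state.get("tick"))
```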
uv.lock
ADDED
|
The diff for this file is too large to render.
See raw diff