"""Configuration constants and global storage"""
import os
import threading
# Model configurations
MEDSWIN_MODELS = {
"MedSwin DT": "MedSwin/MedSwin-Merged-DaRE-TIES-KD-0.7",
"MedSwin Nsl": "MedSwin/MedSwin-Merged-NuSLERP-KD-0.7",
"MedSwin DL": "MedSwin/MedSwin-Merged-DaRE-Linear-KD-0.7",
"MedSwin Ti": "MedSwin/MedSwin-Merged-TIES-KD-0.7",
"MedSwin TA": "MedSwin/MedSwin-Merged-TA-SFT-0.7",
"MedSwin SFT": "MedSwin/MedSwin-7B-SFT",
"MedSwin KD": "MedSwin/MedSwin-7B-KD",
}
DEFAULT_MEDICAL_MODEL = "MedSwin DT"
EMBEDDING_MODEL = "abhinand/MedEmbed-large-v0.1"
TTS_MODEL = "maya-research/maya1"
HF_TOKEN = os.environ.get("HF_TOKEN")
if not HF_TOKEN:
    raise ValueError("HF_TOKEN not found in environment variables")
# Gemini MCP configuration
GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
GEMINI_MODEL = os.environ.get("GEMINI_MODEL", "gemini-2.5-flash")
GEMINI_MODEL_LITE = os.environ.get("GEMINI_MODEL_LITE", "gemini-2.5-flash-lite")
USE_API = os.environ.get("USE_API", "false").lower() == "true"
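# Illustrative sketch (assumption, not the app's actual wiring): how USE_API might
# gate between the hosted Gemini models and the local MedSwin models. The
# `google-generativeai` client shown here is one possible SDK choice.
def _example_pick_backend():
    if USE_API and GEMINI_API_KEY:
        import google.generativeai as genai
        genai.configure(api_key=GEMINI_API_KEY)
        return genai.GenerativeModel(GEMINI_MODEL)
    return None  # fall back to the local MedSwin pipeline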
# MCP server configuration
script_dir = os.path.dirname(os.path.abspath(__file__))
agent_path = os.path.join(script_dir, "agent.py")
MCP_SERVER_COMMAND = os.environ.get("MCP_SERVER_COMMAND", "python")
_mcp_args_env = os.environ.get("MCP_SERVER_ARGS")
MCP_SERVER_ARGS = _mcp_args_env.split() if _mcp_args_env else [agent_path]
MCP_TOOLS_CACHE_TTL = int(os.environ.get("MCP_TOOLS_CACHE_TTL", "60"))
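# Illustrative sketch (assumption): one way the constants above could be used to
# spawn agent.py as an MCP stdio server. The StdioServerParameters / stdio_client /
# ClientSession API is from the `mcp` Python SDK; the real client lives elsewhere.
async def _example_connect_mcp():
    from mcp import ClientSession, StdioServerParameters
    from mcp.client.stdio import stdio_client
    params = StdioServerParameters(command=MCP_SERVER_COMMAND, args=MCP_SERVER_ARGS)
    async with stdio_client(params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            return await session.list_tools()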
# Global model storage
global_medical_models = {}
global_medical_tokenizers = {}
global_file_info = {}
global_tts_model = None
global_whisper_model = None
global_embed_model = None
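# Illustrative sketch (assumption, not the app's actual loader): how the global
# dictionaries above are typically filled lazily, keyed by the display names in
# MEDSWIN_MODELS. Parameter names follow recent `transformers` releases.
def _example_load_medical_model(name: str = DEFAULT_MEDICAL_MODEL):
    from transformers import AutoModelForCausalLM, AutoTokenizer
    if name not in global_medical_models:
        repo_id = MEDSWIN_MODELS[name]
        global_medical_tokenizers[name] = AutoTokenizer.from_pretrained(repo_id, token=HF_TOKEN)
        global_medical_models[name] = AutoModelForCausalLM.from_pretrained(
            repo_id, token=HF_TOKEN, torch_dtype="auto", device_map="auto"
        )
    return global_medical_models[name], global_medical_tokenizers[name]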
# MCP client storage
global_mcp_session = None
global_mcp_stdio_ctx = None
global_mcp_lock = threading.Lock()
global_mcp_tools_cache = {"timestamp": 0.0, "tools": None}
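# Illustrative sketch (assumption): how the cache above might be consulted under the
# lock, treating MCP_TOOLS_CACHE_TTL as a lifetime in seconds.
def _example_get_cached_tools():
    import time
    with global_mcp_lock:
        cached = global_mcp_tools_cache["tools"]
        if cached is not None and time.time() - global_mcp_tools_cache["timestamp"] < MCP_TOOLS_CACHE_TTL:
            return cached
    return None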
# UI constants
TITLE = "<h1><center>🩺 MedLLM Agent - Medical RAG & Web Search System</center></h1>"
DESCRIPTION = """
<center>
<p><strong>Advanced Medical AI Assistant</strong> powered by MedSwin models</p>
<p>📄 <strong>Document RAG:</strong> Answer based on uploaded medical documents</p>
<p>🌐 <strong>Web Search:</strong> Fetch knowledge from reliable online medical resources</p>
<p>🌍 <strong>Multi-language:</strong> Automatic translation for non-English queries</p>
<p><strong>Tips:</strong> Customise the configuration & system prompt to see the magic!</p>
<p><strong>Note:</strong> If the GPU task is aborted or MedSwin is not ready, please try another model!</p>
</center>
"""
CSS = """
.upload-section {
    max-width: 400px;
    margin: 0 auto;
    padding: 10px;
    border: 2px dashed #ccc;
    border-radius: 10px;
}
.upload-button {
    background: #34c759 !important;
    color: white !important;
    border-radius: 25px !important;
}
.chatbot-container {
    margin-top: 20px;
}
.status-output {
    margin-top: 10px;
    font-size: 14px;
}
.processing-info {
    margin-top: 5px;
    font-size: 12px;
    color: #666;
}
.info-container {
    margin-top: 10px;
    padding: 10px;
    border-radius: 5px;
}
.file-list {
    margin-top: 0;
    max-height: 200px;
    overflow-y: auto;
    padding: 5px;
    border: 1px solid #eee;
    border-radius: 5px;
}
.stats-box {
    margin-top: 10px;
    padding: 10px;
    border-radius: 5px;
    font-size: 12px;
}
.submit-btn {
    background: #1a73e8 !important;
    color: white !important;
    border-radius: 25px !important;
    margin-left: 10px;
    padding: 5px 10px;
    font-size: 16px;
}
.input-row {
    display: flex;
    align-items: center;
}
.recording-timer {
    font-size: 12px;
    color: #666;
    text-align: center;
    margin-top: 5px;
}
.feature-badge {
    display: inline-block;
    padding: 3px 8px;
    margin: 2px;
    border-radius: 12px;
    font-size: 11px;
    font-weight: bold;
}
.badge-rag {
    background: #e3f2fd;
    color: #1976d2;
}
.badge-web {
    background: #f3e5f5;
    color: #7b1fa2;
}
.model-status {
    margin-top: 5px;
    padding: 8px;
    border-radius: 5px;
    font-size: 13px;
    font-weight: 500;
    background-color: #f5f5f5;
    border: 1px solid #e0e0e0;
    overflow-y: auto;
    max-height: 120px;
}
@media (min-width: 768px) {
    .main-container {
        display: flex;
        justify-content: space-between;
        gap: 20px;
    }
    .upload-section {
        flex: 1;
        max-width: 300px;
    }
    .chatbot-container {
        flex: 2;
        margin-top: 0;
    }
}
"""