#!/usr/bin/env python3
# proverbs_ultimate_brain.py
"""
ProVerBs Ultimate Brain with Complete Voice Cloning - FIXED VERSION
Integrates Supertonic voice cloning with all controls (mocked if unavailable).
"""

import sys
import os
import asyncio
from typing import Dict, List, Optional, Any, Tuple

# Add current directory to path
sys.path.append(os.path.dirname(__file__))

import gradio as gr

# Optional external client import (may fail; handled below)
try:
    from huggingface_hub import InferenceClient
    HUGGINGFACE_AVAILABLE = True
except Exception:
    HUGGINGFACE_AVAILABLE = False
    InferenceClient = None

import json
from datetime import datetime

# Try importing optional modules with fallbacks
try:
    from unified_brain import UnifiedBrain, ReasoningContext  # type: ignore
    UNIFIED_BRAIN_AVAILABLE = True
except Exception:
    UNIFIED_BRAIN_AVAILABLE = False
    UnifiedBrain = None
    print("Warning: unified_brain module not available - using fallback")

try:
    from performance_optimizer import performance_cache, performance_monitor, with_caching  # type: ignore
    PERFORMANCE_AVAILABLE = True
except Exception:
    PERFORMANCE_AVAILABLE = False
    performance_cache = None
    performance_monitor = None
    print("Warning: performance_optimizer module not available - using fallback")

try:
    from analytics_seo import analytics_tracker, SEOOptimizer  # type: ignore
    ANALYTICS_AVAILABLE = True
except Exception:
    ANALYTICS_AVAILABLE = False
    analytics_tracker = None
    SEOOptimizer = None
    print("Warning: analytics_seo module not available - using fallback")

try:
    from supertonic_voice_module import create_supertonic_interface  # type: ignore
    VOICE_AVAILABLE = True
except Exception:
    VOICE_AVAILABLE = False
    create_supertonic_interface = None
    print("Warning: supertonic_voice_module not available - using fallback")

# =====================================================================
# MOCK CLASSES / FALLBACKS
# =====================================================================

class MockUnifiedBrain:
    """Fallback when unified_brain is not available"""
    async def process(self, query: str, preferences: dict, execution_mode: str):
        return {
            "success": False,
            "results": [],
            "message": "Unified brain module not available"
        }

class MockPerformanceCache:
    def get_stats(self):
        return {"status": "Cache module not available"}
    def clear(self):
        return {"status": "cleared"}

class MockPerformanceMonitor:
    def get_metrics(self):
        return {"status": "Monitor module not available"}

class MockAnalyticsTracker:
    def get_analytics(self):
        return {"status": "Analytics module not available"}

class MockSEOOptimizer:
    @staticmethod
    def get_meta_tags() -> str:
        return '<meta name="description" content="ProVerBs Legal AI">'
    @staticmethod
    def get_structured_data() -> str:
        return '<script type="application/ld+json">{}</script>'

# Initialize mocks if needed
if not UNIFIED_BRAIN_AVAILABLE:
    UnifiedBrain = MockUnifiedBrain

if not PERFORMANCE_AVAILABLE:
    performance_cache = MockPerformanceCache()
    performance_monitor = MockPerformanceMonitor()

if not ANALYTICS_AVAILABLE:
    analytics_tracker = MockAnalyticsTracker()
    SEOOptimizer = MockSEOOptimizer
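
# With every optional module missing the app still boots: the mocks answer
# with status dicts (e.g. {"status": "Cache module not available"}) instead
# of raising, so the Analytics tab and SEO tags degrade gracefully.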

# =====================================================================
# MAIN CLASS DEFINITION
# =====================================================================

class UltimateLegalBrain:
    """Main brain class for legal AI processing"""

    def __init__(self):
        # UnifiedBrain is either the real class or MockUnifiedBrain here;
        # both are plain classes, so instantiation is uniform.
        try:
            self.brain = UnifiedBrain()
        except Exception:
            # Last-resort: use mock instance
            self.brain = MockUnifiedBrain()

        self.legal_modes = {
            "navigation": "πŸ“ Navigation Guide",
            "general": "πŸ’¬ General Legal",
            "document_validation": "πŸ“„ Document Validator",
            "legal_research": "πŸ” Legal Research",
            "etymology": "πŸ“š Etymology Expert",
            "case_management": "πŸ’Ό Case Management",
            "regulatory_updates": "πŸ“‹ Regulatory Updates"
        }
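        # NOTE: these keys mirror the mode_selector dropdown values in the UI
        # below and the prompt table in get_legal_system_prompt; keep all
        # three in sync when adding a mode.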

    async def process_legal_query(
        self,
        query: str,
        mode: str,
        ai_provider: str = "huggingface",
        use_reasoning_protocols: bool = True,
        **kwargs
    ) -> Dict[str, Any]:
        """Process legal query with reasoning protocols"""
        reasoning_result = None

        if use_reasoning_protocols and UNIFIED_BRAIN_AVAILABLE:
            preferences = {
                "use_reflection": mode in ["document_validation", "legal_research"],
                "multi_agent": False
            }
            try:
                reasoning_result = await self.brain.process(
                    query=query,
                    preferences=preferences,
                    execution_mode="sequential"
                )
            except Exception as e:
                print(f"Reasoning error: {e}")
                reasoning_result = None

        legal_prompt = self.get_legal_system_prompt(mode)

        if reasoning_result and reasoning_result.get("success"):
            # produce a short trace if present
            reasoning_trace = "\n".join([
                f"🧠 {r.get('protocol','unknown')}: {', '.join(r.get('trace', [])[:2])}"
                for r in reasoning_result.get("results", [])
            ])
            enhanced_query = f"{legal_prompt}\n\nReasoning Analysis:\n{reasoning_trace}\n\nUser Query: {query}"
        else:
            enhanced_query = f"{legal_prompt}\n\nUser Query: {query}"

        return {
            "enhanced_query": enhanced_query,
            "reasoning_result": reasoning_result,
            "mode": mode,
            "ai_provider": ai_provider
        }

    def get_legal_system_prompt(self, mode: str) -> str:
        """Get system prompt for specific legal mode"""
        prompts = {
            "navigation": "You are a ProVerBs Legal AI Navigation Guide with advanced reasoning capabilities.",
            "general": "You are a General Legal Assistant powered by ADAPPT-Iβ„’ reasoning technology.",
            "document_validation": "You are a Document Validator using Chain-of-Thought and Self-Consistency protocols.",
            "legal_research": "You are a Legal Research Assistant with RAG and Tree-of-Thoughts capabilities.",
            "etymology": "You are a Legal Etymology Expert with multi-step reasoning.",
            "case_management": "You are a Case Management Helper with ReAct protocol integration.",
            "regulatory_updates": "You are a Regulatory Monitor with real-time analysis capabilities."
        }
        return prompts.get(mode, prompts["general"])

# =====================================================================
# INITIALIZE BRAIN
# =====================================================================

ultimate_brain = UltimateLegalBrain()
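
# Minimal usage sketch (hypothetical query; requires an asyncio entry point):
#
#   result = asyncio.run(ultimate_brain.process_legal_query(
#       query="What is the statute of limitations for contract claims?",
#       mode="legal_research",
#   ))
#   print(result["enhanced_query"])
#
# process_legal_query swallows reasoning failures and falls back to the bare
# system prompt + query, so "enhanced_query" is always present in the result.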

# =====================================================================
# RESPONSE HANDLER
# =====================================================================

def respond_with_ultimate_brain(
    message: str,
    history: List[Tuple[Optional[str], Optional[str]]],
    mode: str,
    ai_provider: str,
    use_reasoning: bool,
    max_tokens: int,
    temperature: float,
    top_p: float,
    request: Optional[gr.Request] = None
):
    """
    Main response handler - synchronous wrapper for async processing.
    This is a generator that yields partial or complete responses for streaming.
    """
    # Run the async processing in a fresh event loop (to avoid "already running loop" errors)
    brain_result = None
    try:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        brain_result = loop.run_until_complete(
            ultimate_brain.process_legal_query(
                query=message,
                mode=mode,
                ai_provider=ai_provider,
                use_reasoning_protocols=use_reasoning
            )
        )
    except Exception as e:
        # If something fails, yield a warning and continue with fallback prompt
        yield f"⚠️ Reasoning processing error: {str(e)}\n\nContinuing with standard processing...\n\n"
        brain_result = {
            "enhanced_query": ultimate_brain.get_legal_system_prompt(mode) + f"\n\nUser Query: {message}",
            "reasoning_result": None,
            "mode": mode,
            "ai_provider": ai_provider
        }
    finally:
        try:
            loop.close()
        except Exception:
            pass

    # Build reasoning info if available
    reasoning_info = ""
    if use_reasoning and brain_result.get("reasoning_result"):
        reasoning_info = "🧠 **Reasoning Protocols Applied:**\n"
        for r in brain_result["reasoning_result"].get("results", []):
            reasoning_info += f"- {r.get('protocol', 'Unknown')}: βœ… {r.get('status', 'completed')}\n"
        reasoning_info += "\n\n"
        yield reasoning_info

    # Handle the AI provider; currently only huggingface is implemented
    if ai_provider == "huggingface":
        # Build HF client if available, otherwise return the enhanced query as text
        if not HUGGINGFACE_AVAILABLE or InferenceClient is None:
            # Return enhanced query so user can see what would be asked
            yield reasoning_info + brain_result["enhanced_query"]
            return

        try:
            hf_token = os.environ.get("HF_TOKEN")
            # If running in Gradio request context, try to pull token from headers
            if not hf_token and request is not None:
                try:
                    hf_token = request.headers.get("authorization", "").replace("Bearer ", "")
                except Exception:
                    hf_token = None

            if not hf_token:
                yield reasoning_info + "❌ HuggingFace token not found. Set HF_TOKEN environment variable or pass token in request."
                return

            client = InferenceClient(token=hf_token, model="meta-llama/Llama-3.3-70B-Instruct")

            # Build message history
            messages = [{"role": "system", "content": brain_result["enhanced_query"]}]
            for user_msg, assistant_msg in history or []:
                if user_msg:
                    messages.append({"role": "user", "content": user_msg})
                if assistant_msg:
                    messages.append({"role": "assistant", "content": assistant_msg})
            messages.append({"role": "user", "content": message})

            # Stream response using client.chat_completion if available
            response_text = reasoning_info
            # Defensive: check if client has chat_completion attribute
            if hasattr(client, "chat_completion"):
                for chunk in client.chat_completion(
                    messages,
                    max_tokens=max_tokens,
                    stream=True,
                    temperature=temperature,
                    top_p=top_p
                ):
                    # chunk shape may vary; attempt to extract text
                    try:
                        delta = chunk.choices[0].delta
                        # role-only or final stream chunks may carry content=None; coerce to ""
                        content = (delta.content if hasattr(delta, "content") else delta.get("content", "")) or ""
                    except Exception:
                        content = chunk.get("text", "") if isinstance(chunk, dict) else ""
                    response_text += content
                    yield response_text
            else:
                # Clients without chat_completion: fall back to plain
                # text_generation on a flattened prompt (non-streaming);
                # the outer except handles any failure here
                prompt = "\n\n".join(f"{m['role']}: {m['content']}" for m in messages)
                text = client.text_generation(
                    prompt,
                    max_new_tokens=max_tokens,
                    temperature=temperature,
                    top_p=top_p
                )
                response_text += text
                yield response_text

        except Exception as e:
            yield f"{reasoning_info}\n\n❌ HuggingFace API Error: {str(e)}\n\nPlease check your API token or try another provider."
    else:
        # Other providers not yet implemented
        yield f"{reasoning_info}\n\n⚠️ Provider '{ai_provider}' is not yet implemented. Using HuggingFace as fallback or implement your own provider logic."

# =====================================================================
# MOCK VOICE INTERFACE (if supertonic not available)
# =====================================================================

def create_mock_voice_interface():
    """Fallback voice interface when supertonic module is unavailable"""
    # The function must place Gradio components in the current Blocks context.
    gr.Markdown("""
    ## πŸŽ™οΈ Voice Cloning Module

    ⚠️ **Supertonic voice module not found.**

    To enable voice cloning:
    1. Install required dependencies
    2. Add `supertonic_voice_module.py` to your project
    3. Restart the application

    ### Expected Features:
    - Voice recording with professional controls
    - Text-to-speech with voice cloning
    - Audio playback and export
    - Voice profile management
    """)
    with gr.Row():
        gr.Button("🎀 Record (Not Available)", interactive=False)
        gr.Button("⏸️ Pause (Not Available)", interactive=False)
        gr.Button("⏹️ Stop (Not Available)", interactive=False)

# =====================================================================
# CUSTOM CSS & SEO
# =====================================================================

custom_css = """
.gradio-container { max-width: 1400px !important; }
.header-section {
    text-align: center; padding: 40px 20px;
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    color: white; border-radius: 12px; margin-bottom: 30px;
}
.header-section h1 { font-size: 3rem; margin-bottom: 10px; font-weight: 700; }
.brain-badge {
    display: inline-block; background: #ff6b6b; color: white;
    padding: 8px 16px; border-radius: 20px; font-weight: bold;
    margin: 10px 5px;
}
"""

seo_meta = SEOOptimizer.get_meta_tags() if SEOOptimizer is not None else ""
seo_structured = SEOOptimizer.get_structured_data() if SEOOptimizer is not None else ""

# =====================================================================
# GRADIO INTERFACE
# =====================================================================

demo = gr.Blocks(title="ProVerBs Ultimate Legal AI Brain", css=custom_css)

with demo:
    # Add SEO tags (as HTML)
    if seo_meta or seo_structured:
        gr.HTML(seo_meta + seo_structured)

    # Header
    gr.HTML("""
    <div class="header-section">
        <h1>βš–οΈ ProVerBs Ultimate Legal AI Brain</h1>
        <p style="font-size: 1.3rem;">Powered by Pro'VerBsβ„’ & ADAPPT-Iβ„’ Technology</p>
        <div>
            <span class="brain-badge">🧠 100+ Reasoning Protocols</span>
            <span class="brain-badge">πŸ€– 6 AI Models</span>
            <span class="brain-badge">βš–οΈ 7 Legal Modes</span>
            <span class="brain-badge">πŸŽ™οΈ Voice Cloning</span>
        </div>
        <p style="font-size: 0.9rem; margin-top: 15px; opacity: 0.9;">
            Chain-of-Thought β€’ Self-Consistency β€’ Tree-of-Thoughts β€’ ReAct β€’ Reflexion β€’ RAG<br>
            Quantum Reasoning β€’ Multi-Agent β€’ Voice Cloning β€’ Audio Processing
        </p>
    </div>
    """)

    with gr.Tabs():
        # Welcome Tab
        with gr.Tab("🏠 Welcome"):
            gr.Markdown("""
            ## Welcome to the Ultimate ProVerBs Legal AI Brain

            ### 🧠 Unified Reasoning Brain (100+ Protocols)

            **Core Reasoning Protocols:**
            - Chain-of-Thought (CoT) - Step-by-step reasoning
            - Self-Consistency - Multiple reasoning paths
            - Tree-of-Thoughts (ToT) - Branching exploration
            - ReAct - Reason + Act cycles
            - Reflexion - Self-reflection with memory
            - RAG - Retrieval-Augmented Generation

            ### πŸ€– 6 AI Model Options:
            - πŸ€— HuggingFace Llama-3.3-70B (Free, always available if token present)
            - 🧠 GPT-4 Turbo (OpenAI) - Coming Soon
            - ✨ Gemini 3.0 (Google) - Coming Soon
            - πŸ” Perplexity AI (Research) - Coming Soon
            - πŸ₯· Ninja AI - Coming Soon
            - πŸ’» LM Studio (Local) - Coming Soon

            ### βš–οΈ 7 Specialized Legal Modes:
            - Navigation | General Legal | Document Validation
            - Legal Research | Etymology | Case Management | Regulatory Updates

            ### πŸŽ™οΈ Voice Cloning:
            - Record voice samples
            - Clone voices with text-to-speech
            - Professional audio processing
            - Voice profile management

            **Get Started:** Click "πŸ€– AI Legal Chatbot" tab!
            """)

        # AI Chatbot Tab
        with gr.Tab("πŸ€– AI Legal Chatbot"):
            gr.Markdown("## Multi-AI Legal Chatbot\nSelect your AI model and legal assistant mode below!")

            with gr.Row():
                ai_provider_selector = gr.Dropdown(
                    choices=[
                        ("πŸ€— Llama-3.3-70B (Free)", "huggingface"),
                        ("🧠 GPT-4 Turbo", "gpt4"),
                        ("✨ Gemini 3.0", "gemini"),
                        ("πŸ” Perplexity AI", "perplexity"),
                        ("πŸ₯· Ninja AI", "ninjaai"),
                        ("πŸ’» LM Studio", "lmstudio")
                    ],
                    value="huggingface",
                    label="πŸ€– AI Model"
                )

                mode_selector = gr.Dropdown(
                    choices=[
                        ("πŸ“ Navigation", "navigation"),
                        ("πŸ’¬ General Legal", "general"),
                        ("πŸ“„ Document Validator", "document_validation"),
                        ("πŸ” Legal Research", "legal_research"),
                        ("πŸ“š Etymology", "etymology"),
                        ("πŸ’Ό Case Management", "case_management"),
                        ("πŸ“‹ Regulatory Updates", "regulatory_updates")
                    ],
                    value="general",
                    label="βš–οΈ Legal Mode"
                )

                use_reasoning_toggle = gr.Checkbox(
                    label="🧠 Enable Reasoning Protocols",
                    value=True,
                    info="Use 100+ reasoning protocols for enhanced analysis"
                )

            # Chatbot elements: use a Chatbot and Textbox + button to call the generator
            chatbot_display = gr.Chatbot(label="Ultimate Legal AI", height=550)
            user_input = gr.Textbox(placeholder="Ask your legal question here...", lines=2)
            max_tokens_slider = gr.Slider(128, 4096, value=2048, step=128, label="Max Tokens")
            temp_slider = gr.Slider(0.1, 2.0, value=0.7, step=0.1, label="Temperature")
            top_p_slider = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p")
            submit_btn = gr.Button("Send")

            # simple history storage in a state component
            conv_history = gr.State([])

            # Function to call the generator and stream into chatbot
            def chat_send(message, history, mode, ai_provider, use_reasoning, max_tokens, temperature, top_p, request: gr.Request = None):
                # Generator handler: stream each assembled chunk straight into
                # the Chatbot. Gradio treats a yielding handler as a stream
                # (the queue enabled at launch makes this work).
                gen = respond_with_ultimate_brain(
                    message=message,
                    history=history,
                    mode=mode,
                    ai_provider=ai_provider,
                    use_reasoning=use_reasoning,
                    max_tokens=int(max_tokens),
                    temperature=float(temperature),
                    top_p=float(top_p),
                    request=request
                )
                new_history = history[:] if history else []
                new_history.append((message, None))  # assistant slot filled as chunks arrive
                # Each chunk is the full assembled text so far, so overwrite
                # the assistant slot rather than appending.
                for chunk in gen:
                    new_history[-1] = (message, chunk)
                    yield new_history, new_history, ""
                # If the generator yielded nothing, still clear the textbox
                if new_history[-1][1] is None:
                    new_history[-1] = (message, "")
                    yield new_history, new_history, ""

            submit_btn.click(
                fn=chat_send,
                # gr.Request is injected automatically via the type annotation,
                # so it must not appear in the inputs list
                inputs=[user_input, conv_history, mode_selector, ai_provider_selector, use_reasoning_toggle, max_tokens_slider, temp_slider, top_p_slider],
                outputs=[chatbot_display, conv_history, user_input]
            )

        # Voice Cloning Tab
        with gr.Tab("πŸŽ™οΈ Voice Cloning"):
            if VOICE_AVAILABLE and create_supertonic_interface is not None:
                # If the real interface exists, call it
                try:
                    create_supertonic_interface()
                except Exception:
                    # fallback to mock if real module fails at runtime
                    create_mock_voice_interface()
            else:
                create_mock_voice_interface()

        # Analytics Tab
        with gr.Tab("πŸ“Š Analytics"):
            gr.Markdown("## Analytics & Performance Dashboard\nView real-time analytics and performance metrics for the Ultimate Brain.")

            with gr.Row():
                analytics_btn = gr.Button("πŸ“Š Refresh Analytics", variant="primary")
                clear_cache_btn = gr.Button("πŸ—‘οΈ Clear Cache", variant="secondary")

            analytics_output = gr.JSON(label="Analytics Data")
            performance_output = gr.JSON(label="Performance Metrics")
            cache_stats_output = gr.JSON(label="Cache Statistics")

            def refresh_analytics():
                return (
                    analytics_tracker.get_analytics() if analytics_tracker else {"status": "no analytics"},
                    performance_monitor.get_metrics() if performance_monitor else {"status": "no monitor"},
                    performance_cache.get_stats() if performance_cache else {"status": "no cache"}
                )

            analytics_btn.click(
                fn=refresh_analytics,
                inputs=[],
                outputs=[analytics_output, performance_output, cache_stats_output]
            )

            def clear_cache_fn():
                return performance_cache.clear() if performance_cache else {"status": "no cache"}

            clear_cache_btn.click(
                fn=clear_cache_fn,
                inputs=[],
                outputs=[cache_stats_output]
            )

        # About Tab
        with gr.Tab("ℹ️ About"):
            status_text = "βœ… All modules loaded" if all([
                UNIFIED_BRAIN_AVAILABLE,
                PERFORMANCE_AVAILABLE,
                ANALYTICS_AVAILABLE,
                VOICE_AVAILABLE
            ]) else "⚠️ Some modules unavailable (using fallbacks)"

            gr.Markdown(f"""
            ## About ProVerBs Ultimate Legal AI Brain

            ### Status: {status_text}

            **Module Status:**
            - Unified Brain: {"βœ…" if UNIFIED_BRAIN_AVAILABLE else "⚠️ Fallback"}
            - Performance: {"βœ…" if PERFORMANCE_AVAILABLE else "⚠️ Fallback"}
            - Analytics: {"βœ…" if ANALYTICS_AVAILABLE else "⚠️ Fallback"}
            - Voice Cloning: {"βœ…" if VOICE_AVAILABLE else "⚠️ Not Available"}

            ### πŸš€ Revolutionary Features:
            - **100+ Reasoning Protocols** - Most advanced reasoning system
            - **6 AI Models** - Choose the best for your needs
            - **7 Legal Modes** - Specialized for different legal tasks
            - **Voice Cloning** - Professional Supertonic integration (when available)

            ### ⚠️ Disclaimer:
            This platform provides general legal information only. Consult with a licensed attorney for specific legal matters.

            ---
            **Version 3.0.1 FIXED** | Built by Solomon7890
            """)

    # Footer
    gr.Markdown("""
    ---
    <div style="text-align: center; padding: 20px;">
        <p><strong>βš–οΈ ProVerBs Ultimate Legal AI Brain v3.0.1</strong></p>
        <p>Powered by Pro'VerBsβ„’ & ADAPPT-Iβ„’ | 100+ Protocols | 6 AI Models</p>
        <p style="font-size: 0.85rem; color: #666;">
            © 2025 Solomon 8888 | Built with ❀️ for legal professionals worldwide
        </p>
    </div>
    """)

# =====================================================================
# LAUNCH
# =====================================================================

if __name__ == "__main__":
    # queue() is required for the generator-based chat_send handler to stream;
    # tune max_size and launch options as desired
    demo.queue(max_size=20)
    demo.launch(server_name="0.0.0.0", server_port=7860, share=False)