#!/usr/bin/env python3 # proverbs_ultimate_brain.py """ ProVerBs Ultimate Brain with Complete Voice Cloning - FIXED VERSION Integrates Supertonic voice cloning with all controls (mocked if unavailable). """ import sys import os import asyncio from typing import Dict, List, Optional, Any, Tuple # Add current directory to path sys.path.append(os.path.dirname(__file__)) import gradio as gr # Optional external client import (may fail; handled below) try: from huggingface_hub import InferenceClient HUGGINGFACE_AVAILABLE = True except Exception: HUGGINGFACE_AVAILABLE = False InferenceClient = None import json from datetime import datetime # Try importing optional modules with fallbacks try: from unified_brain import UnifiedBrain, ReasoningContext # type: ignore UNIFIED_BRAIN_AVAILABLE = True except Exception: UNIFIED_BRAIN_AVAILABLE = False UnifiedBrain = None print("Warning: unified_brain module not available - using fallback") try: from performance_optimizer import performance_cache, performance_monitor, with_caching # type: ignore PERFORMANCE_AVAILABLE = True except Exception: PERFORMANCE_AVAILABLE = False performance_cache = None performance_monitor = None print("Warning: performance_optimizer module not available - using fallback") try: from analytics_seo import analytics_tracker, SEOOptimizer # type: ignore ANALYTICS_AVAILABLE = True except Exception: ANALYTICS_AVAILABLE = False analytics_tracker = None SEOOptimizer = None print("Warning: analytics_seo module not available - using fallback") try: from supertonic_voice_module import create_supertonic_interface # type: ignore VOICE_AVAILABLE = True except Exception: VOICE_AVAILABLE = False create_supertonic_interface = None print("Warning: supertonic_voice_module not available - using fallback") # ===================================================================== # MOCK CLASSES / FALLBACKS # ===================================================================== class MockUnifiedBrain: """Fallback when unified_brain is not available""" async def process(self, query: str, preferences: dict, execution_mode: str): return { "success": False, "results": [], "message": "Unified brain module not available" } class MockPerformanceCache: def get_stats(self): return {"status": "Cache module not available"} def clear(self): return {"status": "cleared"} class MockPerformanceMonitor: def get_metrics(self): return {"status": "Monitor module not available"} class MockAnalyticsTracker: def get_analytics(self): return {"status": "Analytics module not available"} class MockSEOOptimizer: @staticmethod def get_meta_tags() -> str: return '' @staticmethod def get_structured_data() -> str: return '' # Initialize mocks if needed if not UNIFIED_BRAIN_AVAILABLE: UnifiedBrain = MockUnifiedBrain if not PERFORMANCE_AVAILABLE: performance_cache = MockPerformanceCache() performance_monitor = MockPerformanceMonitor() if not ANALYTICS_AVAILABLE: analytics_tracker = MockAnalyticsTracker() SEOOptimizer = MockSEOOptimizer() # ===================================================================== # MAIN CLASS DEFINITION # ===================================================================== class UltimateLegalBrain: """Main brain class for legal AI processing""" def __init__(self): # if UnifiedBrain is a class or a factory, instantiate; if it's a mock, it is class too try: self.brain = UnifiedBrain() except Exception: # Last-resort: use mock instance self.brain = MockUnifiedBrain() self.legal_modes = { "navigation": "πŸ“ Navigation Guide", "general": "πŸ’¬ General Legal", "document_validation": "πŸ“„ Document Validator", "legal_research": "πŸ” Legal Research", "etymology": "πŸ“š Etymology Expert", "case_management": "πŸ’Ό Case Management", "regulatory_updates": "πŸ“‹ Regulatory Updates" } async def process_legal_query( self, query: str, mode: str, ai_provider: str = "huggingface", use_reasoning_protocols: bool = True, **kwargs ) -> Dict[str, Any]: """Process legal query with reasoning protocols""" reasoning_result = None if use_reasoning_protocols and UNIFIED_BRAIN_AVAILABLE: preferences = { "use_reflection": mode in ["document_validation", "legal_research"], "multi_agent": False } try: reasoning_result = await self.brain.process( query=query, preferences=preferences, execution_mode="sequential" ) except Exception as e: print(f"Reasoning error: {e}") reasoning_result = None legal_prompt = self.get_legal_system_prompt(mode) if reasoning_result and reasoning_result.get("success"): # produce a short trace if present reasoning_trace = "\n".join([ f"🧠 {r.get('protocol','unknown')}: {', '.join(r.get('trace', [])[:2])}" for r in reasoning_result.get("results", []) ]) enhanced_query = f"{legal_prompt}\n\nReasoning Analysis:\n{reasoning_trace}\n\nUser Query: {query}" else: enhanced_query = f"{legal_prompt}\n\nUser Query: {query}" return { "enhanced_query": enhanced_query, "reasoning_result": reasoning_result, "mode": mode, "ai_provider": ai_provider } def get_legal_system_prompt(self, mode: str) -> str: """Get system prompt for specific legal mode""" prompts = { "navigation": "You are a ProVerBs Legal AI Navigation Guide with advanced reasoning capabilities.", "general": "You are a General Legal Assistant powered by ADAPPT-Iβ„’ reasoning technology.", "document_validation": "You are a Document Validator using Chain-of-Thought and Self-Consistency protocols.", "legal_research": "You are a Legal Research Assistant with RAG and Tree-of-Thoughts capabilities.", "etymology": "You are a Legal Etymology Expert with multi-step reasoning.", "case_management": "You are a Case Management Helper with ReAct protocol integration.", "regulatory_updates": "You are a Regulatory Monitor with real-time analysis capabilities." } return prompts.get(mode, prompts["general"]) # ===================================================================== # INITIALIZE BRAIN # ===================================================================== ultimate_brain = UltimateLegalBrain() # ===================================================================== # RESPONSE HANDLER # ===================================================================== def respond_with_ultimate_brain( message: str, history: List[Tuple[Optional[str], Optional[str]]], mode: str, ai_provider: str, use_reasoning: bool, max_tokens: int, temperature: float, top_p: float, request: Optional[gr.Request] = None ): """ Main response handler - synchronous wrapper for async processing. This is a generator that yields partial or complete responses for streaming. """ # Run the async processing in a fresh event loop (to avoid "already running loop" errors) brain_result = None try: loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) brain_result = loop.run_until_complete( ultimate_brain.process_legal_query( query=message, mode=mode, ai_provider=ai_provider, use_reasoning_protocols=use_reasoning ) ) except Exception as e: # If something fails, yield a warning and continue with fallback prompt yield f"⚠️ Reasoning processing error: {str(e)}\n\nContinuing with standard processing...\n\n" brain_result = { "enhanced_query": ultimate_brain.get_legal_system_prompt(mode) + f"\n\nUser Query: {message}", "reasoning_result": None, "mode": mode, "ai_provider": ai_provider } finally: try: loop.close() except Exception: pass # Build reasoning info if available reasoning_info = "" if use_reasoning and brain_result.get("reasoning_result"): reasoning_info = "🧠 **Reasoning Protocols Applied:**\n" for r in brain_result["reasoning_result"].get("results", []): reasoning_info += f"- {r.get('protocol', 'Unknown')}: βœ… {r.get('status', 'completed')}\n" reasoning_info += "\n\n" yield reasoning_info # Handle the AI provider; currently only huggingface is implemented if ai_provider == "huggingface": # Build HF client if available, otherwise return the enhanced query as text if not HUGGINGFACE_AVAILABLE or InferenceClient is None: # Return enhanced query so user can see what would be asked yield reasoning_info + brain_result["enhanced_query"] return try: hf_token = os.environ.get("HF_TOKEN") # If running in Gradio request context, try to pull token from headers if not hf_token and request is not None: try: hf_token = request.headers.get("authorization", "").replace("Bearer ", "") except Exception: hf_token = None if not hf_token: yield reasoning_info + "❌ HuggingFace token not found. Set HF_TOKEN environment variable or pass token in request." return client = InferenceClient(token=hf_token, model="meta-llama/Llama-3.3-70B-Instruct") # Build message history messages = [{"role": "system", "content": brain_result["enhanced_query"]}] for user_msg, assistant_msg in history or []: if user_msg: messages.append({"role": "user", "content": user_msg}) if assistant_msg: messages.append({"role": "assistant", "content": assistant_msg}) messages.append({"role": "user", "content": message}) # Stream response using client.chat_completion if available response_text = reasoning_info # Defensive: check if client has chat_completion attribute if hasattr(client, "chat_completion"): for chunk in client.chat_completion( messages, max_tokens=max_tokens, stream=True, temperature=temperature, top_p=top_p ): # chunk shape may vary; attempt to extract text try: delta = chunk.choices[0].delta content = delta.content if hasattr(delta, "content") else delta.get("content", "") except Exception: content = chunk.get("text", "") if isinstance(chunk, dict) else "" response_text += content yield response_text else: # If no streaming API, perform a single request (defensive) res = client.chat(messages, max_tokens=max_tokens, temperature=temperature, top_p=top_p) text = "" try: text = res.choices[0].message.content except Exception: text = str(res) response_text += text yield response_text except Exception as e: yield f"{reasoning_info}\n\n❌ HuggingFace API Error: {str(e)}\n\nPlease check your API token or try another provider." else: # Other providers not yet implemented yield f"{reasoning_info}\n\n⚠️ Provider '{ai_provider}' is not yet implemented. Using HuggingFace as fallback or implement your own provider logic." # ===================================================================== # MOCK VOICE INTERFACE (if supertonic not available) # ===================================================================== def create_mock_voice_interface(): """Fallback voice interface when supertonic module is unavailable""" # The function must place Gradio components in the current Blocks context. gr.Markdown(""" ## πŸŽ™οΈ Voice Cloning Module ⚠️ **Supertonic voice module not found.** To enable voice cloning: 1. Install required dependencies 2. Add `supertonic_voice_module.py` to your project 3. Restart the application ### Expected Features: - Voice recording with professional controls - Text-to-speech with voice cloning - Audio playback and export - Voice profile management """) with gr.Row(): gr.Button("🎀 Record (Not Available)", interactive=False) gr.Button("⏸️ Pause (Not Available)", interactive=False) gr.Button("⏹️ Stop (Not Available)", interactive=False) # ===================================================================== # CUSTOM CSS & SEO # ===================================================================== custom_css = """ .gradio-container { max-width: 1400px !important; } .header-section { text-align: center; padding: 40px 20px; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; border-radius: 12px; margin-bottom: 30px; } .header-section h1 { font-size: 3rem; margin-bottom: 10px; font-weight: 700; } .brain-badge { display: inline-block; background: #ff6b6b; color: white; padding: 8px 16px; border-radius: 20px; font-weight: bold; margin: 10px 5px; } """ seo_meta = SEOOptimizer.get_meta_tags() if SEOOptimizer is not None else "" seo_structured = SEOOptimizer.get_structured_data() if SEOOptimizer is not None else "" # ===================================================================== # GRADIO INTERFACE # ===================================================================== demo = gr.Blocks(title="ProVerBs Ultimate Legal AI Brain", css=custom_css) with demo: # Add SEO tags (as HTML) if seo_meta or seo_structured: gr.HTML(seo_meta + seo_structured) # Header gr.HTML("""

βš–οΈ ProVerBs Ultimate Legal AI Brain

Powered by Pro'VerBsβ„’ & ADAPPT-Iβ„’ Technology

🧠 100+ Reasoning Protocols πŸ€– 6 AI Models βš–οΈ 7 Legal Modes πŸŽ™οΈ Voice Cloning

Chain-of-Thought β€’ Self-Consistency β€’ Tree-of-Thoughts β€’ ReAct β€’ Reflexion β€’ RAG
Quantum Reasoning β€’ Multi-Agent β€’ Voice Cloning β€’ Audio Processing

""") with gr.Tabs(): # Welcome Tab with gr.Tab("🏠 Welcome"): gr.Markdown(""" ## Welcome to the Ultimate ProVerBs Legal AI Brain ### 🧠 Unified Reasoning Brain (100+ Protocols) **Core Reasoning Protocols:** - Chain-of-Thought (CoT) - Step-by-step reasoning - Self-Consistency - Multiple reasoning paths - Tree-of-Thoughts (ToT) - Branching exploration - ReAct - Reason + Act cycles - Reflexion - Self-reflection with memory - RAG - Retrieval-Augmented Generation ### πŸ€– 6 AI Model Options: - πŸ€— HuggingFace Llama-3.3-70B (Free, always available if token present) - 🧠 GPT-4 Turbo (OpenAI) - Coming Soon - ✨ Gemini 3.0 (Google) - Coming Soon - πŸ” Perplexity AI (Research) - Coming Soon - πŸ₯· Ninja AI - Coming Soon - πŸ’» LM Studio (Local) - Coming Soon ### βš–οΈ 7 Specialized Legal Modes: - Navigation | General Legal | Document Validation - Legal Research | Etymology | Case Management | Regulatory Updates ### πŸŽ™οΈ Voice Cloning: - Record voice samples - Clone voices with text-to-speech - Professional audio processing - Voice profile management **Get Started:** Click "πŸ€– AI Legal Chatbot" tab! """) # AI Chatbot Tab with gr.Tab("πŸ€– AI Legal Chatbot"): gr.Markdown("## Multi-AI Legal Chatbot\nSelect your AI model and legal assistant mode below!") with gr.Row(): ai_provider_selector = gr.Dropdown( choices=[ ("πŸ€— Llama-3.3-70B (Free)", "huggingface"), ("🧠 GPT-4 Turbo", "gpt4"), ("✨ Gemini 3.0", "gemini"), ("πŸ” Perplexity AI", "perplexity"), ("πŸ₯· Ninja AI", "ninjaai"), ("πŸ’» LM Studio", "lmstudio") ], value="huggingface", label="πŸ€– AI Model" ) mode_selector = gr.Dropdown( choices=[ ("πŸ“ Navigation", "navigation"), ("πŸ’¬ General Legal", "general"), ("πŸ“„ Document Validator", "document_validation"), ("πŸ” Legal Research", "legal_research"), ("πŸ“š Etymology", "etymology"), ("πŸ’Ό Case Management", "case_management"), ("πŸ“‹ Regulatory Updates", "regulatory_updates") ], value="general", label="βš–οΈ Legal Mode" ) use_reasoning_toggle = gr.Checkbox( label="🧠 Enable Reasoning Protocols", value=True, info="Use 100+ reasoning protocols for enhanced analysis" ) # Chatbot elements: use a Chatbot and Textbox + button to call the generator chatbot_display = gr.Chatbot(label="Ultimate Legal AI", height=550) user_input = gr.Textbox(placeholder="Ask your legal question here...", lines=2) max_tokens_slider = gr.Slider(128, 4096, value=2048, step=128, label="Max Tokens") temp_slider = gr.Slider(0.1, 2.0, value=0.7, step=0.1, label="Temperature") top_p_slider = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p") submit_btn = gr.Button("Send") # simple history storage in a state component conv_history = gr.State([]) # Function to call the generator and stream into chatbot def chat_send(message, history, mode, ai_provider, use_reasoning, max_tokens, temperature, top_p, request: gr.Request = None): # This helper collects yielded chunks and returns appended list items for chatbot # We'll yield intermediate states from the generator by returning a list of messages gen = respond_with_ultimate_brain( message=message, history=history, mode=mode, ai_provider=ai_provider, use_reasoning=use_reasoning, max_tokens=int(max_tokens), temperature=float(temperature), top_p=float(top_p), request=request ) # Ensure initial user message is added new_history = history[:] if history else [] new_history.append((message, None)) # assistant reply to be filled # Collect stream assistant_text = "" for chunk in gen: assistant_text = chunk # chunk contains full assembled text each yield in our generator # Finalize last message # replace the placeholder None with assistant_text if new_history and new_history[-1][1] is None: new_history[-1] = (new_history[-1][0], assistant_text) return new_history, new_history, "" submit_btn.click( fn=chat_send, inputs=[user_input, conv_history, mode_selector, ai_provider_selector, use_reasoning_toggle, max_tokens_slider, temp_slider, top_p_slider, gr.State()], outputs=[chatbot_display, conv_history, user_input] ) # Voice Cloning Tab with gr.Tab("πŸŽ™οΈ Voice Cloning"): if VOICE_AVAILABLE and create_supertonic_interface is not None: # If the real interface exists, call it try: create_supertonic_interface() except Exception: # fallback to mock if real module fails at runtime create_mock_voice_interface() else: create_mock_voice_interface() # Analytics Tab with gr.Tab("πŸ“Š Analytics"): gr.Markdown("## Analytics & Performance Dashboard\nView real-time analytics and performance metrics for the Ultimate Brain.") with gr.Row(): analytics_btn = gr.Button("πŸ“Š Refresh Analytics", variant="primary") clear_cache_btn = gr.Button("πŸ—‘οΈ Clear Cache", variant="secondary") analytics_output = gr.JSON(label="Analytics Data") performance_output = gr.JSON(label="Performance Metrics") cache_stats_output = gr.JSON(label="Cache Statistics") def refresh_analytics(): return ( analytics_tracker.get_analytics() if analytics_tracker else {"status": "no analytics"}, performance_monitor.get_metrics() if performance_monitor else {"status": "no monitor"}, performance_cache.get_stats() if performance_cache else {"status": "no cache"} ) analytics_btn.click( fn=refresh_analytics, inputs=[], outputs=[analytics_output, performance_output, cache_stats_output] ) def clear_cache_fn(): return performance_cache.clear() if performance_cache else {"status": "no cache"} clear_cache_btn.click( fn=clear_cache_fn, inputs=[], outputs=[cache_stats_output] ) # About Tab with gr.Tab("ℹ️ About"): status_text = "βœ… All modules loaded" if all([ UNIFIED_BRAIN_AVAILABLE, PERFORMANCE_AVAILABLE, ANALYTICS_AVAILABLE, VOICE_AVAILABLE ]) else "⚠️ Some modules unavailable (using fallbacks)" gr.Markdown(f""" ## About ProVerBs Ultimate Legal AI Brain ### Status: {status_text} **Module Status:** - Unified Brain: {"βœ…" if UNIFIED_BRAIN_AVAILABLE else "⚠️ Fallback"} - Performance: {"βœ…" if PERFORMANCE_AVAILABLE else "⚠️ Fallback"} - Analytics: {"βœ…" if ANALYTICS_AVAILABLE else "⚠️ Fallback"} - Voice Cloning: {"βœ…" if VOICE_AVAILABLE else "⚠️ Not Available"} ### πŸš€ Revolutionary Features: - **100+ Reasoning Protocols** - Most advanced reasoning system - **6 AI Models** - Choose the best for your needs - **7 Legal Modes** - Specialized for different legal tasks - **Voice Cloning** - Professional Supertonic integration (when available) ### ⚠️ Disclaimer: This platform provides general legal information only. Consult with a licensed attorney for specific legal matters. --- **Version 3.0.1 FIXED** | Built by Solomon7890 """) # Footer gr.Markdown(""" ---

βš–οΈ ProVerBs Ultimate Legal AI Brain v3.0.1

Powered by Pro'VerBsβ„’ & ADAPPT-Iβ„’ | 100+ Protocols | 6 AI Models

© 2025 Solomon 8888 | Built with ❀️ for legal professionals worldwide

""") # ===================================================================== # LAUNCH # ===================================================================== if __name__ == "__main__": # tune queue size and launch options as desired demo.queue(max_size=20) demo.launch(server_name="0.0.0.0", server_port=7860, share=False)