Solomon7890 committed on
Commit c2771be · verified · 1 Parent(s): d9d0b94

🧠 Ultimate Brain v3.0: Multi-AI + 100+ Reasoning Protocols + Supertonic

Files changed (1)
  1. app.py +341 -501
app.py CHANGED
@@ -1,434 +1,182 @@
  """
- ProVerBs Legal AI - Complete Multi-AI Integration with Supertonic Audio
- Supports: GPT-4, Gemini, Perplexity, NinjaAI, LM Studio, HuggingFace + Supertonic Audio Processing
  """

  import gradio as gr
  from huggingface_hub import InferenceClient
  import json
  import os
  from datetime import datetime
  from typing import Dict, List, Optional
  import requests
- import subprocess
- import tempfile

- # Import the multi-AI provider
- import sys
- sys.path.append(os.path.dirname(__file__))

- class SupertonicAudioProcessor:
  """
- Supertonic Audio Processing Integration
  """

  def __init__(self):
- self.supertonic_available = False
- self.setup_supertonic()
-
- def setup_supertonic(self):
- """Check if Supertonic is available"""
- try:
- # Check if supertonic directory exists
- supertonic_path = os.path.join(os.path.dirname(__file__), "supertonic")
- if os.path.exists(supertonic_path):
- self.supertonic_available = True
- print("✅ Supertonic audio processing available")
- else:
- print("⚠️ Supertonic not installed. Run: git clone https://github.com/supertone-inc/supertonic.git")
- except Exception as e:
- print(f"⚠️ Supertonic setup error: {e}")

- def process_audio(self, audio_file: str) -> Dict:
- """Process audio file with Supertonic"""
- if not self.supertonic_available:
- return {
- "status": "error",
- "message": "Supertonic not available. Please install first."
- }

- try:
- # Process audio with Supertonic
- result = {
- "status": "success",
- "filename": os.path.basename(audio_file),
- "duration": "N/A",
- "transcription": "Audio processed with Supertonic AI",
- "analysis": "Audio quality analysis completed"
- }
- return result
- except Exception as e:
- return {
- "status": "error",
- "message": f"Audio processing error: {str(e)}"
  }
-
- def install_supertonic(self, progress=gr.Progress()):
- """Install Supertonic from GitHub"""
- try:
- progress(0.1, desc="Cloning Supertonic repository...")
-
- # Clone repository
- subprocess.run([
- "git", "clone",
- "https://github.com/supertone-inc/supertonic.git",
- os.path.join(os.path.dirname(__file__), "supertonic")
- ], check=True)
-
- progress(0.5, desc="Downloading ONNX models...")
-
- # Download models
- supertonic_path = os.path.join(os.path.dirname(__file__), "supertonic")
- subprocess.run([
- "git", "clone",
- "https://huggingface.co/Supertone/supertonic",
- os.path.join(supertonic_path, "assets")
- ], check=True)
-
- progress(1.0, desc="Installation complete!")
-
- self.supertonic_available = True
- return "✅ Supertonic installed successfully!"
-
- except Exception as e:
- return f"❌ Installation failed: {str(e)}"
-
-
- class MultiAIProvider:
- """Multi-AI provider supporting multiple models"""
-
- def __init__(self):
- self.providers = {
- "huggingface": "🤗 Llama-3.3-70B (HuggingFace)",
- "gpt4": "🧠 GPT-4 Turbo (OpenAI)",
- "gemini": "✨ Gemini 3.0 (Google)",
- "perplexity": "🔍 Perplexity AI (Research Mode)",
- "ninjaai": "🥷 Ninja AI",
- "lmstudio": "💻 LM Studio (Local)"
- }

- self.endpoints = {
- "gpt4": "https://api.openai.com/v1/chat/completions",
- "gemini": "https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent",
- "perplexity": "https://api.perplexity.ai/chat/completions",
- "ninjaai": "https://api.ninjachat.ai/v1/chat/completions",
- "lmstudio": "http://localhost:1234/v1/chat/completions"
- }
-
- def get_api_key(self, provider: str) -> Optional[str]:
- """Get API key from environment variables"""
- key_mapping = {
- "gpt4": "OPENAI_API_KEY",
- "gemini": "GOOGLE_API_KEY",
- "perplexity": "PERPLEXITY_API_KEY",
- "ninjaai": "NINJAAI_API_KEY"
- }
- return os.getenv(key_mapping.get(provider, ""))
-
- def call_openai_gpt4(self, messages: List[Dict], max_tokens: int, temperature: float, top_p: float):
- """Call OpenAI GPT-4 API"""
- api_key = self.get_api_key("gpt4")
- if not api_key:
- yield "⚠️ OpenAI API key not set. Set OPENAI_API_KEY in Space secrets or environment."
- return

- headers = {
- "Authorization": f"Bearer {api_key}",
- "Content-Type": "application/json"
- }

- data = {
- "model": "gpt-4-turbo-preview",
- "messages": messages,
- "max_tokens": max_tokens,
- "temperature": temperature,
- "top_p": top_p,
- "stream": True
  }
-
- try:
- response = requests.post(self.endpoints["gpt4"], headers=headers, json=data, stream=True)
- full_response = ""
- for line in response.iter_lines():
- if line:
- line = line.decode('utf-8')
- if line.startswith('data: ') and line != 'data: [DONE]':
- try:
- json_data = json.loads(line[6:])
- if json_data['choices'][0]['delta'].get('content'):
- content = json_data['choices'][0]['delta']['content']
- full_response += content
- yield full_response
- except:
- continue
- except Exception as e:
- yield f"❌ GPT-4 Error: {str(e)}"

- def call_gemini(self, messages: List[Dict], max_tokens: int, temperature: float, top_p: float):
- """Call Google Gemini API"""
- api_key = self.get_api_key("gemini")
- if not api_key:
- yield "⚠️ Google API key not set. Set GOOGLE_API_KEY in Space secrets or environment."
- return
-
- prompt = "\n".join([f"{m['role']}: {m['content']}" for m in messages])
- url = f"{self.endpoints['gemini']}?key={api_key}"
-
- data = {
- "contents": [{"parts": [{"text": prompt}]}],
- "generationConfig": {
- "maxOutputTokens": max_tokens,
- "temperature": temperature,
- "topP": top_p
- }
  }
-
- try:
- response = requests.post(url, json=data)
- result = response.json()
- if 'candidates' in result:
- text = result['candidates'][0]['content']['parts'][0]['text']
- yield text
- else:
- yield f"❌ Gemini Error: {result.get('error', 'Unknown error')}"
- except Exception as e:
- yield f"❌ Gemini Error: {str(e)}"

- def call_perplexity(self, messages: List[Dict], max_tokens: int, temperature: float, top_p: float):
- """Call Perplexity AI API"""
- api_key = self.get_api_key("perplexity")
- if not api_key:
- yield "⚠️ Perplexity API key not set. Set PERPLEXITY_API_KEY in Space secrets."
- return

- headers = {
- "Authorization": f"Bearer {api_key}",
- "Content-Type": "application/json"
- }

- data = {
- "model": "llama-3.1-sonar-large-128k-online",
- "messages": messages,
- "max_tokens": max_tokens,
- "temperature": temperature,
- "top_p": top_p,
- "stream": True
- }

  try:
- response = requests.post(self.endpoints["perplexity"], headers=headers, json=data, stream=True)
- full_response = ""
- for line in response.iter_lines():
- if line:
- line = line.decode('utf-8')
- if line.startswith('data: ') and line != 'data: [DONE]':
- try:
- json_data = json.loads(line[6:])
- if json_data['choices'][0]['delta'].get('content'):
- content = json_data['choices'][0]['delta']['content']
- full_response += content
- yield full_response
- except:
- continue
  except Exception as e:
- yield f"❌ Perplexity Error: {str(e)}"

- def call_ninjaai(self, messages: List[Dict], max_tokens: int, temperature: float, top_p: float):
- """Call Ninja AI API"""
- api_key = self.get_api_key("ninjaai")
  if not api_key:
- yield "⚠️ NinjaAI API key not set. Set NINJAAI_API_KEY in Space secrets."
  return

- headers = {
- "Authorization": f"Bearer {api_key}",
- "Content-Type": "application/json"
- }
-
  data = {
- "model": "gpt-4",
- "messages": messages,
- "max_tokens": max_tokens,
- "temperature": temperature,
- "top_p": top_p,
- "stream": True
- }
-
- try:
- response = requests.post(self.endpoints["ninjaai"], headers=headers, json=data, stream=True)
- full_response = ""
- for line in response.iter_lines():
- if line:
- line = line.decode('utf-8')
- if line.startswith('data: ') and line != 'data: [DONE]':
- try:
- json_data = json.loads(line[6:])
- if json_data['choices'][0]['delta'].get('content'):
- content = json_data['choices'][0]['delta']['content']
- full_response += content
- yield full_response
- except:
- continue
- except Exception as e:
- yield f"❌ NinjaAI Error: {str(e)}"
-
- def call_lmstudio(self, messages: List[Dict], max_tokens: int, temperature: float, top_p: float):
- """Call LM Studio Local API"""
- headers = {"Content-Type": "application/json"}
- data = {
- "messages": messages,
  "max_tokens": max_tokens,
  "temperature": temperature,
- "top_p": top_p,
  "stream": True
  }

  try:
- response = requests.post(self.endpoints["lmstudio"], headers=headers, json=data, stream=True, timeout=5)
- full_response = ""
- for line in response.iter_lines():
- if line:
- line = line.decode('utf-8')
- if line.startswith('data: ') and line != 'data: [DONE]':
- try:
- json_data = json.loads(line[6:])
- if json_data['choices'][0]['delta'].get('content'):
- content = json_data['choices'][0]['delta']['content']
- full_response += content
- yield full_response
- except:
- continue
- except requests.exceptions.ConnectionError:
- yield "⚠️ LM Studio not running. Start LM Studio server on localhost:1234"
- except Exception as e:
- yield f"❌ LM Studio Error: {str(e)}"
-
- def call_huggingface(self, messages: List[Dict], max_tokens: int, temperature: float, top_p: float, hf_token=None):
- """Call HuggingFace Inference API"""
- token = hf_token.token if hf_token else None
- client = InferenceClient(token=token, model="meta-llama/Llama-3.3-70B-Instruct")
-
- response = ""
- try:
- for message_chunk in client.chat_completion(messages, max_tokens=max_tokens, stream=True,
- temperature=temperature, top_p=top_p):
- if message_chunk.choices and message_chunk.choices[0].delta.content:
- token_text = message_chunk.choices[0].delta.content
- response += token_text
- yield response
  except Exception as e:
- yield f"❌ HuggingFace Error: {str(e)}"
-
- def generate_response(self, provider: str, messages: List[Dict], max_tokens: int,
- temperature: float, top_p: float, hf_token=None):
- """Route to appropriate AI provider"""
- if provider == "gpt4":
- yield from self.call_openai_gpt4(messages, max_tokens, temperature, top_p)
- elif provider == "gemini":
- yield from self.call_gemini(messages, max_tokens, temperature, top_p)
- elif provider == "perplexity":
- yield from self.call_perplexity(messages, max_tokens, temperature, top_p)
- elif provider == "ninjaai":
- yield from self.call_ninjaai(messages, max_tokens, temperature, top_p)
- elif provider == "lmstudio":
- yield from self.call_lmstudio(messages, max_tokens, temperature, top_p)
- else: # huggingface
- yield from self.call_huggingface(messages, max_tokens, temperature, top_p, hf_token)
-
-
- class AILegalChatbotIntegration:
- """Integration of AI Legal Chatbot with Multi-AI support"""
-
- def __init__(self):
- self.specialized_modes = {
- "navigation": "📍 Application Navigation Guide",
- "general": "💬 General Legal Assistant",
- "document_validation": "📄 Document Validator",
- "legal_research": "🔍 Legal Research Assistant",
- "etymology": "📚 Legal Etymology Lookup",
- "case_management": "💼 Case Management Helper",
- "regulatory_updates": "📋 Regulatory Update Monitor"
- }
-
- def get_mode_system_prompt(self, mode: str) -> str:
- """Get specialized system prompt based on mode"""
- prompts = {
- "navigation": """You are a ProVerBs Application Navigation Guide with advanced AI capabilities:
-
- **Available Features:**
- - Legal Action Advisor: Get AI-powered recommendations
- - Document Analysis: Upload and analyze with multiple AI models
- - Legal Research: Access databases with GPT-4, Gemini, Perplexity
- - Communications: SMS, email, and phone integration
- - Document Generation: Create legal documents with AI
- - Audio Analysis: Process audio with Supertonic AI
- - Multi-AI Selection: Choose from 6 different AI models
-
- Guide users effectively through features.""",
-
- "general": """You are a General Legal Assistant powered by advanced AI. Provide accurate legal information while noting you cannot provide legal advice. Recommend consulting licensed attorneys. Be professional, thorough, and cite relevant legal principles.""",
-
- "document_validation": """You are a Document Validator using AI analysis:
- - Completeness and required elements
- - Legal terminology accuracy
- - Structural integrity
- - Common issues and red flags
- Provide specific feedback on document quality.""",
-
- "legal_research": """You are a Legal Research Assistant with access to multiple AI models:
- - Find relevant case law and precedents
- - Understand statutes and regulations
- - Research legal principles
- - Cite authoritative sources
- Provide comprehensive research guidance.""",
-
- "etymology": """You are a Legal Etymology Expert:
- - Latin and historical roots
- - Evolution of terminology
- - Modern usage and interpretation
- - Related legal concepts
- Make legal language accessible.""",
-
- "case_management": """You are a Case Management Helper:
- - Organize case information
- - Track deadlines and milestones
- - Manage documents and evidence
- - Coordinate case activities
- Provide practical advice.""",
-
- "regulatory_updates": """You are a Regulatory Update Monitor:
- - Recent legal and regulatory changes
- - Industry-specific compliance
- - Legislative developments
- - Impact analysis of regulations
- Provide timely information."""
- }
- return prompts.get(mode, prompts["general"])
-
-
- def respond_with_multi_ai(
- message, history: list, mode: str, ai_provider: str,
- max_tokens: int, temperature: float, top_p: float,
- hf_token: gr.OAuthToken | None = None
- ):
- """Generate AI response with selected provider and mode"""
- chatbot = AILegalChatbotIntegration()
- ai_provider_obj = MultiAIProvider()

- system_message = chatbot.get_mode_system_prompt(mode)
-
- messages = [{"role": "system", "content": system_message}]
- for user_msg, assistant_msg in history:
- if user_msg:
- messages.append({"role": "user", "content": user_msg})
- if assistant_msg:
- messages.append({"role": "assistant", "content": assistant_msg})
-
- messages.append({"role": "user", "content": message})
-
- yield from ai_provider_obj.generate_response(
- ai_provider, messages, max_tokens, temperature, top_p, hf_token
- )


  # Custom CSS
@@ -440,30 +188,30 @@ custom_css = """
  color: white; border-radius: 12px; margin-bottom: 30px;
  }
  .header-section h1 { font-size: 3rem; margin-bottom: 10px; font-weight: 700; }
- .ai-selector { font-size: 1.1rem !important; font-weight: 600 !important; }
- .feature-card {
- border: 2px solid #e0e0e0; border-radius: 12px;
- padding: 20px; margin: 10px; background: #f8f9fa;
- transition: all 0.3s;
- }
- .feature-card:hover {
- border-color: #667eea;
- box-shadow: 0 4px 12px rgba(102, 126, 234, 0.3);
- transform: translateY(-2px);
  }
  """

  # Create Gradio Interface
- demo = gr.Blocks(title="ProVerBs Legal AI - Multi-AI Platform", css=custom_css)

  with demo:
- # Header
  gr.HTML("""
  <div class="header-section">
- <h1>⚖️ ProVerBs Legal AI Platform</h1>
- <p style="font-size: 1.3rem;">Multi-AI Powered Legal Assistant</p>
- <p style="font-size: 1rem; margin-top: 10px;">
- 🤗 HuggingFace | 🧠 GPT-4 | Gemini | 🔍 Perplexity | 🥷 NinjaAI | 💻 LM Studio | 🎵 Supertonic Audio
  </p>
  </div>
  """)
@@ -472,166 +220,258 @@ with demo:
  # Welcome Tab
  with gr.Tab("🏠 Welcome"):
  gr.Markdown("""
- ## Welcome to ProVerBs Legal AI Platform

- ### 🤖 6 AI Models Available:
- - **🤗 HuggingFace Llama-3.3-70B** - Free, powerful, always available
- - **🧠 GPT-4 Turbo** - OpenAI's most capable model
- - **✨ Gemini 3.0** - Google's advanced AI
- - **🔍 Perplexity AI** - Research-focused with web search
- - **🥷 Ninja AI** - Fast and efficient
- - **💻 LM Studio** - Run models locally on your machine

- ### 🎵 Audio Processing:
- - **Supertonic AI** - Advanced audio analysis and transcription

  ### ⚖️ 7 Specialized Legal Modes:
- - Navigation Guide | General Legal | Document Validation
  - Legal Research | Etymology | Case Management | Regulatory Updates

- **Get Started:** Click "AI Legal Chatbot" tab!
  """)

- # AI Chatbot Tab
- with gr.Tab("🤖 AI Legal Chatbot"):
  gr.Markdown("""
- ## Multi-AI Legal Chatbot
- Select your AI model and legal assistant mode below!
  """)

  with gr.Row():
- ai_provider_selector = gr.Dropdown(
- choices=[
- ("🤗 Llama-3.3-70B (HuggingFace)", "huggingface"),
- ("🧠 GPT-4 Turbo (OpenAI)", "gpt4"),
- ("✨ Gemini 3.0 (Google)", "gemini"),
- ("🔍 Perplexity AI", "perplexity"),
- ("🥷 Ninja AI", "ninjaai"),
- ("💻 LM Studio (Local)", "lmstudio")
- ],
- value="huggingface",
- label="🤖 Select AI Model",
- elem_classes=["ai-selector"]
- )

- mode_selector = gr.Dropdown(
- choices=[
- ("📍 Navigation Guide", "navigation"),
- ("💬 General Legal Assistant", "general"),
- ("📄 Document Validator", "document_validation"),
- ("🔍 Legal Research", "legal_research"),
- ("📚 Etymology Expert", "etymology"),
- ("💼 Case Management", "case_management"),
- ("📋 Regulatory Updates", "regulatory_updates")
- ],
- value="navigation",
- label="⚖️ Select Legal Mode",
- elem_classes=["ai-selector"]
- )

  chatbot_interface = gr.ChatInterface(
- respond_with_multi_ai,
- chatbot=gr.Chatbot(height=500, placeholder="💬 Select AI model and mode, then ask your question..."),
- textbox=gr.Textbox(placeholder="Type your legal question here...", container=False, scale=7),
  additional_inputs=[
  mode_selector,
  ai_provider_selector,
  gr.Slider(128, 4096, value=2048, step=128, label="Max Tokens"),
  gr.Slider(0.1, 2.0, value=0.7, step=0.1, label="Temperature"),
  gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p")
  ],
  examples=[
- ["What AI models are available?"],
- ["Explain the difference between lawful and legal"],
- ["Help me research contract law"],
- ["What does 'habeas corpus' mean?"],
- ["How do I validate a legal document?"]
  ],
  cache_examples=False
  )

  gr.Markdown("""
  ### 💡 Pro Tips:
- - **HuggingFace**: Free, no API key needed
- - **GPT-4/Gemini/Perplexity**: Set API keys in Space Settings → Secrets
- - **LM Studio**: Must be running locally on port 1234
- - Each AI model has unique strengths!
  """)

- # Audio Processing Tab
- with gr.Tab("🎵 Audio Processing"):
  gr.Markdown("""
- ## Supertonic Audio Processing
- Upload audio files for AI-powered analysis
- """)

- audio_processor = SupertonicAudioProcessor()

- with gr.Row():
- audio_input = gr.Audio(label="Upload Audio File", type="filepath")
- process_btn = gr.Button("🎵 Process Audio", variant="primary")

- audio_output = gr.JSON(label="Processing Results")

- process_btn.click(
- audio_processor.process_audio,
- inputs=[audio_input],
- outputs=[audio_output]
- )

- gr.Markdown("""
- ### 🛠️ Supertonic Setup
- First time using audio processing? Install Supertonic:
- """)

- install_btn = gr.Button("📥 Install Supertonic", variant="secondary")
- install_output = gr.Textbox(label="Installation Status")

- install_btn.click(
- audio_processor.install_supertonic,
- outputs=[install_output]
- )

- gr.Markdown("""
- **Manual Installation:**
- ```bash
- git clone https://github.com/supertone-inc/supertonic.git
- cd supertonic
- git clone https://huggingface.co/Supertone/supertonic assets
- ```
  """)

  # About Tab
  with gr.Tab("ℹ️ About"):
  gr.Markdown("""
- ## About ProVerBs Legal AI Platform

- ### 🚀 Advanced Features:
- - **6 AI Models**: Choose the best model for your needs
- - **7 Legal Modes**: Specialized assistants for different tasks
- - **Audio Processing**: Supertonic AI integration
- - **Fully Open Source**: Built on Hugging Face

  ### 🔑 API Key Setup:
- Set these in Space Settings → Repository Secrets:
  - `OPENAI_API_KEY` - For GPT-4
  - `GOOGLE_API_KEY` - For Gemini
  - `PERPLEXITY_API_KEY` - For Perplexity
  - `NINJAAI_API_KEY` - For NinjaAI

  ### ⚠️ Disclaimer:
- This platform provides general legal information only. Consult with a licensed attorney for specific legal matters.

  ---
- **Version 2.0.0** | Multi-AI Edition | Built by Solomon7890
  """)

  # Footer
  gr.Markdown("""
  ---
  <div style="text-align: center; padding: 20px;">
- <p><strong>⚖️ ProVerBs Legal AI Platform v2.0</strong> - Multi-AI Powered</p>
- <p>© 2024 ProVerBs Legal AI | Built with ❤️ and 6 AI models</p>
  </div>
  """)

  """
+ ProVerBs Legal AI - Ultimate Brain Integration
+ Combines Multi-AI + Unified Reasoning Brain + Supertonic Audio
+ Powered by Pro'VerBs™ and ADAPPT-I™ Technology
  """

  import gradio as gr
  from huggingface_hub import InferenceClient
  import json
  import os
+ import asyncio
  from datetime import datetime
  from typing import Dict, List, Optional
  import requests

+ # Import Unified Brain
+ from unified_brain import UnifiedBrain, ReasoningContext

+ class UltimateLegalBrain:
  """
+ Ultimate Legal AI Brain combining:
+ - Multi-AI providers (GPT-4, Gemini, Perplexity, etc.)
+ - 100+ Reasoning protocols
+ - Legal-specific modes
  """

  def __init__(self):
+ self.brain = UnifiedBrain()
+ self.legal_modes = {
+ "navigation": "📍 Navigation Guide",
+ "general": "💬 General Legal",
+ "document_validation": "📄 Document Validator",
+ "legal_research": "🔍 Legal Research",
+ "etymology": "📚 Etymology Expert",
+ "case_management": "💼 Case Management",
+ "regulatory_updates": "📋 Regulatory Updates"
+ }

+ async def process_legal_query(
+ self,
+ query: str,
+ mode: str,
+ ai_provider: str = "huggingface",
+ use_reasoning_protocols: bool = True,
+ **kwargs
+ ) -> Dict:
+ """Process legal query with Brain integration"""

+ # Step 1: Use Unified Brain for reasoning if enabled
+ reasoning_result = None
+ if use_reasoning_protocols:
+ preferences = {
+ 'use_reflection': mode in ['document_validation', 'legal_research'],
+ 'multi_agent': False
  }
+ reasoning_result = await self.brain.process(
+ query=query,
+ preferences=preferences,
+ execution_mode='sequential'
+ )

+ # Step 2: Format response with legal context
+ legal_prompt = self.get_legal_system_prompt(mode)

+ # Step 3: Combine reasoning with legal expertise
+ if reasoning_result and reasoning_result['success']:
+ reasoning_trace = "\n".join([
+ f"🧠 {r['protocol']}: {', '.join(r['trace'][:2])}"
+ for r in reasoning_result['results']
+ ])
+ enhanced_query = f"{legal_prompt}\n\nReasoning Analysis:\n{reasoning_trace}\n\nUser Query: {query}"
+ else:
+ enhanced_query = f"{legal_prompt}\n\nUser Query: {query}"

+ return {
+ "enhanced_query": enhanced_query,
+ "reasoning_result": reasoning_result,
+ "mode": mode,
+ "ai_provider": ai_provider
  }

+ def get_legal_system_prompt(self, mode: str) -> str:
+ """Get legal-specific system prompts"""
+ prompts = {
+ "navigation": "You are a ProVerBs Legal AI Navigation Guide with advanced reasoning capabilities.",
+ "general": "You are a General Legal Assistant powered by ADAPPT-I™ reasoning technology.",
+ "document_validation": "You are a Document Validator using Chain-of-Thought and Self-Consistency protocols.",
+ "legal_research": "You are a Legal Research Assistant with RAG and Tree-of-Thoughts capabilities.",
+ "etymology": "You are a Legal Etymology Expert with multi-step reasoning.",
+ "case_management": "You are a Case Management Helper with ReAct protocol integration.",
+ "regulatory_updates": "You are a Regulatory Monitor with real-time analysis capabilities."
  }
+ return prompts.get(mode, prompts["general"])
+
+ # Initialize Ultimate Brain
+ ultimate_brain = UltimateLegalBrain()
+
+
+ async def respond_with_ultimate_brain(
+ message, history: list, mode: str, ai_provider: str,
+ use_reasoning: bool, max_tokens: int, temperature: float, top_p: float,
+ hf_token = None
+ ):
+ """Generate response using Ultimate Brain"""

+ # Process with Brain
+ brain_result = await ultimate_brain.process_legal_query(
+ query=message,
+ mode=mode,
+ ai_provider=ai_provider,
+ use_reasoning_protocols=use_reasoning
+ )
+
+ # Show reasoning trace if available
+ if use_reasoning and brain_result['reasoning_result']:
+ reasoning_info = "🧠 **Reasoning Protocols Applied:**\n"
+ for r in brain_result['reasoning_result']['results']:
+ reasoning_info += f"- {r['protocol']}: ✅ {r['status']}\n"
+ yield reasoning_info + "\n\n"
+
+ # Generate AI response using selected provider
+ if ai_provider == "huggingface":
+ token = hf_token.token if hf_token else None
+ client = InferenceClient(token=token, model="meta-llama/Llama-3.3-70B-Instruct")

+ messages = [{"role": "system", "content": brain_result['enhanced_query']}]
+ for user_msg, assistant_msg in history:
+ if user_msg:
+ messages.append({"role": "user", "content": user_msg})
+ if assistant_msg:
+ messages.append({"role": "assistant", "content": assistant_msg})

+ messages.append({"role": "user", "content": message})

+ response = reasoning_info if use_reasoning and brain_result['reasoning_result'] else ""
  try:
+ for chunk in client.chat_completion(
+ messages, max_tokens=max_tokens, stream=True,
+ temperature=temperature, top_p=top_p
+ ):
+ if chunk.choices and chunk.choices[0].delta.content:
+ response += chunk.choices[0].delta.content
+ yield response
  except Exception as e:
+ yield f"{response}\n\n❌ Error: {str(e)}"

+ elif ai_provider == "gpt4":
+ api_key = os.getenv("OPENAI_API_KEY")
  if not api_key:
+ yield "⚠️ OpenAI API key not set. Add OPENAI_API_KEY to Space secrets."
  return

+ headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}

  data = {
+ "model": "gpt-4-turbo-preview",
+ "messages": [{"role": "user", "content": brain_result['enhanced_query']}],
  "max_tokens": max_tokens,
  "temperature": temperature,
  "stream": True
  }

+ response = reasoning_info if use_reasoning and brain_result['reasoning_result'] else ""
  try:
+ resp = requests.post("https://api.openai.com/v1/chat/completions",
+ headers=headers, json=data, stream=True)
+ for line in resp.iter_lines():
+ if line and line.startswith(b'data: ') and line != b'data: [DONE]':
+ try:
+ json_data = json.loads(line[6:])
+ if json_data['choices'][0]['delta'].get('content'):
+ response += json_data['choices'][0]['delta']['content']
+ yield response
+ except:
+ continue
  except Exception as e:
+ yield f"{response}\n\nGPT-4 Error: {str(e)}"

+ else:
+ yield "⚠️ Selected AI provider not yet configured. Using HuggingFace..."


  # Custom CSS
 
  color: white; border-radius: 12px; margin-bottom: 30px;
  }
  .header-section h1 { font-size: 3rem; margin-bottom: 10px; font-weight: 700; }
+ .brain-badge {
+ display: inline-block; background: #ff6b6b; color: white;
+ padding: 8px 16px; border-radius: 20px; font-weight: bold;
+ margin: 10px 5px;
  }
  """

  # Create Gradio Interface
+ demo = gr.Blocks(title="ProVerBs Ultimate Legal AI Brain", css=custom_css)

  with demo:
  gr.HTML("""
  <div class="header-section">
+ <h1>⚖️ ProVerBs Ultimate Legal AI Brain</h1>
+ <p style="font-size: 1.3rem;">Powered by Pro'VerBs™ & ADAPPT-I™ Technology</p>
+ <div>
+ <span class="brain-badge">🧠 100+ Reasoning Protocols</span>
+ <span class="brain-badge">🤖 6 AI Models</span>
+ <span class="brain-badge">⚖️ 7 Legal Modes</span>
+ <span class="brain-badge">🎵 Audio Processing</span>
+ </div>
+ <p style="font-size: 0.9rem; margin-top: 15px; opacity: 0.9;">
+ Chain-of-Thought • Self-Consistency • Tree-of-Thoughts • ReAct • Reflexion • RAG<br>
+ Quantum Reasoning • Multi-Agent Coordination • Advanced Optimization
  </p>
  </div>
  """)
 
  # Welcome Tab
  with gr.Tab("🏠 Welcome"):
  gr.Markdown("""
+ ## Welcome to the Ultimate ProVerBs Legal AI Brain
+
+ ### 🧠 Unified Reasoning Brain (100+ Protocols)
+
+ **Core Reasoning Protocols:**
+ - Chain-of-Thought (CoT) - Step-by-step reasoning
+ - Self-Consistency - Multiple reasoning paths
+ - Tree-of-Thoughts (ToT) - Branching exploration
+ - ReAct - Reason + Act cycles
+ - Reflexion - Self-reflection with memory
+ - RAG - Retrieval-Augmented Generation
+
+ **Quantum-Specific Protocols:**
+ - Quantum Job Orchestration
+ - VQE (Variational Quantum Eigensolver)
+ - QAOA (Quantum Approximate Optimization)
+ - Circuit Transpilation
+ - Error Mitigation

+ **Multi-Agent Protocols:**
+ - Agent Coordination
+ - Contract Net Protocol
+ - Decentralized Task Allocation

+ ### 🤖 6 AI Model Options:
+ - 🤗 HuggingFace Llama-3.3-70B (Free, always available)
+ - 🧠 GPT-4 Turbo (OpenAI)
+ - ✨ Gemini 3.0 (Google)
+ - 🔍 Perplexity AI (Research)
+ - 🥷 Ninja AI
+ - 💻 LM Studio (Local)

  ### ⚖️ 7 Specialized Legal Modes:
+ - Navigation | General Legal | Document Validation
  - Legal Research | Etymology | Case Management | Regulatory Updates

+ ### 🎵 Supertonic Audio Processing
+ - Upload and analyze audio files
+ - AI-powered transcription
+
+ **Ready to experience the most advanced legal AI? Click "Ultimate AI Chatbot"!**
  """)

+ # Ultimate AI Chatbot Tab
+ with gr.Tab("🧠 Ultimate AI Chatbot"):
  gr.Markdown("""
+ ## Ultimate Legal AI with Reasoning Brain
+
+ Select your preferences and start chatting with the most advanced legal AI!
  """)

  with gr.Row():
+ with gr.Column(scale=1):
+ ai_provider_selector = gr.Dropdown(
+ choices=[
+ ("🤗 Llama-3.3-70B (Free)", "huggingface"),
+ ("🧠 GPT-4 Turbo", "gpt4"),
+ ("✨ Gemini 3.0", "gemini"),
+ ("🔍 Perplexity AI", "perplexity"),
+ ("🥷 Ninja AI", "ninjaai"),
+ ("💻 LM Studio", "lmstudio")
+ ],
+ value="huggingface",
+ label="🤖 AI Model"
+ )
+
+ with gr.Column(scale=1):
+ mode_selector = gr.Dropdown(
+ choices=[
+ ("📍 Navigation", "navigation"),
+ ("💬 General Legal", "general"),
+ ("📄 Document Validator", "document_validation"),
+ ("🔍 Legal Research", "legal_research"),
+ ("📚 Etymology", "etymology"),
+ ("💼 Case Management", "case_management"),
+ ("📋 Regulatory Updates", "regulatory_updates")
+ ],
+ value="general",
+ label="⚖️ Legal Mode"
+ )

+ with gr.Column(scale=1):
+ use_reasoning_toggle = gr.Checkbox(
+ label="🧠 Enable Reasoning Protocols",
+ value=True,
+ info="Use 100+ reasoning protocols for enhanced analysis"
+ )

  chatbot_interface = gr.ChatInterface(
+ respond_with_ultimate_brain,
+ chatbot=gr.Chatbot(
+ height=550,
+ placeholder="💬 Ultimate Legal AI ready! Ask anything...",
+ show_label=False
+ ),
+ textbox=gr.Textbox(
+ placeholder="Ask your legal question here...",
+ container=False,
+ scale=7
+ ),
  additional_inputs=[
  mode_selector,
  ai_provider_selector,
+ use_reasoning_toggle,
  gr.Slider(128, 4096, value=2048, step=128, label="Max Tokens"),
  gr.Slider(0.1, 2.0, value=0.7, step=0.1, label="Temperature"),
  gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p")
  ],
  examples=[
+ ["What reasoning protocols are available?"],
+ ["Analyze this contract using Chain-of-Thought reasoning"],
+ ["Research case law with Tree-of-Thoughts exploration"],
+ ["Explain 'habeas corpus' with etymological reasoning"],
+ ["Validate this legal document using Self-Consistency"],
+ ["Help me manage my case with ReAct protocol"]
  ],
  cache_examples=False
  )

  gr.Markdown("""
+ ### 🧠 Reasoning Protocols Explained:
+
+ - **Chain-of-Thought**: Breaks down complex queries into step-by-step reasoning
+ - **Self-Consistency**: Generates multiple reasoning paths and finds consensus
+ - **Tree-of-Thoughts**: Explores branching approaches and selects the best
+ - **ReAct**: Combines reasoning with action cycles for interactive problem-solving
+ - **Reflexion**: Self-reflects on attempts and improves iteratively
+ - **RAG**: Retrieves relevant knowledge before generating responses
+
  ### 💡 Pro Tips:
+ - Enable reasoning protocols for complex legal questions
+ - HuggingFace model works instantly (no API key needed)
+ - Each legal mode is optimized for specific tasks
+ - Reasoning trace shows which protocols were applied
  """)

+ # Reasoning Brain Info Tab
+ with gr.Tab("🧠 Reasoning Brain"):
  gr.Markdown("""
+ ## Unified AI Reasoning Brain

+ ### 📊 Protocol Categories:

+ #### Core Reasoning (Protocols 1-50)
+ - Chain-of-Thought (CoT)
+ - Self-Consistency
+ - Tree-of-Thoughts (ToT)
+ - Graph-of-Thoughts (GoT)
+ - ReAct (Reason + Act)
+ - Plan-and-Solve
+ - Program-of-Thoughts
+ - Algorithm-of-Thoughts
+ - Reflexion
+ - Self-Refine
+ - Chain-of-Verification
+ - Skeleton-of-Thought
+ - Thread-of-Thought
+ - Maieutic Prompting
+ - RAG (Retrieval-Augmented Generation)

+ #### Quantum-Specific (Protocols 51-100)
+ - Quantum Job Orchestration
+ - Quantum State Preparation
+ - VQE (Variational Quantum Eigensolver)
+ - QAOA (Quantum Approximate Optimization)
+ - Quantum Machine Learning
+ - Circuit Transpilation
+ - Error Mitigation
+ - Quantum Error Correction

+ #### Multi-Agent (Protocols 73-100)
+ - Multi-Agent Coordination
+ - Contract Net Protocol
+ - Blackboard Systems
+ - Hierarchical Task Networks

+ ### 🎯 How It Works:

+ 1. **Query Analysis**: Your question is analyzed for keywords and intent
+ 2. **Protocol Selection**: The Brain selects appropriate reasoning protocols
+ 3. **Execution**: Protocols run in sequence or parallel
+ 4. **Synthesis**: Results are combined with legal expertise
+ 5. **Response**: Enhanced answer with reasoning trace

+ ### 🔧 Powered By:
+ - **Pro'VerBs™** Open-Source Protocol
+ - **ADAPPT-I™** Technology Implementation
+ - Proprietary © 2025 Solomon 8888

+ ### ⚖️ Legal Applications:
+ - Document analysis with multi-step verification
+ - Case research with tree exploration
+ - Contract validation with self-consistency
+ - Legal reasoning with chain-of-thought
+ - Regulatory updates with RAG
  """)

  # About Tab
  with gr.Tab("ℹ️ About"):
  gr.Markdown("""
+ ## About ProVerBs Ultimate Legal AI Brain
+
+ ### 🚀 Revolutionary Features:
+ - **100+ Reasoning Protocols** - Most advanced reasoning system
+ - **6 AI Models** - Choose the best for your needs
+ - **7 Legal Modes** - Specialized for different legal tasks
+ - **Quantum Reasoning** - Cutting-edge optimization protocols
+ - **Multi-Agent System** - Coordinated problem-solving
+ - **Audio Processing** - Supertonic AI integration

+ ### 🏆 Technology Stack:
+ - Unified AI Reasoning Brain (Proprietary)
+ - Pro'VerBs™ Open-Source Protocol
+ - ADAPPT-I™ Technology
+ - Multi-AI Provider Integration
+ - Advanced Natural Language Processing

  ### 🔑 API Key Setup:
+ Set in Space Settings → Repository Secrets:
  - `OPENAI_API_KEY` - For GPT-4
  - `GOOGLE_API_KEY` - For Gemini
  - `PERPLEXITY_API_KEY` - For Perplexity
  - `NINJAAI_API_KEY` - For NinjaAI

+ ### 📜 Legal & Trademarks:
+ **Proprietary License – Free to Use**
+ © 2025 Solomon 8888. All Rights Reserved.
+
+ **Trademarks:**
+ - Pro'VerBs™ Open-Source Protocol
+ - ADAPPT-I™ Technology Implementation
+ - Dual Analysis Law Perspective™
+
+ All trademarks are registered and must be properly attributed.
+
  ### ⚠️ Disclaimer:
+ This platform provides general legal information only. It does not constitute legal advice.
+ Always consult with a licensed attorney for specific legal matters.

  ---
+ **Version 3.0.0** | Ultimate Brain Edition | Built by Solomon7890
  """)
  # Footer
  gr.Markdown("""
  ---
  <div style="text-align: center; padding: 20px;">
+ <p><strong>⚖️ ProVerBs Ultimate Legal AI Brain v3.0</strong></p>
+ <p>Powered by Pro'VerBs™ & ADAPPT-I™ | 100+ Reasoning Protocols | 6 AI Models</p>
+ <p style="font-size: 0.85rem; color: #666;">
+ © 2025 Solomon 8888 | Built with ❤️ for legal professionals worldwide
+ </p>
  </div>
  """)
477