Spaces:
Sleeping
Sleeping
Actualiza `app.py` para integrar la API de Gemini a través de OpenAI. Se elimina la dependencia de `huggingface_hub` y se refactoriza la función `respond` para manejar mensajes multimodales. Se implementa la función `_extract_text_and_files` para extraer texto y archivos adjuntos de los mensajes. Además, se crea una interfaz de chat personalizada que guía a los usuarios en la creación de claves API de Gmail y Outlook.
3023539
import os
import gradio as gr
from openai import OpenAI
from dotenv import load_dotenv

# Load environment variables from a local .env file, if one is present.
load_dotenv()

# Configure Gemini via OpenAI-compatible endpoint: the Generative Language API
# exposes an OpenAI-style surface at this base URL, so the stock OpenAI client
# can talk to Gemini models directly.
GEMINI_BASE_URL = "https://generativelanguage.googleapis.com/v1beta/openai/"
GEMINI_MODEL = "gemini-2.5-flash"

# The client stays None when GEMINI_API_KEY is missing, so the app can start
# and `respond` can report the misconfiguration instead of crashing at import.
_api_key = os.getenv("GEMINI_API_KEY")
_client = OpenAI(api_key=_api_key, base_url=GEMINI_BASE_URL) if _api_key else None
| def _extract_text_and_files(message): | |
| """Extract user text and attached files from a multimodal message value.""" | |
| if isinstance(message, str): | |
| return message, [] | |
| # Common multimodal shapes: dict with keys, or list of parts | |
| files = [] | |
| text_parts = [] | |
| try: | |
| if isinstance(message, dict): | |
| if "text" in message: | |
| text_parts.append(message.get("text") or "") | |
| if "files" in message and message["files"]: | |
| files = message["files"] or [] | |
| elif isinstance(message, (list, tuple)): | |
| for part in message: | |
| if isinstance(part, str): | |
| text_parts.append(part) | |
| elif isinstance(part, dict): | |
| # Heuristic: file-like dicts may have 'path' or 'name' | |
| if any(k in part for k in ("path", "name", "mime_type")): | |
| files.append(part) | |
| elif "text" in part: | |
| text_parts.append(part.get("text") or "") | |
| except Exception: | |
| pass | |
| text_combined = " ".join([t for t in text_parts if t]) | |
| return text_combined, files | |
def respond(message, history: list[tuple[str, str]]):
    """Stream an assistant reply via Gemini using the OpenAI-compatible API.

    Parameters
    ----------
    message : str | dict | list
        Multimodal message value from the Gradio textbox (text plus optional
        file attachments).
    history : list[tuple[str, str]]
        Prior (user, assistant) turns in Gradio tuples format.

    Yields
    ------
    str
        The accumulated reply text so far, so the UI shows a live stream.
    """
    user_text, files = _extract_text_and_files(message)

    # Fail gracefully when no API key was configured at startup.
    if not _client:
        yield (
            "Gemini API key not configured. Set environment variable GEMINI_API_KEY "
            "and restart the app."
        )
        return

    # Build OpenAI-style messages from history.
    messages = [
        {
            "role": "system",
            "content": (
                "You are a helpful assistant that guides users to create Gmail and Outlook API keys. "
                "Answer in Spanish unless asked otherwise."
            ),
        }
    ]
    for user_turn, assistant_turn in history or []:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    # Mention attached files by name only (this demo does not upload them).
    if files:
        filenames = []
        for f in files:
            if isinstance(f, dict):
                filenames.append(str(f.get("name") or f.get("path") or "file"))
        if filenames:
            user_text = (user_text or "").strip()
            attachment_note = f"[Adjuntos: {', '.join(filenames)}]"
            user_text = f"{user_text}\n\n{attachment_note}" if user_text else attachment_note

    # If the user provided no text, nudge the model with a default prompt.
    final_user_text = user_text or "Quiero ayuda para crear una API Key."
    messages.append({"role": "user", "content": final_user_text})

    try:
        stream = _client.chat.completions.create(
            model=GEMINI_MODEL,
            messages=messages,
            stream=True,
        )
        accumulated = ""
        for chunk in stream:
            try:
                choice = chunk.choices[0]
                delta_text = None
                # OpenAI v1 streaming: text arrives in delta.content.
                if getattr(choice, "delta", None) is not None:
                    delta_text = getattr(choice.delta, "content", None)
                # Fallback: some providers emit message.content in chunks.
                if delta_text is None and getattr(choice, "message", None) is not None:
                    delta_text = choice.message.get("content") if isinstance(choice.message, dict) else None
                if not delta_text:
                    continue
                accumulated += delta_text
                yield accumulated
            except Exception:
                # Skip malformed chunks rather than aborting the whole stream.
                continue
        if not accumulated:
            yield "(Sin contenido de respuesta)"
    except Exception as e:
        # Bug fix: the error message was mojibake ("Ocurri贸"); restore "Ocurrió".
        yield f"Ocurrió un error al llamar a Gemini: {e}"
# Chat UI styled like ChatGPT; the CSS below forces a dark appearance.
# Bug fix: the Spanish strings below were mojibake (驴/贸/铆/茅 instead of ¿/ó/í/é).
chat = gr.ChatInterface(
    fn=respond,
    # default type keeps string message, keeps compatibility across versions
    title="Gmail & Outlook API Helper",
    description="Chat similar a ChatGPT para guiarte en la creación de API Keys.",
    textbox=gr.MultimodalTextbox(file_types=[".pdf", ".txt"]),
    multimodal=True,
    fill_height=True,
    examples=[
        "¿Cómo creo una API Key de Gmail?",
        "Guíame para obtener credenciales de Outlook",
        "¿Qué permisos necesito para enviar correos?",
    ],
    theme=gr.themes.Monochrome(),
    css="""
    /* Force dark appearance similar to ChatGPT */
    :root, .gradio-container { color-scheme: dark; }
    body, .gradio-container { background: #0b0f16; }
    .prose, .gr-text, .gr-form { color: #e5e7eb; }
    /* Chat bubbles */
    .message.user { background: #111827; border-radius: 10px; }
    .message.assistant { background: #0f172a; border-radius: 10px; }
    /* Input */
    textarea, .gr-textbox textarea {
        background: #0f172a !important;
        color: #e5e7eb !important;
        border-color: #1f2937 !important;
    }
    /* Buttons */
    button {
        background: #1f2937 !important;
        color: #e5e7eb !important;
        border: 1px solid #374151 !important;
    }
    button:hover { background: #374151 !important; }
    """,
)

# Launch the Gradio server only when run as a script (not when imported).
if __name__ == "__main__":
    chat.launch()