# Qwen3-VL-Processor: a Hugging Face Space (running on ZeroGPU).
import os
import time
from threading import Thread
from io import BytesIO
from typing import Optional, Tuple, Dict, Any

import gradio as gr
import spaces  # ZeroGPU support on Hugging Face Spaces
import torch
import numpy as np
from PIL import Image
import cv2
import fitz  # PyMuPDF

from transformers import (
    Qwen3VLMoeForConditionalGeneration,
    AutoProcessor,
    TextIteratorStreamer,
)

# Constants for text generation
MAX_MAX_NEW_TOKENS = 4096
DEFAULT_MAX_NEW_TOKENS = 2048

# Let the environment (e.g., Hugging Face Spaces) determine the device.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

print("CUDA_VISIBLE_DEVICES=", os.environ.get("CUDA_VISIBLE_DEVICES"))
print("torch.__version__ =", torch.__version__)
print("torch.version.cuda =", torch.version.cuda)
print("cuda available:", torch.cuda.is_available())
print("cuda device count:", torch.cuda.device_count())
if torch.cuda.is_available():
    print("current device:", torch.cuda.current_device())
    print("device name:", torch.cuda.get_device_name(torch.cuda.current_device()))
print("Using device:", device)

# --- Model Loading ---
# Load Qwen3-VL (30B MoE, instruction-tuned)
MODEL_ID_Q3VL = "Qwen/Qwen3-VL-30B-A3B-Instruct"
processor_q3vl = AutoProcessor.from_pretrained(MODEL_ID_Q3VL, trust_remote_code=True, use_fast=False)
model_q3vl = Qwen3VLMoeForConditionalGeneration.from_pretrained(
    MODEL_ID_Q3VL,
    trust_remote_code=True,
    dtype=torch.float16,
).to(device).eval()
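
# Note: the 30B-A3B MoE checkpoint is large, and a single .to(device) assumes one
# GPU with enough memory for the fp16 weights. A minimal alternative (an assumption,
# not what this Space ships) is to let accelerate place the weights instead:
#   model_q3vl = Qwen3VLMoeForConditionalGeneration.from_pretrained(
#       MODEL_ID_Q3VL, trust_remote_code=True, dtype=torch.float16, device_map="auto"
#   ).eval()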

def downsample_video(video_path):
    """
    Downsamples a video to at most 10 evenly spaced frames, returned as RGB PIL Images.
    """
    vidcap = cv2.VideoCapture(video_path)
    total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
    frames = []
    frame_indices = np.linspace(0, total_frames - 1, min(total_frames, 10), dtype=int)
    for i in frame_indices:
        vidcap.set(cv2.CAP_PROP_POS_FRAMES, i)
        success, image = vidcap.read()
        if success:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # OpenCV decodes BGR; PIL expects RGB
            pil_image = Image.fromarray(image)
            frames.append(pil_image)
    vidcap.release()
    return frames
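
# Example of the sampling behaviour (hypothetical clip): a 300-frame video yields
# indices [0, 33, 66, ..., 299] via np.linspace, i.e. 10 evenly spaced frames;
# clips shorter than 10 frames return every frame.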

def convert_pdf_to_images(file_path: str, dpi: int = 200):
    """
    Converts a PDF file into a list of PIL Images, one per page.
    """
    if not file_path:
        return []
    images = []
    pdf_document = fitz.open(file_path)
    zoom = dpi / 72.0  # PDF points are 1/72 inch, so this scales pages to the target dpi
    mat = fitz.Matrix(zoom, zoom)
    for page_num in range(len(pdf_document)):
        page = pdf_document.load_page(page_num)
        pix = page.get_pixmap(matrix=mat)
        img_data = pix.tobytes("png")
        images.append(Image.open(BytesIO(img_data)))
    pdf_document.close()
    return images
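
# Worked example for the zoom math: at dpi=200, zoom = 200 / 72 ≈ 2.78, so an
# A4 page (595 x 842 pt) renders at roughly 1654 x 2339 px.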

def get_initial_pdf_state() -> Dict[str, Any]:
    """Returns the default initial state for the PDF viewer."""
    return {"pages": [], "total_pages": 0, "current_page_index": 0}

def load_and_preview_pdf(file_path: Optional[str]) -> Tuple[Optional[Image.Image], Dict[str, Any], str]:
    """
    Loads a PDF, converts its pages to images, and prepares the state for preview.
    """
    state = get_initial_pdf_state()
    if not file_path:
        return None, state, '<div class="page-info">No file loaded</div>'
    try:
        pages = convert_pdf_to_images(file_path)
        if not pages:
            return None, state, '<div class="page-info">Could not load file</div>'
        state["pages"] = pages
        state["total_pages"] = len(pages)
        page_info_html = f'<div class="page-info">Page 1 / {state["total_pages"]}</div>'
        return pages[0], state, page_info_html
    except Exception as e:
        return None, state, f'<div class="page-info">Failed to load preview: {e}</div>'

def navigate_pdf_page(direction: str, state: Dict[str, Any]):
    """
    Navigates to the previous or next page in the PDF preview, clamping at the ends.
    """
    if not state or not state["pages"]:
        return None, state, '<div class="page-info">No file loaded</div>'
    current_index = state["current_page_index"]
    total_pages = state["total_pages"]
    if direction == "prev":
        new_index = max(0, current_index - 1)
    elif direction == "next":
        new_index = min(total_pages - 1, current_index + 1)
    else:
        new_index = current_index
    state["current_page_index"] = new_index
    image_preview = state["pages"][new_index]
    page_info_html = f'<div class="page-info">Page {new_index + 1} / {total_pages}</div>'
    return image_preview, state, page_info_html
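
# Example: navigate_pdf_page("next", state) on the last page (or "prev" on the
# first) is a no-op thanks to the min()/max() clamps; any other direction string
# leaves the index unchanged.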

@spaces.GPU  # request a ZeroGPU device for the duration of this call
def generate_image(text: str, image: Image.Image,
                   max_new_tokens: int = 1024,
                   temperature: float = 0.6,
                   top_p: float = 0.9,
                   top_k: int = 50,
                   repetition_penalty: float = 1.2):
    """
    Streams a response for a single image input.
    """
    if image is None:
        yield "Please upload an image.", "Please upload an image."
        return
    messages = [{"role": "user", "content": [{"type": "image"}, {"type": "text", "text": text}]}]
    prompt_full = processor_q3vl.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = processor_q3vl(text=[prompt_full], images=[image], return_tensors="pt", padding=True).to(device)
    streamer = TextIteratorStreamer(processor_q3vl, skip_prompt=True, skip_special_tokens=True)
    generation_kwargs = {
        **inputs, "streamer": streamer, "max_new_tokens": max_new_tokens,
        "do_sample": True, "temperature": temperature, "top_p": top_p,
        "top_k": top_k, "repetition_penalty": repetition_penalty,
    }
    thread = Thread(target=model_q3vl.generate, kwargs=generation_kwargs)
    thread.start()
    buffer = ""
    for new_text in streamer:
        buffer += new_text
        time.sleep(0.01)
        yield buffer, buffer
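
# Streaming pattern used by all three generators: model.generate() runs in a
# background Thread while TextIteratorStreamer yields decoded chunks on this
# thread; each partial buffer is yielded twice so Gradio can update both the
# raw-text and Markdown outputs in lockstep.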

@spaces.GPU
def generate_video(text: str, video_path: str,
                   max_new_tokens: int = 1024,
                   temperature: float = 0.6,
                   top_p: float = 0.9,
                   top_k: int = 50,
                   repetition_penalty: float = 1.2):
    """
    Streams a response for a video input by processing downsampled frames.
    """
    if video_path is None:
        yield "Please upload a video.", "Please upload a video."
        return
    frames = downsample_video(video_path)
    if not frames:
        yield "Could not process video.", "Could not process video."
        return
    # Prepend one image placeholder per sampled frame ahead of the text prompt.
    messages = [{"role": "user", "content": [{"type": "text", "text": text}]}]
    for _ in frames:
        messages[0]["content"].insert(0, {"type": "image"})
    prompt_full = processor_q3vl.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = processor_q3vl(text=[prompt_full], images=frames, return_tensors="pt", padding=True).to(device)
    streamer = TextIteratorStreamer(processor_q3vl, skip_prompt=True, skip_special_tokens=True)
    generation_kwargs = {
        **inputs, "streamer": streamer, "max_new_tokens": max_new_tokens,
        "do_sample": True, "temperature": temperature, "top_p": top_p,
        "top_k": top_k, "repetition_penalty": repetition_penalty,
    }
    thread = Thread(target=model_q3vl.generate, kwargs=generation_kwargs)
    thread.start()
    buffer = ""
    for new_text in streamer:
        buffer += new_text
        buffer = buffer.replace("<|im_end|>", "")
        time.sleep(0.01)
        yield buffer, buffer
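
# Aside: skip_special_tokens=True should already drop "<|im_end|>", so the extra
# buffer.replace() above is defensive rather than strictly required (an
# observation, not verified against every tokenizer version).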

@spaces.GPU
def generate_pdf(text: str, state: Dict[str, Any],
                 max_new_tokens: int = 2048,
                 temperature: float = 0.6,
                 top_p: float = 0.9,
                 top_k: int = 50,
                 repetition_penalty: float = 1.2):
    """
    Processes a PDF page by page using the pre-rendered images from the state,
    streaming an accumulated transcript across pages.
    """
    if not state or not state["pages"]:
        yield "Please upload a PDF file first.", "Please upload a PDF file first."
        return
    page_images = state["pages"]
    full_response = ""
    for i, image in enumerate(page_images):
        page_header = f"--- Page {i+1}/{len(page_images)} ---\n"
        yield full_response + page_header, full_response + page_header
        messages = [{"role": "user", "content": [{"type": "image"}, {"type": "text", "text": text}]}]
        prompt_full = processor_q3vl.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        inputs = processor_q3vl(text=[prompt_full], images=[image], return_tensors="pt", padding=True).to(device)
        streamer = TextIteratorStreamer(processor_q3vl, skip_prompt=True, skip_special_tokens=True)
        generation_kwargs = {
            **inputs, "streamer": streamer, "max_new_tokens": max_new_tokens,
            "do_sample": True, "temperature": temperature, "top_p": top_p,
            "top_k": top_k, "repetition_penalty": repetition_penalty,
        }
        thread = Thread(target=model_q3vl.generate, kwargs=generation_kwargs)
        thread.start()
        page_buffer = ""
        for new_text in streamer:
            page_buffer += new_text
            yield full_response + page_header + page_buffer, full_response + page_header + page_buffer
            time.sleep(0.01)
        full_response += page_header + page_buffer + "\n\n"
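
# Design note: pages are processed sequentially with one generate() call each,
# and full_response accumulates a per-page transcript, so the UI streams a
# running document rather than waiting for the whole PDF to finish.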

# --- Gradio Interface ---
image_examples = [
    ["Describe the safety measures in the image. Conclude (Safe / Unsafe).", "images/5.jpg"],
    ["Convert this page to doc [markdown] precisely.", "images/3.png"],
]
video_examples = [["Explain the video in detail.", "videos/2.mp4"]]
pdf_examples = [["examples/sample-doc.pdf"]]

css = """
:root {
  --input-focus: #2d8cf0;
  --font-color: #323232;
  --font-color-sub: #666;
  --bg-color: beige;
  --main-color: black;
  --form-bg: lightblue;
}
body, .gradio-container {
  font-family: 'Arial Rounded MT Bold', 'Helvetica Rounded', Arial, sans-serif;
  background-color: var(--bg-color);
}
/* Main Form/Card Styling */
.retro-form {
  padding: 20px;
  background: var(--form-bg);
  gap: 20px;
  border-radius: 5px;
  border: 2px solid var(--main-color);
  box-shadow: 4px 4px var(--main-color);
}
/* Title Styling */
.form-title {
  color: var(--font-color);
  font-weight: 900;
  font-size: 20px;
  margin-bottom: 25px;
  text-align: center;
}
/* Input/Textbox Styling */
.gr-textbox, .gr-file {
  border-radius: 5px !important;
  border: 2px solid var(--main-color) !important;
  background-color: var(--bg-color) !important;
  box-shadow: 4px 4px var(--main-color) !important;
}
textarea, input[type=text] {
  font-size: 15px !important;
  font-weight: 600 !important;
  color: var(--font-color) !important;
  padding: 5px 10px !important;
}
.gr-textbox:focus-within, .gr-file:focus-within {
  border: 2px solid var(--input-focus) !important;
}
.gr-label > .label-text {
  font-weight: 900 !important;
  color: var(--font-color) !important;
  margin-bottom: 0.5rem !important;
}
/* Button Styling */
.gr-button {
  border-radius: 5px !important;
  border: 2px solid var(--main-color) !important;
  background-color: var(--bg-color) !important;
  box-shadow: 4px 4px var(--main-color) !important;
  font-size: 17px !important;
  font-weight: 600 !important;
  color: var(--font-color) !important;
  cursor: pointer !important;
  transition: all 0.1s ease-in-out !important;
}
.gr-button:active {
  box-shadow: 0px 0px var(--main-color) !important;
  transform: translate(3px, 3px) !important;
}
/* Tabs Styling */
.tab-nav {
  gap: 0.5rem;
}
.tab-nav > button {
  border: 2px solid var(--main-color) !important;
  box-shadow: 4px 4px var(--main-color) !important;
  background: lightcyan !important; /* slightly different for inactive tabs */
}
.tab-nav > button.selected {
  background: var(--bg-color) !important;
}
.tab-nav > button:active {
  box-shadow: 0px 0px var(--main-color) !important;
  transform: translate(3px, 3px) !important;
}
/* Accordion & Slider Styling */
.gr-accordion {
  border: 2px solid var(--main-color) !important;
  box-shadow: 4px 4px var(--main-color) !important;
  background-color: lightcyan !important;
  border-radius: 5px !important;
}
.gr-slider {
  border: 2px solid var(--main-color) !important;
  box-shadow: 4px 4px var(--main-color) !important;
  background-color: var(--bg-color) !important;
}
.page-info {
  text-align: center;
  font-weight: 900;
  color: var(--font-color);
  padding-top: 1rem;
}
/* Image/Video Preview */
.gr-image, .gr-video {
  border: 2px solid var(--main-color) !important;
  background-color: var(--bg-color) !important;
  box-shadow: 4px 4px var(--main-color) !important;
  border-radius: 5px !important;
}
"""

with gr.Blocks(css=css, theme=gr.themes.Base()) as demo:
    pdf_state = gr.State(value=get_initial_pdf_state())
    gr.Markdown("# **Qwen3-VL-Processor**", elem_classes="form-title")
    with gr.Row(equal_height=False, variant='panel'):
        # --- INPUT COLUMN ---
        with gr.Column(scale=2, min_width=400, elem_classes="retro-form"):
            gr.Markdown("### INPUT", elem_classes="form-title")
            with gr.Tabs():
                with gr.TabItem("Image"):
                    image_query = gr.Textbox(label="Query", placeholder="Describe the image...")
                    image_upload = gr.Image(type="pil", label="Image Upload")
                    image_submit = gr.Button("Analyze Image")
                    gr.Examples(examples=image_examples, inputs=[image_query, image_upload])
                with gr.TabItem("Video"):
                    video_query = gr.Textbox(label="Query", placeholder="Summarize the video...")
                    video_upload = gr.Video(label="Video Upload")
                    video_submit = gr.Button("Analyze Video")
                    gr.Examples(examples=video_examples, inputs=[video_query, video_upload])
                with gr.TabItem("PDF"):
                    pdf_query = gr.Textbox(label="Query", placeholder="Extract key points...")
                    pdf_upload = gr.File(label="PDF Upload", file_types=[".pdf"])
                    pdf_preview_img = gr.Image(label="PDF Preview")
                    with gr.Row():
                        prev_page_btn = gr.Button("◀ Prev")
                        next_page_btn = gr.Button("Next ▶")
                    page_info = gr.HTML('<div class="page-info">No file loaded</div>')
                    pdf_submit = gr.Button("Analyze PDF")
                    gr.Examples(examples=pdf_examples, inputs=[pdf_upload])
            with gr.Accordion("Advanced options", open=False):
                max_new_tokens = gr.Slider(label="Max new tokens", minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS)
                temperature = gr.Slider(label="Temperature", minimum=0.1, maximum=4.0, step=0.1, value=0.6)
                top_p = gr.Slider(label="Top-p", minimum=0.05, maximum=1.0, step=0.05, value=0.9)
                top_k = gr.Slider(label="Top-k", minimum=1, maximum=1000, step=1, value=50)
                repetition_penalty = gr.Slider(label="Repetition penalty", minimum=1.0, maximum=2.0, step=0.05, value=1.2)
        # --- OUTPUT COLUMN ---
        with gr.Column(scale=3, min_width=500, elem_classes="retro-form"):
            gr.Markdown("### RESULT", elem_classes="form-title")
            output = gr.Textbox(label="Raw Output Stream", interactive=False, lines=15)
            markdown_output = gr.Markdown(label="Formatted Output")

    # Event handlers
    image_submit.click(
        fn=generate_image,
        inputs=[image_query, image_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],
        outputs=[output, markdown_output]
    )
    video_submit.click(
        fn=generate_video,
        inputs=[video_query, video_upload, max_new_tokens, temperature, top_p, top_k, repetition_penalty],
        outputs=[output, markdown_output]
    )
    pdf_submit.click(
        fn=generate_pdf,
        inputs=[pdf_query, pdf_state, max_new_tokens, temperature, top_p, top_k, repetition_penalty],
        outputs=[output, markdown_output]
    )
    pdf_upload.change(
        fn=load_and_preview_pdf,
        inputs=[pdf_upload],
        outputs=[pdf_preview_img, pdf_state, page_info]
    )
    prev_page_btn.click(
        fn=lambda s: navigate_pdf_page("prev", s),
        inputs=[pdf_state],
        outputs=[pdf_preview_img, pdf_state, page_info]
    )
    next_page_btn.click(
        fn=lambda s: navigate_pdf_page("next", s),
        inputs=[pdf_state],
        outputs=[pdf_preview_img, pdf_state, page_info]
    )
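
# Launch configuration: queue(max_size=50) bounds the number of pending
# requests; ssr_mode=False turns off Gradio's server-side rendering and
# show_error=True surfaces tracebacks in the UI.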
if __name__ == "__main__":
    demo.queue(max_size=50).launch(ssr_mode=False, show_error=True)