import gradio as gr
import torch
from transformers import Qwen2VLForConditionalGeneration, AutoProcessor, BitsAndBytesConfig
from peft import PeftModel
from qwen_vl_utils import process_vision_info
from PIL import Image
import os

# --- SETTINGS ---
CLASS_DESCRIPTIONS = {
    "AnnualCrop": "🌾 Annual Crop",
    "Forest": "🌲 Forest",
    "HerbaceousVegetation": "🌿 Herbaceous Vegetation",
    "Highway": "🛣️ Highway",
    "Industrial": "🏭 Industrial Area",
    "Pasture": "🐄 Pasture",
    "PermanentCrop": "🍇 Permanent Crop (Vineyard/Orchard)",
    "Residential": "🏘️ Residential Area",
    "River": "🌊 River",
    "SeaLake": "🏞️ Sea/Lake",
}

DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
print(f"🚀 Using device: {DEVICE}")


# --- MODEL LOADING ---
def load_model():
    print("⏳ Loading model...")
    try:
        model_id = "Qwen/Qwen2-VL-2B-Instruct"
        adapter_id = "tugrulkaya/GeoQwen-VL-2B-EuroSAT"

        if DEVICE == "cuda":
            # With a GPU: load the base model in 4-bit (NF4) to save VRAM
            bnb_config = BitsAndBytesConfig(
                load_in_4bit=True,
                bnb_4bit_quant_type="nf4",
                bnb_4bit_compute_dtype=torch.float16,
            )
            base_model = Qwen2VLForConditionalGeneration.from_pretrained(
                model_id,
                quantization_config=bnb_config,
                device_map="auto",
                trust_remote_code=True,
                attn_implementation="flash_attention_2",  # requires flash-attn to be installed
            )
        else:
            # CPU (Hugging Face Spaces free tier):
            # no 4-bit quantization, offloading, or device_map="auto" here
            base_model = Qwen2VLForConditionalGeneration.from_pretrained(
                model_id,
                torch_dtype=torch.float32,
                trust_remote_code=True,
                low_cpu_mem_usage=True,
            )

        # Attach the fine-tuned LoRA adapter on top of the base model
        model = PeftModel.from_pretrained(base_model, adapter_id)
        model.eval()

        # The processor handles the chat template and image preprocessing
        processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)

        print("✅ Model loaded!")
        return model, processor
    except Exception as e:
        print(f"❌ Failed to load model: {e}")
        return None, None
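
# --- INFERENCE + UI (sketch) ---
# The original script is truncated above. The block below is a minimal sketch of
# how the loaded model is typically wired into a Gradio demo: the classify_image
# and demo names, the prompt text, and the generation parameters are illustrative
# assumptions, not taken from the original file.

model, processor = load_model()


def classify_image(image: Image.Image) -> str:
    """Classify an EuroSAT-style satellite image into one of the known classes."""
    if model is None or processor is None:
        return "Model could not be loaded."

    # Build a single-turn chat message containing the image and a short instruction
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": image},
                {
                    "type": "text",
                    "text": "Classify this satellite image. Answer with exactly one EuroSAT class name.",
                },
            ],
        }
    ]

    # Apply the chat template, extract vision inputs, and tokenize everything together
    text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    image_inputs, video_inputs = process_vision_info(messages)
    inputs = processor(
        text=[text],
        images=image_inputs,
        videos=video_inputs,
        padding=True,
        return_tensors="pt",
    ).to(model.device)

    with torch.no_grad():
        generated_ids = model.generate(**inputs, max_new_tokens=32)

    # Strip the prompt tokens and decode only the newly generated answer
    trimmed = [out[len(inp):] for inp, out in zip(inputs.input_ids, generated_ids)]
    answer = processor.batch_decode(trimmed, skip_special_tokens=True)[0].strip()

    # Map the raw class name to its human-readable description when possible
    return CLASS_DESCRIPTIONS.get(answer, answer)


demo = gr.Interface(
    fn=classify_image,
    inputs=gr.Image(type="pil", label="Satellite image"),
    outputs=gr.Textbox(label="Predicted land-use class"),
    title="GeoQwen-VL-2B-EuroSAT",
)

if __name__ == "__main__":
    demo.launch()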