Update app.py
Browse files
app.py
CHANGED
|
@@ -3,33 +3,34 @@ import torch
|
|
| 3 |
import gradio as gr
|
| 4 |
import time
|
| 5 |
from ccd import ccd_eval, run_eval
|
| 6 |
-
from libra.eval.run_libra import load_model
|
| 7 |
|
| 8 |
# =========================================
|
| 9 |
-
# Safe Libra Hook
|
| 10 |
# =========================================
|
| 11 |
import torch
|
| 12 |
import libra.model.builder as builder
|
|
|
|
| 13 |
|
|
|
|
| 14 |
_original_load_pretrained_model = builder.load_pretrained_model
|
| 15 |
|
| 16 |
def safe_load_pretrained_model(model_path, model_base=None, model_name=None, **kwargs):
|
| 17 |
-
print("[INFO]
|
|
|
|
| 18 |
tokenizer, model, image_processor, context_len = _original_load_pretrained_model(
|
| 19 |
model_path, model_base, model_name, **kwargs
|
| 20 |
)
|
| 21 |
|
| 22 |
if torch.cuda.is_available():
|
| 23 |
device, dtype = "cuda", torch.float16
|
| 24 |
-
print("[INFO] GPU detected
|
| 25 |
else:
|
| 26 |
device, dtype = "cpu", torch.float32
|
| 27 |
-
print("[WARN] No GPU
|
| 28 |
|
| 29 |
try:
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
print(f"[INFO] Vision tower moved to {device} ({dtype})")
|
| 33 |
except Exception as e:
|
| 34 |
print(f"[WARN] Could not move vision tower: {e}")
|
| 35 |
|
|
@@ -38,6 +39,14 @@ def safe_load_pretrained_model(model_path, model_base=None, model_name=None, **k
|
|
| 38 |
builder.load_pretrained_model = safe_load_pretrained_model
|
| 39 |
|
| 40 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 41 |
# =========================================
|
| 42 |
# Global Configuration
|
| 43 |
# =========================================
|
|
|
|
| 3 |
import gradio as gr
import time
from ccd import ccd_eval, run_eval

# =========================================
# Safe Libra Hook (CPU fallback for no-GPU environments)
# =========================================
import torch
import libra.model.builder as builder
import libra.eval.run_libra as run_libra

# --- Patch 1: replace load_pretrained_model ---
# Keep a handle to the untouched builder entry point so the wrapper below can
# delegate the actual loading to it.
_original_load_pretrained_model = builder.load_pretrained_model
|
| 16 |
|
| 17 |
def safe_load_pretrained_model(model_path, model_base=None, model_name=None, **kwargs):
    """CPU-safe wrapper around libra's ``builder.load_pretrained_model``.

    Delegates loading to the original builder, then moves the vision tower to
    CUDA/float16 when a GPU is available, or CPU/float32 otherwise, so the app
    still starts in GPU-less environments.

    Args:
        model_path: forwarded unchanged to the original builder.
        model_base: forwarded unchanged to the original builder.
        model_name: forwarded unchanged to the original builder.
        **kwargs: extra loader options, forwarded unchanged.

    Returns:
        ``(tokenizer, model, image_processor, context_len)`` — the same tuple
        contract as the wrapped ``builder.load_pretrained_model``.
    """
    print("[INFO] Hook activated: safe_load_pretrained_model()")

    tokenizer, model, image_processor, context_len = _original_load_pretrained_model(
        model_path, model_base, model_name, **kwargs
    )

    if torch.cuda.is_available():
        device, dtype = "cuda", torch.float16
        print("[INFO] GPU detected — using CUDA + float16.")
    else:
        device, dtype = "cpu", torch.float32
        print("[WARN] No GPU found — forcing model to CPU (float32).")

    # Best-effort move: only the vision tower is relocated here; failures are
    # logged rather than raised so loading can still proceed.
    try:
        model.get_vision_tower().to(device=device, dtype=dtype)
        print(f"[INFO] Vision tower moved to {device} ({dtype}).")
    except Exception as e:
        print(f"[WARN] Could not move vision tower: {e}")

    # NOTE(review): the function tail was elided by the diff view's hunk
    # context; this return matches what safe_load_model() below relies on —
    # confirm against the full file.
    return tokenizer, model, image_processor, context_len
|
| 36 |
|
|
|
|
| 39 |
# Install Patch 1: every later call through builder.load_pretrained_model now
# goes via the CPU-safe wrapper defined above.
builder.load_pretrained_model = safe_load_pretrained_model
|
| 40 |
|
| 41 |
|
| 42 |
+
# --- Patch 2: replace run_libra.load_model to force using our patched builder ---
def safe_load_model(model_path, model_base=None, model_name=None, **kwargs):
    """Drop-in replacement for ``run_libra.load_model`` that routes loading
    through the CPU-safe wrapper above.

    ``**kwargs`` are forwarded (previously they were silently dropped even
    though ``safe_load_pretrained_model`` accepts them), so callers passing
    extra loader options keep working.
    """
    print("[INFO] Hook activated: safe_load_model()")
    return safe_load_pretrained_model(model_path, model_base, model_name, **kwargs)


# NOTE(review): this rebinds the module attribute only — any module that did
# `from libra.eval.run_libra import load_model` BEFORE this line still holds
# the original function; verify import order in the app.
run_libra.load_model = safe_load_model
|
| 48 |
+
|
| 49 |
+
|
| 50 |
# =========================================
|
| 51 |
# Global Configuration
|
| 52 |
# =========================================
|