import spaces import gradio as gr from PIL import Image from src.tryon_pipeline import StableDiffusionXLInpaintPipeline as TryonPipeline from src.unet_hacked_garmnet import UNet2DConditionModel as UNet2DConditionModel_ref from src.unet_hacked_tryon import UNet2DConditionModel from transformers import ( CLIPImageProcessor, CLIPVisionModelWithProjection, CLIPTextModel, CLIPTextModelWithProjection, ) from diffusers import DDPMScheduler,AutoencoderKL from typing import List import torch import os import io import warnings import requests from transformers import AutoTokenizer import numpy as np from utils_mask import get_mask_location from torchvision import transforms import apply_net from preprocess.humanparsing.run_parsing import Parsing from preprocess.openpose.run_openpose import OpenPose from detectron2.data.detection_utils import convert_PIL_to_numpy,_apply_exif_orientation from torchvision.transforms.functional import to_pil_image # import pillow_heif # HEIC 이미지 처리용 (아이폰 촬영 사진 포맷) from urllib.parse import urlparse # zeroGPU 환경에서 compile 사용 여부 is_compile_for_zeroGPU = False # True: compile 사용, False: compile 사용 안 함 # SSL 경고 억제 warnings.filterwarnings("ignore", message=".*OpenSSL.*") warnings.filterwarnings("ignore", category=UserWarning, module="urllib3") # requests 세션 설정 session = requests.Session() session.verify = False # SSL 검증 비활성화 (개발 환경용) def pil_to_binary_mask(pil_image, threshold=0): np_image = np.array(pil_image) grayscale_image = Image.fromarray(np_image).convert("L") binary_mask = np.array(grayscale_image) > threshold mask = np.zeros(binary_mask.shape, dtype=np.uint8) for i in range(binary_mask.shape[0]): for j in range(binary_mask.shape[1]): if binary_mask[i,j] == True : mask[i,j] = 1 mask = (mask*255).astype(np.uint8) output_mask = Image.fromarray(mask) return output_mask print("=" * 60) print("Starting GENAI-VTON Application Initialization") print("=" * 60) base_path = 'yisol/IDM-VTON' example_path = os.path.join(os.path.dirname(__file__), 'example') print("\n[1/10] Loading UNet model...") unet = UNet2DConditionModel.from_pretrained( base_path, subfolder="unet", torch_dtype=torch.float16, ) unet.requires_grad_(False) # torch.compile() 적용 - 추론 속도 20-40% 향상 (PyTorch 2.0+) # 주의: 첫 번째 추론은 컴파일로 인해 느릴 수 있음 if is_compile_for_zeroGPU == True: print("✓ UNet model loaded successfully") else: if hasattr(torch, 'compile'): try: unet = torch.compile(unet, mode="reduce-overhead") print("✓ UNet model loaded and compiled successfully") except Exception as e: print(f"✓ UNet model loaded (compile skipped: {e})") else: print("✓ UNet model loaded successfully") print("\n[2/10] Loading tokenizers...") tokenizer_one = AutoTokenizer.from_pretrained( base_path, subfolder="tokenizer", revision=None, use_fast=False, ) tokenizer_two = AutoTokenizer.from_pretrained( base_path, subfolder="tokenizer_2", revision=None, use_fast=False, ) print("✓ Tokenizers loaded successfully") print("\n[3/10] Loading noise scheduler...") noise_scheduler = DDPMScheduler.from_pretrained(base_path, subfolder="scheduler") print("✓ Noise scheduler loaded successfully") print("\n[4/10] Loading text encoders...") text_encoder_one = CLIPTextModel.from_pretrained( base_path, subfolder="text_encoder", torch_dtype=torch.float16, ) text_encoder_two = CLIPTextModelWithProjection.from_pretrained( base_path, subfolder="text_encoder_2", torch_dtype=torch.float16, ) print("✓ Text encoders loaded successfully") print("\n[5/10] Loading image encoder...") image_encoder = CLIPVisionModelWithProjection.from_pretrained( base_path, subfolder="image_encoder", torch_dtype=torch.float16, ) print("✓ Image encoder loaded successfully") print("\n[6/10] Loading VAE...") vae = AutoencoderKL.from_pretrained(base_path, subfolder="vae", torch_dtype=torch.float16, ) # torch.compile() 적용 - VAE 인코딩/디코딩 속도 향상 if is_compile_for_zeroGPU == True: print("✓ VAE loaded successfully") else: if hasattr(torch, 'compile'): try: vae = torch.compile(vae, mode="reduce-overhead") print("✓ VAE loaded and compiled successfully") except Exception as e: print(f"✓ VAE loaded (compile skipped: {e})") else: print("✓ VAE loaded successfully") print("\n[7/10] Loading UNet Encoder...") UNet_Encoder = UNet2DConditionModel_ref.from_pretrained( base_path, subfolder="unet_encoder", torch_dtype=torch.float16, ) # torch.compile() 적용 - UNet Encoder 속도 향상 if is_compile_for_zeroGPU == True: print("✓ UNet Encoder loaded successfully") else: if hasattr(torch, 'compile'): try: UNet_Encoder = torch.compile(UNet_Encoder, mode="reduce-overhead") print("✓ UNet Encoder loaded and compiled successfully") except Exception as e: print(f"✓ UNet Encoder loaded (compile skipped: {e})") else: print("✓ UNet Encoder loaded successfully") print("\n[8/10] Initializing parsing and openpose models...") parsing_model = Parsing(0) openpose_model = OpenPose(0) print("✓ Parsing and OpenPose models initialized") print("\n[9/10] Configuring model parameters...") UNet_Encoder.requires_grad_(False) image_encoder.requires_grad_(False) vae.requires_grad_(False) unet.requires_grad_(False) text_encoder_one.requires_grad_(False) text_encoder_two.requires_grad_(False) tensor_transfrom = transforms.Compose( [ transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) print("✓ Model parameters configured") print("\n[10/10] Initializing TryonPipeline...") pipe = TryonPipeline.from_pretrained( base_path, unet=unet, vae=vae, feature_extractor= CLIPImageProcessor(), text_encoder = text_encoder_one, text_encoder_2 = text_encoder_two, tokenizer = tokenizer_one, tokenizer_2 = tokenizer_two, scheduler = noise_scheduler, image_encoder=image_encoder, torch_dtype=torch.float16, ) pipe.unet_encoder = UNet_Encoder print("✓ TryonPipeline initialized successfully") # torch, diffusers 등 버전 정리 후 적용 가능. # # xFormers 메모리 효율적 어텐션 활성화 (메모리 20-30% 감소, 속도 10-20% 향상) # print("\n[Optimization] Enabling xFormers memory efficient attention...") # try: # pipe.enable_xformers_memory_efficient_attention() # print("✓ xFormers memory efficient attention enabled") # except Exception as e: # print(f"⚠ xFormers not available, using default attention: {e}") print("\n" + "=" * 60) print("All models loaded successfully!") print("=" * 60 + "\n") # Warm-up: 첫 번째 추론 지연 감소를 위한 모델 초기화 # JIT 컴파일, CUDA 커널 로딩 등을 미리 수행 print("=" * 60) print("Warming up models (CPU)...") print("=" * 60) def warmup_models_cpu(): """앱 시작 시 CPU 모델 초기화를 위한 Warm-up 함수""" try: # CPU에서 텍스트 임베딩 Warm-up (Tokenizer + Text Encoder 초기화) print("[CPU Warm-up 1/2] Text Encoder warm-up...") with torch.no_grad(): dummy_prompt = "a photo of clothing" dummy_tokens = tokenizer_one( dummy_prompt, padding="max_length", max_length=tokenizer_one.model_max_length, truncation=True, return_tensors="pt" ) # CPU에서 실행 가능한 초기화 _ = text_encoder_one(dummy_tokens.input_ids, output_hidden_states=True) print("✓ Text Encoder warmed up") # Tensor 변환 Warm-up print("[CPU Warm-up 2/2] Tensor transform warm-up...") dummy_img = Image.new('RGB', (768, 1024), color='white') _ = tensor_transfrom(dummy_img) print("✓ Tensor transform warmed up") return True except Exception as e: print(f"⚠ CPU Warm-up partially completed: {e}") return False # CPU Warm-up 실행 warmup_success = warmup_models_cpu() if warmup_success: print("\n✓ CPU warm-up completed successfully") else: print("\n⚠ CPU warm-up completed with warnings") print("=" * 60 + "\n") # torch.compile 오류 시 eager 모드로 폴백 설정 # 커스텀 UNet forward 메서드 호환성 문제 대응 if is_compile_for_zeroGPU == True: print("✓ torch.compile is disabled for ZeroGPU") else: try: import torch._dynamo torch._dynamo.config.suppress_errors = True print("✓ torch._dynamo.config.suppress_errors enabled (fallback to eager mode on error)") except Exception as e: print(f"⚠ torch._dynamo config not available: {e}") # GPU Warm-up 함수 (앱 로드 시 자동 실행) # Text Encoder, VAE GPU 로딩 및 CUDA 커널 초기화 @spaces.GPU def warmup_gpu(): """앱 로드 시 GPU 모델 초기화를 위한 Warm-up 함수""" try: device = "cuda" print("=" * 60) print("GPU Warm-up: Loading models to GPU and initializing CUDA kernels...") print("=" * 60) # 모델을 GPU로 이동 print("[GPU Warm-up 1/4] Moving models to GPU...") pipe.to(device) pipe.unet_encoder.to(device) print("✓ Models moved to GPU") # 더미 텐서 생성 with torch.no_grad(): with torch.cuda.amp.autocast(): # 1. 더미 프롬프트 임베딩 생성 (Text Encoder GPU warm-up) print("[GPU Warm-up 2/4] Text Encoder GPU warm-up...") dummy_prompt = "a photo of white t-shirt" _ = pipe.encode_prompt( dummy_prompt, num_images_per_prompt=1, do_classifier_free_guidance=True, negative_prompt="low quality", ) print("✓ Text Encoder GPU warmed up") # 2. 더미 이미지로 VAE 인코딩/디코딩 (VAE GPU warm-up) print("[GPU Warm-up 3/4] VAE GPU warm-up...") dummy_img = torch.randn(1, 3, 1024, 768).to(device, torch.float16) latents = pipe.vae.encode(dummy_img).latent_dist.sample() _ = pipe.vae.decode(latents) print("✓ VAE GPU warmed up (encode + decode)") # 3. CUDA 동기화 (커널 로딩 완료 대기) print("[GPU Warm-up 4/4] CUDA synchronization...") torch.cuda.synchronize() print("✓ CUDA kernels initialized") # GPU 메모리 정리 torch.cuda.empty_cache() print("\n" + "=" * 60) print("✓ GPU Warm-up completed!") print(" Text Encoder, VAE ready. UNet will compile on first request.") print(" (torch.compile errors will fallback to eager mode)") print("=" * 60 + "\n") return "GPU Warm-up completed successfully!" except Exception as e: print(f"\n⚠ GPU Warm-up failed: {e}") print(" Models will be loaded on first user request.") return f"GPU Warm-up skipped: {e}" # 이미지 전처리 함수 def preprocess_image(image): # HEIC 이미지 처리 if isinstance(image, np.ndarray): image = Image.fromarray(image) # HEIC 이미지를 JPEG로 변환 - 이거 안 먹히는 거 같은데.... try: output = io.BytesIO() image.convert("RGB").save(output, format="JPEG", quality=95) output.seek(0) image = Image.open(output) except Exception as e: print(f"Error converting image: {e}") # 변환 실패 시 원본 이미지 사용 image = image.convert("RGB") # 이미지 크기 가져오기 width, height = image.size # 3:4 비율로 중앙 자르기 target_width = int(min(width, height * (3 / 4))) target_height = int(min(height, width * (4 / 3))) left = (width - target_width) / 2 top = (height - target_height) / 2 right = (width + target_width) / 2 bottom = (height + target_height) / 2 # 이미지 자르기 cropped_img = image.crop((left, top, right, bottom)) # 768x1024로 리사이징 resized_img = cropped_img.resize((768, 1024), resample=Image.Resampling.LANCZOS) return resized_img # URL에서 이미지 가져오기 함수 def load_image_from_url(url): try: response = session.get(url, stream=True, timeout=10) response.raise_for_status() # HTTP 오류 확인 # 이미지 다운로드 img = Image.open(response.raw).convert("RGB") # JPEG로 변환 output = io.BytesIO() img.save(output, format="JPEG", quality=95) output.seek(0) # 변환된 JPEG 이미지 반환 jpeg_img = Image.open(output) return jpeg_img except requests.exceptions.RequestException as e: print(f"Error downloading image from URL: {e}") return None except Exception as e: print(f"Error processing image from URL: {e}") return None def process_url_image(url): """Process image from URL and return PIL Image""" if not url or not url.strip(): return None # URL 유효성 검사 try: result = urlparse(url) if not all([result.scheme, result.netloc]): print("Invalid URL format") return None except Exception as e: print(f"Error parsing URL: {e}") return None img = load_image_from_url(url) if img is None: print("Failed to load image from URL") return None return preprocess_image(img) def load_example_for_editor(image_path): """Load example image for ImageEditor component""" if image_path is None: return None # ImageEditor는 특정 형식을 기대하므로 딕셔너리 형태로 반환 return { "background": image_path, "layers": None, "composite": None } def download_model_file(model_path, urls): """Download model file from multiple URLs if it doesn't exist""" if os.path.exists(model_path): print(f"Model file already exists: {model_path}") return True os.makedirs(os.path.dirname(model_path), exist_ok=True) for url in urls: try: print(f"Downloading from: {url}") response = requests.get(url, stream=True) response.raise_for_status() total_size = int(response.headers.get('content-length', 0)) block_size = 8192 with open(model_path, 'wb') as f: downloaded = 0 for chunk in response.iter_content(chunk_size=block_size): if chunk: f.write(chunk) downloaded += len(chunk) if total_size > 0: percent = (downloaded / total_size) * 100 if(percent % 10 == 0): print(f"\rDownload progress: {percent:.1f}%", end='', flush=True) print(f"\nSuccessfully downloaded: {model_path}") return True except Exception as e: print(f"Failed to download from {url}: {e}") continue print(f"Failed to download model file from all URLs: {model_path}") return False def download_densepose_model(): """Download DensePose model file""" model_path = "ckpt/densepose/model_final_162be9.pkl" urls = [ "https://dl.fbaipublicfiles.com/densepose/densepose_rcnn_R_50_FPN_s1x/165712039/model_final_162be9.pkl", "https://github.com/facebookresearch/densepose/releases/download/v1.0/model_final_162be9.pkl" ] return download_model_file(model_path, urls) def download_openpose_model(): """Download OpenPose model file""" model_path = "ckpt/openpose/ckpts/body_pose_model.pth" urls = [ "https://huggingface.co/lllyasviel/Annotators/resolve/main/body_pose_model.pth" ] return download_model_file(model_path, urls) def download_humanparsing_models(): """Download Human Parsing model files""" base_url = "https://huggingface.co/Longcat2957/humanparsing-onnx/resolve/main" models = [ ("ckpt/humanparsing/parsing_atr.onnx", f"{base_url}/parsing_atr.onnx"), ("ckpt/humanparsing/parsing_lip.onnx", f"{base_url}/parsing_lip.onnx") ] success = True for model_path, url in models: if os.path.exists(model_path): print(f"Human parsing model already exists: {model_path}") continue print(f"Downloading {model_path} from {url}") if download_model_file(model_path, [url]): print(f"Successfully downloaded: {model_path}") else: print(f"Failed to download: {model_path}") success = False return success def download_all_models(): """Download all required model files""" print("Checking and downloading required model files...") # Download DensePose model print("\n[1/3] Downloading DensePose model...") densepose_success = download_densepose_model() if densepose_success: print("✓ DensePose model ready") else: print("⚠ DensePose model download failed (will download on demand)") # Download OpenPose model print("\n[2/3] Downloading OpenPose model...") openpose_success = download_openpose_model() if openpose_success: print("✓ OpenPose model ready") else: print("⚠ OpenPose model download failed (will download on demand)") # Download Human Parsing models print("\n[3/3] Downloading Human Parsing models...") parsing_success = download_humanparsing_models() if parsing_success: print("✓ Human Parsing models ready") else: print("⚠ Human Parsing models download failed (will download on demand)") return densepose_success and openpose_success and parsing_success @spaces.GPU def start_tryon(dict,garm_img,garment_des,is_checked,is_checked_crop, denoise_steps,seed): device = "cuda" openpose_model.preprocessor.body_estimation.model.to(device) pipe.to(device) pipe.unet_encoder.to(device) garm_img= garm_img.convert("RGB").resize((768,1024)) human_img_orig = dict["background"].convert("RGB") if is_checked_crop: width, height = human_img_orig.size target_width = int(min(width, height * (3 / 4))) target_height = int(min(height, width * (4 / 3))) left = (width - target_width) / 2 top = (height - target_height) / 2 right = (width + target_width) / 2 bottom = (height + target_height) / 2 cropped_img = human_img_orig.crop((left, top, right, bottom)) crop_size = cropped_img.size human_img = cropped_img.resize((768,1024)) else: human_img = human_img_orig.resize((768,1024)) if is_checked: keypoints = openpose_model(human_img.resize((384,512))) model_parse, _ = parsing_model(human_img.resize((384,512))) mask, mask_gray = get_mask_location('hd', "upper_body", model_parse, keypoints) mask = mask.resize((768,1024)) else: mask = pil_to_binary_mask(dict['layers'][0].convert("RGB").resize((768, 1024))) # mask = transforms.ToTensor()(mask) # mask = mask.unsqueeze(0) mask_gray = (1-transforms.ToTensor()(mask)) * tensor_transfrom(human_img) mask_gray = to_pil_image((mask_gray+1.0)/2.0) human_img_arg = _apply_exif_orientation(human_img.resize((384,512))) human_img_arg = convert_PIL_to_numpy(human_img_arg, format="BGR") # DensePose 모델 다운로드 및 경로 설정 densepose_model_path = './ckpt/densepose/model_final_162be9.pkl' # 모델 파일이 없으면 다운로드 시도 if not os.path.exists(densepose_model_path): print("DensePose model not found, attempting to download...") download_success = download_densepose_model() if not download_success: print("Failed to download DensePose model") return None, None args = apply_net.create_argument_parser().parse_args(('show', './configs/densepose_rcnn_R_50_FPN_s1x.yaml', densepose_model_path, 'dp_segm', '-v', '--opts', 'MODEL.DEVICE', 'cuda')) # verbosity = getattr(args, "verbosity", None) pose_img = args.func(args,human_img_arg) pose_img = pose_img[:,:,::-1] pose_img = Image.fromarray(pose_img).resize((768,1024)) with torch.no_grad(): # Extract the images with torch.cuda.amp.autocast(): with torch.no_grad(): prompt = "model is wearing " + garment_des negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality" with torch.inference_mode(): ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = pipe.encode_prompt( prompt, num_images_per_prompt=1, do_classifier_free_guidance=True, negative_prompt=negative_prompt, ) prompt = "a photo of " + garment_des negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality" if not isinstance(prompt, List): prompt = [prompt] * 1 if not isinstance(negative_prompt, List): negative_prompt = [negative_prompt] * 1 with torch.inference_mode(): ( prompt_embeds_c, _, _, _, ) = pipe.encode_prompt( prompt, num_images_per_prompt=1, do_classifier_free_guidance=False, negative_prompt=negative_prompt, ) pose_img = tensor_transfrom(pose_img).unsqueeze(0).to(device,torch.float16) garm_tensor = tensor_transfrom(garm_img).unsqueeze(0).to(device,torch.float16) generator = torch.Generator(device).manual_seed(seed) if seed is not None else None images = pipe( prompt_embeds=prompt_embeds.to(device,torch.float16), negative_prompt_embeds=negative_prompt_embeds.to(device,torch.float16), pooled_prompt_embeds=pooled_prompt_embeds.to(device,torch.float16), negative_pooled_prompt_embeds=negative_pooled_prompt_embeds.to(device,torch.float16), num_inference_steps=denoise_steps, generator=generator, strength = 1.0, pose_img = pose_img.to(device,torch.float16), text_embeds_cloth=prompt_embeds_c.to(device,torch.float16), cloth = garm_tensor.to(device,torch.float16), mask_image=mask, image=human_img, height=1024, width=768, ip_adapter_image = garm_img.resize((768,1024)), guidance_scale=2.0, )[0] if is_checked_crop: out_img = images[0].resize(crop_size) human_img_orig.paste(out_img, (int(left), int(top))) return human_img_orig, mask_gray else: return images[0], mask_gray # return images[0], mask_gray print("\n" + "=" * 60) print("Loading Example Images...") print("=" * 60) garm_list = os.listdir(os.path.join(example_path,"cloth")) garm_list_path = [os.path.join(example_path,"cloth",garm) for garm in garm_list] print(f"✓ Found {len(garm_list_path)} garment example images") human_list = os.listdir(os.path.join(example_path,"human")) human_list_path = [os.path.join(example_path,"human",human) for human in human_list] print(f"✓ Found {len(human_list_path)} human example images") # human_ex_list를 단순한 이미지 경로 리스트로 변경 (그리드 표시를 위해) human_ex_list = human_list_path ##default human print("\n" + "=" * 60) print("Creating Gradio Application Interface...") print("=" * 60) image_blocks = gr.Blocks().queue() with image_blocks as demo: print("✓ Gradio Blocks created") gr.Markdown("## DXCO : GENAI-VTON") gr.Markdown("임성남, 윤지영, 조민주 based on IDM-VTON") gr.Markdown("* 맨 처음 추론 시 [5분] 걸림 - compile과 GPU warm-up *") gr.Markdown("권장 이미지 사이즈 - 3:4비율(384x512,768x1024)") with gr.Row(): with gr.Column(): imgs = gr.ImageEditor(sources='upload', type="pil", label='대상 이미지', interactive=True) with gr.Row(): img_url_input = gr.Textbox(label="대상 이미지 URL", placeholder="예) https://example.com/human_image.jpg") with gr.Row(): is_checked = gr.Checkbox(label="Yes", info="자동 마스킹",value=True) with gr.Row(): is_checked_crop = gr.Checkbox(label="Yes", info="자동 크롭 및 리사이징",value=True) example = gr.Examples( inputs=imgs, examples_per_page=10, examples=human_ex_list ) with gr.Column(): garm_img = gr.Image(label="의상 이미지", sources='upload', type="pil") with gr.Row(): garm_url_input = gr.Textbox(label="의상 이미지 URL", placeholder="예) https://example.com/garment.jpg") with gr.Row(elem_id="prompt-container"): with gr.Row(): prompt = gr.Textbox(placeholder="Description of garment ex) Short Sleeve Round Neck T-shirts", show_label=False, elem_id="prompt") example = gr.Examples( inputs=garm_img, examples_per_page=8, examples=garm_list_path) with gr.Column(): masked_img = gr.Image(label="Masked image output", elem_id="masked-img", show_share_button=False) with gr.Column(): image_out = gr.Image(label="Output", elem_id="output-img", show_share_button=False) with gr.Column(): try_button = gr.Button(value="Try-on") with gr.Accordion(label="Advanced Settings", open=False): with gr.Row(): denoise_steps = gr.Number(label="Denoising Steps", minimum=20, maximum=40, value=30, step=1) seed = gr.Number(label="Seed", minimum=-1, maximum=2147483647, step=1, value=42) # is_checked = gr.Number(value=True) # 이미지 업로드 시 전처리 # imgs.upload( # fn=preprocess_image, # inputs=imgs, # outputs=imgs, # 전처리된 이미지를 ImageEditor에 다시 표시 # ) # 대상 이미지: URL 입력 처리 img_url_input.change( fn=lambda url: process_url_image(url), inputs=img_url_input, outputs=imgs, ) # 의상 이미지: URL 입력 처리 garm_url_input.change( fn=lambda url: process_url_image(url), inputs=garm_url_input, outputs=garm_img, ) try_button.click( fn=start_tryon, inputs=[imgs, garm_img, prompt, is_checked, is_checked_crop, denoise_steps, seed], outputs=[image_out, masked_img], api_name='tryon' ) # GPU Warm-up 상태 표시용 (숨김) warmup_status = gr.Textbox(visible=False) # 앱 로드 시 GPU Warm-up 자동 실행 (torch.compile 첫 컴파일) if is_compile_for_zeroGPU == True: print("✓ GPU warm-up is disabled for ZeroGPU") else: demo.load( fn=warmup_gpu, inputs=None, outputs=warmup_status, ) print("✓ Gradio interface components created") print("✓ Event handlers configured") print("✓ GPU warm-up scheduled on app load") print("\n" + "=" * 60) print("Gradio Application Interface Created Successfully!") print("=" * 60) # DensePose 모델 다운로드 print("\n" + "=" * 60) print("Checking and Downloading Additional Models...") print("=" * 60) try: download_all_models() print("\n✓ All model files downloaded successfully.") except Exception as e: print(f"\n⚠ Warning: Could not download all model files: {e}") print("The models will be downloaded when needed during inference.") # 앱 실행 print("\n" + "=" * 60) print("Launching Application Server...") print("=" * 60) if __name__ == "__main__": try: print("Starting GENAI-VTON application on http://0.0.0.0:7860") print("Please wait while the server starts...") image_blocks.launch(server_name="0.0.0.0", server_port=7860, share=False) except Exception as e: print(f"\n❌ Error starting the application: {e}") print("Please check if all required dependencies are installed.") import traceback traceback.print_exc()