nami0342 commited on
Commit
6614b04
Β·
1 Parent(s): 5be36b8

Fix model download logic and add it to the parsing class

Browse files
Files changed (2) hide show
  1. app.py +1 -1
  2. preprocess/humanparsing/run_parsing.py +52 -2
app.py CHANGED
@@ -537,7 +537,7 @@ with image_blocks as demo:
537
  api_name='tryon'
538
  )
539
 
540
- # Pre-download the DensePose model at app startup
541
  print("Initializing DensePose model...")
542
  try:
543
  download_all_models()
 
537
  api_name='tryon'
538
  )
539
 
540
+ # Download the DensePose model
541
  print("Initializing DensePose model...")
542
  try:
543
  download_all_models()
preprocess/humanparsing/run_parsing.py CHANGED
@@ -3,23 +3,73 @@ from pathlib import Path
3
  import sys
4
  import os
5
  import onnxruntime as ort
 
6
  PROJECT_ROOT = Path(__file__).absolute().parents[0].absolute()
7
  sys.path.insert(0, str(PROJECT_ROOT))
8
  from parsing_api import onnx_inference
9
  import torch
10
 
11
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
  class Parsing:
13
  def __init__(self, gpu_id: int):
14
  # self.gpu_id = gpu_id
15
  # torch.cuda.set_device(gpu_id)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
  session_options = ort.SessionOptions()
17
  session_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
18
  session_options.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL
19
  # session_options.add_session_config_entry('gpu_id', str(gpu_id))
20
- self.session = ort.InferenceSession(os.path.join(Path(__file__).absolute().parents[2].absolute(), 'ckpt/humanparsing/parsing_atr.onnx'),
21
  sess_options=session_options, providers=['CPUExecutionProvider'])
22
- self.lip_session = ort.InferenceSession(os.path.join(Path(__file__).absolute().parents[2].absolute(), 'ckpt/humanparsing/parsing_lip.onnx'),
23
  sess_options=session_options, providers=['CPUExecutionProvider'])
24
 
25
 
 
3
  import sys
4
  import os
5
  import onnxruntime as ort
6
+ import requests
7
  PROJECT_ROOT = Path(__file__).absolute().parents[0].absolute()
8
  sys.path.insert(0, str(PROJECT_ROOT))
9
  from parsing_api import onnx_inference
10
  import torch
11
 
12
 
13
def download_model_file(model_path, url):
    """Download a model file from *url* to *model_path* if it is not already present.

    Streams the response in chunks and prints a progress percentage when the
    server reports a Content-Length.

    Args:
        model_path: Destination path for the model file.
        url: HTTP(S) URL to fetch the file from.

    Returns:
        True if the file already exists or was downloaded successfully,
        False if the download failed.
    """
    if os.path.exists(model_path):
        print(f"Model file already exists: {model_path}")
        return True

    os.makedirs(os.path.dirname(model_path), exist_ok=True)

    try:
        print(f"Downloading {model_path} from {url}")
        # timeout=(connect, read): without it a dead/stalled server would
        # hang application startup indefinitely.
        with requests.get(url, stream=True, timeout=(10, 60)) as response:
            response.raise_for_status()

            total_size = int(response.headers.get('content-length', 0))
            block_size = 8192

            with open(model_path, 'wb') as f:
                downloaded = 0
                for chunk in response.iter_content(chunk_size=block_size):
                    if chunk:
                        f.write(chunk)
                        downloaded += len(chunk)
                        if total_size > 0:
                            percent = (downloaded / total_size) * 100
                            print(f"\rDownload progress: {percent:.1f}%", end='', flush=True)

        print(f"\nSuccessfully downloaded: {model_path}")
        return True

    except Exception as e:
        print(f"Failed to download {model_path}: {e}")
        # Remove any partially-written file: otherwise the exists-check above
        # would accept a truncated (corrupt) model on the next call/run.
        if os.path.exists(model_path):
            try:
                os.remove(model_path)
            except OSError:
                pass
        return False
45
+
46
+
47
  class Parsing:
48
  def __init__(self, gpu_id: int):
49
  # self.gpu_id = gpu_id
50
  # torch.cuda.set_device(gpu_id)
51
+
52
+ # Model paths
53
+ base_path = Path(__file__).absolute().parents[2].absolute()
54
+ parsing_atr_path = os.path.join(base_path, 'ckpt/humanparsing/parsing_atr.onnx')
55
+ parsing_lip_path = os.path.join(base_path, 'ckpt/humanparsing/parsing_lip.onnx')
56
+
57
+ # Download models if they don't exist
58
+ base_url = "https://huggingface.co/Longcat2957/humanparsing-onnx/resolve/main"
59
+
60
+ if not os.path.exists(parsing_atr_path):
61
+ download_model_file(parsing_atr_path, f"{base_url}/parsing_atr.onnx")
62
+
63
+ if not os.path.exists(parsing_lip_path):
64
+ download_model_file(parsing_lip_path, f"{base_url}/parsing_lip.onnx")
65
+
66
  session_options = ort.SessionOptions()
67
  session_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
68
  session_options.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL
69
  # session_options.add_session_config_entry('gpu_id', str(gpu_id))
70
+ self.session = ort.InferenceSession(parsing_atr_path,
71
  sess_options=session_options, providers=['CPUExecutionProvider'])
72
+ self.lip_session = ort.InferenceSession(parsing_lip_path,
73
  sess_options=session_options, providers=['CPUExecutionProvider'])
74
 
75