wop committed on
Commit
cfc7d65
·
verified ·
1 Parent(s): cfa6140

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -65
app.py CHANGED
@@ -1,106 +1,55 @@
1
- # Copyright (c) 2025 ByteDance Ltd. and/or its affiliates
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- """
16
- Hugging Face Spaces App for Depth Anything 3.
17
-
18
- This app uses the @spaces.GPU decorator to dynamically allocate GPU resources
19
- for model inference on Hugging Face Spaces.
20
- """
21
-
22
  import os
23
- import spaces
24
  from depth_anything_3.app.gradio_app import DepthAnything3App
25
  from depth_anything_3.app.modules.model_inference import ModelInference
26
 
27
- # Apply @spaces.GPU decorator to run_inference method
28
- # This ensures GPU operations happen in isolated subprocess
29
- # Model loading and inference will occur in GPU subprocess, not main process
30
  original_run_inference = ModelInference.run_inference
31
 
32
- @spaces.GPU(duration=120) # Request GPU for up to 120 seconds per inference
33
- def gpu_run_inference(self, *args, **kwargs):
34
- """
35
- GPU-accelerated inference with Spaces decorator.
36
-
37
- This function runs in a GPU subprocess where:
38
- - Model is loaded and moved to GPU (safe)
39
- - CUDA operations are allowed
40
- - All CUDA tensors are moved to CPU before return (for pickle safety)
41
- """
42
  return original_run_inference(self, *args, **kwargs)
43
 
44
- # Replace the original method with the GPU-decorated version
45
- ModelInference.run_inference = gpu_run_inference
46
 
47
- # Initialize and launch the app
48
  if __name__ == "__main__":
49
- # Configure directories for Hugging Face Spaces
50
  model_dir = os.environ.get("DA3_MODEL_DIR", "depth-anything/DA3NESTED-GIANT-LARGE")
51
  workspace_dir = os.environ.get("DA3_WORKSPACE_DIR", "workspace/gradio")
52
  gallery_dir = os.environ.get("DA3_GALLERY_DIR", "workspace/gallery")
53
 
54
- # Create directories if they don't exist
55
  os.makedirs(workspace_dir, exist_ok=True)
56
  os.makedirs(gallery_dir, exist_ok=True)
57
 
58
- # Initialize the app
59
  app = DepthAnything3App(
60
  model_dir=model_dir,
61
  workspace_dir=workspace_dir,
62
  gallery_dir=gallery_dir
63
  )
64
 
65
- # Check if examples directory exists
66
  examples_dir = os.path.join(workspace_dir, "examples")
67
  examples_exist = os.path.exists(examples_dir)
68
 
69
- # Check if caching is enabled via environment variable (default: True if examples exist)
70
- # Allow disabling via environment variable: DA3_CACHE_EXAMPLES=false
71
  cache_examples_env = os.environ.get("DA3_CACHE_EXAMPLES", "").lower()
72
  if cache_examples_env in ("false", "0", "no"):
73
  cache_examples = False
74
  elif cache_examples_env in ("true", "1", "yes"):
75
  cache_examples = True
76
  else:
77
- # Default: enable caching if examples directory exists
78
  cache_examples = examples_exist
79
 
80
- # Get cache_gs_tag from environment variable (default: "dl3dv")
81
  cache_gs_tag = os.environ.get("DA3_CACHE_GS_TAG", "dl3dv")
82
 
83
- # Launch with Spaces-friendly settings
84
- print("๐Ÿš€ Launching Depth Anything 3 on Hugging Face Spaces...")
85
  print(f"๐Ÿ“ฆ Model Directory: {model_dir}")
86
  print(f"๐Ÿ“ Workspace Directory: {workspace_dir}")
87
  print(f"๐Ÿ–ผ๏ธ Gallery Directory: {gallery_dir}")
88
  print(f"๐Ÿ’พ Cache Examples: {cache_examples}")
89
- if cache_examples:
90
- if cache_gs_tag:
91
- print(f"๐Ÿท๏ธ Cache GS Tag: '{cache_gs_tag}' (scenes matching this tag will use high-res + 3DGS)")
92
- else:
93
- print("๐Ÿท๏ธ Cache GS Tag: None (all scenes will use low-res only)")
94
 
95
- # Pre-cache examples if requested
96
  if cache_examples:
97
  print("\n" + "=" * 60)
98
- print("Pre-caching mode enabled")
99
- if cache_gs_tag:
100
- print(f"Scenes containing '{cache_gs_tag}' will use HIGH-RES + 3DGS")
101
- print("Other scenes will use LOW-RES only")
102
- else:
103
- print("All scenes will use LOW-RES only")
104
  print("=" * 60)
105
  app.cache_examples(
106
  show_cam=True,
@@ -113,10 +62,8 @@ if __name__ == "__main__":
113
  gs_video_quality="low",
114
  )
115
 
116
- # Launch with minimal, Spaces-compatible configuration
117
- # Some parameters may cause routing issues, so we use minimal config
118
  app.launch(
119
- host="0.0.0.0", # Required for Spaces
120
- port=7860, # Standard Gradio port
121
- share=False # Not needed on Spaces
122
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import os
 
2
  from depth_anything_3.app.gradio_app import DepthAnything3App
3
  from depth_anything_3.app.modules.model_inference import ModelInference
4
 
5
import functools

# GPU decorator (@spaces.GPU) was removed; inference now runs in-process on CPU.
# Capture the original implementation before monkey-patching so the wrapper
# can delegate to it.
original_run_inference = ModelInference.run_inference


@functools.wraps(original_run_inference)
def cpu_run_inference(self, *args, **kwargs):
    """CPU-only inference wrapper.

    Delegates directly to the original ``ModelInference.run_inference``.
    ``functools.wraps`` preserves the original method's name, docstring and
    signature metadata, so introspection and error messages stay intact.

    Args:
        self: The ``ModelInference`` instance.
        *args: Positional arguments forwarded unchanged.
        **kwargs: Keyword arguments forwarded unchanged.

    Returns:
        Whatever the original ``run_inference`` returns.
    """
    return original_run_inference(self, *args, **kwargs)


# Replace the original method with the CPU wrapper (keeps the previous
# monkey-patch structure from the Spaces/GPU version of this file).
ModelInference.run_inference = cpu_run_inference
14
 
 
15
  if __name__ == "__main__":
 
16
  model_dir = os.environ.get("DA3_MODEL_DIR", "depth-anything/DA3NESTED-GIANT-LARGE")
17
  workspace_dir = os.environ.get("DA3_WORKSPACE_DIR", "workspace/gradio")
18
  gallery_dir = os.environ.get("DA3_GALLERY_DIR", "workspace/gallery")
19
 
 
20
  os.makedirs(workspace_dir, exist_ok=True)
21
  os.makedirs(gallery_dir, exist_ok=True)
22
 
 
23
  app = DepthAnything3App(
24
  model_dir=model_dir,
25
  workspace_dir=workspace_dir,
26
  gallery_dir=gallery_dir
27
  )
28
 
 
29
  examples_dir = os.path.join(workspace_dir, "examples")
30
  examples_exist = os.path.exists(examples_dir)
31
 
 
 
32
  cache_examples_env = os.environ.get("DA3_CACHE_EXAMPLES", "").lower()
33
  if cache_examples_env in ("false", "0", "no"):
34
  cache_examples = False
35
  elif cache_examples_env in ("true", "1", "yes"):
36
  cache_examples = True
37
  else:
 
38
  cache_examples = examples_exist
39
 
 
40
  cache_gs_tag = os.environ.get("DA3_CACHE_GS_TAG", "dl3dv")
41
 
42
+ print("๐Ÿš€ Launching Depth Anything 3 (CPU-only) on Hugging Face Spaces...")
 
43
  print(f"๐Ÿ“ฆ Model Directory: {model_dir}")
44
  print(f"๐Ÿ“ Workspace Directory: {workspace_dir}")
45
  print(f"๐Ÿ–ผ๏ธ Gallery Directory: {gallery_dir}")
46
  print(f"๐Ÿ’พ Cache Examples: {cache_examples}")
47
+ if cache_examples and cache_gs_tag:
48
+ print(f"๐Ÿท๏ธ Cache GS Tag: '{cache_gs_tag}'")
 
 
 
49
 
 
50
  if cache_examples:
51
  print("\n" + "=" * 60)
52
+ print("Pre-caching mode enabled (CPU)")
 
 
 
 
 
53
  print("=" * 60)
54
  app.cache_examples(
55
  show_cam=True,
 
62
  gs_video_quality="low",
63
  )
64
 
 
 
65
  app.launch(
66
+ host="0.0.0.0",
67
+ port=7860,
68
+ share=False
69
  )