Files changed (9)
  1. .gitignore +0 -207
  2. README.md +1 -1
  3. app.py +91 -226
  4. constants.py +33 -54
  5. image_processor.py +2 -2
  6. packages.txt +1 -1
  7. pre-requirements.txt +0 -1
  8. requirements.txt +3 -11
  9. utils.py +485 -714
.gitignore DELETED
@@ -1,207 +0,0 @@
-# Byte-compiled / optimized / DLL files
-__pycache__/
-*.py[codz]
-*$py.class
-
-# C extensions
-*.so
-
-# Distribution / packaging
-.Python
-build/
-develop-eggs/
-dist/
-downloads/
-eggs/
-.eggs/
-lib/
-lib64/
-parts/
-sdist/
-var/
-wheels/
-share/python-wheels/
-*.egg-info/
-.installed.cfg
-*.egg
-MANIFEST
-
-# PyInstaller
-#  Usually these files are written by a python script from a template
-#  before PyInstaller builds the exe, so as to inject date/other infos into it.
-*.manifest
-*.spec
-
-# Installer logs
-pip-log.txt
-pip-delete-this-directory.txt
-
-# Unit test / coverage reports
-htmlcov/
-.tox/
-.nox/
-.coverage
-.coverage.*
-.cache
-nosetests.xml
-coverage.xml
-*.cover
-*.py.cover
-.hypothesis/
-.pytest_cache/
-cover/
-
-# Translations
-*.mo
-*.pot
-
-# Django stuff:
-*.log
-local_settings.py
-db.sqlite3
-db.sqlite3-journal
-
-# Flask stuff:
-instance/
-.webassets-cache
-
-# Scrapy stuff:
-.scrapy
-
-# Sphinx documentation
-docs/_build/
-
-# PyBuilder
-.pybuilder/
-target/
-
-# Jupyter Notebook
-.ipynb_checkpoints
-
-# IPython
-profile_default/
-ipython_config.py
-
-# pyenv
-#  For a library or package, you might want to ignore these files since the code is
-#  intended to run in multiple environments; otherwise, check them in:
-# .python-version
-
-# pipenv
-#  According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
-#  However, in case of collaboration, if having platform-specific dependencies or dependencies
-#  having no cross-platform support, pipenv may install dependencies that don't work, or not
-#  install all needed dependencies.
-#Pipfile.lock
-
-# UV
-#  Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
-#  This is especially recommended for binary packages to ensure reproducibility, and is more
-#  commonly ignored for libraries.
-#uv.lock
-
-# poetry
-#  Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
-#  This is especially recommended for binary packages to ensure reproducibility, and is more
-#  commonly ignored for libraries.
-#  https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
-#poetry.lock
-#poetry.toml
-
-# pdm
-#  Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
-#  pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
-#  https://pdm-project.org/en/latest/usage/project/#working-with-version-control
-#pdm.lock
-#pdm.toml
-.pdm-python
-.pdm-build/
-
-# pixi
-#  Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
-#pixi.lock
-#  Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
-#  in the .venv directory. It is recommended not to include this directory in version control.
-.pixi
-
-# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
-__pypackages__/
-
-# Celery stuff
-celerybeat-schedule
-celerybeat.pid
-
-# SageMath parsed files
-*.sage.py
-
-# Environments
-.env
-.envrc
-.venv
-env/
-venv/
-ENV/
-env.bak/
-venv.bak/
-
-# Spyder project settings
-.spyderproject
-.spyproject
-
-# Rope project settings
-.ropeproject
-
-# mkdocs documentation
-/site
-
-# mypy
-.mypy_cache/
-.dmypy.json
-dmypy.json
-
-# Pyre type checker
-.pyre/
-
-# pytype static type analyzer
-.pytype/
-
-# Cython debug symbols
-cython_debug/
-
-# PyCharm
-#  JetBrains specific template is maintained in a separate JetBrains.gitignore that can
-#  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
-#  and can be added to the global gitignore or merged into this file. For a more nuclear
-#  option (not recommended) you can uncomment the following to ignore the entire idea folder.
-#.idea/
-
-# Abstra
-#  Abstra is an AI-powered process automation framework.
-#  Ignore directories containing user credentials, local state, and settings.
-#  Learn more at https://abstra.io/docs
-.abstra/
-
-# Visual Studio Code
-#  Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
-#  that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
-#  and can be added to the global gitignore or merged into this file. However, if you prefer,
-#  you could uncomment the following to ignore the entire vscode folder
-# .vscode/
-
-# Ruff stuff:
-.ruff_cache/
-
-# PyPI configuration file
-.pypirc
-
-# Cursor
-#  Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
-#  exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
-#  refer to https://docs.cursor.com/context/ignore-files
-.cursorignore
-.cursorindexingignore
-
-# Marimo
-marimo/_static/
-marimo/_lsp/
-__marimo__/
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🧩🖼️
 colorFrom: red
 colorTo: pink
 sdk: gradio
-sdk_version: 5.44.1
+sdk_version: 4.31.3
 app_file: app.py
 pinned: true
 license: mit
app.py CHANGED
@@ -1,6 +1,5 @@
 import spaces
 import os
-from argparse import ArgumentParser
 from stablepy import (
     Model_Diffusers,
     SCHEDULE_TYPE_OPTIONS,
@@ -41,8 +40,6 @@ from constants import (
     DIFFUSERS_CONTROLNET_MODEL,
     IP_MODELS,
     MODE_IP_OPTIONS,
-    CACHE_HF_ROOT,
-    CACHE_HF,
 )
 from stablepy.diffusers_vanilla.style_prompt_config import STYLE_NAMES
 import torch
@@ -63,7 +60,6 @@ from utils import (
     progress_step_bar,
     html_template_message,
     escape_html,
-    clear_hf_cache,
 )
 from image_processor import preprocessor_tab
 from datetime import datetime
@@ -76,18 +72,11 @@ from diffusers import FluxPipeline
 # import urllib.parse
 import subprocess
 
-IS_ZERO_GPU = bool(os.getenv("SPACES_ZERO_GPU"))
-HIDE_API = bool(os.getenv("HIDE_API"))
-if IS_ZERO_GPU:
-    subprocess.run("rm -rf /data-nvme/zerogpu-offload/*", env={}, shell=True)
-IS_GPU_MODE = True if IS_ZERO_GPU else (True if torch.cuda.is_available() else False)
-img_path = "./images/"
-allowed_path = os.path.abspath(img_path)
-delete_cache_time = (9600, 9600) if IS_ZERO_GPU else (86400, 86400)
-
+subprocess.run("rm -rf /data-nvme/zerogpu-offload/*", env={}, shell=True)
 ImageFile.LOAD_TRUNCATED_IMAGES = True
 torch.backends.cuda.matmul.allow_tf32 = True
 # os.environ["PYTORCH_NO_CUDA_MEMORY_CACHING"] = "1"
+print(os.getenv("SPACES_ZERO_GPU"))
 
 directories = [DIRECTORY_MODELS, DIRECTORY_LORAS, DIRECTORY_VAES, DIRECTORY_EMBEDS, DIRECTORY_UPSCALERS]
 for directory in directories:
@@ -95,15 +84,19 @@ for directory in directories:
 
 # Download stuffs
 for url in [url.strip() for url in DOWNLOAD_MODEL.split(',')]:
-    download_things(DIRECTORY_MODELS, url, HF_TOKEN, CIVITAI_API_KEY)
+    if not os.path.exists(f"./models/{url.split('/')[-1]}"):
+        download_things(DIRECTORY_MODELS, url, HF_TOKEN, CIVITAI_API_KEY)
 for url in [url.strip() for url in DOWNLOAD_VAE.split(',')]:
-    download_things(DIRECTORY_VAES, url, HF_TOKEN, CIVITAI_API_KEY)
+    if not os.path.exists(f"./vaes/{url.split('/')[-1]}"):
+        download_things(DIRECTORY_VAES, url, HF_TOKEN, CIVITAI_API_KEY)
 for url in [url.strip() for url in DOWNLOAD_LORA.split(',')]:
-    download_things(DIRECTORY_LORAS, url, HF_TOKEN, CIVITAI_API_KEY)
+    if not os.path.exists(f"./loras/{url.split('/')[-1]}"):
+        download_things(DIRECTORY_LORAS, url, HF_TOKEN, CIVITAI_API_KEY)
 
 # Download Embeddings
 for url_embed in DOWNLOAD_EMBEDS:
-    download_things(DIRECTORY_EMBEDS, url_embed, HF_TOKEN, CIVITAI_API_KEY)
+    if not os.path.exists(f"./embedings/{url_embed.split('/')[-1]}"):
+        download_things(DIRECTORY_EMBEDS, url_embed, HF_TOKEN, CIVITAI_API_KEY)
 
 # Build list models
 embed_list = get_model_list(DIRECTORY_EMBEDS)
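Note on the download hunk above: the incoming side makes the startup downloads idempotent by checking for the file's presence before calling `download_things`, keyed on the URL's last path segment. A minimal sketch of that guard, assuming a `fetch` stand-in and an illustrative URL (neither is from the repo):

```python
import os

def download_if_missing(directory, url, fetch):
    # Same guard as the incoming side: derive the local filename from the
    # URL's last path segment and skip the download when it already exists.
    filename = url.split('/')[-1]
    if not os.path.exists(f"./{directory}/{filename}"):
        fetch(directory, url)

# Illustrative usage, with a stub standing in for download_things():
download_if_missing("models", "https://example.com/model.safetensors",
                    lambda d, u: print(f"downloading {u} -> ./{d}/"))
```

One caveat visible in the surrounding constants: for URLs that carry a query string (such as the `?download=true` suffix on `DOWNLOAD_VAE`), the derived basename includes the query, so the existence check may never match the saved file and the download would repeat on every start.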
@@ -121,16 +114,15 @@ vae_model_list.insert(0, "None")
 
 print('\033[33m🏁 Download and listing of valid models completed.\033[0m')
 
-components = None
-if IS_ZERO_GPU:
-    flux_repo = "camenduru/FLUX.1-dev-diffusers"
-    flux_pipe = FluxPipeline.from_pretrained(
-        flux_repo,
-        transformer=None,
-        torch_dtype=torch.bfloat16,
-    ).to("cuda")
-    components = flux_pipe.components
-    delete_model(flux_repo)
+flux_repo = "camenduru/FLUX.1-dev-diffusers"
+flux_pipe = FluxPipeline.from_pretrained(
+    flux_repo,
+    transformer=None,
+    torch_dtype=torch.bfloat16,
+).to("cuda")
+components = flux_pipe.components
+delete_model(flux_repo)
+# components = None
 
 #######################
 # GUI
@@ -140,17 +132,7 @@ diffusers.utils.logging.set_verbosity(40)
 warnings.filterwarnings(action="ignore", category=FutureWarning, module="diffusers")
 warnings.filterwarnings(action="ignore", category=UserWarning, module="diffusers")
 warnings.filterwarnings(action="ignore", category=FutureWarning, module="transformers")
-
-parser = ArgumentParser(description='DiffuseCraft: Create images from text prompts.', add_help=True)
-parser.add_argument("--share", action="store_true", dest="share_enabled", default=False, help="Enable sharing")
-parser.add_argument('--theme', type=str, default="NoCrypt/miku", help='Set the theme (default: NoCrypt/miku)')
-parser.add_argument("--ssr", action="store_true", help="Enable SSR (Server-Side Rendering)")
-parser.add_argument("--log-level", type=str, default="INFO", choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], help="Set logging level (default: INFO)")
-args = parser.parse_args()
-
-logger.setLevel(
-    "INFO" if IS_ZERO_GPU else getattr(logging, args.log_level.upper())
-)
+logger.setLevel(logging.DEBUG)
 
 CSS = """
 .contain { display: flex; flex-direction: column; }
@@ -160,12 +142,6 @@ CSS = """
 """
 
 
-def lora_chk(lora_):
-    if isinstance(lora_, str) and lora_.strip() not in ["", "None"]:
-        return lora_
-    return None
-
-
 class GuiSD:
     def __init__(self, stream=True):
         self.model = None
@@ -174,22 +150,13 @@ class GuiSD:
         self.last_load = datetime.now()
         self.inventory = []
 
-    def update_storage_models(self, storage_floor_gb=30, required_inventory_for_purge=3):
+    def update_storage_models(self, storage_floor_gb=24, required_inventory_for_purge=3):
         while get_used_storage_gb() > storage_floor_gb:
            if len(self.inventory) < required_inventory_for_purge:
                break
            removal_candidate = self.inventory.pop(0)
            delete_model(removal_candidate)
 
-        # Cleanup after 60 seconds of inactivity
-        lowPrioCleanup = max((datetime.now() - self.last_load).total_seconds(), 0) > 60
-        if lowPrioCleanup and (len(self.inventory) >= required_inventory_for_purge - 1) and not self.status_loading and get_used_storage_gb(CACHE_HF_ROOT) > (storage_floor_gb * 2):
-            print("Cleaning up Hugging Face cache...")
-            clear_hf_cache()
-            self.inventory = [
-                m for m in self.inventory if os.path.exists(m)
-            ]
-
     def update_inventory(self, model_name):
        if model_name not in single_file_model_list:
            self.inventory = [
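What survives of `update_storage_models` after this hunk is a plain oldest-first eviction loop, with the default `storage_floor_gb` lowered from 30 to 24. A self-contained trace of that loop, with illustrative numbers standing in for `get_used_storage_gb()` and `delete_model()` (stubs are assumptions, not repo code):

```python
# Stand-ins for the real helpers (illustrative values, not from the repo).
used_gb = 40.0
sizes_gb = {"models/a.safetensors": 10, "models/b.safetensors": 8, "models/c.safetensors": 6}
inventory = list(sizes_gb)  # oldest model first

storage_floor_gb = 24            # new default in this diff (was 30)
required_inventory_for_purge = 3

while used_gb > storage_floor_gb:          # get_used_storage_gb() > floor
    if len(inventory) < required_inventory_for_purge:
        break                              # too few models tracked: stop purging
    removal_candidate = inventory.pop(0)   # FIFO eviction of the oldest entry
    used_gb -= sizes_gb[removal_candidate] # stands in for delete_model()

print(inventory, used_gb)
# ['models/b.safetensors', 'models/c.safetensors'] 30.0 -- one eviction, then
# the inventory guard stops the loop even though usage is still above the floor.
```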
@@ -200,21 +167,14 @@ class GuiSD:
     def load_new_model(self, model_name, vae_model, task, controlnet_model, progress=gr.Progress(track_tqdm=True)):
 
         # download link model > model_name
-        if model_name.startswith("http"):
-            yield f"Downloading model: {model_name}"
-            model_name = download_things(DIRECTORY_MODELS, model_name, HF_TOKEN, CIVITAI_API_KEY)
-            if not model_name:
-                raise ValueError("Error retrieving model information from URL")
 
-        if IS_ZERO_GPU:
-            self.update_storage_models()
+        self.update_storage_models()
 
         vae_model = vae_model if vae_model != "None" else None
         model_type = get_model_type(model_name)
         dtype_model = torch.bfloat16 if model_type == "FLUX" else torch.float16
 
         if not os.path.exists(model_name):
-            logger.debug(f"model_name={model_name}, vae_model={vae_model}, task={task}, controlnet_model={controlnet_model}")
             _ = download_diffuser_repo(
                 repo_name=model_name,
                 model_type=model_type,
@@ -259,10 +219,10 @@ class GuiSD:
                 type_model_precision=dtype_model,
                 retain_task_model_in_cache=False,
                 controlnet_model=controlnet_model,
-                device="cpu" if IS_ZERO_GPU else None,
+                device="cpu",
                 env_components=components,
             )
-            self.model.advanced_params(image_preprocessor_cuda_active=IS_GPU_MODE)
+            self.model.advanced_params(image_preprocessor_cuda_active=True)
         else:
             if self.model.base_model_id != model_name:
                 load_now_time = datetime.now()
@@ -272,8 +232,7 @@ class GuiSD:
                 print("Waiting for the previous model's time ops...")
                 time.sleep(9 - elapsed_time)
 
-        if IS_ZERO_GPU:
-            self.model.device = torch.device("cpu")
+        self.model.device = torch.device("cpu")
         self.model.load_pipe(
             model_name,
             task_name=TASK_STABLEPY[task],
@@ -427,7 +386,7 @@ class GuiSD:
         vae_msg = f"VAE: {vae_model}" if vae_model else ""
         msg_lora = ""
 
-        logger.debug(f"Config model: {model_name}, {vae_model}, {loras_list}")
+        print("Config model:", model_name, vae_model, loras_list)
 
         task = TASK_STABLEPY[task]
 
@@ -525,19 +484,19 @@ class GuiSD:
             "distance_threshold": distance_threshold,
             "recolor_gamma_correction": float(recolor_gamma_correction),
             "tile_blur_sigma": int(tile_blur_sigma),
-            "lora_A": lora_chk(lora1),
+            "lora_A": lora1 if lora1 != "None" else None,
             "lora_scale_A": lora_scale1,
-            "lora_B": lora_chk(lora2),
+            "lora_B": lora2 if lora2 != "None" else None,
             "lora_scale_B": lora_scale2,
-            "lora_C": lora_chk(lora3),
+            "lora_C": lora3 if lora3 != "None" else None,
             "lora_scale_C": lora_scale3,
-            "lora_D": lora_chk(lora4),
+            "lora_D": lora4 if lora4 != "None" else None,
             "lora_scale_D": lora_scale4,
-            "lora_E": lora_chk(lora5),
+            "lora_E": lora5 if lora5 != "None" else None,
             "lora_scale_E": lora_scale5,
-            "lora_F": lora_chk(lora6),
+            "lora_F": lora6 if lora6 != "None" else None,
             "lora_scale_F": lora_scale6,
-            "lora_G": lora_chk(lora7),
+            "lora_G": lora7 if lora7 != "None" else None,
             "lora_scale_G": lora_scale7,
             "textual_inversion": embed_list if textual_inversion else [],
             "syntax_weights": syntax_weights,  # "Classic"
@@ -595,11 +554,11 @@ class GuiSD:
         # kwargs for diffusers pipeline
         if guidance_rescale:
             pipe_params["guidance_rescale"] = guidance_rescale
-        if IS_ZERO_GPU:
-            self.model.device = torch.device("cuda:0")
-            if hasattr(self.model.pipe, "transformer") and loras_list != ["None"] * self.model.num_loras:
-                self.model.pipe.transformer.to(self.model.device)
-                logger.debug("transformer to cuda")
+
+        self.model.device = torch.device("cuda:0")
+        if hasattr(self.model.pipe, "transformer") and loras_list != ["None"] * self.model.num_loras:
+            self.model.pipe.transformer.to(self.model.device)
+            print("transformer to cuda")
 
         actual_progress = 0
         info_images = gr.update()
@@ -629,7 +588,7 @@ class GuiSD:
 
         download_links = "<br>".join(
             [
-                f'<a href="{path.replace("/images/", f"/gradio_api/file={allowed_path}/")}" download="{os.path.basename(path)}">Download Image {i + 1}</a>'
+                f'<a href="{path.replace("/images/", "/file=/home/user/app/images/")}" download="{os.path.basename(path)}">Download Image {i + 1}</a>'
                 for i, path in enumerate(image_path)
             ]
         )
@@ -738,8 +697,7 @@ def sd_gen_generate_pipeline(*args):
 
 @spaces.GPU(duration=15)
 def process_upscale(image, upscaler_name, upscaler_size):
-    if image is None:
-        return None
+    if image is None: return None
 
     from stablepy.diffusers_vanilla.utils import save_pil_image_with_metadata
     from stablepy import load_upscaler_model
@@ -756,7 +714,7 @@ def process_upscale(image, upscaler_name, upscaler_size):
 
     name_upscaler = f"./{DIRECTORY_UPSCALERS}/{name_upscaler.split('/')[-1]}"
 
-    scaler_beta = load_upscaler_model(model=name_upscaler, tile=(0 if IS_ZERO_GPU else 192), tile_overlap=8, device=("cuda" if IS_GPU_MODE else "cpu"), half=IS_GPU_MODE)
+    scaler_beta = load_upscaler_model(model=name_upscaler, tile=0, tile_overlap=8, device="cuda", half=True)
     image_up = scaler_beta.upscale(image, upscaler_size, True)
 
     image_path = save_pil_image_with_metadata(image_up, f'{os.getcwd()}/up_images', exif_image)
@@ -765,11 +723,11 @@ def process_upscale(image, upscaler_name, upscaler_size):
 
 
 # https://huggingface.co/spaces/BestWishYsh/ConsisID-preview-Space/discussions/1#674969a022b99c122af5d407
-# dynamic_gpu_duration.zerogpu = True
-# sd_gen_generate_pipeline.zerogpu = True
+dynamic_gpu_duration.zerogpu = True
+sd_gen_generate_pipeline.zerogpu = True
 sd_gen = GuiSD()
 
-with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as app:
+with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
     gr.Markdown("# 🧩 DiffuseCraft")
     gr.Markdown(SUBTITLE_GUI)
     with gr.Tab("Generation"):
@@ -818,7 +776,7 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
 
             actual_task_info = gr.HTML()
 
-            with gr.Row(equal_height=False, variant="default", visible=IS_ZERO_GPU):
+            with gr.Row(equal_height=False, variant="default"):
                 gpu_duration_gui = gr.Number(minimum=5, maximum=240, value=59, show_label=False, container=False, info="GPU time duration (seconds)")
                 with gr.Column():
                     verbose_info_gui = gr.Checkbox(value=False, container=False, label="Status info")
@@ -854,22 +812,7 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
                     "Schedule type": gr.update(value="Automatic"),
                     "PAG": gr.update(value=.0),
                     "FreeU": gr.update(value=False),
-                    "Hires upscaler": gr.update(),
-                    "Hires upscale": gr.update(),
-                    "Hires steps": gr.update(),
-                    "Hires denoising strength": gr.update(),
-                    "Hires CFG": gr.update(),
-                    "Hires sampler": gr.update(),
-                    "Hires schedule type": gr.update(),
-                    "Image resolution": gr.update(value=1024),
-                    "Strength": gr.update(),
                 }
-
-                # Generate up to 7 LoRAs
-                for i in range(1, 8):
-                    valid_receptors[f"Lora_{i}"] = gr.update()
-                    valid_receptors[f"Lora_scale_{i}"] = gr.update()
-
                 valid_keys = list(valid_receptors.keys())
                 parameters = extract_parameters(base_prompt)
@@ -883,36 +826,6 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
                 parameters["Sampler"] = value_sampler
                 parameters["Schedule type"] = s_type
 
-                params_lora = []
-                if ">" in parameters["prompt"] and "<" in parameters["prompt"]:
-                    params_lora = re.findall(r'<lora:[^>]+>', parameters["prompt"])
-                if "Loras" in parameters:
-                    params_lora += re.findall(r'<lora:[^>]+>', parameters["Loras"])
-
-                if params_lora:
-                    parsed_params = []
-                    for tag_l in params_lora:
-                        try:
-                            inner = tag_l.strip("<>")  # remove < >
-                            _, data_l = inner.split(":", 1)  # remove the "lora:" part
-                            parts_l = data_l.split(":")
-
-                            name_l = parts_l[0]
-                            weight_l = float(parts_l[1]) if len(parts_l) > 1 else 1.0  # default weight = 1.0
-
-                            parsed_params.append((name_l, weight_l))
-                        except Exception as e:
-                            print(f"Error parsing LoRA tag {tag_l}: {e}")
-
-                    num_lora = 1
-                    for parsed_l, parsed_s in parsed_params:
-                        filtered_loras = [m for m in lora_model_list if parsed_l in m]
-                        if filtered_loras:
-                            parameters[f"Lora_{num_lora}"] = filtered_loras[0]
-                            parameters[f"Lora_scale_{num_lora}"] = parsed_s
-                            num_lora += 1
-
-                # continue = discard new value
                 for key, val in parameters.items():
                     # print(val)
                     if key in valid_keys:
@@ -920,12 +833,9 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
                         if key == "Sampler":
                             if val not in scheduler_names:
                                 continue
-                        if key in ["Schedule type", "Hires schedule type"]:
+                        if key == "Schedule type":
                             if val not in SCHEDULE_TYPE_OPTIONS:
-                                continue
-                        if key == "Hires sampler":
-                            if val not in POST_PROCESSING_SAMPLER:
-                                continue
+                                val = "Automatic"
                         elif key == "Clip skip":
                             if "," in str(val):
                                 val = val.replace(",", "")
@@ -933,15 +843,15 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
                             val = True
                         if key == "prompt":
                             if ">" in val and "<" in val:
-                                val = re.sub(r'<[^>]+>', '', val)  # Delete html and loras
+                                val = re.sub(r'<[^>]+>', '', val)
                                 print("Removed LoRA written in the prompt")
                         if key in ["prompt", "neg_prompt"]:
                             val = re.sub(r'\s+', ' ', re.sub(r',+', ',', val)).strip()
-                        if key in ["Steps", "width", "height", "Seed", "Hires steps", "Image resolution"]:
+                        if key in ["Steps", "width", "height", "Seed"]:
                             val = int(val)
                        if key == "FreeU":
                            val = True
-                        if key in ["CFG scale", "PAG", "Hires upscale", "Hires denoising strength", "Hires CFG", "Strength"]:
+                        if key in ["CFG scale", "PAG"]:
                            val = float(val)
                        if key == "Model":
                            filtered_models = [m for m in model_list if val in m]
@@ -949,12 +859,8 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
                                val = filtered_models[0]
                            else:
                                val = name_model
-                        if key == "Hires upscaler":
-                            if val not in UPSCALER_KEYS:
-                                continue
                        if key == "Seed":
                            continue
-
                        valid_receptors[key] = gr.update(value=val)
                        # print(val, type(val))
                 # print(valid_receptors)
@@ -962,6 +868,24 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
                    print(str(e))
                 return [value for value in valid_receptors.values()]
 
+            set_params_gui.click(
+                run_set_params_gui, [prompt_gui, model_name_gui], [
+                    prompt_gui,
+                    neg_prompt_gui,
+                    steps_gui,
+                    img_width_gui,
+                    img_height_gui,
+                    seed_gui,
+                    sampler_gui,
+                    cfg_gui,
+                    clip_skip_gui,
+                    model_name_gui,
+                    schedule_type_gui,
+                    pag_scale_gui,
+                    free_u_gui,
+                ],
+            )
+
             def run_clear_prompt_gui():
                 return gr.update(value=""), gr.update(value="")
             clear_prompt_gui.click(
@@ -974,15 +898,15 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
                 run_set_random_seed, [], seed_gui
             )
 
-            num_images_gui = gr.Slider(minimum=1, maximum=(8 if IS_ZERO_GPU else 20), step=1, value=1, label="Images")
-            prompt_syntax_gui = gr.Dropdown(label="Prompt Syntax", choices=PROMPT_W_OPTIONS, value=PROMPT_W_OPTIONS[0][1])
+            num_images_gui = gr.Slider(minimum=1, maximum=5, step=1, value=1, label="Images")
+            prompt_syntax_gui = gr.Dropdown(label="Prompt Syntax", choices=PROMPT_W_OPTIONS, value=PROMPT_W_OPTIONS[1][1])
             vae_model_gui = gr.Dropdown(label="VAE Model", choices=vae_model_list, value=vae_model_list[0])
 
             with gr.Accordion("Hires fix", open=False, visible=True):
 
                 upscaler_model_path_gui = gr.Dropdown(label="Upscaler", choices=UPSCALER_KEYS, value=UPSCALER_KEYS[0])
                 upscaler_increases_size_gui = gr.Slider(minimum=1.1, maximum=4., step=0.1, value=1.2, label="Upscale by")
-                upscaler_tile_size_gui = gr.Slider(minimum=0, maximum=512, step=16, value=(0 if IS_ZERO_GPU else 192), label="Upscaler Tile Size", info="0 = no tiling")
+                upscaler_tile_size_gui = gr.Slider(minimum=0, maximum=512, step=16, value=0, label="Upscaler Tile Size", info="0 = no tiling")
                 upscaler_tile_overlap_gui = gr.Slider(minimum=0, maximum=48, step=1, value=8, label="Upscaler Tile Overlap")
                 hires_steps_gui = gr.Slider(minimum=0, value=30, maximum=100, step=1, label="Hires Steps")
                 hires_denoising_strength_gui = gr.Slider(minimum=0.1, maximum=1.0, step=0.01, value=0.55, label="Hires Denoising Strength")
@@ -999,8 +923,7 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
                     return gr.Dropdown(label=label, choices=lora_model_list, value="None", allow_custom_value=True, visible=visible)
 
                 def lora_scale_slider(label, visible=True):
-                    val_lora = 8 if IS_ZERO_GPU else 10
-                    return gr.Slider(minimum=-val_lora, maximum=val_lora, step=0.01, value=0.33, label=label, visible=visible)
+                    return gr.Slider(minimum=-2, maximum=2, step=0.01, value=0.33, label=label, visible=visible)
 
                 lora1_gui = lora_dropdown("Lora1")
                 lora_scale_1_gui = lora_scale_slider("Lora Scale 1")
@@ -1012,10 +935,10 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
                 lora_scale_4_gui = lora_scale_slider("Lora Scale 4")
                 lora5_gui = lora_dropdown("Lora5")
                 lora_scale_5_gui = lora_scale_slider("Lora Scale 5")
-                lora6_gui = lora_dropdown("Lora6", visible=(not IS_ZERO_GPU))
-                lora_scale_6_gui = lora_scale_slider("Lora Scale 6", visible=(not IS_ZERO_GPU))
-                lora7_gui = lora_dropdown("Lora7", visible=(not IS_ZERO_GPU))
-                lora_scale_7_gui = lora_scale_slider("Lora Scale 7", visible=(not IS_ZERO_GPU))
+                lora6_gui = lora_dropdown("Lora6", visible=False)
+                lora_scale_6_gui = lora_scale_slider("Lora Scale 6", visible=False)
+                lora7_gui = lora_dropdown("Lora7", visible=False)
+                lora_scale_7_gui = lora_scale_slider("Lora Scale 7", visible=False)
 
                 with gr.Accordion("From URL", open=False, visible=True):
                     text_lora = gr.Textbox(
@@ -1024,7 +947,7 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
                        lines=1,
                        info="It has to be .safetensors files, and you can also download them from Hugging Face.",
                    )
-                    romanize_text = gr.Checkbox(value=False, label="Transliterate name", visible=(not IS_ZERO_GPU))
+                    romanize_text = gr.Checkbox(value=False, label="Transliterate name", visible=False)
                    button_lora = gr.Button("Get and Refresh the LoRA Lists")
                    new_lora_status = gr.HTML()
                    button_lora.click(
@@ -1089,8 +1012,8 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
                 preprocess_resolution_gui = gr.Slider(minimum=64, maximum=2048, step=64, value=512, label="Preprocessor Resolution")
                 low_threshold_gui = gr.Slider(minimum=1, maximum=255, step=1, value=100, label="'CANNY' low threshold")
                 high_threshold_gui = gr.Slider(minimum=1, maximum=255, step=1, value=200, label="'CANNY' high threshold")
-                value_threshold_gui = gr.Slider(minimum=0.0, maximum=2.0, step=0.01, value=0.1, label="'MLSD' Hough value threshold")
-                distance_threshold_gui = gr.Slider(minimum=0.0, maximum=20.0, step=0.01, value=0.1, label="'MLSD' Hough distance threshold")
+                value_threshold_gui = gr.Slider(minimum=1, maximum=2.0, step=0.01, value=0.1, label="'MLSD' Hough value threshold")
+                distance_threshold_gui = gr.Slider(minimum=1, maximum=20.0, step=0.01, value=0.1, label="'MLSD' Hough distance threshold")
                 recolor_gamma_correction_gui = gr.Number(minimum=0., maximum=25., value=1., step=0.001, label="'RECOLOR' gamma correction")
                 tile_blur_sigma_gui = gr.Number(minimum=0, maximum=100, value=9, step=1, label="'TILE' blur sigma")
@@ -1125,7 +1048,7 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
                    gr.Info(f"{len(sd_gen.model.STYLE_NAMES)} styles loaded")
                    return gr.update(value=None, choices=sd_gen.model.STYLE_NAMES)
 
-                style_button.click(load_json_style_file, [style_json_gui], [style_prompt_gui])
+                style_button.click(load_json_style_file, [style_json_gui], [style_prompt_gui])
 
                 with gr.Accordion("Textual inversion", open=False, visible=False):
                    active_textual_inversion_gui = gr.Checkbox(value=False, label="Active Textual Inversion in prompt")
@@ -1175,62 +1098,20 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
                    hires_before_adetailer_gui = gr.Checkbox(value=False, label="Hires Before Adetailer")
                    hires_after_adetailer_gui = gr.Checkbox(value=True, label="Hires After Adetailer")
                    generator_in_cpu_gui = gr.Checkbox(value=False, label="Generator in CPU")
-                    with gr.Column(visible=(not IS_ZERO_GPU)):
-                        image_storage_location_gui = gr.Textbox(value=img_path, label="Image Storage Location")
-                        disable_progress_bar_gui = gr.Checkbox(value=False, label="Disable Progress Bar")
-                        leave_progress_bar_gui = gr.Checkbox(value=True, label="Leave Progress Bar")
 
                 with gr.Accordion("More settings", open=False, visible=False):
                    loop_generation_gui = gr.Slider(minimum=1, value=1, label="Loop Generation")
                    retain_task_cache_gui = gr.Checkbox(value=False, label="Retain task model in cache")
+                    leave_progress_bar_gui = gr.Checkbox(value=True, label="Leave Progress Bar")
+                    disable_progress_bar_gui = gr.Checkbox(value=False, label="Disable Progress Bar")
                    display_images_gui = gr.Checkbox(value=False, label="Display Images")
                    image_previews_gui = gr.Checkbox(value=True, label="Image Previews")
+                    image_storage_location_gui = gr.Textbox(value="./images", label="Image Storage Location")
                    retain_compel_previous_load_gui = gr.Checkbox(value=False, label="Retain Compel Previous Load")
                    retain_detailfix_model_previous_load_gui = gr.Checkbox(value=False, label="Retain Detailfix Model Previous Load")
                    retain_hires_model_previous_load_gui = gr.Checkbox(value=False, label="Retain Hires Model Previous Load")
                    xformers_memory_efficient_attention_gui = gr.Checkbox(value=False, label="Xformers Memory Efficient Attention")
 
-                set_params_gui.click(
-                    run_set_params_gui, [prompt_gui, model_name_gui], [
-                        prompt_gui,
-                        neg_prompt_gui,
-                        steps_gui,
-                        img_width_gui,
-                        img_height_gui,
-                        seed_gui,
-                        sampler_gui,
-                        cfg_gui,
-                        clip_skip_gui,
-                        model_name_gui,
-                        schedule_type_gui,
-                        pag_scale_gui,
-                        free_u_gui,
-                        upscaler_model_path_gui,
-                        upscaler_increases_size_gui,
-                        hires_steps_gui,
-                        hires_denoising_strength_gui,
-                        hires_guidance_scale_gui,
-                        hires_sampler_gui,
-                        hires_schedule_type_gui,
-                        image_resolution_gui,
-                        strength_gui,
-                        lora1_gui,
-                        lora_scale_1_gui,
-                        lora2_gui,
-                        lora_scale_2_gui,
-                        lora3_gui,
-                        lora_scale_3_gui,
-                        lora4_gui,
-                        lora_scale_4_gui,
-                        lora5_gui,
-                        lora_scale_5_gui,
-                        lora6_gui,
-                        lora_scale_6_gui,
-                        lora7_gui,
-                        lora_scale_7_gui,
-                    ],
-                )
-
                 with gr.Accordion("Examples and help", open=False, visible=True):
                    gr.Markdown(HELP_GUI)
                    gr.Markdown(EXAMPLES_GUI_HELP)
@@ -1286,21 +1167,10 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
                            # "hsl(360, 120, 120)" # in fact any valid colorstring
                        ]
                    ),
-                    eraser=gr.Eraser(default_size="16"),
-                    render=True,
-                    visible=False,
-                    interactive=False,
+                    eraser=gr.Eraser(default_size="16")
                 )
-
-                show_canvas = gr.Button("SHOW INPAINT CANVAS")
-
-                def change_visibility_canvas():
-                    return gr.update(visible=True, interactive=True), gr.update(visible=False)
-                show_canvas.click(change_visibility_canvas, [], [image_base, show_canvas])
-
                 invert_mask = gr.Checkbox(value=False, label="Invert mask")
                 btn = gr.Button("Create mask")
-
                 with gr.Column(scale=1):
                    img_source = gr.Image(interactive=False)
                    img_result = gr.Image(label="Mask image", show_label=True, interactive=False)
@@ -1362,7 +1232,6 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
        outputs=[load_model_gui],
        queue=True,
        show_progress="minimal",
-        api_name=(False if HIDE_API else None),
    ).success(
        fn=sd_gen_generate_pipeline,  # fn=sd_gen.generate_pipeline,
        inputs=[
@@ -1491,16 +1360,12 @@ with gr.Blocks(theme=args.theme, css=CSS, fill_width=True, fill_height=False) as
        outputs=[load_model_gui, result_images, actual_task_info],
        queue=True,
        show_progress="minimal",
-        # api_name=(False if HIDE_API else None),
    )
 
-if __name__ == "__main__":
-    app.queue()
-    app.launch(
-        show_error=True,
-        share=args.share_enabled,
-        debug=True,
-        ssr_mode=args.ssr,
-        allowed_paths=[allowed_path],
-        show_api=(not HIDE_API),
-    )
+app.queue()
+
+app.launch(
+    show_error=True,
+    debug=True,
+    allowed_paths=["./images/"],
+)
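For reference, the `<lora:name:weight>` prompt-tag parser removed by the `@@ -883,36 +826,6 @@` hunk above can be exercised standalone. This sketch reuses the deleted regex and split logic; the prompt string and model list are illustrative, and the deleted code's try/except is omitted for brevity:

```python
import re

prompt = "1girl, masterpiece <lora:detail_slider:0.8> <lora:lineart>"
lora_model_list = ["loras/detail_slider_v4.safetensors", "loras/lineart_xl.safetensors"]

parsed_params = []
for tag_l in re.findall(r'<lora:[^>]+>', prompt):
    inner = tag_l.strip("<>")        # remove < >
    _, data_l = inner.split(":", 1)  # remove the "lora:" part
    parts_l = data_l.split(":")
    name_l = parts_l[0]
    weight_l = float(parts_l[1]) if len(parts_l) > 1 else 1.0  # default weight = 1.0
    parsed_params.append((name_l, weight_l))

# Substring-match each parsed name against the local LoRA list, filling the
# Lora_1..Lora_7 slots in order, as the deleted block did.
num_lora = 1
for parsed_l, parsed_s in parsed_params:
    filtered_loras = [m for m in lora_model_list if parsed_l in m]
    if filtered_loras:
        print(f"Lora_{num_lora} = {filtered_loras[0]} @ {parsed_s}")
        num_lora += 1
# Lora_1 = loras/detail_slider_v4.safetensors @ 0.8
# Lora_2 = loras/lineart_xl.safetensors @ 1.0
```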
 
constants.py CHANGED
@@ -7,13 +7,10 @@ from stablepy import (
     ALL_BUILTIN_UPSCALERS,
     IP_ADAPTERS_SD,
     IP_ADAPTERS_SDXL,
-    PROMPT_WEIGHT_OPTIONS_PRIORITY,
 )
 
-IS_ZERO_GPU = bool(os.getenv("SPACES_ZERO_GPU"))
-
 # - **Download Models**
-DOWNLOAD_MODEL = "https://huggingface.co/zuv0/test/resolve/main/milkyWonderland_v40.safetensors"
+DOWNLOAD_MODEL = "https://huggingface.co/TechnoByte/MilkyWonderland/resolve/main/milkyWonderland_v40.safetensors"
 
 # - **Download VAEs**
 DOWNLOAD_VAE = "https://huggingface.co/fp16-guy/anything_kl-f8-anime2_vae-ft-mse-840000-ema-pruned_blessed_clearvae_fp16_cleaned/resolve/main/vae-ft-mse-840000-ema-pruned_fp16.safetensors?download=true"
@@ -26,12 +23,12 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
     'Laxhar/noobai-XL-1.1',
     'Laxhar/noobai-XL-Vpred-1.0',
     'black-forest-labs/FLUX.1-dev',
-    'black-forest-labs/FLUX.1-Krea-dev',
     'John6666/blue-pencil-flux1-v021-fp8-flux',
     'John6666/wai-ani-flux-v10forfp8-fp8-flux',
     'John6666/xe-anime-flux-v04-fp8-flux',
     'John6666/lyh-anime-flux-v2a1-fp8-flux',
     'John6666/carnival-unchained-v10-fp8-flux',
+    'John6666/iniverse-mix-xl-sfwnsfw-fluxdfp16nsfwv11-fp8-flux',
     'Freepik/flux.1-lite-8B-alpha',
     'shauray/FluxDev-HyperSD-merged',
     'mikeyandfriends/PixelWave_FLUX.1-dev_03',
@@ -40,19 +37,23 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
     # 'ostris/OpenFLUX.1',
     'shuttleai/shuttle-3-diffusion',
     'Laxhar/noobai-XL-1.0',
+    'John6666/noobai-xl-nai-xl-epsilonpred10version-sdxl',
     'Laxhar/noobai-XL-0.77',
     'John6666/noobai-xl-nai-xl-epsilonpred075version-sdxl',
     'Laxhar/noobai-XL-0.6',
     'John6666/noobai-xl-nai-xl-epsilonpred05version-sdxl',
     'John6666/noobai-cyberfix-v10-sdxl',
     'John6666/noobaiiter-xl-vpred-v075-sdxl',
-    'John6666/ripplemix-noob-vpred10-illustrious01-v14-sdxl',
-    'John6666/sigmaih-15-sdxl',
+    'John6666/ntr-mix-illustrious-xl-noob-xl-v40-sdxl',
+    'John6666/ntr-mix-illustrious-xl-noob-xl-ntrmix35-sdxl',
+    'John6666/ntr-mix-illustrious-xl-noob-xl-v777-sdxl',
+    'John6666/ntr-mix-illustrious-xl-noob-xl-v777forlora-sdxl',
     'John6666/ntr-mix-illustrious-xl-noob-xl-xi-sdxl',
     'John6666/ntr-mix-illustrious-xl-noob-xl-xii-sdxl',
     'John6666/ntr-mix-illustrious-xl-noob-xl-xiii-sdxl',
     'John6666/mistoon-anime-v10illustrious-sdxl',
-    'John6666/hassaku-xl-illustrious-v22-sdxl',
+    'John6666/hassaku-xl-illustrious-v10-sdxl',
+    'John6666/hassaku-xl-illustrious-v10style-sdxl',
     'John6666/haruki-mix-illustrious-v10-sdxl',
     'John6666/noobreal-v10-sdxl',
     'John6666/complicated-noobai-merge-vprediction-sdxl',
@@ -63,7 +64,6 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
     'Laxhar/noobai-XL-Vpred-0.65',
     'Laxhar/noobai-XL-Vpred-0.6',
     'John6666/cat-tower-noobai-xl-checkpoint-v14vpred-sdxl',
-    'John6666/cat-tower-noobai-xl-checkpoint-v15vpred-sdxl',
     'John6666/noobai-xl-nai-xl-vpred05version-sdxl',
     'John6666/noobai-fusion2-vpred-itercomp-v1-sdxl',
     'John6666/noobai-xl-nai-xl-vpredtestversion-sdxl',
@@ -74,34 +74,19 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
     'John6666/obsession-illustriousxl-v21-sdxl',
     'John6666/obsession-illustriousxl-v30-sdxl',
     'John6666/obsession-illustriousxl-v31-sdxl',
-    'John6666/one-obsession-13-sdxl',
-    'John6666/one-obsession-14-24d-sdxl',
-    'John6666/one-obsession-15-noobai-sdxl',
-    'John6666/one-obsession-v16-noobai-sdxl',
-    'John6666/prefect-illustrious-xl-v3-sdxl',
     'John6666/wai-nsfw-illustrious-v70-sdxl',
-    'John6666/wai-nsfw-illustrious-sdxl-v140-sdxl',
     'John6666/illustrious-pony-mix-v3-sdxl',
-    'John6666/nova-anime-xl-il-v90-sdxl',
-    'John6666/nova-anime-xl-il-v110-sdxl',
-    'John6666/nova-orange-xl-re-v10-sdxl',
-    'John6666/nova-orange-xl-v110-sdxl',
-    'John6666/nova-orange-xl-re-v20-sdxl',
-    'John6666/nova-unreal-xl-v60-sdxl',
-    'John6666/nova-unreal-xl-v70-sdxl',
-    'John6666/nova-unreal-xl-v80-sdxl',
-    'John6666/nova-cartoon-xl-v40-sdxl',
+    'John6666/nova-anime-xl-illustriousv10-sdxl',
+    'John6666/nova-orange-xl-v30-sdxl',
     'John6666/silvermoon-mix03-illustrious-v10-sdxl',
     'eienmojiki/Anything-XL',
     'eienmojiki/Starry-XL-v5.2',
-    'votepurchase/plantMilkModelSuite_walnut',
     'John6666/meinaxl-v2-sdxl',
     'Eugeoter/artiwaifu-diffusion-2.0',
     'comin/IterComp',
-    'John6666/epicrealism-xl-v8kiss-sdxl',
-    'John6666/epicrealism-xl-v10kiss2-sdxl',
     'John6666/epicrealism-xl-vxiabeast-sdxl',
-    'John6666/epicrealism-xl-vxvii-crystal-clear-realism-sdxl',
+    'John6666/epicrealism-xl-v10kiss2-sdxl',
+    'John6666/epicrealism-xl-v8kiss-sdxl',
     'misri/zavychromaxl_v80',
     'SG161222/RealVisXL_V4.0',
     'SG161222/RealVisXL_V5.0',
@@ -117,10 +102,8 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
     'John6666/ebara-mfcg-pony-mix-v12-sdxl',
     'John6666/t-ponynai3-v51-sdxl',
     'John6666/t-ponynai3-v65-sdxl',
-    'John6666/t-ponynai3-v7-sdxl',
     'John6666/prefect-pony-xl-v3-sdxl',
     'John6666/prefect-pony-xl-v4-sdxl',
-    'John6666/prefect-pony-xl-v50-sdxl',
     'John6666/mala-anime-mix-nsfw-pony-xl-v5-sdxl',
     'John6666/wai-ani-nsfw-ponyxl-v10-sdxl',
     'John6666/wai-real-mix-v11-sdxl',
@@ -128,14 +111,13 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
     'John6666/wai-c-v6-sdxl',
     'John6666/iniverse-mix-xl-sfwnsfw-pony-guofeng-v43-sdxl',
     'John6666/sifw-annihilation-xl-v2-sdxl',
-    'John6666/sifw-annihilation-xl-v305illustrious-beta-sdxl',
     'John6666/photo-realistic-pony-v5-sdxl',
     'John6666/pony-realism-v21main-sdxl',
     'John6666/pony-realism-v22main-sdxl',
-    'John6666/pony-realism-v23-ultra-sdxl',
+    'John6666/cyberrealistic-pony-v63-sdxl',
+    'John6666/cyberrealistic-pony-v64-sdxl',
     'John6666/cyberrealistic-pony-v65-sdxl',
     'John6666/cyberrealistic-pony-v7-sdxl',
-    'John6666/cyberrealistic-pony-v127-alternative-sdxl',
     'GraydientPlatformAPI/realcartoon-pony-diffusion',
     'John6666/nova-anime-xl-pony-v5-sdxl',
     'John6666/autismmix-sdxl-autismmix-pony-sdxl',
@@ -145,15 +127,13 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
     'John6666/duchaiten-pony-real-v11fix-sdxl',
     'John6666/duchaiten-pony-real-v20-sdxl',
     'John6666/duchaiten-pony-xl-no-score-v70-sdxl',
     'KBlueLeaf/Kohaku-XL-Zeta',
     'cagliostrolab/animagine-xl-3.1',
-    'cagliostrolab/animagine-xl-4.0',
     'yodayo-ai/kivotos-xl-2.0',
     'yodayo-ai/holodayo-xl-2.1',
     'yodayo-ai/clandestine-xl-1.0',
-    'https://huggingface.co/chemwolf/Karmix-XL-v0/resolve/main/Karmix-XL-v0.safetensors?download=true',
-    'https://civitai.com/api/download/models/128713?type=Model&format=SafeTensor&size=pruned&fp=fp16',
-    'https://civitai.com/models/30240?modelVersionId=125771',
     'digiplay/majicMIX_sombre_v2',
     'digiplay/majicMIX_realistic_v6',
     'digiplay/majicMIX_realistic_v7',
@@ -179,9 +159,9 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
     'GraydientPlatformAPI/realcartoon3d-17',
     'GraydientPlatformAPI/realcartoon-pixar11',
     'GraydientPlatformAPI/realcartoon-real17',
 ]
 
-
 DIFFUSERS_FORMAT_LORAS = [
     "nerijs/animation2k-flux",
     "XLabs-AI/flux-RealismLora",
@@ -203,11 +183,8 @@ DIRECTORY_VAES = 'vaes'
 DIRECTORY_EMBEDS = 'embedings'
 DIRECTORY_UPSCALERS = 'upscalers'
 
 STORAGE_ROOT = "/home/user/"
-CACHE_HF_ROOT = os.path.expanduser("~/.cache/huggingface")
-CACHE_HF = os.path.join(CACHE_HF_ROOT, "hub")
-if IS_ZERO_GPU:
-    os.environ["HF_HOME"] = CACHE_HF
 
 TASK_STABLEPY = {
     'txt2img': 'txt2img',
@@ -249,7 +226,6 @@ UPSCALER_DICT_GUI = {
     # "realesr-general-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth",
     # "realesr-general-wdn-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth",
     "4x-UltraSharp": "https://huggingface.co/Shandypur/ESRGAN-4x-UltraSharp/resolve/main/4x-UltraSharp.pth",
-    "Real-ESRGAN-Anime-finetuning": "https://huggingface.co/danhtran2mind/Real-ESRGAN-Anime-finetuning/resolve/main/Real-ESRGAN-Anime-finetuning.pth",
     "4x_foolhardy_Remacri": "https://huggingface.co/FacehugmanIII/4x_foolhardy_Remacri/resolve/main/4x_foolhardy_Remacri.pth",
     "Remacri4xExtraSmoother": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/Remacri%204x%20ExtraSmoother.pth",
     "AnimeSharp4x": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/AnimeSharp%204x.pth",
@@ -322,9 +298,15 @@ DIFFUSERS_CONTROLNET_MODEL = [
     # "InstantX/FLUX.1-dev-Controlnet-Canny",
 ]
 
-PROMPT_W_OPTIONS = [(pwf, pwf) for pwf in PROMPT_WEIGHT_OPTIONS_PRIORITY]
-PROMPT_W_OPTIONS[0] = ("Classic format: (word:weight)", "Classic")
-PROMPT_W_OPTIONS[1] = ("Compel format: (word)weight", "Compel")
 
 WARNING_MSG_VAE = (
     "Use the right VAE for your model to maintain image quality. The wrong"
@@ -377,11 +359,9 @@ SUBTITLE_GUI = (
     " to perform different tasks in image generation."
 )
 
-msg_zero = "" if not IS_ZERO_GPU else "- The current space runs on a ZERO GPU which is assigned for approximately 60 seconds; Therefore, if you submit expensive tasks, the operation may be canceled upon reaching the maximum allowed time with 'GPU TASK ABORTED'."
-
 HELP_GUI = (
-    f"""### Help:
-    {msg_zero}
+    """### Help:
     - Distorted or strange images often result from high prompt weights, so it's best to use low weights and scales, and consider using Classic variants like 'Classic-original'.
     - For better results with Pony Diffusion, try using sampler DPM++ 1s or DPM2 with Compel or Classic prompt weights.
     """
@@ -505,7 +485,7 @@ EXAMPLES_GUI = [
     20,
     4.0,
     -1,
-    ("loras/Coloring_book_-_LineArt.safetensors" if os.path.exists("loras/Coloring_book_-_LineArt.safetensors") else "None"),
+    "loras/Coloring_book_-_LineArt.safetensors",
     1.0,
     "DPM++ 2M SDE",
     1024,
@@ -600,7 +580,6 @@
 RESOURCES = (
     """### Resources
     - John6666's space has some great features you might find helpful [link](https://huggingface.co/spaces/John6666/DiffuseCraftMod).
-    - Try the image generator in Colab’s free tier, which provides free GPU [link](https://github.com/R3gm/SD_diffusers_interactive).
-    - `DiffuseCraft` in Colab:[link](https://github.com/R3gm/DiffuseCraft?tab=readme-ov-file#diffusecraft).
     """
-)
+)
 
130
+ 'odyssey-labs/OdysseyXL-3.0',
131
+ 'odyssey-labs/OdysseyXL-4.0',
132
  'KBlueLeaf/Kohaku-XL-Zeta',
133
  'cagliostrolab/animagine-xl-3.1',
 
134
  'yodayo-ai/kivotos-xl-2.0',
135
  'yodayo-ai/holodayo-xl-2.1',
136
  'yodayo-ai/clandestine-xl-1.0',
 
 
 
137
  'digiplay/majicMIX_sombre_v2',
138
  'digiplay/majicMIX_realistic_v6',
139
  'digiplay/majicMIX_realistic_v7',
 
159
  'GraydientPlatformAPI/realcartoon3d-17',
160
  'GraydientPlatformAPI/realcartoon-pixar11',
161
  'GraydientPlatformAPI/realcartoon-real17',
162
+ 'nitrosocke/Ghibli-Diffusion',
163
  ]
164
 
 
165
  DIFFUSERS_FORMAT_LORAS = [
166
  "nerijs/animation2k-flux",
167
  "XLabs-AI/flux-RealismLora",
 
183
  DIRECTORY_EMBEDS = 'embedings'
184
  DIRECTORY_UPSCALERS = 'upscalers'
185
 
186
+ CACHE_HF = "/home/user/.cache/huggingface/hub/"
187
  STORAGE_ROOT = "/home/user/"
 
 
 
 
188
 
189
  TASK_STABLEPY = {
190
  'txt2img': 'txt2img',
 
226
  # "realesr-general-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth",
227
  # "realesr-general-wdn-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth",
228
  "4x-UltraSharp": "https://huggingface.co/Shandypur/ESRGAN-4x-UltraSharp/resolve/main/4x-UltraSharp.pth",
 
229
  "4x_foolhardy_Remacri": "https://huggingface.co/FacehugmanIII/4x_foolhardy_Remacri/resolve/main/4x_foolhardy_Remacri.pth",
230
  "Remacri4xExtraSmoother": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/Remacri%204x%20ExtraSmoother.pth",
231
  "AnimeSharp4x": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/AnimeSharp%204x.pth",
 
298
  # "InstantX/FLUX.1-dev-Controlnet-Canny",
299
  ]
300
 
301
+ PROMPT_W_OPTIONS = [
302
+ ("Compel format: (word)weight", "Compel"),
303
+ ("Classic format: (word:weight)", "Classic"),
304
+ ("Classic-original format: (word:weight)", "Classic-original"),
305
+ ("Classic-no_norm format: (word:weight)", "Classic-no_norm"),
306
+ ("Classic-sd_embed format: (word:weight)", "Classic-sd_embed"),
307
+ ("Classic-ignore", "Classic-ignore"),
308
+ ("None", "None"),
309
+ ]
310
 
311
  WARNING_MSG_VAE = (
312
  "Use the right VAE for your model to maintain image quality. The wrong"
 
359
  " to perform different tasks in image generation."
360
  )
361
 
 
 
362
  HELP_GUI = (
363
+ """### Help:
364
+ - The current space runs on a ZERO GPU, which is assigned for approximately 60 seconds; therefore, if you submit expensive tasks, the operation may be canceled with 'GPU TASK ABORTED' once the maximum allowed time is reached.
365
  - Distorted or strange images often result from high prompt weights, so it's best to use low weights and scales, and consider using Classic variants like 'Classic-original'.
366
  - For better results with Pony Diffusion, try using sampler DPM++ 1s or DPM2 with Compel or Classic prompt weights.
367
  """
 
485
  20,
486
  4.0,
487
  -1,
488
+ "loras/Coloring_book_-_LineArt.safetensors",
489
  1.0,
490
  "DPM++ 2M SDE",
491
  1024,
 
580
  RESOURCES = (
581
  """### Resources
582
  - John6666's space has some great features you might find helpful [link](https://huggingface.co/spaces/John6666/DiffuseCraftMod).
583
+ - You can also try the image generator in Colab’s free tier, which provides a free GPU [link](https://github.com/R3gm/SD_diffusers_interactive).
 
584
  """
585
+ )
image_processor.py CHANGED
@@ -92,8 +92,8 @@ def preprocessor_tab():
92
  pre_processor_resolution = gr.Slider(minimum=64, maximum=2048, step=64, value=512, label="Preprocessor Resolution")
93
  pre_low_threshold = gr.Slider(minimum=1, maximum=255, step=1, value=100, label="'CANNY' low threshold")
94
  pre_high_threshold = gr.Slider(minimum=1, maximum=255, step=1, value=200, label="'CANNY' high threshold")
95
- pre_value_threshold = gr.Slider(minimum=0., maximum=2.0, step=0.01, value=0.1, label="'MLSD' Hough value threshold")
96
- pre_distance_threshold = gr.Slider(minimum=0., maximum=20.0, step=0.01, value=0.1, label="'MLSD' Hough distance threshold")
97
  pre_recolor_mode = gr.Dropdown(label="'RECOLOR' mode", choices=["luminance", "intensity"], value="luminance")
98
  pre_recolor_gamma_correction = gr.Number(minimum=0., maximum=25., value=1., step=0.001, label="'RECOLOR' gamma correction")
99
  pre_blur_k_size = gr.Number(minimum=0, maximum=100, value=9, step=1, label="'BLUR' sigma")
 
92
  pre_processor_resolution = gr.Slider(minimum=64, maximum=2048, step=64, value=512, label="Preprocessor Resolution")
93
  pre_low_threshold = gr.Slider(minimum=1, maximum=255, step=1, value=100, label="'CANNY' low threshold")
94
  pre_high_threshold = gr.Slider(minimum=1, maximum=255, step=1, value=200, label="'CANNY' high threshold")
95
+ pre_value_threshold = gr.Slider(minimum=0.01, maximum=2.0, step=0.01, value=0.1, label="'MLSD' Hough value threshold")
96
+ pre_distance_threshold = gr.Slider(minimum=0.01, maximum=20.0, step=0.01, value=0.1, label="'MLSD' Hough distance threshold")
97
  pre_recolor_mode = gr.Dropdown(label="'RECOLOR' mode", choices=["luminance", "intensity"], value="luminance")
98
  pre_recolor_gamma_correction = gr.Number(minimum=0., maximum=25., value=1., step=0.001, label="'RECOLOR' gamma correction")
99
  pre_blur_k_size = gr.Number(minimum=0, maximum=100, value=9, step=1, label="'BLUR' sigma")
packages.txt CHANGED
@@ -1,3 +1,3 @@
1
  git-lfs
2
- aria2
3
  ffmpeg
 
1
  git-lfs
2
+ aria2
3
  ffmpeg
pre-requirements.txt DELETED
@@ -1 +0,0 @@
1
- pip>=23.0.0
 
 
requirements.txt CHANGED
@@ -1,13 +1,5 @@
1
- stablepy==0.6.5
2
- torch==2.5.1
3
- diffusers
4
  gdown
5
  opencv-python
6
- unidecode
7
- pydantic==2.10.6
8
- huggingface_hub
9
- hf_transfer
10
- hf_xet
11
- spaces
12
- gradio==5.44.1
13
- matplotlib-inline
 
1
+ stablepy==0.6.0
2
+ torch==2.2.0
 
3
  gdown
4
  opencv-python
5
+ unidecode
 
 
 
 
 
 
 
utils.py CHANGED
@@ -1,714 +1,485 @@
1
- import os
2
- import re
3
- import gradio as gr
4
- from constants import (
5
- DIFFUSERS_FORMAT_LORAS,
6
- CIVITAI_API_KEY,
7
- HF_TOKEN,
8
- MODEL_TYPE_CLASS,
9
- DIRECTORY_LORAS,
10
- DIRECTORY_MODELS,
11
- DIFFUSECRAFT_CHECKPOINT_NAME,
12
- CACHE_HF_ROOT,
13
- CACHE_HF,
14
- STORAGE_ROOT,
15
- )
16
- from huggingface_hub import HfApi, get_hf_file_metadata, snapshot_download
17
- from diffusers import DiffusionPipeline
18
- from huggingface_hub import model_info as model_info_data
19
- from diffusers.pipelines.pipeline_loading_utils import variant_compatible_siblings
20
- from stablepy.diffusers_vanilla.utils import checkpoint_model_type
21
- from pathlib import PosixPath
22
- from unidecode import unidecode
23
- import urllib.parse
24
- import copy
25
- import requests
26
- from requests.adapters import HTTPAdapter
27
- from urllib3.util import Retry
28
- import shutil
29
- import subprocess
30
- import json
31
- import html as _html
32
-
33
- IS_ZERO_GPU = bool(os.getenv("SPACES_ZERO_GPU"))
34
- USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:127.0) Gecko/20100101 Firefox/127.0'
35
- MODEL_ARCH = {
36
- 'stable-diffusion-xl-v1-base/lora': "Stable Diffusion XL (Illustrious, Pony, NoobAI)",
37
- 'stable-diffusion-v1/lora': "Stable Diffusion 1.5",
38
- 'flux-1-dev/lora': "Flux",
39
- }
40
-
41
-
42
- def read_safetensors_header_from_url(url: str):
43
- """Read safetensors header from a remote Hugging Face file."""
44
- meta = get_hf_file_metadata(url)
45
-
46
- # Step 1: first 8 bytes → header length
47
- resp = requests.get(meta.location, headers={"Range": "bytes=0-7"})
48
- resp.raise_for_status()
49
- header_len = int.from_bytes(resp.content, "little")
50
-
51
- # Step 2: fetch full header JSON
52
- end = 8 + header_len - 1
53
- resp = requests.get(meta.location, headers={"Range": f"bytes=8-{end}"})
54
- resp.raise_for_status()
55
- header_json = resp.content.decode("utf-8")
56
-
57
- return json.loads(header_json)
58
-
59
-
60
- def read_safetensors_header_from_file(path: str):
61
- """Read safetensors header from a local file."""
62
- with open(path, "rb") as f:
63
- # Step 1: first 8 bytes → header length
64
- header_len = int.from_bytes(f.read(8), "little")
65
-
66
- # Step 2: read header JSON
67
- header_json = f.read(header_len).decode("utf-8")
68
-
69
- return json.loads(header_json)
70
-
71
-
72
- class LoraHeaderInformation:
73
- """
74
- Encapsulates parsed info from a LoRA JSON header and provides
75
- a compact HTML summary via .to_html().
76
- """
77
-
78
- def __init__(self, json_data):
79
- self.original_json = copy.deepcopy(json_data or {})
80
-
81
- # Check if text encoder was trained
82
- # guard for json_data being a mapping
83
- try:
84
- self.text_encoder_trained = any("text_model" in ln for ln in json_data)
85
- except Exception:
86
- self.text_encoder_trained = False
87
-
88
- # Metadata (may be None)
89
- metadata = (json_data or {}).get("__metadata__", None)
90
- self.metadata = metadata
91
-
92
- # Default values
93
- self.architecture = "undefined"
94
- self.prediction_type = "undefined"
95
- self.base_model = "undefined"
96
- self.author = "undefined"
97
- self.title = "undefined"
98
- self.common_tags_list = []
99
-
100
- if metadata:
101
- self.architecture = MODEL_ARCH.get(
102
- metadata.get('modelspec.architecture', None),
103
- "undefined"
104
- )
105
-
106
- self.prediction_type = metadata.get('modelspec.prediction_type', "undefined")
107
- self.base_model = metadata.get('ss_sd_model_name', "undefined")
108
- self.author = metadata.get('modelspec.author', "undefined")
109
- self.title = metadata.get('modelspec.title', "undefined")
110
-
111
- base_model_hash = metadata.get('ss_new_sd_model_hash', None) # SHA256
112
- # AUTOV1 ss_sd_model_hash
113
- # https://civitai.com/api/v1/model-versions/by-hash/{base_model_hash} # Info
114
- if base_model_hash:
115
- self.base_model += f" hash={base_model_hash}"
116
-
117
- # Extract tags
118
- try:
119
- tags = metadata.get('ss_tag_frequency') if "ss_tag_frequency" in metadata else metadata.get('ss_datasets', "")
120
- tags = json.loads(tags) if tags else ""
121
-
122
- if isinstance(tags, list):
123
- tags = tags[0].get("tag_frequency", {})
124
-
125
- if tags:
126
- self.common_tags_list = list(tags[list(tags.keys())[0]].keys())
127
- except Exception:
128
- self.common_tags_list = []
129
-
130
- def to_dict(self):
131
- """Return a plain dict summary of parsed fields."""
132
- return {
133
- "architecture": self.architecture,
134
- "prediction_type": self.prediction_type,
135
- "base_model": self.base_model,
136
- "author": self.author,
137
- "title": self.title,
138
- "text_encoder_trained": bool(self.text_encoder_trained),
139
- "common_tags": self.common_tags_list,
140
- }
141
-
142
- def to_html(self, limit_tags=20):
143
- """
144
- Return a compact HTML snippet (string) showing the parsed info
145
- in a small font. Values are HTML-escaped.
146
- """
147
- # helper to escape
148
- esc = _html.escape
149
-
150
- rows = [
151
- ("Title", esc(str(self.title))),
152
- ("Author", esc(str(self.author))),
153
- ("Architecture", esc(str(self.architecture))),
154
- ("Base model", esc(str(self.base_model))),
155
- ("Prediction type", esc(str(self.prediction_type))),
156
- ("Text encoder trained", esc(str(self.text_encoder_trained))),
157
- ("Reference tags", esc(str(", ".join(self.common_tags_list[:limit_tags])))),
158
- ]
159
-
160
- # small, compact table with inline styling (small font)
161
- html_rows = "".join(
162
- f"<tr><th style='text-align:left;padding:2px 6px;white-space:nowrap'>{k}</th>"
163
- f"<td style='padding:2px 6px'>{v}</td></tr>"
164
- for k, v in rows
165
- )
166
-
167
- html_snippet = (
168
- "<div style='font-family:system-ui, -apple-system, \"Segoe UI\", Roboto, "
169
- "Helvetica, Arial, \"Noto Sans\", sans-serif; font-size:12px; line-height:1.2; "
170
- "'>"
171
- f"<table style='border-collapse:collapse; font-size:12px;'>"
172
- f"{html_rows}"
173
- "</table>"
174
- "</div>"
175
- )
176
-
177
- return html_snippet
178
-
179
-
180
- def request_json_data(url):
181
- model_version_id = url.split('/')[-1]
182
- if "?modelVersionId=" in model_version_id:
183
- match = re.search(r'modelVersionId=(\d+)', url)
184
- model_version_id = match.group(1)
185
-
186
- endpoint_url = f"https://civitai.com/api/v1/model-versions/{model_version_id}"
187
-
188
- params = {}
189
- headers = {'User-Agent': USER_AGENT, 'content-type': 'application/json'}
190
- session = requests.Session()
191
- retries = Retry(total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
192
- session.mount("https://", HTTPAdapter(max_retries=retries))
193
-
194
- try:
195
- result = session.get(endpoint_url, params=params, headers=headers, stream=True, timeout=(3.0, 15))
196
- result.raise_for_status()
197
- json_data = result.json()
198
- return json_data if json_data else None
199
- except Exception as e:
200
- print(f"Error: {e}")
201
- return None
202
-
203
-
204
- class ModelInformation:
205
- def __init__(self, json_data):
206
- self.model_version_id = json_data.get("id", "")
207
- self.model_id = json_data.get("modelId", "")
208
- self.download_url = json_data.get("downloadUrl", "")
209
- self.model_url = f"https://civitai.com/models/{self.model_id}?modelVersionId={self.model_version_id}"
210
- self.filename_url = next(
211
- (v.get("name", "") for v in json_data.get("files", []) if str(self.model_version_id) in v.get("downloadUrl", "") and v.get("type", "Model") == "Model"), ""
212
- )
213
- self.filename_url = self.filename_url if self.filename_url else ""
214
- self.description = json_data.get("description", "")
215
- if self.description is None:
216
- self.description = ""
217
- self.model_name = json_data.get("model", {}).get("name", "")
218
- self.model_type = json_data.get("model", {}).get("type", "")
219
- self.nsfw = json_data.get("model", {}).get("nsfw", False)
220
- self.poi = json_data.get("model", {}).get("poi", False)
221
- self.images = [img.get("url", "") for img in json_data.get("images", [])]
222
- self.example_prompt = json_data.get("trainedWords", [""])[0] if json_data.get("trainedWords") else ""
223
- self.original_json = copy.deepcopy(json_data)
224
-
225
-
226
- def get_civit_params(url):
227
- try:
228
- json_data = request_json_data(url)
229
- mdc = ModelInformation(json_data)
230
- if mdc.download_url and mdc.filename_url:
231
- return mdc.download_url, mdc.filename_url, mdc.model_url
232
- else:
233
- ValueError("Invalid Civitai model URL")
234
- except Exception as e:
235
- print(f"Error retrieving Civitai metadata: {e} — fallback to direct download")
236
- return url, None, None
237
-
238
-
239
- def civ_redirect_down(url, dir_, civitai_api_key, romanize, alternative_name):
240
- filename_base = filename = None
241
-
242
- if alternative_name:
243
- output_path = os.path.join(dir_, alternative_name)
244
- if os.path.exists(output_path):
245
- return output_path, alternative_name
246
-
247
- # Follow the redirect to get the actual download URL
248
- curl_command = (
249
- f'curl -L -sI --connect-timeout 5 --max-time 5 '
250
- f'-H "Content-Type: application/json" '
251
- f'-H "Authorization: Bearer {civitai_api_key}" "{url}"'
252
- )
253
-
254
- headers = os.popen(curl_command).read()
255
-
256
- # Look for the redirected "Location" URL
257
- location_match = re.search(r'location: (.+)', headers, re.IGNORECASE)
258
-
259
- if location_match:
260
- redirect_url = location_match.group(1).strip()
261
-
262
- # Extract the filename from the redirect URL's "Content-Disposition"
263
- filename_match = re.search(r'filename%3D%22(.+?)%22', redirect_url)
264
- if filename_match:
265
- encoded_filename = filename_match.group(1)
266
- # Decode the URL-encoded filename
267
- decoded_filename = urllib.parse.unquote(encoded_filename)
268
-
269
- filename = unidecode(decoded_filename) if romanize else decoded_filename
270
- # print(f"Filename redirect: {filename}")
271
-
272
- filename_base = alternative_name if alternative_name else filename
273
- if not filename_base:
274
- return None, None
275
- elif os.path.exists(os.path.join(dir_, filename_base)):
276
- return os.path.join(dir_, filename_base), filename_base
277
-
278
- aria2_command = (
279
- f'aria2c --console-log-level=error --summary-interval=10 -c -x 16 '
280
- f'-k 1M -s 16 -d "{dir_}" -o "{filename_base}" "{redirect_url}"'
281
- )
282
- r_code = os.system(aria2_command) # noqa
283
-
284
- # if r_code != 0:
285
- # raise RuntimeError(f"Failed to download file: {filename_base}. Error code: {r_code}")
286
-
287
- output_path = os.path.join(dir_, filename_base)
288
- if not os.path.exists(output_path):
289
- return None, filename_base
290
-
291
- return output_path, filename_base
292
-
293
-
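As a point of reference for the regex in `civ_redirect_down`: the filename it recovers from the redirect's `Content-Disposition` parameter arrives percent-encoded inside the URL, so `%3D%22...%22` brackets the quoted name. A small sketch of the same extraction on an invented redirect URL:

```python
import re
import urllib.parse

# Hypothetical redirect URL shaped like the ones Civitai returns;
# the host, id, and filename here are invented for illustration.
redirect_url = (
    "https://files.example/12345?response-content-disposition="
    "attachment%3B%20filename%3D%22my%20lora%20v1.safetensors%22"
)

match = re.search(r'filename%3D%22(.+?)%22', redirect_url)
if match:
    print(urllib.parse.unquote(match.group(1)))  # my lora v1.safetensors
```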
294
- def civ_api_down(url, dir_, civitai_api_key, civ_filename):
295
- """
296
- This method is prone to being blocked because it generates many temporary redirect links with aria2c.
297
- If an API key limit is reached, generating a new API key and using it can fix the issue.
298
- """
299
- output_path = None
300
-
301
- url_dl = url + f"?token={civitai_api_key}"
302
- if not civ_filename:
303
- aria2_command = f'aria2c -c -x 1 -s 1 -d "{dir_}" "{url_dl}"'
304
- os.system(aria2_command)
305
- else:
306
- output_path = os.path.join(dir_, civ_filename)
307
- if not os.path.exists(output_path):
308
- aria2_command = (
309
- f'aria2c --console-log-level=error --summary-interval=10 -c -x 16 '
310
- f'-k 1M -s 16 -d "{dir_}" -o "{civ_filename}" "{url_dl}"'
311
- )
312
- os.system(aria2_command)
313
-
314
- return output_path
315
-
316
-
317
- def drive_down(url, dir_):
318
- import gdown
319
-
320
- output_path = None
321
-
322
- drive_id, _ = gdown.parse_url.parse_url(url, warning=False)
323
- dir_files = os.listdir(dir_)
324
-
325
- for dfile in dir_files:
326
- if drive_id in dfile:
327
- output_path = os.path.join(dir_, dfile)
328
- break
329
-
330
- if not output_path:
331
- original_path = gdown.download(url, f"{dir_}/", fuzzy=True)
332
-
333
- dir_name, base_name = os.path.split(original_path)
334
- name, ext = base_name.rsplit(".", 1)
335
- new_name = f"{name}_{drive_id}.{ext}"
336
- output_path = os.path.join(dir_name, new_name)
337
-
338
- os.rename(original_path, output_path)
339
-
340
- return output_path
341
-
342
-
343
- def hf_down(url, dir_, hf_token, romanize):
344
- url = url.replace("?download=true", "")
345
- # url = urllib.parse.quote(url, safe=':/') # fix encoding
346
-
347
- filename = unidecode(url.split('/')[-1]) if romanize else url.split('/')[-1]
348
- output_path = os.path.join(dir_, filename)
349
-
350
- if os.path.exists(output_path):
351
- return output_path
352
-
353
- if "/blob/" in url:
354
- url = url.replace("/blob/", "/resolve/")
355
-
356
- if hf_token:
357
- user_header = f'"Authorization: Bearer {hf_token}"'
358
- os.system(f"aria2c --console-log-level=error --summary-interval=10 --header={user_header} -c -x 16 -k 1M -s 16 {url} -d {dir_} -o {filename}")
359
- else:
360
- os.system(f"aria2c --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 {url} -d {dir_} -o {filename}")
361
-
362
- return output_path
363
-
364
-
365
- def download_things(directory, url, hf_token="", civitai_api_key="", romanize=False):
366
- url = url.strip()
367
- downloaded_file_path = None
368
-
369
- if "drive.google.com" in url:
370
- downloaded_file_path = drive_down(url, directory)
371
- elif "huggingface.co" in url:
372
- downloaded_file_path = hf_down(url, directory, hf_token, romanize)
373
- elif "civitai.com" in url:
374
- if not civitai_api_key:
375
- msg = "You need an API key to download Civitai models."
376
- print(f"\033[91m{msg}\033[0m")
377
- gr.Warning(msg)
378
- return None
379
-
380
- url, civ_filename, civ_page = get_civit_params(url)
381
- if civ_page and not IS_ZERO_GPU:
382
- print(f"\033[92mCivitai model: {civ_filename} [page: {civ_page}]\033[0m")
383
-
384
- downloaded_file_path, civ_filename = civ_redirect_down(url, directory, civitai_api_key, romanize, civ_filename)
385
-
386
- if not downloaded_file_path:
387
- msg = (
388
- "Download failed.\n"
389
- "If this is due to an API limit, generating a new API key may resolve the issue.\n"
390
- "Attempting to download using the old method..."
391
- )
392
- print(msg)
393
- gr.Warning(msg)
394
- downloaded_file_path = civ_api_down(url, directory, civitai_api_key, civ_filename)
395
- else:
396
- os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}")
397
-
398
- return downloaded_file_path
399
-
400
-
401
- def get_model_list(directory_path):
402
- model_list = []
403
- valid_extensions = {'.ckpt', '.pt', '.pth', '.safetensors', '.bin'}
404
-
405
- for filename in os.listdir(directory_path):
406
- if os.path.splitext(filename)[1] in valid_extensions:
407
- # name_without_extension = os.path.splitext(filename)[0]
408
- file_path = os.path.join(directory_path, filename)
409
- # model_list.append((name_without_extension, file_path))
410
- model_list.append(file_path)
411
- print('\033[34mFILE: ' + file_path + '\033[0m')
412
- return model_list
413
-
414
-
415
- def extract_parameters(input_string):
416
- parameters = {}
417
- input_string = input_string.replace("\n", "")
418
-
419
- if "Negative prompt:" not in input_string:
420
- if "Steps:" in input_string:
421
- input_string = input_string.replace("Steps:", "Negative prompt: Steps:")
422
- else:
423
- msg = "Generation data is invalid."
424
- gr.Warning(msg)
425
- print(msg)
426
- parameters["prompt"] = input_string
427
- return parameters
428
-
429
- parm = input_string.split("Negative prompt:")
430
- parameters["prompt"] = parm[0].strip()
431
- if "Steps:" not in parm[1]:
432
- parameters["neg_prompt"] = parm[1].strip()
433
- return parameters
434
- parm = parm[1].split("Steps:")
435
- parameters["neg_prompt"] = parm[0].strip()
436
- input_string = "Steps:" + parm[1]
437
-
438
- # Extracting Steps
439
- steps_match = re.search(r'Steps: (\d+)', input_string)
440
- if steps_match:
441
- parameters['Steps'] = int(steps_match.group(1))
442
-
443
- # Extracting Size
444
- size_match = re.search(r'Size: (\d+x\d+)', input_string)
445
- if size_match:
446
- parameters['Size'] = size_match.group(1)
447
- width, height = map(int, parameters['Size'].split('x'))
448
- parameters['width'] = width
449
- parameters['height'] = height
450
-
451
- # Extracting other parameters
452
- other_parameters = re.findall(r'([^,:]+): (.*?)(?=, [^,:]+:|$)', input_string)
453
- for param in other_parameters:
454
- parameters[param[0].strip()] = param[1].strip('"')
455
-
456
- return parameters
457
-
458
-
459
- def get_my_lora(link_url, romanize):
460
- l_name = ""
461
- for url in [url.strip() for url in link_url.split(',')]:
462
- if not os.path.exists(f"./loras/{url.split('/')[-1]}"):
463
- l_name = download_things(DIRECTORY_LORAS, url, HF_TOKEN, CIVITAI_API_KEY, romanize)
464
- new_lora_model_list = get_model_list(DIRECTORY_LORAS)
465
- new_lora_model_list.insert(0, "None")
466
- new_lora_model_list = new_lora_model_list + DIFFUSERS_FORMAT_LORAS
467
- msg_lora = "Downloaded"
468
- if l_name:
469
- msg_lora += f": <b>{l_name}</b>"
470
- print(msg_lora)
471
-
472
- try:
473
- # Works with non-Civitai loras.
474
- json_data = read_safetensors_header_from_file(l_name)
475
- metadata_lora = LoraHeaderInformation(json_data)
476
- msg_lora += "<br>" + metadata_lora.to_html()
477
- except Exception:
478
- pass
479
-
480
- return gr.update(
481
- choices=new_lora_model_list
482
- ), gr.update(
483
- choices=new_lora_model_list
484
- ), gr.update(
485
- choices=new_lora_model_list
486
- ), gr.update(
487
- choices=new_lora_model_list
488
- ), gr.update(
489
- choices=new_lora_model_list
490
- ), gr.update(
491
- choices=new_lora_model_list
492
- ), gr.update(
493
- choices=new_lora_model_list
494
- ), gr.update(
495
- value=msg_lora
496
- )
497
-
498
-
499
- def info_html(json_data, title, subtitle):
500
- return f"""
501
- <div style='padding: 0; border-radius: 10px;'>
502
- <p style='margin: 0; font-weight: bold;'>{title}</p>
503
- <details>
504
- <summary>Details</summary>
505
- <p style='margin: 0; font-weight: bold;'>{subtitle}</p>
506
- </details>
507
- </div>
508
- """
509
-
510
-
511
- def get_model_type(repo_id: str):
512
- api = HfApi(token=os.environ.get("HF_TOKEN")) # if use private or gated model
513
- default = "SD 1.5"
514
- try:
515
- if os.path.exists(repo_id):
516
- tag, _, _, _ = checkpoint_model_type(repo_id)
517
- return DIFFUSECRAFT_CHECKPOINT_NAME[tag]
518
- else:
519
- model = api.model_info(repo_id=repo_id, timeout=5.0)
520
- tags = model.tags
521
- for tag in tags:
522
- if tag in MODEL_TYPE_CLASS.keys():
523
- return MODEL_TYPE_CLASS.get(tag, default)
524
-
525
- except Exception:
526
- return default
527
- return default
528
-
529
-
530
- def restart_space(repo_id: str, factory_reboot: bool):
531
- api = HfApi(token=os.environ.get("HF_TOKEN"))
532
- try:
533
- runtime = api.get_space_runtime(repo_id=repo_id)
534
- if runtime.stage == "RUNNING":
535
- api.restart_space(repo_id=repo_id, factory_reboot=factory_reboot)
536
- print(f"Restarting space: {repo_id}")
537
- else:
538
- print(f"Space {repo_id} is in stage: {runtime.stage}")
539
- except Exception as e:
540
- print(e)
541
-
542
-
543
- def extract_exif_data(image):
544
- if image is None:
545
- return ""
546
-
547
- try:
548
- metadata_keys = ['parameters', 'metadata', 'prompt', 'Comment']
549
-
550
- for key in metadata_keys:
551
- if key in image.info:
552
- return image.info[key]
553
-
554
- return str(image.info)
555
-
556
- except Exception as e:
557
- return f"Error extracting metadata: {str(e)}"
558
-
559
-
560
- def create_mask_now(img, invert):
561
- import numpy as np
562
- import time
563
-
564
- time.sleep(0.5)
565
-
566
- transparent_image = img["layers"][0]
567
-
568
- # Extract the alpha channel
569
- alpha_channel = np.array(transparent_image)[:, :, 3]
570
-
571
- # Create a binary mask by thresholding the alpha channel
572
- binary_mask = alpha_channel > 1
573
-
574
- if invert:
575
- print("Invert")
576
- # Invert the binary mask so that the drawn shape is white and the rest is black
577
- binary_mask = np.invert(binary_mask)
578
-
579
- # Convert the binary mask to a 3-channel RGB mask
580
- rgb_mask = np.stack((binary_mask,) * 3, axis=-1)
581
-
582
- # Convert the mask to uint8
583
- rgb_mask = rgb_mask.astype(np.uint8) * 255
584
-
585
- return img["background"], rgb_mask
586
-
587
-
588
- def download_diffuser_repo(repo_name: str, model_type: str, revision: str = "main", token=True):
589
-
590
- variant = None
591
- if token is True and not os.environ.get("HF_TOKEN"):
592
- token = None
593
-
594
- if model_type == "SDXL":
595
- info = model_info_data(
596
- repo_name,
597
- token=token,
598
- revision=revision,
599
- timeout=5.0,
600
- )
601
-
602
- filenames = {sibling.rfilename for sibling in info.siblings}
603
- model_filenames, variant_filenames = variant_compatible_siblings(
604
- filenames, variant="fp16"
605
- )
606
-
607
- if len(variant_filenames):
608
- variant = "fp16"
609
-
610
- if model_type == "FLUX":
611
- cached_folder = snapshot_download(
612
- repo_id=repo_name,
613
- allow_patterns="transformer/*"
614
- )
615
- else:
616
- cached_folder = DiffusionPipeline.download(
617
- pretrained_model_name=repo_name,
618
- force_download=False,
619
- token=token,
620
- revision=revision,
621
- # mirror="https://hf-mirror.com",
622
- variant=variant,
623
- use_safetensors=True,
624
- trust_remote_code=False,
625
- timeout=5.0,
626
- )
627
-
628
- if isinstance(cached_folder, PosixPath):
629
- cached_folder = cached_folder.as_posix()
630
-
631
- # Task model
632
- # from huggingface_hub import hf_hub_download
633
- # hf_hub_download(
634
- # task_model,
635
- # filename="diffusion_pytorch_model.safetensors", # fix fp16 variant
636
- # )
637
-
638
- return cached_folder
639
-
640
-
641
- def get_folder_size_gb(folder_path):
642
- result = subprocess.run(["du", "-s", folder_path], capture_output=True, text=True)
643
-
644
- total_size_kb = int(result.stdout.split()[0])
645
- total_size_gb = total_size_kb / (1024 ** 2)
646
-
647
- return total_size_gb
648
-
649
-
650
- def get_used_storage_gb(path_storage=STORAGE_ROOT):
651
- try:
652
- used_gb = get_folder_size_gb(path_storage)
653
- print(f"Used Storage: {used_gb:.2f} GB")
654
- except Exception as e:
655
- used_gb = 999
656
- print(f"Error while retrieving the used storage: {e}.")
657
-
658
- return used_gb
659
-
660
-
661
- def delete_model(removal_candidate):
662
- print(f"Removing: {removal_candidate}")
663
-
664
- if os.path.exists(removal_candidate):
665
- os.remove(removal_candidate)
666
- else:
667
- diffusers_model = f"{CACHE_HF}{DIRECTORY_MODELS}--{removal_candidate.replace('/', '--')}"
668
- if os.path.isdir(diffusers_model):
669
- shutil.rmtree(diffusers_model)
670
-
671
-
672
- def clear_hf_cache():
673
- """
674
- Clears the entire Hugging Face cache at ~/.cache/huggingface.
675
- Hugging Face will re-download models as needed later.
676
- """
677
- try:
678
- if os.path.exists(CACHE_HF):
679
- shutil.rmtree(CACHE_HF, ignore_errors=True)
680
- print(f"Hugging Face cache cleared: {CACHE_HF}")
681
- else:
682
- print(f"No Hugging Face cache found at: {CACHE_HF}")
683
- except Exception as e:
684
- print(f"Error clearing Hugging Face cache: {e}")
685
-
686
-
687
- def progress_step_bar(step, total):
688
- # Calculate the percentage for the progress bar width
689
- percentage = min(100, ((step / total) * 100))
690
-
691
- return f"""
692
- <div style="position: relative; width: 100%; background-color: gray; border-radius: 5px; overflow: hidden;">
693
- <div style="width: {percentage}%; height: 17px; background-color: #800080; transition: width 0.5s;"></div>
694
- <div style="position: absolute; width: 100%; text-align: center; color: white; top: 0; line-height: 19px; font-size: 13px;">
695
- {int(percentage)}%
696
- </div>
697
- </div>
698
- """
699
-
700
-
701
- def html_template_message(msg):
702
- return f"""
703
- <div style="position: relative; width: 100%; background-color: gray; border-radius: 5px; overflow: hidden;">
704
- <div style="width: 0%; height: 17px; background-color: #800080; transition: width 0.5s;"></div>
705
- <div style="position: absolute; width: 100%; text-align: center; color: white; top: 0; line-height: 19px; font-size: 14px; font-weight: bold; text-shadow: 1px 1px 2px black;">
706
- {msg}
707
- </div>
708
- </div>
709
- """
710
-
711
-
712
- def escape_html(text):
713
- """Escapes HTML special characters in the input text."""
714
- return text.replace("<", "&lt;").replace(">", "&gt;").replace("\n", "<br>")
 
1
+ import os
2
+ import re
3
+ import gradio as gr
4
+ from constants import (
5
+ DIFFUSERS_FORMAT_LORAS,
6
+ CIVITAI_API_KEY,
7
+ HF_TOKEN,
8
+ MODEL_TYPE_CLASS,
9
+ DIRECTORY_LORAS,
10
+ DIRECTORY_MODELS,
11
+ DIFFUSECRAFT_CHECKPOINT_NAME,
12
+ CACHE_HF,
13
+ STORAGE_ROOT,
14
+ )
15
+ from huggingface_hub import HfApi
16
+ from huggingface_hub import snapshot_download
17
+ from diffusers import DiffusionPipeline
18
+ from huggingface_hub import model_info as model_info_data
19
+ from diffusers.pipelines.pipeline_loading_utils import variant_compatible_siblings
20
+ from stablepy.diffusers_vanilla.utils import checkpoint_model_type
21
+ from pathlib import PosixPath
22
+ from unidecode import unidecode
23
+ import urllib.parse
24
+ import copy
25
+ import requests
26
+ from requests.adapters import HTTPAdapter
27
+ from urllib3.util import Retry
28
+ import shutil
29
+ import subprocess
30
+
31
+ USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:127.0) Gecko/20100101 Firefox/127.0'
32
+
33
+
34
+ def request_json_data(url):
35
+ model_version_id = url.split('/')[-1]
36
+ if "?modelVersionId=" in model_version_id:
37
+ match = re.search(r'modelVersionId=(\d+)', url)
38
+ model_version_id = match.group(1)
39
+
40
+ endpoint_url = f"https://civitai.com/api/v1/model-versions/{model_version_id}"
41
+
42
+ params = {}
43
+ headers = {'User-Agent': USER_AGENT, 'content-type': 'application/json'}
44
+ session = requests.Session()
45
+ retries = Retry(total=5, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
46
+ session.mount("https://", HTTPAdapter(max_retries=retries))
47
+
48
+ try:
49
+ result = session.get(endpoint_url, params=params, headers=headers, stream=True, timeout=(3.0, 15))
50
+ result.raise_for_status()
51
+ json_data = result.json()
52
+ return json_data if json_data else None
53
+ except Exception as e:
54
+ print(f"Error: {e}")
55
+ return None
56
+
57
+
58
+ class ModelInformation:
59
+ def __init__(self, json_data):
60
+ self.model_version_id = json_data.get("id", "")
61
+ self.model_id = json_data.get("modelId", "")
62
+ self.download_url = json_data.get("downloadUrl", "")
63
+ self.model_url = f"https://civitai.com/models/{self.model_id}?modelVersionId={self.model_version_id}"
64
+ self.filename_url = next(
65
+ (v.get("name", "") for v in json_data.get("files", []) if str(self.model_version_id) in v.get("downloadUrl", "") and v.get("type", "Model") == "Model"), ""
66
+ )
67
+ self.filename_url = self.filename_url if self.filename_url else ""
68
+ self.description = json_data.get("description", "")
69
+ if self.description is None: self.description = ""
70
+ self.model_name = json_data.get("model", {}).get("name", "")
71
+ self.model_type = json_data.get("model", {}).get("type", "")
72
+ self.nsfw = json_data.get("model", {}).get("nsfw", False)
73
+ self.poi = json_data.get("model", {}).get("poi", False)
74
+ self.images = [img.get("url", "") for img in json_data.get("images", [])]
75
+ self.example_prompt = json_data.get("trainedWords", [""])[0] if json_data.get("trainedWords") else ""
76
+ self.original_json = copy.deepcopy(json_data)
77
+
78
+
79
+ def retrieve_model_info(url):
80
+ json_data = request_json_data(url)
81
+ if not json_data:
82
+ return None
83
+ model_descriptor = ModelInformation(json_data)
84
+ return model_descriptor
85
+
86
+
87
+ def download_things(directory, url, hf_token="", civitai_api_key="", romanize=False):
88
+ url = url.strip()
89
+ downloaded_file_path = None
90
+
91
+ if "drive.google.com" in url:
92
+ original_dir = os.getcwd()
93
+ os.chdir(directory)
94
+ os.system(f"gdown --fuzzy {url}")
95
+ os.chdir(original_dir)
96
+ elif "huggingface.co" in url:
97
+ url = url.replace("?download=true", "")
98
+ # url = urllib.parse.quote(url, safe=':/') # fix encoding
99
+ if "/blob/" in url:
100
+ url = url.replace("/blob/", "/resolve/")
101
+ user_header = f'"Authorization: Bearer {hf_token}"'
102
+
103
+ filename = unidecode(url.split('/')[-1]) if romanize else url.split('/')[-1]
104
+
105
+ if hf_token:
106
+ os.system(f"aria2c --console-log-level=error --summary-interval=10 --header={user_header} -c -x 16 -k 1M -s 16 {url} -d {directory} -o {filename}")
107
+ else:
108
+ os.system(f"aria2c --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 {url} -d {directory} -o {filename}")
109
+
110
+ downloaded_file_path = os.path.join(directory, filename)
111
+
112
+ elif "civitai.com" in url:
113
+
114
+ if not civitai_api_key:
115
+ print("\033[91mYou need an API key to download Civitai models.\033[0m")
116
+
117
+ model_profile = retrieve_model_info(url)
118
+ if (
119
+ model_profile is not None
120
+ and model_profile.download_url
121
+ and model_profile.filename_url
122
+ ):
123
+ url = model_profile.download_url
124
+ filename = unidecode(model_profile.filename_url) if romanize else model_profile.filename_url
125
+ else:
126
+ if "?" in url:
127
+ url = url.split("?")[0]
128
+ filename = ""
129
+
130
+ url_dl = url + f"?token={civitai_api_key}"
131
+ print(f"Filename: {filename}")
132
+
133
+ param_filename = ""
134
+ if filename:
135
+ param_filename = f"-o '{filename}'"
136
+
137
+ aria2_command = (
138
+ f'aria2c --console-log-level=error --summary-interval=10 -c -x 16 '
139
+ f'-k 1M -s 16 -d "{directory}" {param_filename} "{url_dl}"'
140
+ )
141
+ os.system(aria2_command)
142
+
143
+ if param_filename and os.path.exists(os.path.join(directory, filename)):
144
+ downloaded_file_path = os.path.join(directory, filename)
145
+
146
+ # # PLAN B
147
+ # # Follow the redirect to get the actual download URL
148
+ # curl_command = (
149
+ # f'curl -L -sI --connect-timeout 5 --max-time 5 '
150
+ # f'-H "Content-Type: application/json" '
151
+ # f'-H "Authorization: Bearer {civitai_api_key}" "{url}"'
152
+ # )
153
+
154
+ # headers = os.popen(curl_command).read()
155
+
156
+ # # Look for the redirected "Location" URL
157
+ # location_match = re.search(r'location: (.+)', headers, re.IGNORECASE)
158
+
159
+ # if location_match:
160
+ # redirect_url = location_match.group(1).strip()
161
+
162
+ # # Extract the filename from the redirect URL's "Content-Disposition"
163
+ # filename_match = re.search(r'filename%3D%22(.+?)%22', redirect_url)
164
+ # if filename_match:
165
+ # encoded_filename = filename_match.group(1)
166
+ # # Decode the URL-encoded filename
167
+ # decoded_filename = urllib.parse.unquote(encoded_filename)
168
+
169
+ # filename = unidecode(decoded_filename) if romanize else decoded_filename
170
+ # print(f"Filename: {filename}")
171
+
172
+ # aria2_command = (
173
+ # f'aria2c --console-log-level=error --summary-interval=10 -c -x 16 '
174
+ # f'-k 1M -s 16 -d "{directory}" -o "{filename}" "{redirect_url}"'
175
+ # )
176
+ # return_code = os.system(aria2_command)
177
+
178
+ # # if return_code != 0:
179
+ # # raise RuntimeError(f"Failed to download file: {filename}. Error code: {return_code}")
180
+ # downloaded_file_path = os.path.join(directory, filename)
181
+ # if not os.path.exists(downloaded_file_path):
182
+ # downloaded_file_path = None
183
+
184
+ # if not downloaded_file_path:
185
+ # # Old method
186
+ # if "?" in url:
187
+ # url = url.split("?")[0]
188
+ # url = url + f"?token={civitai_api_key}"
189
+ # os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}")
190
+
191
+ else:
192
+ os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}")
193
+
194
+ return downloaded_file_path
195
+
196
+
197
+ def get_model_list(directory_path):
198
+ model_list = []
199
+ valid_extensions = {'.ckpt', '.pt', '.pth', '.safetensors', '.bin'}
200
+
201
+ for filename in os.listdir(directory_path):
202
+ if os.path.splitext(filename)[1] in valid_extensions:
203
+ # name_without_extension = os.path.splitext(filename)[0]
204
+ file_path = os.path.join(directory_path, filename)
205
+ # model_list.append((name_without_extension, file_path))
206
+ model_list.append(file_path)
207
+ print('\033[34mFILE: ' + file_path + '\033[0m')
208
+ return model_list
209
+
210
+
211
+ def extract_parameters(input_string):
212
+ parameters = {}
213
+ input_string = input_string.replace("\n", "")
214
+
215
+ if "Negative prompt:" not in input_string:
216
+ if "Steps:" in input_string:
217
+ input_string = input_string.replace("Steps:", "Negative prompt: Steps:")
218
+ else:
219
+ print("Invalid metadata")
220
+ parameters["prompt"] = input_string
221
+ return parameters
222
+
223
+ parm = input_string.split("Negative prompt:")
224
+ parameters["prompt"] = parm[0].strip()
225
+ if "Steps:" not in parm[1]:
226
+ print("Steps not detected")
227
+ parameters["neg_prompt"] = parm[1].strip()
228
+ return parameters
229
+ parm = parm[1].split("Steps:")
230
+ parameters["neg_prompt"] = parm[0].strip()
231
+ input_string = "Steps:" + parm[1]
232
+
233
+ # Extracting Steps
234
+ steps_match = re.search(r'Steps: (\d+)', input_string)
235
+ if steps_match:
236
+ parameters['Steps'] = int(steps_match.group(1))
237
+
238
+ # Extracting Size
239
+ size_match = re.search(r'Size: (\d+x\d+)', input_string)
240
+ if size_match:
241
+ parameters['Size'] = size_match.group(1)
242
+ width, height = map(int, parameters['Size'].split('x'))
243
+ parameters['width'] = width
244
+ parameters['height'] = height
245
+
246
+ # Extracting other parameters
247
+ other_parameters = re.findall(r'([^,:]+): (.*?)(?=, [^,:]+:|$)', input_string)
248
+ for param in other_parameters:
249
+ parameters[param[0].strip()] = param[1].strip('"')
250
+
251
+ return parameters
252
+
253
+
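`extract_parameters` expects the classic A1111-style generation string: the prompt, then `Negative prompt:`, then a comma-separated tail starting at `Steps:`. A quick sanity check with an invented string:

```python
raw = (
    "1girl, masterpiece "
    "Negative prompt: lowres, bad anatomy "
    "Steps: 28, Sampler: DPM++ 2M, CFG scale: 5.0, Seed: 42, Size: 832x1216"
)

params = extract_parameters(raw)
print(params["prompt"])      # 1girl, masterpiece
print(params["neg_prompt"])  # lowres, bad anatomy
print(params["Steps"], params["width"], params["height"])  # 28 832 1216
```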
254
+ def get_my_lora(link_url, romanize):
255
+ l_name = ""
256
+ for url in [url.strip() for url in link_url.split(',')]:
257
+ if not os.path.exists(f"./loras/{url.split('/')[-1]}"):
258
+ l_name = download_things(DIRECTORY_LORAS, url, HF_TOKEN, CIVITAI_API_KEY, romanize)
259
+ new_lora_model_list = get_model_list(DIRECTORY_LORAS)
260
+ new_lora_model_list.insert(0, "None")
261
+ new_lora_model_list = new_lora_model_list + DIFFUSERS_FORMAT_LORAS
262
+ msg_lora = "Downloaded"
263
+ if l_name:
264
+ msg_lora += f": <b>{l_name}</b>"
265
+ print(msg_lora)
266
+
267
+ return gr.update(
268
+ choices=new_lora_model_list
269
+ ), gr.update(
270
+ choices=new_lora_model_list
271
+ ), gr.update(
272
+ choices=new_lora_model_list
273
+ ), gr.update(
274
+ choices=new_lora_model_list
275
+ ), gr.update(
276
+ choices=new_lora_model_list
277
+ ), gr.update(
278
+ choices=new_lora_model_list
279
+ ), gr.update(
280
+ choices=new_lora_model_list
281
+ ), gr.update(
282
+ value=msg_lora
283
+ )
284
+
285
+
286
+ def info_html(json_data, title, subtitle):
287
+ return f"""
288
+ <div style='padding: 0; border-radius: 10px;'>
289
+ <p style='margin: 0; font-weight: bold;'>{title}</p>
290
+ <details>
291
+ <summary>Details</summary>
292
+ <p style='margin: 0; font-weight: bold;'>{subtitle}</p>
293
+ </details>
294
+ </div>
295
+ """
296
+
297
+
298
+ def get_model_type(repo_id: str):
299
+ api = HfApi(token=os.environ.get("HF_TOKEN")) # if use private or gated model
300
+ default = "SD 1.5"
301
+ try:
302
+ if os.path.exists(repo_id):
303
+ tag, _, _, _ = checkpoint_model_type(repo_id)
304
+ return DIFFUSECRAFT_CHECKPOINT_NAME[tag]
305
+ else:
306
+ model = api.model_info(repo_id=repo_id, timeout=5.0)
307
+ tags = model.tags
308
+ for tag in tags:
309
+ if tag in MODEL_TYPE_CLASS.keys(): return MODEL_TYPE_CLASS.get(tag, default)
310
+
311
+ except Exception:
312
+ return default
313
+ return default
314
+
315
+
316
+ def restart_space(repo_id: str, factory_reboot: bool):
317
+ api = HfApi(token=os.environ.get("HF_TOKEN"))
318
+ try:
319
+ runtime = api.get_space_runtime(repo_id=repo_id)
320
+ if runtime.stage == "RUNNING":
321
+ api.restart_space(repo_id=repo_id, factory_reboot=factory_reboot)
322
+ print(f"Restarting space: {repo_id}")
323
+ else:
324
+ print(f"Space {repo_id} is in stage: {runtime.stage}")
325
+ except Exception as e:
326
+ print(e)
327
+
328
+
329
+ def extract_exif_data(image):
330
+ if image is None:
331
+ return ""
332
+
333
+ try:
334
+ metadata_keys = ['parameters', 'metadata', 'prompt', 'Comment']
335
+
336
+ for key in metadata_keys:
337
+ if key in image.info:
338
+ return image.info[key]
339
+
340
+ return str(image.info)
341
+
342
+ except Exception as e:
343
+ return f"Error extracting metadata: {str(e)}"
344
+
345
+
346
+ def create_mask_now(img, invert):
347
+ import numpy as np
348
+ import time
349
+
350
+ time.sleep(0.5)
351
+
352
+ transparent_image = img["layers"][0]
353
+
354
+ # Extract the alpha channel
355
+ alpha_channel = np.array(transparent_image)[:, :, 3]
356
+
357
+ # Create a binary mask by thresholding the alpha channel
358
+ binary_mask = alpha_channel > 1
359
+
360
+ if invert:
361
+ print("Invert")
362
+ # Invert the binary mask so that the drawn shape is white and the rest is black
363
+ binary_mask = np.invert(binary_mask)
364
+
365
+ # Convert the binary mask to a 3-channel RGB mask
366
+ rgb_mask = np.stack((binary_mask,) * 3, axis=-1)
367
+
368
+ # Convert the mask to uint8
369
+ rgb_mask = rgb_mask.astype(np.uint8) * 255
370
+
371
+ return img["background"], rgb_mask
372
+
373
+
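`create_mask_now` thresholds the alpha channel of the top editor layer, treating any alpha above 1 as a drawn pixel. The same thresholding on a synthetic RGBA array, detached from the Gradio editor dict, looks like this (array values invented for illustration):

```python
import numpy as np

# Synthetic 4x4 RGBA layer: one opaque pixel stands in for a brush stroke.
layer = np.zeros((4, 4, 4), dtype=np.uint8)
layer[1, 2, 3] = 255  # set the alpha channel of one pixel

binary_mask = layer[:, :, 3] > 1                  # drawn pixels -> True
rgb_mask = np.stack((binary_mask,) * 3, axis=-1)  # 1 channel -> 3 channels
rgb_mask = rgb_mask.astype(np.uint8) * 255        # bool -> 0 / 255

print(rgb_mask[1, 2])  # [255 255 255]
```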
374
+ def download_diffuser_repo(repo_name: str, model_type: str, revision: str = "main", token=True):
375
+
376
+ variant = None
377
+ if token is True and not os.environ.get("HF_TOKEN"):
378
+ token = None
379
+
380
+ if model_type == "SDXL":
381
+ info = model_info_data(
382
+ repo_name,
383
+ token=token,
384
+ revision=revision,
385
+ timeout=5.0,
386
+ )
387
+
388
+ filenames = {sibling.rfilename for sibling in info.siblings}
389
+ model_filenames, variant_filenames = variant_compatible_siblings(
390
+ filenames, variant="fp16"
391
+ )
392
+
393
+ if len(variant_filenames):
394
+ variant = "fp16"
395
+
396
+ if model_type == "FLUX":
397
+ cached_folder = snapshot_download(
398
+ repo_id=repo_name,
399
+ allow_patterns="transformer/*"
400
+ )
401
+ else:
402
+ cached_folder = DiffusionPipeline.download(
403
+ pretrained_model_name=repo_name,
404
+ force_download=False,
405
+ token=token,
406
+ revision=revision,
407
+ # mirror="https://hf-mirror.com",
408
+ variant=variant,
409
+ use_safetensors=True,
410
+ trust_remote_code=False,
411
+ timeout=5.0,
412
+ )
413
+
414
+ if isinstance(cached_folder, PosixPath):
415
+ cached_folder = cached_folder.as_posix()
416
+
417
+ # Task model
418
+ # from huggingface_hub import hf_hub_download
419
+ # hf_hub_download(
420
+ # task_model,
421
+ # filename="diffusion_pytorch_model.safetensors", # fix fp16 variant
422
+ # )
423
+
424
+ return cached_folder
425
+
426
+
427
+ def get_folder_size_gb(folder_path):
428
+ result = subprocess.run(["du", "-s", folder_path], capture_output=True, text=True)
429
+
430
+ total_size_kb = int(result.stdout.split()[0])
431
+ total_size_gb = total_size_kb / (1024 ** 2)
432
+
433
+ return total_size_gb
434
+
435
+
436
+ def get_used_storage_gb():
437
+ try:
438
+ used_gb = get_folder_size_gb(STORAGE_ROOT)
439
+ print(f"Used Storage: {used_gb:.2f} GB")
440
+ except Exception as e:
441
+ used_gb = 999
442
+ print(f"Error while retrieving the used storage: {e}.")
443
+
444
+ return used_gb
445
+
446
+
447
+ def delete_model(removal_candidate):
448
+ print(f"Removing: {removal_candidate}")
449
+
450
+ if os.path.exists(removal_candidate):
451
+ os.remove(removal_candidate)
452
+ else:
453
+ diffusers_model = f"{CACHE_HF}{DIRECTORY_MODELS}--{removal_candidate.replace('/', '--')}"
454
+ if os.path.isdir(diffusers_model):
455
+ shutil.rmtree(diffusers_model)
456
+
457
+
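`delete_model` falls back on the Hub cache naming scheme, where a repo id becomes a `models--<org>--<name>` folder under `CACHE_HF`. Assuming `DIRECTORY_MODELS` is `'models'` (its value is not shown in this diff), the mapping works out like this:

```python
# Sketch of the cache-folder naming that delete_model relies on.
# The repo id is just an example taken from the model list above.
CACHE_HF = "/home/user/.cache/huggingface/hub/"
repo_id = "Laxhar/noobai-XL-1.1"

cache_dir = f"{CACHE_HF}models--{repo_id.replace('/', '--')}"
print(cache_dir)
# /home/user/.cache/huggingface/hub/models--Laxhar--noobai-XL-1.1
```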
458
+ def progress_step_bar(step, total):
459
+ # Calculate the percentage for the progress bar width
460
+ percentage = min(100, ((step / total) * 100))
461
+
462
+ return f"""
463
+ <div style="position: relative; width: 100%; background-color: gray; border-radius: 5px; overflow: hidden;">
464
+ <div style="width: {percentage}%; height: 17px; background-color: #800080; transition: width 0.5s;"></div>
465
+ <div style="position: absolute; width: 100%; text-align: center; color: white; top: 0; line-height: 19px; font-size: 13px;">
466
+ {int(percentage)}%
467
+ </div>
468
+ </div>
469
+ """
470
+
471
+
472
+ def html_template_message(msg):
473
+ return f"""
474
+ <div style="position: relative; width: 100%; background-color: gray; border-radius: 5px; overflow: hidden;">
475
+ <div style="width: 0%; height: 17px; background-color: #800080; transition: width 0.5s;"></div>
476
+ <div style="position: absolute; width: 100%; text-align: center; color: white; top: 0; line-height: 19px; font-size: 14px; font-weight: bold; text-shadow: 1px 1px 2px black;">
477
+ {msg}
478
+ </div>
479
+ </div>
480
+ """
481
+
482
+
483
+ def escape_html(text):
484
+ """Escapes HTML special characters in the input text."""
485
+ return text.replace("<", "&lt;").replace(">", "&gt;").replace("\n", "<br>")