v3p4
app.py CHANGED

@@ -13,8 +13,6 @@ from datetime import datetime
 from diffusers.models import AutoencoderKL
 from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline
 
-# ... (keep the existing imports and configurations)
-
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
 
@@ -40,7 +38,6 @@ torch.backends.cudnn.benchmark = False
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
-
 def load_pipeline(model_name):
     vae = AutoencoderKL.from_pretrained(
         "madebyollin/sdxl-vae-fp16-fix",
@@ -66,8 +63,6 @@ def load_pipeline(model_name):
     pipe.to(device)
     return pipe
 
-
-# Add a new function to parse and validate JSON input
 def parse_json_parameters(json_str):
     try:
         params = json.loads(json_str)
@@ -81,7 +76,6 @@ def parse_json_parameters(json_str):
     except Exception as e:
         raise ValueError(f"Error parsing JSON: {str(e)}")
 
-# Modify the generate function to accept JSON parameters
 @spaces.GPU
 def generate(
     prompt: str,
@@ -206,7 +200,7 @@ generation_history = []
 
 # Function to update the history dropdown
 def update_history_dropdown():
-    return
+    return [f"{item['prompt']} ({item['timestamp']})" for item in generation_history]
 
 # Modify the generate function to add results to the history
 def generate_and_update_history(*args, **kwargs):
@@ -442,4 +436,4 @@ with gr.Blocks(css="style.css") as demo:
         outputs=[history_image, history_metadata],
     )
 
-demo.queue(max_size=20).launch(debug=IS_COLAB, share=IS_COLAB)
+demo.queue(max_size=20).launch(debug=IS_COLAB, share=IS_COLAB)
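For context, the hunk boundaries above elide the middle of load_pipeline. A minimal sketch of how the full function plausibly reads, following the standard diffusers SDXL loading pattern; the torch_dtype and use_safetensors arguments are assumptions, not visible in this diff:

import torch
from diffusers.models import AutoencoderKL
from diffusers import StableDiffusionXLPipeline

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

def load_pipeline(model_name):
    # The fp16-fix VAE avoids NaN artifacts when decoding SDXL latents in half precision
    vae = AutoencoderKL.from_pretrained(
        "madebyollin/sdxl-vae-fp16-fix",
        torch_dtype=torch.float16,  # assumption: fp16 weights, matching the VAE's purpose
    )
    pipe = StableDiffusionXLPipeline.from_pretrained(
        model_name,
        vae=vae,
        torch_dtype=torch.float16,  # assumption
        use_safetensors=True,       # assumption
    )
    pipe.to(device)
    return pipe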
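Similarly, parse_json_parameters is truncated between the json.loads call and the except clause. A hedged sketch of the validation such a helper typically performs; the required-key check below is illustrative, not the commit's actual logic:

import json

def parse_json_parameters(json_str):
    try:
        params = json.loads(json_str)
        if not isinstance(params, dict):
            raise ValueError("JSON input must be an object")
        if "prompt" not in params:  # illustrative required key, not confirmed by the diff
            raise ValueError("Missing required parameter: prompt")
        return params
    except Exception as e:
        raise ValueError(f"Error parsing JSON: {str(e)}")

Called as, for example, parse_json_parameters('{"prompt": "a watercolor fox", "guidance_scale": 7.0}').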
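The one-line change to update_history_dropdown implies that generation_history holds dicts with at least 'prompt' and 'timestamp' keys. A minimal sketch of the surrounding history plumbing under that assumption; the entry shape and the body of generate_and_update_history are hypothetical, since the diff only shows the function's signature:

from datetime import datetime

generation_history = []

def generate_and_update_history(*args, **kwargs):
    # Hypothetical body: run the @spaces.GPU-decorated generate() and record the call
    result = generate(*args, **kwargs)
    generation_history.append({
        "prompt": args[0] if args else kwargs.get("prompt", ""),
        "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        "result": result,
    })
    return result

def update_history_dropdown():
    # Matches the return statement added in this commit
    return [f"{item['prompt']} ({item['timestamp']})" for item in generation_history]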