Update app.py
app.py CHANGED
@@ -30,6 +30,7 @@ global text_encoder
 global tokenizer
 global noise_scheduler
 global network
+global original_image
 device = "cuda:0"
 generator = torch.Generator(device=device)
 from gradio_imageslider import ImageSlider
@@ -110,9 +111,8 @@ def inference(prompt, negative_prompt, guidance_scale, ddim_steps, seed):



-# @spaces.GPU()
 @torch.no_grad()
-def edit_inference(input_image, prompt, negative_prompt, guidance_scale, ddim_steps, seed, start_noise, a1, a2, a3, a4):
+def edit_inference(prompt, negative_prompt, guidance_scale, ddim_steps, seed, start_noise, a1, a2, a3, a4):

     global device
     global generator
@@ -125,9 +125,12 @@ def edit_inference(input_image, prompt, negative_prompt, guidance_scale, ddim_steps, seed, start_noise, a1, a2, a3, a4):
     global pointy
     global wavy
     global large
+    global original_image
+

     original_weights = network.proj.clone()

+
     #pad to same number of PCs
     pcs_original = original_weights.shape[1]
     pcs_edits = young.shape[1]
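Note on the "pad to same number of PCs" step above: the saved projection (`network.proj`) and the edit directions (`young`, `pointy`, `wavy`, `large`) can carry different numbers of principal components, so the shorter tensor has to be padded before the directions are combined. A minimal sketch of that idea, with stand-in shapes (the real dimensions come from the Space's saved weights, and zero-padding is an assumption about what the elided code does):

    import torch
    import torch.nn.functional as F

    # Stand-ins: the real tensors are network.proj.clone() and a saved
    # edit direction such as `young`; the sizes here are assumptions.
    original_weights = torch.randn(1, 10000)
    young = torch.randn(1, 1000)

    pcs_original = original_weights.shape[1]
    pcs_edits = young.shape[1]

    # Zero-pad the edit direction on the right so both tensors share the
    # same number of principal components before being combined.
    young_padded = F.pad(young, (0, pcs_original - pcs_edits))
    assert young_padded.shape == original_weights.shape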
@@ -194,20 +197,21 @@ def edit_inference(input_image, prompt, negative_prompt, guidance_scale, ddim_steps, seed, start_noise, a1, a2, a3, a4):
     #reset weights back to original
     network.proj = torch.nn.Parameter(original_weights)
     network.reset()
-
-    return [image]
-
-# @spaces.GPU()
+
+    return (original_image, image)
+
 def sample_then_run():
+    global original_image
     sample_model()
     prompt = "sks person"
     negative_prompt = "low quality, blurry, unfinished, nudity, weapon"
     seed = 5
     cfg = 3.0
     steps = 50
-
+    original_image = inference( prompt, negative_prompt, cfg, steps, seed)
     torch.save(network.proj, "model.pt" )
-
+
+    return (original_image, original_image), "model.pt"


 global young
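The two return changes above are what let one button drive both outputs: `edit_inference` now returns a `(before, after)` pair for the `ImageSlider`, and `sample_then_run` returns that pair plus the path written by `torch.save`. A self-contained sketch of this wiring, assuming gradio and gradio_imageslider are installed (the gray image and empty `model.pt` are stand-ins for the Space's real sampling and inference):

    import gradio as gr
    from pathlib import Path
    from PIL import Image
    from gradio_imageslider import ImageSlider

    def sample_then_run():
        # Stand-in for sample_model() + inference(...): the real app
        # generates an image and saves the sampled weights with torch.save.
        original_image = Image.new("RGB", (512, 512), "gray")
        Path("model.pt").write_bytes(b"")  # placeholder model file
        # ImageSlider displays a (before, after) pair; seeding both sides
        # with the same image mirrors the new return value.
        return (original_image, original_image), "model.pt"

    with gr.Blocks() as demo:
        sample = gr.Button("🎲 Sample New Model")
        image_slider = ImageSlider(position=0.5, type="pil", height=512, width=512)
        file_output = gr.File(label="Download Sampled Model", interactive=False)
        sample.click(fn=sample_then_run, outputs=[image_slider, file_output])

    demo.queue().launch()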
@@ -406,7 +410,7 @@ def file_upload(file):



-
+



@@ -438,11 +442,11 @@ with gr.Blocks(css="style.css") as demo:
     with gr.Column():
         with gr.Row():
             with gr.Column():
-
-
-
-
-
+                sample = gr.Button("🎲 Sample New Model")
+                file_input = gr.File(label="Upload Model", container=True)
+                file_output = gr.File(label="Download Sampled Model", container=True, interactive=False)
+
+
                 # invert_button = gr.Button("⏪ Invert")
         with gr.Column():
             image_slider = ImageSlider(position=0.5, type="pil", height=512, width=512)
@@ -507,10 +511,10 @@ with gr.Blocks(css="style.css") as demo:



-    gr.Markdown("""<div style="text-align: justify;"> After sampling a new model or inverting, you can download the model below.""")
+    # gr.Markdown("""<div style="text-align: justify;"> After sampling a new model or inverting, you can download the model below.""")

-    with gr.Row():
-        file_output = gr.File(label="Download Sampled Model", container=True, interactive=False)
+    # with gr.Row():
+    #     file_output = gr.File(label="Download Sampled Model", container=True, interactive=False)



@@ -521,7 +525,7 @@ with gr.Blocks(css="style.css") as demo:
     # outputs = [image_slider, file_output])


-
+    sample.click(fn=sample_then_run, outputs=[image_slider, file_output])

     # submit1.click(fn=inference,
     # inputs=[prompt1, negative_prompt1, cfg1, steps1, seed1],
@@ -529,12 +533,18 @@ with gr.Blocks(css="style.css") as demo:
     # submit1.click(fn=edit_inference,
     # inputs=[input_image, prompt1, negative_prompt1, cfg1, steps1, seed1, injection_step, a1, a2, a3, a4],
     # outputs=image_slider)
-
-
-)
-
+    submit1.click(
+        fn=edit_inference, inputs=[ prompt1, negative_prompt1, cfg1, steps1, seed1, injection_step, a1, a2, a3, a4], outputs=image_slider)
+    file_input.change(fn=file_upload, inputs=file_input, outputs = input_image)
+


+demo.queue().launch(share=True)
+
+
+
+
+

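Review note on the wiring hunks: `sample.click` now feeds both the `ImageSlider` and the download `gr.File`, matching `sample_then_run`'s two return values; `submit1.click` drops `input_image` from its inputs, matching the new `edit_inference` signature; and `file_input.change` routes an uploaded model through `file_upload` into `input_image`, which is presumably defined elsewhere in the layout. The stray `)` removed in the last hunk appears to be the tail of the previous multi-line launch call, now replaced by `demo.queue().launch(share=True)`.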