Update app.py
app.py
CHANGED
@@ -1,7 +1,6 @@
-import sys
-sys.path.append('./')
-from PIL import Image
+import spaces
 import gradio as gr
+from PIL import Image
 from src.tryon_pipeline import StableDiffusionXLInpaintPipeline as TryonPipeline
 from src.unet_hacked_garmnet import UNet2DConditionModel as UNet2DConditionModel_ref
 from src.unet_hacked_tryon import UNet2DConditionModel
@@ -26,7 +25,6 @@ from preprocess.openpose.run_openpose import OpenPose
 from detectron2.data.detection_utils import convert_PIL_to_numpy,_apply_exif_orientation
 from torchvision.transforms.functional import to_pil_image
 
-device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
 
 def pil_to_binary_mask(pil_image, threshold=0):
     np_image = np.array(pil_image)
@@ -123,9 +121,9 @@ pipe = TryonPipeline.from_pretrained(
 )
 pipe.unet_encoder = UNet_Encoder
 
-
 @spaces.GPU
 def start_tryon(dict,garm_img,garment_des,is_checked,is_checked_crop,denoise_steps,seed):
+    device = "cuda"
 
     openpose_model.preprocessor.body_estimation.model.to(device)
     pipe.to(device)
@@ -262,7 +260,7 @@ for ex_human in human_list_path:
 
 image_blocks = gr.Blocks().queue()
 with image_blocks as demo:
-    gr.Markdown("##
+    gr.Markdown("## IDM-VTON πππ")
     gr.Markdown("Virtual Try-on with your image and garment image. Check out the [source codes](https://github.com/yisol/IDM-VTON) and the [model](https://huggingface.co/yisol/IDM-VTON)")
     with gr.Row():
         with gr.Column():
@@ -313,3 +311,4 @@ with image_blocks as demo:
 
 image_blocks.launch()
 
+
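The substance of this commit is the Hugging Face ZeroGPU pattern: import the `spaces` package, decorate the GPU-using function with `@spaces.GPU`, and move device placement inside that function (`device = "cuda"` in `start_tryon`) instead of probing CUDA at module import time, as the removed module-level `device = 'cuda:0' if torch.cuda.is_available() else 'cpu'` did. Below is a minimal, self-contained sketch of that pattern under the assumption that the `spaces` package is available (as it is on ZeroGPU Spaces); the model and function names are illustrative, not the ones from app.py.

import spaces   # Hugging Face ZeroGPU helper; provides the @spaces.GPU decorator
import torch
import gradio as gr

# Heavy objects are built on CPU at import time; no CUDA calls run yet,
# which matters because a ZeroGPU Space has no GPU attached outside the decorator.
model = torch.nn.Linear(4, 4)

@spaces.GPU  # a GPU is allocated only for the duration of this call
def predict(value: float) -> float:
    device = "cuda"          # safe here: the decorator guarantees CUDA is available
    model.to(device)
    x = torch.full((1, 4), float(value), device=device)
    return model(x).sum().item()

demo = gr.Interface(fn=predict, inputs=gr.Number(value=1.0), outputs=gr.Number())

if __name__ == "__main__":
    demo.launch()

In app.py the same idea is applied to the try-on pipeline: `pipe.to(device)` and the OpenPose model move to CUDA inside the decorated `start_tryon` function rather than when the module is imported.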