import torch
import os
import sys
from huggingface_hub import HfApi
from pathlib import Path
from diffusers.utils import load_image
from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
from PIL import Image
import numpy as np

from diffusers import (
    ControlNetModel,
    StableDiffusionControlNetPipeline,
    UniPCMultistepScheduler,
)
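
# Load the UperNet model that produces the semantic segmentation map used as
# the ControlNet conditioning image.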
image_processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-small")
image_segmentor = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-small")
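
# The ControlNet checkpoint (local path or Hub id) comes from the first CLI
# argument, e.g. `python this_script.py lllyasviel/sd-controlnet-seg`
# (the example invocation is illustrative).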
checkpoint = sys.argv[1]
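
# ADE20K-style color palette used to colorize the predicted segmentation labels.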
ada_palette = np.asarray([
    [0, 0, 0],
    [120, 120, 120],
    [180, 120, 120],
    [6, 230, 230],
    [80, 50, 50],
    [4, 200, 3],
    [120, 120, 80],
    [140, 140, 140],
    [204, 5, 255],
    [230, 230, 230],
    [4, 250, 7],
    [224, 5, 255],
    [235, 255, 7],
    [150, 5, 61],
    [120, 120, 70],
    [8, 255, 51],
    [255, 6, 82],
    [143, 255, 140],
    [204, 255, 4],
    [255, 51, 7],
    [204, 70, 3],
    [0, 102, 200],
    [61, 230, 250],
    [255, 6, 51],
    [11, 102, 255],
    [255, 7, 71],
    [255, 9, 224],
    [9, 7, 230],
    [220, 220, 220],
    [255, 9, 92],
    [112, 9, 255],
    [8, 255, 214],
    [7, 255, 224],
    [255, 184, 6],
    [10, 255, 71],
    [255, 41, 10],
    [7, 255, 255],
    [224, 255, 8],
    [102, 8, 255],
    [255, 61, 6],
    [255, 194, 7],
    [255, 122, 8],
    [0, 255, 20],
    [255, 8, 41],
    [255, 5, 153],
    [6, 51, 255],
    [235, 12, 255],
    [160, 150, 20],
    [0, 163, 255],
    [140, 140, 140],
    [250, 10, 15],
    [20, 255, 0],
    [31, 255, 0],
    [255, 31, 0],
    [255, 224, 0],
    [153, 255, 0],
    [0, 0, 255],
    [255, 71, 0],
    [0, 235, 255],
    [0, 173, 255],
    [31, 0, 255],
    [11, 200, 200],
    [255, 82, 0],
    [0, 255, 245],
    [0, 61, 255],
    [0, 255, 112],
    [0, 255, 133],
    [255, 0, 0],
    [255, 163, 0],
    [255, 102, 0],
    [194, 255, 0],
    [0, 143, 255],
    [51, 255, 0],
    [0, 82, 255],
    [0, 255, 41],
    [0, 255, 173],
    [10, 0, 255],
    [173, 255, 0],
    [0, 255, 153],
    [255, 92, 0],
    [255, 0, 255],
    [255, 0, 245],
    [255, 0, 102],
    [255, 173, 0],
    [255, 0, 20],
    [255, 184, 184],
    [0, 31, 255],
    [0, 255, 61],
    [0, 71, 255],
    [255, 0, 204],
    [0, 255, 194],
    [0, 255, 82],
    [0, 10, 255],
    [0, 112, 255],
    [51, 0, 255],
    [0, 194, 255],
    [0, 122, 255],
    [0, 255, 163],
    [255, 153, 0],
    [0, 255, 10],
    [255, 112, 0],
    [143, 255, 0],
    [82, 0, 255],
    [163, 255, 0],
    [255, 235, 0],
    [8, 184, 170],
    [133, 0, 255],
    [0, 255, 92],
    [184, 0, 255],
    [255, 0, 31],
    [0, 184, 255],
    [0, 214, 255],
    [255, 0, 112],
    [92, 255, 0],
    [0, 224, 255],
    [112, 224, 255],
    [70, 184, 160],
    [163, 0, 255],
    [153, 0, 255],
    [71, 255, 0],
    [255, 0, 163],
    [255, 204, 0],
    [255, 0, 143],
    [0, 255, 235],
    [133, 255, 0],
    [255, 0, 235],
    [245, 0, 255],
    [255, 0, 122],
    [255, 245, 0],
    [10, 190, 212],
    [214, 255, 0],
    [0, 204, 255],
    [20, 0, 255],
    [255, 255, 0],
    [0, 153, 255],
    [0, 41, 255],
    [0, 255, 204],
    [41, 0, 255],
    [41, 255, 0],
    [173, 0, 255],
    [0, 245, 255],
    [71, 0, 255],
    [122, 0, 255],
    [0, 255, 184],
    [0, 92, 255],
    [184, 255, 0],
    [0, 133, 255],
    [255, 214, 0],
    [25, 194, 194],
    [102, 255, 0],
    [92, 0, 255],
])
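
# Example input image from the sd-controlnet-seg repo.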
image = load_image("https://huggingface.co/lllyasviel/sd-controlnet-seg/resolve/main/images/house.png").convert("RGB")
prompt = "old house in stormy weather with rain and wind"
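
# Run segmentation, then paint each predicted label with its palette color;
# the resulting color map is the conditioning image passed to the pipeline.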
pixel_values = image_processor(image, return_tensors="pt").pixel_values
with torch.no_grad():
    outputs = image_segmentor(pixel_values)
seg = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
for label, color in enumerate(ada_palette):
    color_seg[seg == label, :] = color
color_seg = color_seg.astype(np.uint8)
image = Image.fromarray(color_seg)
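
# Assemble the Stable Diffusion + ControlNet pipeline in fp16.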
controlnet = ControlNetModel.from_pretrained(checkpoint, torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
)
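
# UniPC is a fast scheduler (30 steps below); model CPU offload trades some
# speed for a much smaller peak VRAM footprint.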
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
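
# Fixed seed so runs are reproducible.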
generator = torch.manual_seed(0)
out_image = pipe(prompt, num_inference_steps=30, generator=generator, image=image).images[0]
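
# Save the generated image under ~/images.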
path = os.path.join(Path.home(), "images", "aa.png")
os.makedirs(os.path.dirname(path), exist_ok=True)
out_image.save(path)
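
# Upload the result to a Hub dataset repo for visual inspection (requires
# write access to the repo).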
api = HfApi()
api.upload_file(
    path_or_fileobj=path,
    path_in_repo=os.path.basename(path),
    repo_id="patrickvonplaten/images",
    repo_type="dataset",
)
print("https://huggingface.co/datasets/patrickvonplaten/images/blob/main/aa.png")