Fill in this form to get access

BRIA 3.2 ControlNet-Generative-Fill requires access to BRIA 3.2 Text-to-Image model

Log in or Sign Up to review the conditions and access this model content.

BRIA-3.2-ControlNet-Generative-Fill, Model Card

BRIA 3.2 ControlNet-Generative-Fill, trained on the foundation of BRIA 3.2 Text-to-Image, enables the generation of high-quality images guided by a textual prompt and a mask. This allows for immersive addition and modification of objects in a given input image.

examples

Model Description

  • Developed by: BRIA AI

  • Model type: ControlNet for Latent diffusion

  • License: bria-3.2

  • Model Description: ControlNet Generative-Fill for BRIA 3.2 Text-to-Image model. The model inpaints objects guided by a textual prompt and a mask.

  • Resources for more information: BRIA AI

Usage

Installation

pip install -qr https://huggingface.co/briaai/bria-3.2-controlnet-generative-fill/resolve/main/requirements.txt

Then install the latest version of diffusers:

pip install git+https://github.com/huggingface/diffusers
from huggingface_hub import hf_hub_download
import os

# Resolve a directory to place the downloaded helper modules in: next to this
# file when run as a script, or the current directory when __file__ is not
# defined (e.g. an interactive interpreter or notebook).
try:
    local_dir = os.path.dirname(__file__)
except NameError:  # was a bare `except:`, which would hide unrelated errors
    local_dir = '.'

# Fetch the custom ControlNet model and inpainting pipeline implementations
# so they can be imported locally by the inference script below.
hf_hub_download(repo_id="briaai/BRIA-3.2-ControlNet-Generative-Fill", filename='controlnet_bria.py', local_dir=local_dir)
hf_hub_download(repo_id="briaai/BRIA-3.2-ControlNet-Generative-Fill", filename='pipeline_bria_controlnet_inpainting.py', local_dir=local_dir)

Run the inpainting script

from io import BytesIO
import requests
import torch
from PIL import Image
from PIL.Image import Image as ImageType
from controlnet_bria import BriaControlNetModel
from pipeline_bria_controlnet_inpainting import BriaControlNetInpaintingPipeline


# Output width/height are rounded down to multiples of this value in
# resize_image_to_retain_ratio — presumably a latent/VAE alignment
# requirement of the pipeline; TODO confirm against the pipeline code.
GRANULARITY_VAL = 8 

def load_gpu_pipeline_from_models():
    """Load the BRIA-3.2 ControlNet generative-fill pipeline for GPU inference.

    Downloads (or reads from cache) the ControlNet weights and the base
    BRIA-3.2 transformer pipeline, attaches the ControlNet, and moves the
    pipeline to CUDA in bfloat16.

    Returns:
        A ready-to-call BriaControlNetInpaintingPipeline.
    """
    print("Getting ControlNet")
    controlnet = BriaControlNetModel.from_pretrained("briaai/BRIA-3.2-ControlNet-Generative-Fill")
    print("Done getting controlnet")

    print("Getting transformer pipeline")
    transformer_path = "briaai/BRIA-3.2"  # was assigned twice in the original
    pipeline = BriaControlNetInpaintingPipeline.from_pretrained(
        transformer_path,
        revision="pre_diffusers_support",
        controlnet=controlnet,
        torch_dtype=torch.bfloat16,
        trust_remote_code=True,
    )
    print("Done getting transformer pipeline")

    pipeline = pipeline.to(device="cuda", dtype=torch.bfloat16)
    # NOTE(review): diffusers documents enable_model_cpu_offload() as an
    # alternative to .to("cuda") — calling both is usually redundant or
    # counterproductive; confirm which memory strategy is intended.
    pipeline.enable_model_cpu_offload()

    return pipeline

def download_image(url):
    """Download the image at *url* and return it as a PIL Image.

    Raises:
        requests.HTTPError: on a non-2xx response (previously a failed
            request would surface as a confusing PIL decode error).
        requests.Timeout: if the server does not respond within 30s
            (previously the call could hang indefinitely).
    """
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    return Image.open(BytesIO(response.content))



def resize_image_to_retain_ratio(image):
    """Resize *image* to roughly one megapixel, preserving its aspect ratio.

    Both output dimensions are rounded down to a multiple of
    GRANULARITY_VAL before resizing.
    """
    target_pixels = 1024 * 1024
    aspect = image.size[0] / image.size[1]

    new_width = int((target_pixels * aspect) ** 0.5)
    new_width -= new_width % GRANULARITY_VAL

    new_height = int(target_pixels / new_width)
    new_height -= new_height % GRANULARITY_VAL

    return image.resize((new_width, new_height))


def infer(pipeline: BriaControlNetInpaintingPipeline,
            input_image: ImageType,
            mask_image: ImageType,
            prompt: str,
            negative_prompt: str,
            num_inference_steps: int, 
            seed: int, 
            guidance_scale: float, 
            controlnet_conditioning_scale: float,
            ):
    """Run mask-guided generative fill and return the resulting PIL image.

    Args:
        pipeline: The loaded BriaControlNetInpaintingPipeline.
        input_image: Source image; its size determines the output size.
        mask_image: Mask selecting the region to fill/modify.
        prompt / negative_prompt: Text conditioning for the generation.
        num_inference_steps: Diffusion sampling steps.
        seed: Seed for the CUDA generator (reproducibility).
        guidance_scale: Classifier-free guidance strength.
        controlnet_conditioning_scale: Strength of the ControlNet signal.

    Returns:
        The first generated PIL image.
    """
    generator = torch.Generator(device="cuda").manual_seed(seed)

    with torch.no_grad():
        # Originals referenced `self.pipeline`, `image`, `mask`,
        # `self.eval_args...` and `image_arr`, none of which exist in this
        # free function — all fixed to use the actual parameters.
        result = pipeline(
            prompt=prompt,
            control_image=[input_image],
            num_inference_steps=num_inference_steps,
            generator=generator,
            guidance_scale=guidance_scale,
            height=input_image.size[1],
            width=input_image.size[0],
            negative_prompt=negative_prompt,
            max_sequence_length=128,
            control_mode=None,
            controlnet_conditioning_scale=controlnet_conditioning_scale,
            mask=mask_image,
        )
    return result.images[0]


# Example driver: build the CUDA pipeline once, then reuse it for calls.
gpu_pipeline = load_gpu_pipeline_from_models()
# Fixed seed for reproducible outputs.
seed = 9871256

# call parameters we set for optimal results
num_inference_steps = 50
guidance_scale = 5.0
controlnet_conditioning_scale = 1.0    
Downloads last month
-
Inference Providers NEW
This model isn't deployed by any Inference Provider. 🙋 Ask for provider support