AP123 committed
Commit 414790d
1 Parent(s): a95e4b1

Update app.py

Files changed (1)
  1. app.py +53 -37
app.py CHANGED
@@ -1,11 +1,13 @@
 import spaces
-from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline, EulerAncestralDiscreteScheduler
-import torch
-import gradio as gr
+from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline, AutoencoderKL, EulerAncestralDiscreteScheduler
+from diffusers.utils import load_image
 from PIL import Image
+import torch
 import numpy as np
+import cv2
+import gradio as gr
+from torchvision import transforms
 
-# Load models
 controlnet = ControlNetModel.from_pretrained(
     "briaai/BRIA-2.2-ControlNet-Recoloring",
     torch_dtype=torch.float16
@@ -19,7 +21,6 @@ pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
     low_cpu_mem_usage=True,
     offload_state_dict=True,
 ).to('cuda')
-
 pipe.scheduler = EulerAncestralDiscreteScheduler(
     beta_start=0.00085,
     beta_end=0.012,
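
Note: the beta values restated in this scheduler block (beta_start=0.00085, beta_end=0.012, num_train_timesteps=1000, steps_offset=1) match the stock SDXL scheduler configuration, so an equivalent, less repetitive pattern is to rebuild the scheduler from the config already attached to the pipeline. A minimal sketch, assuming pipe is loaded as above and the checkpoint ships the same scheduler config:

    from diffusers import EulerAncestralDiscreteScheduler

    # Swap in Euler Ancestral while reusing the scheduler config that shipped
    # with the checkpoint, instead of restating the beta values by hand.
    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)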
@@ -27,52 +28,67 @@ pipe.scheduler = EulerAncestralDiscreteScheduler(
     num_train_timesteps=1000,
     steps_offset=1
 )
-
+# pipe.enable_freeu(b1=1.1, b2=1.1, s1=0.5, s2=0.7)
+# pipe.enable_xformers_memory_efficient_attention()
 pipe.force_zeros_for_empty_prompt = False
 
 def resize_image(image):
     image = image.convert('RGB')
     current_size = image.size
-    transform = gr.Image(height=1024, width=1024, keep_aspect_ratio=True, source="upload", tool="editor")
-    resized_image = transform.postprocess(image)
+    if current_size[0] > current_size[1]:
+        center_cropped_image = transforms.functional.center_crop(image, (current_size[1], current_size[1]))
+    else:
+        center_cropped_image = transforms.functional.center_crop(image, (current_size[0], current_size[0]))
+    resized_image = transforms.functional.resize(center_cropped_image, (1024, 1024))
     return resized_image
 
-@spaces.GPU(enable_queue=True)
-def generate_image(input_image, prompt, controlnet_conditioning_scale):
-    # Always use a random seed for diversity in outputs
-    seed = np.random.randint(2147483647)
-    generator = torch.Generator("cuda").manual_seed(seed)
-
-    # Resize and prepare the image
-    input_image = resize_image(input_image)
-    grayscale_image = input_image.convert('L').convert('RGB')
-
-    # Generate the image with fixed 30 steps
+
+@spaces.GPU
+def generate_(prompt, negative_prompt, grayscale_image, num_steps, controlnet_conditioning_scale, seed):
+    generator = torch.Generator("cuda").manual_seed(seed)
     images = pipe(
-        prompt=prompt,
-        image=grayscale_image,
-        num_inference_steps=30,
-        controlnet_conditioning_scale=float(controlnet_conditioning_scale),
+        prompt, negative_prompt=negative_prompt, image=grayscale_image, num_inference_steps=num_steps, controlnet_conditioning_scale=float(controlnet_conditioning_scale),
         generator=generator,
     ).images
+    return images
+
+@spaces.GPU
+def process(input_image, prompt, negative_prompt, num_steps, controlnet_conditioning_scale, seed):
+
+    # resize input_image to 1024x1024
+    input_image = resize_image(input_image)
 
-    return images[0]
+    grayscale_image = input_image.convert('L').convert('RGB')
+
+    images = generate_(prompt, negative_prompt, grayscale_image, num_steps, controlnet_conditioning_scale, seed)
 
-# Gradio
-description = "Anything to Anything. Transform anything to anything with just a prompt!"
+    return [grayscale_image,images[0]]
+
+block = gr.Blocks().queue()
 
-with gr.Blocks() as demo:
-    gr.Markdown("<h1><center>Image Transformation with Bria Recolor ControlNet</center></h1>")
-    gr.Markdown(description)
+with block:
+    gr.Markdown("## BRIA 2.2 ControlNet Recoloring")
+    gr.HTML('''
+    <p style="margin-bottom: 10px; font-size: 94%">
+    This is a demo for ControlNet Recoloring that using
+    <a href="https://huggingface.co/briaai/BRIA-2.2" target="_blank">BRIA 2.2 text-to-image model</a> as backbone.
+    Trained on licensed data, BRIA 2.2 provide full legal liability coverage for copyright and privacy infringement.
+    </p>
+    ''')
     with gr.Row():
         with gr.Column():
-            input_image = gr.Image(label='Upload your image', type="pil")
-            prompt = gr.Textbox(label='Enter your prompt')
+            input_image = gr.Image(sources=None, type="pil")  # None for upload, ctrl+v and webcam
+            prompt = gr.Textbox(label="Prompt")
+            negative_prompt = gr.Textbox(label="Negative prompt", value="Logo,Watermark,Text,Ugly,Morbid,Extra fingers,Poorly drawn hands,Mutation,Blurry,Extra limbs,Gross proportions,Missing arms,Mutated hands,Long neck,Duplicate,Mutilated,Mutilated hands,Poorly drawn face,Deformed,Bad anatomy,Cloned face,Malformed limbs,Missing legs,Too many fingers")
+            num_steps = gr.Slider(label="Number of steps", minimum=25, maximum=100, value=50, step=1)
             controlnet_conditioning_scale = gr.Slider(label="ControlNet conditioning scale", minimum=0.1, maximum=2.0, value=1.0, step=0.05)
-            submit_button = gr.Button('Transform Image')
+            seed = gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, randomize=True,)
+            run_button = gr.Button(value="Run")
+
+
         with gr.Column():
-            output_image = gr.Image(label='Transformed Image')
-
-    submit_button.click(fn=generate_image, inputs=[input_image, prompt, controlnet_conditioning_scale], outputs=output_image)
+            result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery", columns=[2], height='auto')
+    ips = [input_image, prompt, negative_prompt, num_steps, controlnet_conditioning_scale, seed]
+    run_button.click(fn=process, inputs=ips, outputs=[result_gallery])
 
-demo.queue().launch()
+block.launch(debug = True)
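
Note: the rewritten resize_image drops the gr.Image-based postprocessing (its source= and tool= arguments no longer exist in Gradio 4) in favor of an explicit square center-crop followed by a 1024x1024 resize. A condensed sketch of the new behavior, equivalent to the committed if/else since both branches crop on the shorter side; the synthetic images below only demonstrate the output size:

    from PIL import Image
    from torchvision import transforms

    def resize_image(image):
        # Center-crop to a square on the shorter side, then scale to 1024x1024.
        image = image.convert('RGB')
        side = min(image.size)
        cropped = transforms.functional.center_crop(image, (side, side))
        return transforms.functional.resize(cropped, (1024, 1024))

    print(resize_image(Image.new('RGB', (1920, 1080))).size)  # (1024, 1024)
    print(resize_image(Image.new('RGB', (768, 1024))).size)   # (1024, 1024)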
 
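Note: the commit also replaces the old always-random seed (np.random.randint inside generate_image) with a user-facing Seed slider, so a fixed seed now reproduces outputs, and the demo returns the grayscale conditioning image alongside the result. A hypothetical headless run of the new entry point (the file name and prompts are made up; this assumes the spaces package leaves @spaces.GPU as a pass-through outside a ZeroGPU Space, and a CUDA device is still required because the pipeline is moved to 'cuda'):

    from PIL import Image

    img = Image.open("input.jpg")  # hypothetical local file
    grayscale, recolored = process(
        img,
        prompt="a red vintage car on a coastal road",  # example prompt
        negative_prompt="Logo,Watermark,Text,Ugly",
        num_steps=50,
        controlnet_conditioning_scale=1.0,
        seed=123,  # fixed seed -> reproducible output
    )
    recolored.save("recolored.png")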