Add a Guidance Scale parameter

#2
Files changed (1)
  1. app.py +4 -3
app.py CHANGED
@@ -50,26 +50,27 @@ with gr.Blocks() as demo:
  # steps = gr.Slider(label="Inference Steps", minimum=1, maximum=8, step=1, value=1, interactive=True)
  # eta = gr.Number(label="Eta (Corresponds to parameter eta (η) in the DDIM paper, i.e. 0.0 eqauls DDIM, 1.0 equals LCM)", value=1., interactive=True)
  prompt = gr.Text(label="Prompt", value="a photo of a cat", interactive=True)
+ guidance_scale = gr.Slider(minimum = 0, maximum = 13, value = 0, step = 0.1, label = "Classifier-Free Guidance Scale", info = "lower=image quality, higher=follow the prompt", interactive=True)
  seed = gr.Number(label="Seed", value=3413, interactive=True)
  btn = gr.Button(value="run")
  with gr.Column():
      output = gr.Gallery(height=1024)

  @spaces.GPU
- def process_image(num_images, height, width, prompt, seed):
+ def process_image(num_images, height, width, prompt, guidance_scale, seed):
      global pipe
      with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16), timer("inference"):
          return pipe(
              prompt=[prompt]*num_images,
              generator=torch.Generator().manual_seed(int(seed)),
              num_inference_steps=1,
-             guidance_scale=0.,
+             guidance_scale=guidance_scale,
              height=int(height),
              width=int(width),
              timesteps=[800]
          ).images

- reactive_controls = [num_images, height, width, prompt, seed]
+ reactive_controls = [num_images, height, width, prompt, guidance_scale, seed]

  # for control in reactive_controls:
  #     control.change(fn=process_image, inputs=reactive_controls, outputs=[output])
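
For reviewers, below is a self-contained sketch of how the new slider is intended to flow from the UI into the generation call. It is only illustrative, not the Space's actual code: the checkpoint, the controls defined above this hunk (num_images, height, width), the timer() helper, the @spaces.GPU decorator, and the btn.click wiring all live outside the diff, so their exact form here is assumed.

# Hedged sketch: wiring the new "Classifier-Free Guidance Scale" slider into the pipeline.
# Everything not visible in the hunk above (model checkpoint, the other controls,
# the event wiring) is assumed for illustration and may differ from app.py.
import gradio as gr
import torch
from diffusers import AutoPipelineForText2Image

# Placeholder checkpoint; the actual Space loads its own one-step model.
pipe = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/sdxl-turbo", torch_dtype=torch.bfloat16
).to("cuda")

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            # Controls defined above the hunk in the real app (names assumed).
            num_images = gr.Slider(label="Number of Images", minimum=1, maximum=4, step=1, value=1)
            height = gr.Number(label="Height", value=1024)
            width = gr.Number(label="Width", value=1024)
            prompt = gr.Text(label="Prompt", value="a photo of a cat")
            # The control added by this PR.
            guidance_scale = gr.Slider(minimum=0, maximum=13, value=0, step=0.1,
                                       label="Classifier-Free Guidance Scale",
                                       info="lower=image quality, higher=follow the prompt")
            seed = gr.Number(label="Seed", value=3413)
            btn = gr.Button(value="run")
        with gr.Column():
            output = gr.Gallery(height=1024)

    def process_image(num_images, height, width, prompt, guidance_scale, seed):
        # The slider value is forwarded straight to the pipeline call:
        # 0.0 disables classifier-free guidance, larger values weight the prompt more.
        with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16):
            return pipe(
                prompt=[prompt] * int(num_images),
                generator=torch.Generator().manual_seed(int(seed)),
                num_inference_steps=1,
                guidance_scale=float(guidance_scale),
                height=int(height),
                width=int(width),
                # The real app also pins timesteps=[800]; omitted here because it
                # requires a scheduler that accepts custom timesteps.
            ).images

    # Mirrors the reactive_controls list touched by this PR; running via the
    # "run" button is an assumption (the .change() wiring is commented out above).
    reactive_controls = [num_images, height, width, prompt, guidance_scale, seed]
    btn.click(fn=process_image, inputs=reactive_controls, outputs=[output])

demo.launch()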