Commit f9b09af
Parent(s): 44ed88e
Update app.py
app.py CHANGED
@@ -26,19 +26,19 @@ pipe = pipe.to(device)
 def run(prompt, negative_prompt=None, guidance_scale=7.0, pag_scale=3.0, pag_layers=["mid"], randomize_seed=True, seed=42, progress=gr.Progress(track_tqdm=True)):
     prompt = prompt.strip()
     negative_prompt = negative_prompt.strip() if negative_prompt and negative_prompt.strip() else None
-    print("Initial seed:", seed)
+    print(f"Initial seed for prompt `{prompt}`", seed)
     if(randomize_seed):
         seed = random.randint(0, sys.maxsize)
 
     if not prompt and not negative_prompt:
         guidance_scale = 0.0
-    print("Seed before sending to generator:", seed)
+    print(f"Seed before sending to generator for prompt: `{prompt}`", seed)
     generator = torch.Generator(device="cuda").manual_seed(seed)
     image_pag = pipe(prompt, negative_prompt=negative_prompt, guidance_scale=guidance_scale, pag_scale=pag_scale, pag_applied_layers=pag_layers, generator=generator, num_inference_steps=25).images[0]
 
     generator = torch.Generator(device="cuda").manual_seed(seed)
     image_normal = pipe(prompt, negative_prompt=negative_prompt, guidance_scale=guidance_scale, generator=generator, num_inference_steps=25).images[0]
-    print("Seed at the end of generation:", seed)
+    print(f"Seed at the end of generation for prompt: `{prompt}`", seed)
     return (image_pag, image_normal), seed
 
 css = '''
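A note on the pattern visible in this hunk: run() rebuilds a torch.Generator with the same seed before each pipe(...) call so the PAG and non-PAG images start from identical initial noise. Below is a minimal, self-contained sketch of that behavior; it uses plain PyTorch with a CPU generator and an illustrative latent shape so it runs anywhere, and none of it is taken from the Space itself.

import torch

seed = 42

# Seeding a fresh generator with the same value reproduces identical noise,
# which is why run() creates a new generator before each pipe(...) call:
# both images then begin from the same starting latents.
gen_a = torch.Generator(device="cpu").manual_seed(seed)
noise_a = torch.randn(1, 4, 64, 64, generator=gen_a)

gen_b = torch.Generator(device="cpu").manual_seed(seed)
noise_b = torch.randn(1, 4, 64, 64, generator=gen_b)

assert torch.equal(noise_a, noise_b)  # same seed, same starting noise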