patrickvonplaten
committed on
Commit
•
a57626b
1
Parent(s):
c9c864b
add pipeline and give image a 2nd try
Browse files- generated_image_pipeline.png +0 -0
- generated_image_unrolled.png +0 -0
- run.py +27 -2
generated_image_pipeline.png
ADDED
generated_image_unrolled.png
ADDED
run.py
CHANGED
@@ -5,6 +5,8 @@ import PIL.Image
|
|
5 |
import numpy as np
|
6 |
import tqdm
|
7 |
|
|
|
|
|
8 |
# load all models
|
9 |
unet = UNetUnconditionalModel.from_pretrained("./", subfolder="unet")
|
10 |
vqvae = VQModel.from_pretrained("./", subfolder="vqvae")
|
@@ -44,8 +46,31 @@ with torch.no_grad():
|
|
44 |
|
45 |
# process image
|
46 |
image_processed = image.cpu().permute(0, 2, 3, 1)
|
47 |
-
image_processed =
|
48 |
image_processed = image_processed.numpy().astype(np.uint8)
|
49 |
image_pil = PIL.Image.fromarray(image_processed[0])
|
50 |
|
51 |
-
image_pil.save("
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
5 |
import numpy as np
|
6 |
import tqdm
|
7 |
|
8 |
+
# 1. Unroll the full loop
|
9 |
+
# ==================================================================
|
10 |
# load all models
|
11 |
unet = UNetUnconditionalModel.from_pretrained("./", subfolder="unet")
|
12 |
vqvae = VQModel.from_pretrained("./", subfolder="vqvae")
|
|
|
46 |
|
47 |
# process image
|
48 |
image_processed = image.cpu().permute(0, 2, 3, 1)
|
49 |
+
image_processed = image_processed * 255.
|
50 |
image_processed = image_processed.numpy().astype(np.uint8)
|
51 |
image_pil = PIL.Image.fromarray(image_processed[0])
|
52 |
|
53 |
+
image_pil.save("generated_image_unrolled.png")
|
54 |
+
|
55 |
+
|
56 |
+
# 2. Use pipeline
|
57 |
+
# ==================================================================
|
58 |
+
from diffusers import LatentDiffusionUncondPipeline
|
59 |
+
import torch
|
60 |
+
import PIL.Image
|
61 |
+
import numpy as np
|
62 |
+
import tqdm
|
63 |
+
|
64 |
+
pipeline = LatentDiffusionUncondPipeline.from_pretrained("./")
|
65 |
+
|
66 |
+
# generate image by calling the pipeline
|
67 |
+
generator = torch.manual_seed(0)
|
68 |
+
image = pipeline(generator=generator, num_inference_steps=50)["sample"]
|
69 |
+
|
70 |
+
# process image
|
71 |
+
image_processed = image.cpu().permute(0, 2, 3, 1)
|
72 |
+
image_processed = image_processed * 255.
|
73 |
+
image_processed = image_processed.numpy().astype(np.uint8)
|
74 |
+
image_pil = PIL.Image.fromarray(image_processed[0])
|
75 |
+
|
76 |
+
image_pil.save("generated_image_pipeline.png")
|