JoPmt committed
Commit c5f2e99
1 Parent(s): dd98085

Update app.py

Files changed (1)
  app.py (+5 -48)
app.py CHANGED
@@ -18,9 +18,7 @@ pipe_prior = accelerator.prepare(KandinskyV22PriorPipeline.from_pretrained("kand
 pipe_prior.to("cpu")
 pipe = accelerator.prepare(KandinskyV22ControlnetPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float32, use_safetensors=False))
 pipe.to("cpu")
-##pipe.unet.to(memory_format=torch.channels_last)
-##qrocessor = GLPNImageProcessor.from_pretrained("vinvino02/glpn-nyu")
-##zodel = GLPNForDepthEstimation.from_pretrained("vinvino02/glpn-nyu")
+
 generator = torch.Generator(device="cpu").manual_seed(random.randint(0, MAX_SEED))

 def make_hint(image, depth_estimator):
@@ -28,58 +26,17 @@ def make_hint(image, depth_estimator):
     image = np.array(image)
     image = image[:, :, None]
     image = np.concatenate([image, image, image], axis=2)
-    ##hint = Image.fromarray(image)
+
     detected_map = torch.from_numpy(image).float() / 255.0
     hint = detected_map.permute(2, 0, 1)
-    ##deputs = qrocessor(images=image, return_tensors="pt")
-    ##with torch.no_grad():
-    ## edputs = zodel(**deputs)
-    ## predicted_depth = edputs.predicted_depth
-
-    # interpolate to original size
-    ##prediction = torch.nn.functional.interpolate(
-    ## predicted_depth.unsqueeze(1),
-    ## size=image.size[::-1],
-    ## mode="bicubic",
-    ## align_corners=False,
-    ##)
-
-    ### visualize the prediction
-    ##edput = prediction.squeeze().cpu().numpy()
-    ##formatted = (edput * 255 / np.max(edput)).astype("uint8")
-    ##depth = Image.fromarray(formatted)
-    ##depth = Image.open(depth).resize((512, 512))
-    ##depth = depth.convert("RGB")
-    ##depth = np.array(depth)
-    ##depth = depth[:, :, None]
-    ##depth = np.concatenate([depth, depth, depth], axis=2)
-    ##detected_map = torch.from_numpy(depth).float() / 255.0
-    ##hint = detected_map.permute(2, 0, 1)
-    ##hint = depth
-    ##image = depth_estimator(image)['predicted_depth'][0]
-    ##image = image.numpy()
-    ##image_depth = image.copy()
-    ##image_depth -= np.min(image_depth)
-    ##image_depth /= np.max(image_depth)
-    ##bg_threhold = 0.4
-    ##x = cv2.Sobel(image, cv2.CV_32F, 1, 0, ksize=3)
-    ##x[image_depth < bg_threhold] = 0
-    ##y = cv2.Sobel(image, cv2.CV_32F, 0, 1, ksize=3)
-    ##y[image_depth < bg_threhold] = 0
-    ##z = np.ones_like(x) * np.pi * 2.0
-    ##image = np.stack([x, y, z], axis=2)
-    ##image /= np.sum(image ** 2.0, axis=2, keepdims=True) ** 0.5
-    ##image = (image * 127.5 + 127.5).clip(0, 255).astype(np.uint8)
-    ##image = np.array(Image.fromarray(image))
-    ##hint = torch.from_numpy(image).float() / 255.0
+
     return hint

 def plex(prompt,goof):
-    ##goof = Image.open(goof).resize((512, 512))
+
     goof = load_image(goof)
     goof = goof.convert("RGB")
-    ##goof.thumbnail((512, 512))
-    ##hint = make_hint(goof).unsqueeze(0).to("cpu")
+
     hint = make_hint(goof, depth_estimator).unsqueeze(0).to("cpu")
     negative_prior_prompt = "lowres,text,bad quality,jpeg artifacts,ugly,bad face,extra fingers,blurry,bad anatomy,extra limbs,fused fingers,long neck,watermark,signature"
     image_emb, zero_image_emb = pipe_prior(prompt=prompt, negative_prompt=negative_prior_prompt, num_inference_steps=5,generator=generator).to_tuple()
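
For context on the surviving make_hint body: after this change it just stacks a single-channel depth map into three channels and normalizes it into the (3, H, W) tensor that the Kandinsky 2.2 depth ControlNet takes as hint. The sketch below is a minimal, self-contained version of that idea, assuming depth_estimator is a transformers depth-estimation pipeline as in the diffusers Kandinsky 2.2 ControlNet example; the lines of make_hint above this hunk are not shown in the diff, so the exact estimator call is an assumption, not code from app.py.

import numpy as np
import torch
from PIL import Image
from transformers import pipeline

# Assumption: a generic transformers depth-estimation pipeline stands in for
# whatever depth_estimator app.py actually constructs.
depth_estimator = pipeline("depth-estimation")

def make_hint(image: Image.Image, depth_estimator) -> torch.Tensor:
    # Estimate depth; the pipeline returns a dict whose "depth" entry is a
    # single-channel PIL image.
    depth = depth_estimator(image)["depth"]
    # Stack the single channel into 3 channels, as the diff above does.
    depth = np.array(depth)[:, :, None]
    depth = np.concatenate([depth, depth, depth], axis=2)
    # Normalize to [0, 1] and move channels first: shape (3, H, W).
    detected_map = torch.from_numpy(depth).float() / 255.0
    return detected_map.permute(2, 0, 1)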
 
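And a hedged end-to-end sketch of how plex wires that hint into the two pipelines prepared at the top of the diff, following the public diffusers recipe for Kandinsky 2.2 ControlNet depth; the input URL, prompt, seed, and step count below are illustrative placeholders, not values from app.py.

import torch
from diffusers import KandinskyV22ControlnetPipeline, KandinskyV22PriorPipeline
from diffusers.utils import load_image

# The prior turns the text prompt into image embeddings; the ControlNet
# pipeline decodes them while respecting the depth hint.
pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float32
).to("cpu")
pipe = KandinskyV22ControlnetPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float32
).to("cpu")

goof = load_image("https://example.com/input.png").convert("RGB")  # hypothetical URL
hint = make_hint(goof, depth_estimator).unsqueeze(0).to("cpu")     # make_hint from the sketch above

generator = torch.Generator(device="cpu").manual_seed(0)
negative_prompt = "lowres, text, bad quality, blurry"
image_emb, zero_image_emb = pipe_prior(
    prompt="a cozy reading nook", negative_prompt=negative_prompt, generator=generator
).to_tuple()

result = pipe(
    image_embeds=image_emb,
    negative_image_embeds=zero_image_emb,
    hint=hint,
    height=512,
    width=512,
    num_inference_steps=20,
    generator=generator,
).images[0]
result.save("result.png")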