SauravMaheshkar committed
Commit 6447433
1 Parent(s): a54af4b

feat: use util fn from library

Files changed (4)
  1. app.py +15 -7
  2. requirements.txt +2 -3
  3. src/__init__.py +0 -0
  4. src/plot_utils.py +0 -89
app.py CHANGED
@@ -5,11 +5,10 @@ import gradio as gr
 import numpy as np
 from gradio_image_annotation import image_annotator
 from sam2 import load_model
+from sam2.utils.visualization import show_masks
 from sam2.automatic_mask_generator import SAM2AutomaticMaskGenerator
 from sam2.sam2_image_predictor import SAM2ImagePredictor
 
-from src.plot_utils import export_mask
-
 
 # @spaces.GPU()
 def predict(model_choice, annotations: Dict[str, Any]):
@@ -38,16 +37,25 @@ def predict(model_choice, annotations: Dict[str, Any]):
             multimask_output=False,
         )
 
-        if masks.shape[0] == 1:
-            # handle single mask cases
-            masks = np.expand_dims(masks, axis=0)
+        multi_box = len(scores) > 1
 
-        return export_mask(masks)
+        return show_masks(
+            image=annotations["image"],
+            masks=masks,
+            scores=scores if len(scores) == 1 else None,
+            only_best=not multi_box,
+        )
 
     else:
         mask_generator = SAM2AutomaticMaskGenerator(sam2_model)  # type: ignore
         masks = mask_generator.generate(annotations["image"])
-        return export_mask(masks, autogenerated=True)
+        return show_masks(
+            image=annotations["image"],
+            masks=masks,  # type: ignore
+            scores=None,
+            only_best=False,
+            autogenerated_mask=True
+        )
 
 
 with gr.Blocks(delete_cache=(30, 30)) as demo:
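For context, here is a minimal sketch of the two call patterns this refactor introduces. It assumes only what the hunks above show: that the fork's sam2.utils.visualization.show_masks accepts these keyword arguments and returns an image Gradio can display. The render_masks helper is hypothetical and is not part of the commit.

```python
# Hypothetical helper mirroring the two show_masks call sites in predict().
# Assumes the SauravMaheshkar/samv2 fork is installed (see requirements.txt below).
import numpy as np
from sam2.utils.visualization import show_masks


def render_masks(image: np.ndarray, masks, scores=None, autogenerated: bool = False):
    if autogenerated:
        # SAM2AutomaticMaskGenerator output: a list of mask records, no scores.
        return show_masks(
            image=image,
            masks=masks,
            scores=None,
            only_best=False,
            autogenerated_mask=True,
        )
    # SAM2ImagePredictor output: a mask array plus one score per predicted mask;
    # show every mask when several boxes were prompted, otherwise only the best.
    multi_box = len(scores) > 1
    return show_masks(
        image=image,
        masks=masks,
        scores=scores if len(scores) == 1 else None,
        only_best=not multi_box,
    )
```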
requirements.txt CHANGED
@@ -1,9 +1,8 @@
---extra-index-url https://download.pytorch.org/whl/cu113
+# --extra-index-url https://download.pytorch.org/whl/cu113
 
+git+https://github.com/SauravMaheshkar/samv2.git
 gradio
 gradio_image_annotation
 opencv-python
-samv2>=0.0.2
-pytest
 spaces
 torch
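A quick import smoke test after installing the updated requirements; it assumes only that the git-installed fork still provides the sam2 package and the modules app.py imports.

```python
# Run after: pip install -r requirements.txt
# Verifies the git-installed samv2 fork exposes everything app.py needs.
from sam2 import load_model
from sam2.automatic_mask_generator import SAM2AutomaticMaskGenerator
from sam2.sam2_image_predictor import SAM2ImagePredictor
from sam2.utils.visualization import show_masks

print("samv2 imports OK:", load_model.__module__, show_masks.__module__)
```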
src/__init__.py DELETED
File without changes
src/plot_utils.py DELETED
@@ -1,89 +0,0 @@
-from typing import Optional
-
-import cv2
-import numpy as np
-from PIL import Image
-
-
-def export_mask(
-    masks,
-    autogenerated: Optional[bool] = False,
-    random_color: Optional[bool] = True,
-    smoothen_contours: Optional[bool] = True,
-) -> Image:
-    if not autogenerated:
-        num_masks, _, h, w = masks.shape
-        num_masks = len(masks)
-
-        # Ensure masks are 2D by squeezing channel dimension
-        masks = masks.squeeze(axis=1)
-
-        # Create a single uint8 image with unique values for each mask
-        combined_mask = np.zeros((h, w), dtype=np.uint8)
-
-        for i in range(num_masks):
-            mask = masks[i]
-            mask = mask.astype(np.uint8)
-            combined_mask[mask > 0] = i + 1
-
-        # Create color map for visualization
-        if random_color:
-            colors = np.random.rand(num_masks, 3)  # Random colors for each mask
-        else:
-            colors = np.array(
-                [[30 / 255, 144 / 255, 255 / 255]] * num_masks
-            )  # Use fixed color
-
-        # Create an RGB image where each mask has its own color
-        color_image = np.zeros((h, w, 3), dtype=np.uint8)
-
-        for i in range(1, num_masks + 1):
-            mask_color = colors[i - 1] * 255
-            color_image[combined_mask == i] = mask_color
-
-        # Convert the NumPy array to a PIL Image
-        pil_image = Image.fromarray(color_image)
-
-        # Optional: Add contours to the mask image
-        if smoothen_contours:
-            contours_image = np.zeros((h, w, 4), dtype=np.float32)
-
-            for i in range(1, num_masks + 1):
-                mask = (combined_mask == i).astype(np.uint8)
-                contours_image = smoothen(mask, contours_image)
-
-            # Convert contours to PIL image and blend with the color image
-            contours_image = (contours_image[:, :, :3] * 255).astype(np.uint8)
-            contours_pil_image = Image.fromarray(contours_image)
-            pil_image = Image.blend(pil_image, contours_pil_image, alpha=0.6)
-
-        return pil_image
-    else:
-        sorted_anns = sorted(masks, key=(lambda x: x["area"]), reverse=True)
-        img_shape = sorted_anns[0]["segmentation"].shape
-        img = np.ones((img_shape[0], img_shape[1], 4))
-        img[:, :, 3] = 0
-
-        for ann in sorted_anns:
-            m = ann["segmentation"]
-            color_mask = np.concatenate([np.random.random(3), [0.5]])
-            img[m] = color_mask
-
-            if smoothen_contours:
-                img = smoothen(m, img)
-
-        img = (img * 255).astype(np.uint8)
-        pil_image = Image.fromarray(img)
-
-        return pil_image
-
-
-def smoothen(mask: np.ndarray, image: np.ndarray) -> np.ndarray:
-    contours, _ = cv2.findContours(
-        mask.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE
-    )
-    contours = [
-        cv2.approxPolyDP(contour, epsilon=0.01, closed=True) for contour in contours
-    ]
-    image = cv2.drawContours(image, contours, -1, (0, 0, 1, 0.4), thickness=1)
-    return image
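For reference, a rough sketch of how the removed export_mask helper was fed. The input shapes and dict keys are inferred from its body above; the mask data is synthetic, and this only runs against the pre-commit tree, since src/plot_utils.py is deleted here.

```python
import numpy as np

from src.plot_utils import export_mask  # module removed by this commit

# Predictor-style input: (num_masks, 1, H, W), as SAM2ImagePredictor produces.
h, w = 64, 64
masks = np.zeros((2, 1, h, w), dtype=np.uint8)
masks[0, 0, 8:24, 8:24] = 1
masks[1, 0, 32:56, 32:56] = 1
overlay = export_mask(masks)  # PIL image, one colour per mask

# Auto-generator-style input: list of dicts with "segmentation" and "area" keys.
records = [
    {"segmentation": masks[0, 0].astype(bool), "area": 256},
    {"segmentation": masks[1, 0].astype(bool), "area": 576},
]
auto_overlay = export_mask(records, autogenerated=True, smoothen_contours=False)
```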