yansong1616 committed
Commit 8a95b97
1 Parent(s): 6b57f16

Upload 59 files

This view is limited to 50 files because it contains too many changes. See the raw diff for the remaining files.
Files changed (50):
  1. SAM2/__init__.py +0 -0
  2. SAM2/__pycache__/__init__.cpython-310.pyc +0 -0
  3. SAM2/checkpoints/download_ckpts.sh +31 -0
  4. SAM2/checkpoints/sam2_hiera_large.pt +3 -0
  5. SAM2/sam2/_C.pyd +0 -0
  6. SAM2/sam2/__init__.py +16 -0
  7. SAM2/sam2/__pycache__/__init__.cpython-310.pyc +0 -0
  8. SAM2/sam2/__pycache__/build_sam.cpython-310.pyc +0 -0
  9. SAM2/sam2/__pycache__/sam2_image_predictor.cpython-310.pyc +0 -0
  10. SAM2/sam2/__pycache__/sam2_to_dust3r.cpython-310.pyc +0 -0
  11. SAM2/sam2/__pycache__/sam2_video_predictor.cpython-310.pyc +0 -0
  12. SAM2/sam2/automatic_mask_generator.py +434 -0
  13. SAM2/sam2/build_sam.py +90 -0
  14. SAM2/sam2/csrc/connected_components.cu +289 -0
  15. SAM2/sam2/modeling/__init__.py +5 -0
  16. SAM2/sam2/modeling/__pycache__/__init__.cpython-310.pyc +0 -0
  17. SAM2/sam2/modeling/__pycache__/memory_attention.cpython-310.pyc +0 -0
  18. SAM2/sam2/modeling/__pycache__/memory_encoder.cpython-310.pyc +0 -0
  19. SAM2/sam2/modeling/__pycache__/position_encoding.cpython-310.pyc +0 -0
  20. SAM2/sam2/modeling/__pycache__/sam2_base.cpython-310.pyc +0 -0
  21. SAM2/sam2/modeling/__pycache__/sam2_utils.cpython-310.pyc +0 -0
  22. SAM2/sam2/modeling/backbones/__init__.py +5 -0
  23. SAM2/sam2/modeling/backbones/__pycache__/__init__.cpython-310.pyc +0 -0
  24. SAM2/sam2/modeling/backbones/__pycache__/hieradet.cpython-310.pyc +0 -0
  25. SAM2/sam2/modeling/backbones/__pycache__/image_encoder.cpython-310.pyc +0 -0
  26. SAM2/sam2/modeling/backbones/__pycache__/utils.cpython-310.pyc +0 -0
  27. SAM2/sam2/modeling/backbones/hieradet.py +295 -0
  28. SAM2/sam2/modeling/backbones/image_encoder.py +133 -0
  29. SAM2/sam2/modeling/backbones/utils.py +95 -0
  30. SAM2/sam2/modeling/memory_attention.py +169 -0
  31. SAM2/sam2/modeling/memory_encoder.py +181 -0
  32. SAM2/sam2/modeling/position_encoding.py +216 -0
  33. SAM2/sam2/modeling/sam/__init__.py +5 -0
  34. SAM2/sam2/modeling/sam/__pycache__/__init__.cpython-310.pyc +0 -0
  35. SAM2/sam2/modeling/sam/__pycache__/mask_decoder.cpython-310.pyc +0 -0
  36. SAM2/sam2/modeling/sam/__pycache__/prompt_encoder.cpython-310.pyc +0 -0
  37. SAM2/sam2/modeling/sam/__pycache__/transformer.cpython-310.pyc +0 -0
  38. SAM2/sam2/modeling/sam/mask_decoder.py +295 -0
  39. SAM2/sam2/modeling/sam/prompt_encoder.py +182 -0
  40. SAM2/sam2/modeling/sam/transformer.py +330 -0
  41. SAM2/sam2/modeling/sam2_base.py +831 -0
  42. SAM2/sam2/modeling/sam2_utils.py +149 -0
  43. SAM2/sam2/sam2_image_predictor.py +446 -0
  44. SAM2/sam2/sam2_to_dust3r.py +161 -0
  45. SAM2/sam2/sam2_video_predictor.py +1042 -0
  46. SAM2/sam2/utils/__init__.py +5 -0
  47. SAM2/sam2/utils/__pycache__/__init__.cpython-310.pyc +0 -0
  48. SAM2/sam2/utils/__pycache__/amg.cpython-310.pyc +0 -0
  49. SAM2/sam2/utils/__pycache__/misc.cpython-310.pyc +0 -0
  50. SAM2/sam2/utils/__pycache__/transforms.cpython-310.pyc +0 -0
SAM2/__init__.py ADDED
File without changes
SAM2/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (159 Bytes).
SAM2/checkpoints/download_ckpts.sh ADDED
@@ -0,0 +1,31 @@
+ #!/bin/bash
+
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
+ # All rights reserved.
+
+ # This source code is licensed under the license found in the
+ # LICENSE file in the root directory of this source tree.
+
+
+ # Define the URLs for the checkpoints
+ BASE_URL="https://dl.fbaipublicfiles.com/segment_anything_2/072824/"
+ sam2_hiera_t_url="${BASE_URL}sam2_hiera_tiny.pt"
+ sam2_hiera_s_url="${BASE_URL}sam2_hiera_small.pt"
+ sam2_hiera_b_plus_url="${BASE_URL}sam2_hiera_base_plus.pt"
+ sam2_hiera_l_url="${BASE_URL}sam2_hiera_large.pt"
+
+
+ # Download each of the four checkpoints using wget
+ echo "Downloading sam2_hiera_tiny.pt checkpoint..."
+ wget $sam2_hiera_t_url || { echo "Failed to download checkpoint from $sam2_hiera_t_url"; exit 1; }
+
+ echo "Downloading sam2_hiera_small.pt checkpoint..."
+ wget $sam2_hiera_s_url || { echo "Failed to download checkpoint from $sam2_hiera_s_url"; exit 1; }
+
+ echo "Downloading sam2_hiera_base_plus.pt checkpoint..."
+ wget $sam2_hiera_b_plus_url || { echo "Failed to download checkpoint from $sam2_hiera_b_plus_url"; exit 1; }
+
+ echo "Downloading sam2_hiera_large.pt checkpoint..."
+ wget $sam2_hiera_l_url || { echo "Failed to download checkpoint from $sam2_hiera_l_url"; exit 1; }
+
+ echo "All checkpoints are downloaded successfully."
SAM2/checkpoints/sam2_hiera_large.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7442e4e9b732a508f80e141e7c2913437a3610ee0c77381a66658c3a445df87b
+ size 897952466
SAM2/sam2/_C.pyd ADDED
Binary file (391 kB).
SAM2/sam2/__init__.py ADDED
@@ -0,0 +1,16 @@
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
+ # All rights reserved.
+
+ # This source code is licensed under the license found in the
+ # LICENSE file in the root directory of this source tree.
+
+ from hydra.core.global_hydra import GlobalHydra
+ from hydra import initialize_config_module
+
+ # Check whether Hydra has already been initialized
+ if not GlobalHydra().is_initialized():
+     initialize_config_module("sam2_configs", version_base="1.2")
+ else:
+     # If Hydra is already initialized, clear it first and re-initialize
+     GlobalHydra.instance().clear()
+     initialize_config_module("sam2_configs", version_base="1.2")
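Importing this package runs the Hydra initialization above as a side effect, so configs from `sam2_configs` can then be composed by name. A minimal sketch, assuming the `SAM2` directory is on `sys.path` and that a config named `sam2_hiera_l.yaml` exists in `sam2_configs` (neither is shown in this view):

```python
import sam2  # noqa: F401 -- side effect: initialize_config_module("sam2_configs")
from hydra import compose

# Config name is an assumption for illustration; any config shipped in sam2_configs works.
cfg = compose(config_name="sam2_hiera_l.yaml")
print(cfg.model._target_)  # the model class that build_sam.py later instantiates
```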
SAM2/sam2/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (403 Bytes).
SAM2/sam2/__pycache__/build_sam.cpython-310.pyc ADDED
Binary file (1.98 kB).
SAM2/sam2/__pycache__/sam2_image_predictor.cpython-310.pyc ADDED
Binary file (14.6 kB).
SAM2/sam2/__pycache__/sam2_to_dust3r.cpython-310.pyc ADDED
Binary file (4.86 kB).
SAM2/sam2/__pycache__/sam2_video_predictor.cpython-310.pyc ADDED
Binary file (19.6 kB).
SAM2/sam2/automatic_mask_generator.py ADDED
@@ -0,0 +1,434 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # Adapted from https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/automatic_mask_generator.py
8
+ from typing import Any, Dict, List, Optional, Tuple
9
+
10
+ import numpy as np
11
+ import torch
12
+ from torchvision.ops.boxes import batched_nms, box_area # type: ignore
13
+
14
+ from sam2.modeling.sam2_base import SAM2Base
15
+ from sam2.sam2_image_predictor import SAM2ImagePredictor
16
+ from sam2.utils.amg import (
17
+ area_from_rle,
18
+ batch_iterator,
19
+ batched_mask_to_box,
20
+ box_xyxy_to_xywh,
21
+ build_all_layer_point_grids,
22
+ calculate_stability_score,
23
+ coco_encode_rle,
24
+ generate_crop_boxes,
25
+ is_box_near_crop_edge,
26
+ mask_to_rle_pytorch,
27
+ MaskData,
28
+ remove_small_regions,
29
+ rle_to_mask,
30
+ uncrop_boxes_xyxy,
31
+ uncrop_masks,
32
+ uncrop_points,
33
+ )
34
+
35
+
36
+ class SAM2AutomaticMaskGenerator:
37
+ def __init__(
38
+ self,
39
+ model: SAM2Base,
40
+ points_per_side: Optional[int] = 32,
41
+ points_per_batch: int = 64,
42
+ pred_iou_thresh: float = 0.8,
43
+ stability_score_thresh: float = 0.95,
44
+ stability_score_offset: float = 1.0,
45
+ mask_threshold: float = 0.0,
46
+ box_nms_thresh: float = 0.7,
47
+ crop_n_layers: int = 0,
48
+ crop_nms_thresh: float = 0.7,
49
+ crop_overlap_ratio: float = 512 / 1500,
50
+ crop_n_points_downscale_factor: int = 1,
51
+ point_grids: Optional[List[np.ndarray]] = None,
52
+ min_mask_region_area: int = 0,
53
+ output_mode: str = "binary_mask",
54
+ use_m2m: bool = False,
55
+ multimask_output: bool = True,
56
+ ) -> None:
57
+ """
58
+ Using a SAM 2 model, generates masks for the entire image.
59
+ Generates a grid of point prompts over the image, then filters
60
+ low quality and duplicate masks. The default settings are chosen
61
+ for SAM 2 with a HieraL backbone.
62
+
63
+ Arguments:
64
+ model (Sam): The SAM 2 model to use for mask prediction.
65
+ points_per_side (int or None): The number of points to be sampled
66
+ along one side of the image. The total number of points is
67
+ points_per_side**2. If None, 'point_grids' must provide explicit
68
+ point sampling.
69
+ points_per_batch (int): Sets the number of points run simultaneously
70
+ by the model. Higher numbers may be faster but use more GPU memory.
71
+ pred_iou_thresh (float): A filtering threshold in [0,1], using the
72
+ model's predicted mask quality.
73
+ stability_score_thresh (float): A filtering threshold in [0,1], using
74
+ the stability of the mask under changes to the cutoff used to binarize
75
+ the model's mask predictions.
76
+ stability_score_offset (float): The amount to shift the cutoff when
77
+ calculating the stability score.
78
+ mask_threshold (float): Threshold for binarizing the mask logits
79
+ box_nms_thresh (float): The box IoU cutoff used by non-maximal
80
+ suppression to filter duplicate masks.
81
+ crop_n_layers (int): If >0, mask prediction will be run again on
82
+ crops of the image. Sets the number of layers to run, where each
83
+ layer has 2**i_layer number of image crops.
84
+ crop_nms_thresh (float): The box IoU cutoff used by non-maximal
85
+ suppression to filter duplicate masks between different crops.
86
+ crop_overlap_ratio (float): Sets the degree to which crops overlap.
87
+ In the first crop layer, crops will overlap by this fraction of
88
+ the image length. Later layers with more crops scale down this overlap.
89
+ crop_n_points_downscale_factor (int): The number of points-per-side
90
+ sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
91
+ point_grids (list(np.ndarray) or None): A list over explicit grids
92
+ of points used for sampling, normalized to [0,1]. The nth grid in the
93
+ list is used in the nth crop layer. Exclusive with points_per_side.
94
+ min_mask_region_area (int): If >0, postprocessing will be applied
95
+ to remove disconnected regions and holes in masks with area smaller
96
+ than min_mask_region_area. Requires opencv.
97
+ output_mode (str): The form masks are returned in. Can be 'binary_mask',
98
+ 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools.
99
+ For large resolutions, 'binary_mask' may consume large amounts of
100
+ memory.
101
+ use_m2m (bool): Whether to add a one step refinement using previous mask predictions.
102
+ multimask_output (bool): Whether to output multimask at each point of the grid.
103
+ """
104
+
105
+ assert (points_per_side is None) != (
106
+ point_grids is None
107
+ ), "Exactly one of points_per_side or point_grid must be provided."
108
+ if points_per_side is not None:
109
+ self.point_grids = build_all_layer_point_grids(
110
+ points_per_side,
111
+ crop_n_layers,
112
+ crop_n_points_downscale_factor,
113
+ )
114
+ elif point_grids is not None:
115
+ self.point_grids = point_grids
116
+ else:
117
+ raise ValueError("Can't have both points_per_side and point_grid be None.")
118
+
119
+ assert output_mode in [
120
+ "binary_mask",
121
+ "uncompressed_rle",
122
+ "coco_rle",
123
+ ], f"Unknown output_mode {output_mode}."
124
+ if output_mode == "coco_rle":
125
+ try:
126
+ from pycocotools import mask as mask_utils # type: ignore # noqa: F401
127
+ except ImportError as e:
128
+ print("Please install pycocotools")
129
+ raise e
130
+
131
+ self.predictor = SAM2ImagePredictor(
132
+ model,
133
+ max_hole_area=min_mask_region_area,
134
+ max_sprinkle_area=min_mask_region_area,
135
+ )
136
+ self.points_per_batch = points_per_batch
137
+ self.pred_iou_thresh = pred_iou_thresh
138
+ self.stability_score_thresh = stability_score_thresh
139
+ self.stability_score_offset = stability_score_offset
140
+ self.mask_threshold = mask_threshold
141
+ self.box_nms_thresh = box_nms_thresh
142
+ self.crop_n_layers = crop_n_layers
143
+ self.crop_nms_thresh = crop_nms_thresh
144
+ self.crop_overlap_ratio = crop_overlap_ratio
145
+ self.crop_n_points_downscale_factor = crop_n_points_downscale_factor
146
+ self.min_mask_region_area = min_mask_region_area
147
+ self.output_mode = output_mode
148
+ self.use_m2m = use_m2m
149
+ self.multimask_output = multimask_output
150
+
151
+ @torch.no_grad()
152
+ def generate(self, image: np.ndarray) -> List[Dict[str, Any]]:
153
+ """
154
+ Generates masks for the given image.
155
+
156
+ Arguments:
157
+ image (np.ndarray): The image to generate masks for, in HWC uint8 format.
158
+
159
+ Returns:
160
+ list(dict(str, any)): A list over records for masks. Each record is
161
+ a dict containing the following keys:
162
+ segmentation (dict(str, any) or np.ndarray): The mask. If
163
+ output_mode='binary_mask', is an array of shape HW. Otherwise,
164
+ is a dictionary containing the RLE.
165
+ bbox (list(float)): The box around the mask, in XYWH format.
166
+ area (int): The area in pixels of the mask.
167
+ predicted_iou (float): The model's own prediction of the mask's
168
+ quality. This is filtered by the pred_iou_thresh parameter.
169
+ point_coords (list(list(float))): The point coordinates input
170
+ to the model to generate this mask.
171
+ stability_score (float): A measure of the mask's quality. This
172
+ is filtered on using the stability_score_thresh parameter.
173
+ crop_box (list(float)): The crop of the image used to generate
174
+ the mask, given in XYWH format.
175
+ """
176
+
177
+ # Generate masks
178
+ mask_data = self._generate_masks(image)
179
+
180
+ # Encode masks
181
+ if self.output_mode == "coco_rle":
182
+ mask_data["segmentations"] = [
183
+ coco_encode_rle(rle) for rle in mask_data["rles"]
184
+ ]
185
+ elif self.output_mode == "binary_mask":
186
+ mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]]
187
+ else:
188
+ mask_data["segmentations"] = mask_data["rles"]
189
+
190
+ # Write mask records
191
+ curr_anns = []
192
+ for idx in range(len(mask_data["segmentations"])):
193
+ ann = {
194
+ "segmentation": mask_data["segmentations"][idx],
195
+ "area": area_from_rle(mask_data["rles"][idx]),
196
+ "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(),
197
+ "predicted_iou": mask_data["iou_preds"][idx].item(),
198
+ "point_coords": [mask_data["points"][idx].tolist()],
199
+ "stability_score": mask_data["stability_score"][idx].item(),
200
+ "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(),
201
+ }
202
+ curr_anns.append(ann)
203
+
204
+ return curr_anns
205
+
206
+ def _generate_masks(self, image: np.ndarray) -> MaskData:
207
+ orig_size = image.shape[:2]
208
+ crop_boxes, layer_idxs = generate_crop_boxes(
209
+ orig_size, self.crop_n_layers, self.crop_overlap_ratio
210
+ )
211
+
212
+ # Iterate over image crops
213
+ data = MaskData()
214
+ for crop_box, layer_idx in zip(crop_boxes, layer_idxs):
215
+ crop_data = self._process_crop(image, crop_box, layer_idx, orig_size)
216
+ data.cat(crop_data)
217
+
218
+ # Remove duplicate masks between crops
219
+ if len(crop_boxes) > 1:
220
+ # Prefer masks from smaller crops
221
+ scores = 1 / box_area(data["crop_boxes"])
222
+ scores = scores.to(data["boxes"].device)
223
+ keep_by_nms = batched_nms(
224
+ data["boxes"].float(),
225
+ scores,
226
+ torch.zeros_like(data["boxes"][:, 0]), # categories
227
+ iou_threshold=self.crop_nms_thresh,
228
+ )
229
+ data.filter(keep_by_nms)
230
+ data.to_numpy()
231
+ return data
232
+
233
+ def _process_crop(
234
+ self,
235
+ image: np.ndarray,
236
+ crop_box: List[int],
237
+ crop_layer_idx: int,
238
+ orig_size: Tuple[int, ...],
239
+ ) -> MaskData:
240
+ # Crop the image and calculate embeddings
241
+ x0, y0, x1, y1 = crop_box
242
+ cropped_im = image[y0:y1, x0:x1, :]
243
+ cropped_im_size = cropped_im.shape[:2]
244
+ self.predictor.set_image(cropped_im)
245
+
246
+ # Get points for this crop
247
+ points_scale = np.array(cropped_im_size)[None, ::-1]
248
+ points_for_image = self.point_grids[crop_layer_idx] * points_scale
249
+
250
+ # Generate masks for this crop in batches
251
+ data = MaskData()
252
+ for (points,) in batch_iterator(self.points_per_batch, points_for_image):
253
+ batch_data = self._process_batch(
254
+ points, cropped_im_size, crop_box, orig_size, normalize=True
255
+ )
256
+ data.cat(batch_data)
257
+ del batch_data
258
+ self.predictor.reset_predictor()
259
+
260
+ # Remove duplicates within this crop.
261
+ keep_by_nms = batched_nms(
262
+ data["boxes"].float(),
263
+ data["iou_preds"],
264
+ torch.zeros_like(data["boxes"][:, 0]), # categories
265
+ iou_threshold=self.box_nms_thresh,
266
+ )
267
+ data.filter(keep_by_nms)
268
+
269
+ # Return to the original image frame
270
+ data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box)
271
+ data["points"] = uncrop_points(data["points"], crop_box)
272
+ data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))])
273
+
274
+ return data
275
+
276
+ def _process_batch(
277
+ self,
278
+ points: np.ndarray,
279
+ im_size: Tuple[int, ...],
280
+ crop_box: List[int],
281
+ orig_size: Tuple[int, ...],
282
+ normalize=False,
283
+ ) -> MaskData:
284
+ orig_h, orig_w = orig_size
285
+
286
+ # Run model on this batch
287
+ points = torch.as_tensor(points, device=self.predictor.device)
288
+ in_points = self.predictor._transforms.transform_coords(
289
+ points, normalize=normalize, orig_hw=im_size
290
+ )
291
+ in_labels = torch.ones(
292
+ in_points.shape[0], dtype=torch.int, device=in_points.device
293
+ )
294
+ masks, iou_preds, low_res_masks = self.predictor._predict(
295
+ in_points[:, None, :],
296
+ in_labels[:, None],
297
+ multimask_output=self.multimask_output,
298
+ return_logits=True,
299
+ )
300
+
301
+ # Serialize predictions and store in MaskData
302
+ data = MaskData(
303
+ masks=masks.flatten(0, 1),
304
+ iou_preds=iou_preds.flatten(0, 1),
305
+ points=points.repeat_interleave(masks.shape[1], dim=0),
306
+ low_res_masks=low_res_masks.flatten(0, 1),
307
+ )
308
+ del masks
309
+
310
+ if not self.use_m2m:
311
+ # Filter by predicted IoU
312
+ if self.pred_iou_thresh > 0.0:
313
+ keep_mask = data["iou_preds"] > self.pred_iou_thresh
314
+ data.filter(keep_mask)
315
+
316
+ # Calculate and filter by stability score
317
+ data["stability_score"] = calculate_stability_score(
318
+ data["masks"], self.mask_threshold, self.stability_score_offset
319
+ )
320
+ if self.stability_score_thresh > 0.0:
321
+ keep_mask = data["stability_score"] >= self.stability_score_thresh
322
+ data.filter(keep_mask)
323
+ else:
324
+ # One step refinement using previous mask predictions
325
+ in_points = self.predictor._transforms.transform_coords(
326
+ data["points"], normalize=normalize, orig_hw=im_size
327
+ )
328
+ labels = torch.ones(
329
+ in_points.shape[0], dtype=torch.int, device=in_points.device
330
+ )
331
+ masks, ious = self.refine_with_m2m(
332
+ in_points, labels, data["low_res_masks"], self.points_per_batch
333
+ )
334
+ data["masks"] = masks.squeeze(1)
335
+ data["iou_preds"] = ious.squeeze(1)
336
+
337
+ if self.pred_iou_thresh > 0.0:
338
+ keep_mask = data["iou_preds"] > self.pred_iou_thresh
339
+ data.filter(keep_mask)
340
+
341
+ data["stability_score"] = calculate_stability_score(
342
+ data["masks"], self.mask_threshold, self.stability_score_offset
343
+ )
344
+ if self.stability_score_thresh > 0.0:
345
+ keep_mask = data["stability_score"] >= self.stability_score_thresh
346
+ data.filter(keep_mask)
347
+
348
+ # Threshold masks and calculate boxes
349
+ data["masks"] = data["masks"] > self.mask_threshold
350
+ data["boxes"] = batched_mask_to_box(data["masks"])
351
+
352
+ # Filter boxes that touch crop boundaries
353
+ keep_mask = ~is_box_near_crop_edge(
354
+ data["boxes"], crop_box, [0, 0, orig_w, orig_h]
355
+ )
356
+ if not torch.all(keep_mask):
357
+ data.filter(keep_mask)
358
+
359
+ # Compress to RLE
360
+ data["masks"] = uncrop_masks(data["masks"], crop_box, orig_h, orig_w)
361
+ data["rles"] = mask_to_rle_pytorch(data["masks"])
362
+ del data["masks"]
363
+
364
+ return data
365
+
366
+ @staticmethod
367
+ def postprocess_small_regions(
368
+ mask_data: MaskData, min_area: int, nms_thresh: float
369
+ ) -> MaskData:
370
+ """
371
+ Removes small disconnected regions and holes in masks, then reruns
372
+ box NMS to remove any new duplicates.
373
+
374
+ Edits mask_data in place.
375
+
376
+ Requires open-cv as a dependency.
377
+ """
378
+ if len(mask_data["rles"]) == 0:
379
+ return mask_data
380
+
381
+ # Filter small disconnected regions and holes
382
+ new_masks = []
383
+ scores = []
384
+ for rle in mask_data["rles"]:
385
+ mask = rle_to_mask(rle)
386
+
387
+ mask, changed = remove_small_regions(mask, min_area, mode="holes")
388
+ unchanged = not changed
389
+ mask, changed = remove_small_regions(mask, min_area, mode="islands")
390
+ unchanged = unchanged and not changed
391
+
392
+ new_masks.append(torch.as_tensor(mask).unsqueeze(0))
393
+ # Give score=0 to changed masks and score=1 to unchanged masks
394
+ # so NMS will prefer ones that didn't need postprocessing
395
+ scores.append(float(unchanged))
396
+
397
+ # Recalculate boxes and remove any new duplicates
398
+ masks = torch.cat(new_masks, dim=0)
399
+ boxes = batched_mask_to_box(masks)
400
+ keep_by_nms = batched_nms(
401
+ boxes.float(),
402
+ torch.as_tensor(scores),
403
+ torch.zeros_like(boxes[:, 0]), # categories
404
+ iou_threshold=nms_thresh,
405
+ )
406
+
407
+ # Only recalculate RLEs for masks that have changed
408
+ for i_mask in keep_by_nms:
409
+ if scores[i_mask] == 0.0:
410
+ mask_torch = masks[i_mask].unsqueeze(0)
411
+ mask_data["rles"][i_mask] = mask_to_rle_pytorch(mask_torch)[0]
412
+ mask_data["boxes"][i_mask] = boxes[i_mask] # update res directly
413
+ mask_data.filter(keep_by_nms)
414
+
415
+ return mask_data
416
+
417
+ def refine_with_m2m(self, points, point_labels, low_res_masks, points_per_batch):
418
+ new_masks = []
419
+ new_iou_preds = []
420
+
421
+ for cur_points, cur_point_labels, low_res_mask in batch_iterator(
422
+ points_per_batch, points, point_labels, low_res_masks
423
+ ):
424
+ best_masks, best_iou_preds, _ = self.predictor._predict(
425
+ cur_points[:, None, :],
426
+ cur_point_labels[:, None],
427
+ mask_input=low_res_mask[:, None, :],
428
+ multimask_output=False,
429
+ return_logits=True,
430
+ )
431
+ new_masks.append(best_masks)
432
+ new_iou_preds.append(best_iou_preds)
433
+ masks = torch.cat(new_masks, dim=0)
434
+ return masks, torch.cat(new_iou_preds, dim=0)
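For reference, a minimal usage sketch for the mask generator above, assuming package-style imports and the checkpoint uploaded in this commit; the config name is an assumption, and any real RGB image (HWC, uint8) can replace the dummy array:

```python
import numpy as np
from sam2.build_sam import build_sam2
from sam2.automatic_mask_generator import SAM2AutomaticMaskGenerator

# Paths and config name are assumptions for illustration only.
model = build_sam2("sam2_hiera_l.yaml", "SAM2/checkpoints/sam2_hiera_large.pt", device="cuda")
mask_generator = SAM2AutomaticMaskGenerator(model, points_per_side=32, pred_iou_thresh=0.8)

image = np.zeros((480, 640, 3), dtype=np.uint8)  # HWC uint8, as generate() expects
masks = mask_generator.generate(image)           # list of dicts: segmentation, bbox, area, ...
print(len(masks))
```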
SAM2/sam2/build_sam.py ADDED
@@ -0,0 +1,90 @@
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
+ # All rights reserved.
+
+ # This source code is licensed under the license found in the
+ # LICENSE file in the root directory of this source tree.
+
+ import logging
+
+ import torch
+ from hydra import compose
+ from hydra.utils import instantiate
+ from omegaconf import OmegaConf
+
+
+ def build_sam2(
+     config_file,
+     ckpt_path=None,
+     device="cuda",
+     mode="eval",
+     hydra_overrides_extra=[],
+     apply_postprocessing=True,
+ ):
+
+     if apply_postprocessing:
+         hydra_overrides_extra = hydra_overrides_extra.copy()
+         hydra_overrides_extra += [
+             # dynamically fall back to multi-mask if the single mask is not stable
+             "++model.sam_mask_decoder_extra_args.dynamic_multimask_via_stability=true",
+             "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_delta=0.05",
+             "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_thresh=0.98",
+         ]
+     # Read config and init model
+     cfg = compose(config_name=config_file, overrides=hydra_overrides_extra)
+     OmegaConf.resolve(cfg)
+     model = instantiate(cfg.model, _recursive_=True)
+     _load_checkpoint(model, ckpt_path)
+     model = model.to(device)
+     if mode == "eval":
+         model.eval()
+     return model
+
+
+ def build_sam2_video_predictor(
+     config_file,
+     ckpt_path=None,
+     device="cuda",
+     mode="eval",
+     hydra_overrides_extra=[],
+     apply_postprocessing=True,
+ ):
+     print('... loading SAM2_Video from', ckpt_path)
+     hydra_overrides = [
+         "++model._target_=sam2.sam2_video_predictor.SAM2VideoPredictor",
+     ]
+     if apply_postprocessing:
+         hydra_overrides_extra = hydra_overrides_extra.copy()
+         hydra_overrides_extra += [
+             # dynamically fall back to multi-mask if the single mask is not stable
+             "++model.sam_mask_decoder_extra_args.dynamic_multimask_via_stability=true",
+             "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_delta=0.05",
+             "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_thresh=0.98",
+             # binarize the sigmoid mask logits on interacted frames with clicks in the memory encoder so that the encoded masks are exactly as what users see from clicking
+             "++model.binarize_mask_from_pts_for_mem_enc=true",
+             # fill small holes in the low-res masks up to `fill_hole_area` (before resizing them to the original video resolution)
+             "++model.fill_hole_area=8",
+         ]
+     hydra_overrides.extend(hydra_overrides_extra)
+
+     # Read config and init model
+     cfg = compose(config_name=config_file, overrides=hydra_overrides)
+     OmegaConf.resolve(cfg)
+     model = instantiate(cfg.model, _recursive_=True)
+     _load_checkpoint(model, ckpt_path)
+     model = model.to(device)
+     if mode == "eval":
+         model.eval()
+     return model
+
+
+ def _load_checkpoint(model, ckpt_path):
+     if ckpt_path is not None:
+         sd = torch.load(ckpt_path, map_location="cpu")["model"]
+         missing_keys, unexpected_keys = model.load_state_dict(sd)
+         if missing_keys:
+             logging.error(missing_keys)
+             raise RuntimeError()
+         if unexpected_keys:
+             logging.error(unexpected_keys)
+             raise RuntimeError()
+         logging.info("Loaded checkpoint successfully")
SAM2/sam2/csrc/connected_components.cu ADDED
@@ -0,0 +1,289 @@
1
+ // Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ // All rights reserved.
3
+
4
+ // This source code is licensed under the license found in the
5
+ // LICENSE file in the root directory of this source tree.
6
+
7
+ // adapted from https://github.com/zsef123/Connected_components_PyTorch
8
+ // with license found in the LICENSE_cctorch file in the root directory.
9
+ #include <ATen/cuda/CUDAContext.h>
10
+ #include <cuda.h>
11
+ #include <cuda_runtime.h>
12
+ #include <torch/extension.h>
13
+ #include <torch/script.h>
14
+ #include <vector>
15
+
16
+ // 2d
17
+ #define BLOCK_ROWS 16
18
+ #define BLOCK_COLS 16
19
+
20
+ namespace cc2d {
21
+
22
+ template <typename T>
23
+ __device__ __forceinline__ unsigned char hasBit(T bitmap, unsigned char pos) {
24
+ return (bitmap >> pos) & 1;
25
+ }
26
+
27
+ __device__ int32_t find(const int32_t* s_buf, int32_t n) {
28
+ while (s_buf[n] != n)
29
+ n = s_buf[n];
30
+ return n;
31
+ }
32
+
33
+ __device__ int32_t find_n_compress(int32_t* s_buf, int32_t n) {
34
+ const int32_t id = n;
35
+ while (s_buf[n] != n) {
36
+ n = s_buf[n];
37
+ s_buf[id] = n;
38
+ }
39
+ return n;
40
+ }
41
+
42
+ __device__ void union_(int32_t* s_buf, int32_t a, int32_t b) {
43
+ bool done;
44
+ do {
45
+ a = find(s_buf, a);
46
+ b = find(s_buf, b);
47
+
48
+ if (a < b) {
49
+ int32_t old = atomicMin(s_buf + b, a);
50
+ done = (old == b);
51
+ b = old;
52
+ } else if (b < a) {
53
+ int32_t old = atomicMin(s_buf + a, b);
54
+ done = (old == a);
55
+ a = old;
56
+ } else
57
+ done = true;
58
+
59
+ } while (!done);
60
+ }
61
+
62
+ __global__ void
63
+ init_labeling(int32_t* label, const uint32_t W, const uint32_t H) {
64
+ const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
65
+ const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
66
+ const uint32_t idx = row * W + col;
67
+
68
+ if (row < H && col < W)
69
+ label[idx] = idx;
70
+ }
71
+
72
+ __global__ void
73
+ merge(uint8_t* img, int32_t* label, const uint32_t W, const uint32_t H) {
74
+ const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
75
+ const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
76
+ const uint32_t idx = row * W + col;
77
+
78
+ if (row >= H || col >= W)
79
+ return;
80
+
81
+ uint32_t P = 0;
82
+
83
+ if (img[idx])
84
+ P |= 0x777;
85
+ if (row + 1 < H && img[idx + W])
86
+ P |= 0x777 << 4;
87
+ if (col + 1 < W && img[idx + 1])
88
+ P |= 0x777 << 1;
89
+
90
+ if (col == 0)
91
+ P &= 0xEEEE;
92
+ if (col + 1 >= W)
93
+ P &= 0x3333;
94
+ else if (col + 2 >= W)
95
+ P &= 0x7777;
96
+
97
+ if (row == 0)
98
+ P &= 0xFFF0;
99
+ if (row + 1 >= H)
100
+ P &= 0xFF;
101
+
102
+ if (P > 0) {
103
+ // If need check about top-left pixel(if flag the first bit) and hit the
104
+ // top-left pixel
105
+ if (hasBit(P, 0) && img[idx - W - 1]) {
106
+ union_(label, idx, idx - 2 * W - 2); // top left block
107
+ }
108
+
109
+ if ((hasBit(P, 1) && img[idx - W]) || (hasBit(P, 2) && img[idx - W + 1]))
110
+ union_(label, idx, idx - 2 * W); // top bottom block
111
+
112
+ if (hasBit(P, 3) && img[idx + 2 - W])
113
+ union_(label, idx, idx - 2 * W + 2); // top right block
114
+
115
+ if ((hasBit(P, 4) && img[idx - 1]) || (hasBit(P, 8) && img[idx + W - 1]))
116
+ union_(label, idx, idx - 2); // just left block
117
+ }
118
+ }
119
+
120
+ __global__ void compression(int32_t* label, const int32_t W, const int32_t H) {
121
+ const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
122
+ const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
123
+ const uint32_t idx = row * W + col;
124
+
125
+ if (row < H && col < W)
126
+ find_n_compress(label, idx);
127
+ }
128
+
129
+ __global__ void final_labeling(
130
+ const uint8_t* img,
131
+ int32_t* label,
132
+ const int32_t W,
133
+ const int32_t H) {
134
+ const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
135
+ const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
136
+ const uint32_t idx = row * W + col;
137
+
138
+ if (row >= H || col >= W)
139
+ return;
140
+
141
+ int32_t y = label[idx] + 1;
142
+
143
+ if (img[idx])
144
+ label[idx] = y;
145
+ else
146
+ label[idx] = 0;
147
+
148
+ if (col + 1 < W) {
149
+ if (img[idx + 1])
150
+ label[idx + 1] = y;
151
+ else
152
+ label[idx + 1] = 0;
153
+
154
+ if (row + 1 < H) {
155
+ if (img[idx + W + 1])
156
+ label[idx + W + 1] = y;
157
+ else
158
+ label[idx + W + 1] = 0;
159
+ }
160
+ }
161
+
162
+ if (row + 1 < H) {
163
+ if (img[idx + W])
164
+ label[idx + W] = y;
165
+ else
166
+ label[idx + W] = 0;
167
+ }
168
+ }
169
+
170
+ __global__ void init_counting(
171
+ const int32_t* label,
172
+ int32_t* count_init,
173
+ const int32_t W,
174
+ const int32_t H) {
175
+ const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y);
176
+ const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x);
177
+ const uint32_t idx = row * W + col;
178
+
179
+ if (row >= H || col >= W)
180
+ return;
181
+
182
+ int32_t y = label[idx];
183
+ if (y > 0) {
184
+ int32_t count_idx = y - 1;
185
+ atomicAdd(count_init + count_idx, 1);
186
+ }
187
+ }
188
+
189
+ __global__ void final_counting(
190
+ const int32_t* label,
191
+ const int32_t* count_init,
192
+ int32_t* count_final,
193
+ const int32_t W,
194
+ const int32_t H) {
195
+ const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y);
196
+ const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x);
197
+ const uint32_t idx = row * W + col;
198
+
199
+ if (row >= H || col >= W)
200
+ return;
201
+
202
+ int32_t y = label[idx];
203
+ if (y > 0) {
204
+ int32_t count_idx = y - 1;
205
+ count_final[idx] = count_init[count_idx];
206
+ } else {
207
+ count_final[idx] = 0;
208
+ }
209
+ }
210
+
211
+ } // namespace cc2d
212
+
213
+ std::vector<torch::Tensor> get_connected_componnets(
214
+ const torch::Tensor& inputs) {
215
+ AT_ASSERTM(inputs.is_cuda(), "inputs must be a CUDA tensor");
216
+ AT_ASSERTM(inputs.ndimension() == 4, "inputs must be [N, 1, H, W] shape");
217
+ AT_ASSERTM(
218
+ inputs.scalar_type() == torch::kUInt8, "inputs must be a uint8 type");
219
+
220
+ const uint32_t N = inputs.size(0);
221
+ const uint32_t C = inputs.size(1);
222
+ const uint32_t H = inputs.size(2);
223
+ const uint32_t W = inputs.size(3);
224
+
225
+ AT_ASSERTM(C == 1, "inputs must be [N, 1, H, W] shape");
226
+ AT_ASSERTM((H % 2) == 0, "height must be an even number");
227
+ AT_ASSERTM((W % 2) == 0, "width must be an even number");
228
+
229
+ // label must be uint32_t
230
+ auto label_options =
231
+ torch::TensorOptions().dtype(torch::kInt32).device(inputs.device());
232
+ torch::Tensor labels = torch::zeros({N, C, H, W}, label_options);
233
+ torch::Tensor counts_init = torch::zeros({N, C, H, W}, label_options);
234
+ torch::Tensor counts_final = torch::zeros({N, C, H, W}, label_options);
235
+
236
+ dim3 grid = dim3(
237
+ ((W + 1) / 2 + BLOCK_COLS - 1) / BLOCK_COLS,
238
+ ((H + 1) / 2 + BLOCK_ROWS - 1) / BLOCK_ROWS);
239
+ dim3 block = dim3(BLOCK_COLS, BLOCK_ROWS);
240
+ dim3 grid_count =
241
+ dim3((W + BLOCK_COLS) / BLOCK_COLS, (H + BLOCK_ROWS) / BLOCK_ROWS);
242
+ dim3 block_count = dim3(BLOCK_COLS, BLOCK_ROWS);
243
+ cudaStream_t stream = at::cuda::getCurrentCUDAStream();
244
+
245
+ for (int n = 0; n < N; n++) {
246
+ uint32_t offset = n * H * W;
247
+
248
+ cc2d::init_labeling<<<grid, block, 0, stream>>>(
249
+ labels.data_ptr<int32_t>() + offset, W, H);
250
+ cc2d::merge<<<grid, block, 0, stream>>>(
251
+ inputs.data_ptr<uint8_t>() + offset,
252
+ labels.data_ptr<int32_t>() + offset,
253
+ W,
254
+ H);
255
+ cc2d::compression<<<grid, block, 0, stream>>>(
256
+ labels.data_ptr<int32_t>() + offset, W, H);
257
+ cc2d::final_labeling<<<grid, block, 0, stream>>>(
258
+ inputs.data_ptr<uint8_t>() + offset,
259
+ labels.data_ptr<int32_t>() + offset,
260
+ W,
261
+ H);
262
+
263
+ // get the counting of each pixel
264
+ cc2d::init_counting<<<grid_count, block_count, 0, stream>>>(
265
+ labels.data_ptr<int32_t>() + offset,
266
+ counts_init.data_ptr<int32_t>() + offset,
267
+ W,
268
+ H);
269
+ cc2d::final_counting<<<grid_count, block_count, 0, stream>>>(
270
+ labels.data_ptr<int32_t>() + offset,
271
+ counts_init.data_ptr<int32_t>() + offset,
272
+ counts_final.data_ptr<int32_t>() + offset,
273
+ W,
274
+ H);
275
+ }
276
+
277
+ // returned values are [labels, counts]
278
+ std::vector<torch::Tensor> outputs;
279
+ outputs.push_back(labels);
280
+ outputs.push_back(counts_final);
281
+ return outputs;
282
+ }
283
+
284
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
285
+ m.def(
286
+ "get_connected_componnets",
287
+ &get_connected_componnets,
288
+ "get_connected_componnets");
289
+ }
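These kernels are exposed through the PYBIND11 binding at the bottom of the file and ship pre-compiled as `_C.pyd` in this commit. A hedged calling sketch from Python; the import path and a working CUDA build are assumptions:

```python
import torch
from sam2 import _C  # import path assumed from the package layout in this commit

# Input must be a [N, 1, H, W] uint8 CUDA tensor with even H and W.
masks = (torch.rand(2, 1, 64, 64, device="cuda") > 0.5).to(torch.uint8)
labels, counts = _C.get_connected_componnets(masks)  # spelling matches the binding name
print(labels.shape, counts.shape)  # per-pixel component ids and per-pixel component sizes
```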
SAM2/sam2/modeling/__init__.py ADDED
@@ -0,0 +1,5 @@
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
+ # All rights reserved.
+
+ # This source code is licensed under the license found in the
+ # LICENSE file in the root directory of this source tree.
SAM2/sam2/modeling/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (173 Bytes).
SAM2/sam2/modeling/__pycache__/memory_attention.cpython-310.pyc ADDED
Binary file (3.99 kB).
SAM2/sam2/modeling/__pycache__/memory_encoder.cpython-310.pyc ADDED
Binary file (4.99 kB).
SAM2/sam2/modeling/__pycache__/position_encoding.cpython-310.pyc ADDED
Binary file (7.48 kB).
SAM2/sam2/modeling/__pycache__/sam2_base.cpython-310.pyc ADDED
Binary file (17.5 kB).
SAM2/sam2/modeling/__pycache__/sam2_utils.cpython-310.pyc ADDED
Binary file (6.05 kB).
SAM2/sam2/modeling/backbones/__init__.py ADDED
@@ -0,0 +1,5 @@
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
+ # All rights reserved.
+
+ # This source code is licensed under the license found in the
+ # LICENSE file in the root directory of this source tree.
SAM2/sam2/modeling/backbones/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (183 Bytes).
SAM2/sam2/modeling/backbones/__pycache__/hieradet.cpython-310.pyc ADDED
Binary file (7.01 kB).
SAM2/sam2/modeling/backbones/__pycache__/image_encoder.cpython-310.pyc ADDED
Binary file (3.44 kB).
SAM2/sam2/modeling/backbones/__pycache__/utils.cpython-310.pyc ADDED
Binary file (3.25 kB).
SAM2/sam2/modeling/backbones/hieradet.py ADDED
@@ -0,0 +1,295 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ from functools import partial
8
+ from typing import List, Tuple, Union
9
+
10
+ import torch
11
+ import torch.nn as nn
12
+ import torch.nn.functional as F
13
+
14
+ from sam2.modeling.backbones.utils import (
15
+ PatchEmbed,
16
+ window_partition,
17
+ window_unpartition,
18
+ )
19
+
20
+ from sam2.modeling.sam2_utils import DropPath, MLP
21
+
22
+
23
+ def do_pool(x: torch.Tensor, pool: nn.Module, norm: nn.Module = None) -> torch.Tensor:
24
+ if pool is None:
25
+ return x
26
+ # (B, H, W, C) -> (B, C, H, W)
27
+ x = x.permute(0, 3, 1, 2)
28
+ x = pool(x)
29
+ # (B, C, H', W') -> (B, H', W', C)
30
+ x = x.permute(0, 2, 3, 1)
31
+ if norm:
32
+ x = norm(x)
33
+
34
+ return x
35
+
36
+
37
+ class MultiScaleAttention(nn.Module):
38
+ def __init__(
39
+ self,
40
+ dim: int,
41
+ dim_out: int,
42
+ num_heads: int,
43
+ q_pool: nn.Module = None,
44
+ ):
45
+ super().__init__()
46
+
47
+ self.dim = dim
48
+ self.dim_out = dim_out
49
+
50
+ self.num_heads = num_heads
51
+ head_dim = dim_out // num_heads
52
+ self.scale = head_dim**-0.5
53
+
54
+ self.q_pool = q_pool
55
+ self.qkv = nn.Linear(dim, dim_out * 3)
56
+ self.proj = nn.Linear(dim_out, dim_out)
57
+
58
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
59
+ B, H, W, _ = x.shape
60
+ # qkv with shape (B, H * W, 3, nHead, C)
61
+ qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1)
62
+ # q, k, v with shape (B, H * W, nheads, C)
63
+ q, k, v = torch.unbind(qkv, 2)
64
+
65
+ # Q pooling (for downsample at stage changes)
66
+ if self.q_pool:
67
+ q = do_pool(q.reshape(B, H, W, -1), self.q_pool)
68
+ H, W = q.shape[1:3] # downsampled shape
69
+ q = q.reshape(B, H * W, self.num_heads, -1)
70
+
71
+ # Torch's SDPA expects [B, nheads, H*W, C] so we transpose
72
+ x = F.scaled_dot_product_attention(
73
+ q.transpose(1, 2),
74
+ k.transpose(1, 2),
75
+ v.transpose(1, 2),
76
+ )
77
+ # Transpose back
78
+ x = x.transpose(1, 2)
79
+ x = x.reshape(B, H, W, -1)
80
+
81
+ x = self.proj(x)
82
+
83
+ return x
84
+
85
+
86
+ class MultiScaleBlock(nn.Module):
87
+ def __init__(
88
+ self,
89
+ dim: int,
90
+ dim_out: int,
91
+ num_heads: int,
92
+ mlp_ratio: float = 4.0,
93
+ drop_path: float = 0.0,
94
+ norm_layer: Union[nn.Module, str] = "LayerNorm",
95
+ q_stride: Tuple[int, int] = None,
96
+ act_layer: nn.Module = nn.GELU,
97
+ window_size: int = 0,
98
+ ):
99
+ super().__init__()
100
+
101
+ if isinstance(norm_layer, str):
102
+ norm_layer = partial(getattr(nn, norm_layer), eps=1e-6)
103
+
104
+ self.dim = dim
105
+ self.dim_out = dim_out
106
+ self.norm1 = norm_layer(dim)
107
+
108
+ self.window_size = window_size
109
+
110
+ self.pool, self.q_stride = None, q_stride
111
+ if self.q_stride:
112
+ self.pool = nn.MaxPool2d(
113
+ kernel_size=q_stride, stride=q_stride, ceil_mode=False
114
+ )
115
+
116
+ self.attn = MultiScaleAttention(
117
+ dim,
118
+ dim_out,
119
+ num_heads=num_heads,
120
+ q_pool=self.pool,
121
+ )
122
+ self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
123
+
124
+ self.norm2 = norm_layer(dim_out)
125
+ self.mlp = MLP(
126
+ dim_out,
127
+ int(dim_out * mlp_ratio),
128
+ dim_out,
129
+ num_layers=2,
130
+ activation=act_layer,
131
+ )
132
+
133
+ if dim != dim_out:
134
+ self.proj = nn.Linear(dim, dim_out)
135
+
136
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
137
+ shortcut = x # B, H, W, C
138
+ x = self.norm1(x)
139
+
140
+ # Skip connection
141
+ if self.dim != self.dim_out:
142
+ shortcut = do_pool(self.proj(x), self.pool)
143
+
144
+ # Window partition
145
+ window_size = self.window_size
146
+ if window_size > 0:
147
+ H, W = x.shape[1], x.shape[2]
148
+ x, pad_hw = window_partition(x, window_size)
149
+
150
+ # Window Attention + Q Pooling (if stage change)
151
+ x = self.attn(x)
152
+ if self.q_stride:
153
+ # Shapes have changed due to Q pooling
154
+ window_size = self.window_size // self.q_stride[0]
155
+ H, W = shortcut.shape[1:3]
156
+
157
+ pad_h = (window_size - H % window_size) % window_size
158
+ pad_w = (window_size - W % window_size) % window_size
159
+ pad_hw = (H + pad_h, W + pad_w)
160
+
161
+ # Reverse window partition
162
+ if self.window_size > 0:
163
+ x = window_unpartition(x, window_size, pad_hw, (H, W))
164
+
165
+ x = shortcut + self.drop_path(x)
166
+ # MLP
167
+ x = x + self.drop_path(self.mlp(self.norm2(x)))
168
+ return x
169
+
170
+
171
+ class Hiera(nn.Module):
172
+ """
173
+ Reference: https://arxiv.org/abs/2306.00989
174
+ """
175
+
176
+ def __init__(
177
+ self,
178
+ embed_dim: int = 96, # initial embed dim
179
+ num_heads: int = 1, # initial number of heads
180
+ drop_path_rate: float = 0.0, # stochastic depth
181
+ q_pool: int = 3, # number of q_pool stages
182
+ q_stride: Tuple[int, int] = (2, 2), # downsample stride bet. stages
183
+ stages: Tuple[int, ...] = (2, 3, 16, 3), # blocks per stage
184
+ dim_mul: float = 2.0, # dim_mul factor at stage shift
185
+ head_mul: float = 2.0, # head_mul factor at stage shift
186
+ window_pos_embed_bkg_spatial_size: Tuple[int, int] = (14, 14),
187
+ # window size per stage, when not using global att.
188
+ window_spec: Tuple[int, ...] = (
189
+ 8,
190
+ 4,
191
+ 14,
192
+ 7,
193
+ ),
194
+ # global attn in these blocks
195
+ global_att_blocks: Tuple[int, ...] = (
196
+ 12,
197
+ 16,
198
+ 20,
199
+ ),
200
+ return_interm_layers=True, # return feats from every stage
201
+ ):
202
+ super().__init__()
203
+
204
+ assert len(stages) == len(window_spec)
205
+ self.window_spec = window_spec
206
+
207
+ depth = sum(stages)
208
+ self.q_stride = q_stride
209
+ self.stage_ends = [sum(stages[:i]) - 1 for i in range(1, len(stages) + 1)]
210
+ assert 0 <= q_pool <= len(self.stage_ends[:-1])
211
+ self.q_pool_blocks = [x + 1 for x in self.stage_ends[:-1]][:q_pool]
212
+ self.return_interm_layers = return_interm_layers
213
+
214
+ self.patch_embed = PatchEmbed(
215
+ embed_dim=embed_dim,
216
+ )
217
+ # Which blocks have global att?
218
+ self.global_att_blocks = global_att_blocks
219
+
220
+ # Windowed positional embedding (https://arxiv.org/abs/2311.05613)
221
+ self.window_pos_embed_bkg_spatial_size = window_pos_embed_bkg_spatial_size
222
+ self.pos_embed = nn.Parameter(
223
+ torch.zeros(1, embed_dim, *self.window_pos_embed_bkg_spatial_size)
224
+ )
225
+ self.pos_embed_window = nn.Parameter(
226
+ torch.zeros(1, embed_dim, self.window_spec[0], self.window_spec[0])
227
+ )
228
+
229
+ dpr = [
230
+ x.item() for x in torch.linspace(0, drop_path_rate, depth)
231
+ ] # stochastic depth decay rule
232
+
233
+ cur_stage = 1
234
+ self.blocks = nn.ModuleList()
235
+
236
+ for i in range(depth):
237
+ dim_out = embed_dim
238
+ # lags by a block, so first block of
239
+ # next stage uses an initial window size
240
+ # of previous stage and final window size of current stage
241
+ window_size = self.window_spec[cur_stage - 1]
242
+
243
+ if self.global_att_blocks is not None:
244
+ window_size = 0 if i in self.global_att_blocks else window_size
245
+
246
+ if i - 1 in self.stage_ends:
247
+ dim_out = int(embed_dim * dim_mul)
248
+ num_heads = int(num_heads * head_mul)
249
+ cur_stage += 1
250
+
251
+ block = MultiScaleBlock(
252
+ dim=embed_dim,
253
+ dim_out=dim_out,
254
+ num_heads=num_heads,
255
+ drop_path=dpr[i],
256
+ q_stride=self.q_stride if i in self.q_pool_blocks else None,
257
+ window_size=window_size,
258
+ )
259
+
260
+ embed_dim = dim_out
261
+ self.blocks.append(block)
262
+
263
+ self.channel_list = (
264
+ [self.blocks[i].dim_out for i in self.stage_ends[::-1]]
265
+ if return_interm_layers
266
+ else [self.blocks[-1].dim_out]
267
+ )
268
+
269
+ def _get_pos_embed(self, hw: Tuple[int, int]) -> torch.Tensor:
270
+ h, w = hw
271
+ window_embed = self.pos_embed_window
272
+ pos_embed = F.interpolate(self.pos_embed, size=(h, w), mode="bicubic")
273
+ pos_embed = pos_embed + window_embed.tile(
274
+ [x // y for x, y in zip(pos_embed.shape, window_embed.shape)]
275
+ )
276
+ pos_embed = pos_embed.permute(0, 2, 3, 1)
277
+ return pos_embed
278
+
279
+ def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
280
+ x = self.patch_embed(x) # patch embedding
281
+ # x: (B, H, W, C)
282
+
283
+ # Add pos embed
284
+ x = x + self._get_pos_embed(x.shape[1:3]) # add positional embedding
285
+
286
+ outputs = []
287
+ for i, blk in enumerate(self.blocks):
288
+ x = blk(x)
289
+ if (i == self.stage_ends[-1]) or (
290
+ i in self.stage_ends and self.return_interm_layers
291
+ ):
292
+ feats = x.permute(0, 3, 1, 2)
293
+ outputs.append(feats)
294
+
295
+ return outputs # one output per stage (4 stages in total)
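A small forward-pass sketch for the Hiera trunk above with its default configuration; the input size is an assumption, and the shapes follow from the stride-4 patch embed and the three 2x query-pooling stages:

```python
import torch
from sam2.modeling.backbones.hieradet import Hiera

trunk = Hiera()                   # defaults: embed_dim=96, stages=(2, 3, 16, 3)
x = torch.randn(1, 3, 512, 512)   # input resolution is an assumption
feats = trunk(x)                  # one (B, C, H, W) tensor per stage
for f in feats:
    print(tuple(f.shape))
# Expected: (1, 96, 128, 128), (1, 192, 64, 64), (1, 384, 32, 32), (1, 768, 16, 16)
```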
SAM2/sam2/modeling/backbones/image_encoder.py ADDED
@@ -0,0 +1,133 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ from typing import List, Optional
8
+
9
+ import torch
10
+ import torch.nn as nn
11
+ import torch.nn.functional as F
12
+
13
+
14
+ class ImageEncoder(nn.Module):
15
+ def __init__(
16
+ self,
17
+ trunk: nn.Module,
18
+ neck: nn.Module,
19
+ scalp: int = 0,
20
+ ):
21
+ super().__init__()
22
+ self.trunk = trunk
23
+ self.neck = neck
24
+ self.scalp = scalp
25
+ assert (
26
+ self.trunk.channel_list == self.neck.backbone_channel_list
27
+ ), f"Channel dims of trunk and neck do not match. Trunk: {self.trunk.channel_list}, neck: {self.neck.backbone_channel_list}"
28
+
29
+ def forward(self, sample: torch.Tensor):
30
+ # Forward through backbone
31
+ features, pos = self.neck(self.trunk(sample)) # one feature map per backbone stage (4 stages)
32
+ if self.scalp > 0:
33
+ # Discard the lowest resolution features
34
+ features, pos = features[: -self.scalp], pos[: -self.scalp] # of the 4 multi-resolution stage features, discard the `scalp` lowest-resolution ones
35
+
36
+ src = features[-1]
37
+ output = {
38
+ "vision_features": src,
39
+ "vision_pos_enc": pos,
40
+ "backbone_fpn": features,
41
+ }
42
+ return output
43
+
44
+
45
+ class FpnNeck(nn.Module):
46
+ """
47
+ A modified variant of Feature Pyramid Network (FPN) neck
48
+ (we remove output conv and also do bicubic interpolation similar to ViT
49
+ pos embed interpolation)
50
+ """
51
+
52
+ def __init__(
53
+ self,
54
+ position_encoding: nn.Module,
55
+ d_model: int,
56
+ backbone_channel_list: List[int],
57
+ kernel_size: int = 1,
58
+ stride: int = 1,
59
+ padding: int = 0,
60
+ fpn_interp_model: str = "bilinear",
61
+ fuse_type: str = "sum",
62
+ fpn_top_down_levels: Optional[List[int]] = None,
63
+ ):
64
+ """Initialize the neck
65
+ :param trunk: the backbone
66
+ :param position_encoding: the positional encoding to use
67
+ :param d_model: the dimension of the model
68
+ :param neck_norm: the normalization to use
69
+ """
70
+ super().__init__()
71
+ self.position_encoding = position_encoding
72
+ self.convs = nn.ModuleList()
73
+ self.backbone_channel_list = backbone_channel_list
74
+ for dim in backbone_channel_list:
75
+ current = nn.Sequential()
76
+ current.add_module(
77
+ "conv",
78
+ nn.Conv2d(
79
+ in_channels=dim,
80
+ out_channels=d_model,
81
+ kernel_size=kernel_size,
82
+ stride=stride,
83
+ padding=padding,
84
+ ),
85
+ )
86
+
87
+ self.convs.append(current)
88
+ self.fpn_interp_model = fpn_interp_model
89
+ assert fuse_type in ["sum", "avg"]
90
+ self.fuse_type = fuse_type
91
+
92
+ # levels to have top-down features in its outputs
93
+ # e.g. if fpn_top_down_levels is [2, 3], then only outputs of level 2 and 3
94
+ # have top-down propagation, while outputs of level 0 and level 1 have only
95
+ # lateral features from the same backbone level.
96
+ if fpn_top_down_levels is None:
97
+ # default is to have top-down features on all levels
98
+ fpn_top_down_levels = range(len(self.convs))
99
+ self.fpn_top_down_levels = list(fpn_top_down_levels)
100
+
101
+ def forward(self, xs: List[torch.Tensor]):
102
+
103
+ out = [None] * len(self.convs)
104
+ pos = [None] * len(self.convs)
105
+ assert len(xs) == len(self.convs)
106
+ # fpn forward pass
107
+ # see https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/fpn.py
108
+ prev_features = None
109
+ # forward in top-down order (from low to high resolution)
110
+ n = len(self.convs) - 1
111
+ for i in range(n, -1, -1):
112
+ x = xs[i]
113
+ lateral_features = self.convs[n - i](x)
114
+ if i in self.fpn_top_down_levels and prev_features is not None:
115
+ top_down_features = F.interpolate(
116
+ prev_features.to(dtype=torch.float32),
117
+ scale_factor=2.0,
118
+ mode=self.fpn_interp_model,
119
+ align_corners=(
120
+ None if self.fpn_interp_model == "nearest" else False
121
+ ),
122
+ antialias=False,
123
+ )
124
+ prev_features = lateral_features + top_down_features
125
+ if self.fuse_type == "avg":
126
+ prev_features /= 2
127
+ else:
128
+ prev_features = lateral_features
129
+ x_out = prev_features
130
+ out[i] = x_out
131
+ pos[i] = self.position_encoding(x_out).to(x_out.dtype)
132
+
133
+ return out, pos
SAM2/sam2/modeling/backbones/utils.py ADDED
@@ -0,0 +1,95 @@
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
+ # All rights reserved.
+
+ # This source code is licensed under the license found in the
+ # LICENSE file in the root directory of this source tree.
+
+ """Some utilities for backbones, in particular for windowing"""
+
+ from typing import Tuple
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+
+ def window_partition(x, window_size):
+     """
+     Partition into non-overlapping windows with padding if needed.
+     Args:
+         x (tensor): input tokens with [B, H, W, C].
+         window_size (int): window size.
+     Returns:
+         windows: windows after partition with [B * num_windows, window_size, window_size, C].
+         (Hp, Wp): padded height and width before partition
+     """
+     B, H, W, C = x.shape
+
+     pad_h = (window_size - H % window_size) % window_size
+     pad_w = (window_size - W % window_size) % window_size
+     if pad_h > 0 or pad_w > 0:
+         x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
+     Hp, Wp = H + pad_h, W + pad_w
+
+     x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)
+     windows = (
+         x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
+     )
+     return windows, (Hp, Wp)
+
+
+ def window_unpartition(windows, window_size, pad_hw, hw):
+     """
+     Window unpartition into original sequences and removing padding.
+     Args:
+         windows (tensor): input tokens with [B * num_windows, window_size, window_size, C].
+         window_size (int): window size.
+         pad_hw (Tuple): padded height and width (Hp, Wp).
+         hw (Tuple): original height and width (H, W) before padding.
+     Returns:
+         x: unpartitioned sequences with [B, H, W, C].
+     """
+     Hp, Wp = pad_hw
+     H, W = hw
+     B = windows.shape[0] // (Hp * Wp // window_size // window_size)
+     x = windows.view(
+         B, Hp // window_size, Wp // window_size, window_size, window_size, -1
+     )
+     x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)
+
+     if Hp > H or Wp > W:
+         x = x[:, :H, :W, :].contiguous()
+     return x
+
+
+ class PatchEmbed(nn.Module):
+     """
+     Image to Patch Embedding.
+     """
+
+     def __init__(
+         self,
+         kernel_size: Tuple[int, ...] = (7, 7),
+         stride: Tuple[int, ...] = (4, 4),
+         padding: Tuple[int, ...] = (3, 3),
+         in_chans: int = 3,
+         embed_dim: int = 768,
+     ):
+         """
+         Args:
+             kernel_size (Tuple): kernel size of the projection layer.
+             stride (Tuple): stride of the projection layer.
+             padding (Tuple): padding size of the projection layer.
+             in_chans (int): Number of input image channels.
+             embed_dim (int): Patch embedding dimension.
+         """
+         super().__init__()
+         self.proj = nn.Conv2d(
+             in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding
+         )
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         x = self.proj(x)
+         # B C H W -> B H W C
+         x = x.permute(0, 2, 3, 1)
+         return x
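A quick round-trip check for the two windowing helpers above, with sizes chosen so that padding is actually exercised:

```python
import torch
from sam2.modeling.backbones.utils import window_partition, window_unpartition

x = torch.randn(2, 30, 30, 96)                        # (B, H, W, C); 30 is not a multiple of 8
windows, pad_hw = window_partition(x, window_size=8)  # pads to (32, 32) before splitting
print(windows.shape, pad_hw)                          # torch.Size([32, 8, 8, 96]) (32, 32)
x_rec = window_unpartition(windows, 8, pad_hw, (30, 30))
assert torch.equal(x_rec, x)                          # padding stripped, original recovered
```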
SAM2/sam2/modeling/memory_attention.py ADDED
@@ -0,0 +1,169 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ from typing import Optional
8
+
9
+ import torch
10
+ from torch import nn, Tensor
11
+
12
+ from sam2.modeling.sam.transformer import RoPEAttention
13
+
14
+ from sam2.modeling.sam2_utils import get_activation_fn, get_clones
15
+
16
+
17
+ class MemoryAttentionLayer(nn.Module):
18
+
19
+ def __init__(
20
+ self,
21
+ activation: str,
22
+ cross_attention: nn.Module,
23
+ d_model: int,
24
+ dim_feedforward: int,
25
+ dropout: float,
26
+ pos_enc_at_attn: bool,
27
+ pos_enc_at_cross_attn_keys: bool,
28
+ pos_enc_at_cross_attn_queries: bool,
29
+ self_attention: nn.Module,
30
+ ):
31
+ super().__init__()
32
+ self.d_model = d_model
33
+ self.dim_feedforward = dim_feedforward
34
+ self.dropout_value = dropout
35
+ self.self_attn = self_attention
36
+ self.cross_attn_image = cross_attention
37
+
38
+ # Implementation of Feedforward model
39
+ self.linear1 = nn.Linear(d_model, dim_feedforward)
40
+ self.dropout = nn.Dropout(dropout)
41
+ self.linear2 = nn.Linear(dim_feedforward, d_model)
42
+
43
+ self.norm1 = nn.LayerNorm(d_model)
44
+ self.norm2 = nn.LayerNorm(d_model)
45
+ self.norm3 = nn.LayerNorm(d_model)
46
+ self.dropout1 = nn.Dropout(dropout)
47
+ self.dropout2 = nn.Dropout(dropout)
48
+ self.dropout3 = nn.Dropout(dropout)
49
+
50
+ self.activation_str = activation
51
+ self.activation = get_activation_fn(activation)
52
+
53
+ # Where to add pos enc
54
+ self.pos_enc_at_attn = pos_enc_at_attn
55
+ self.pos_enc_at_cross_attn_queries = pos_enc_at_cross_attn_queries
56
+ self.pos_enc_at_cross_attn_keys = pos_enc_at_cross_attn_keys
57
+
58
+ def _forward_sa(self, tgt, query_pos):
59
+ # Self-Attention
60
+ tgt2 = self.norm1(tgt)
61
+ q = k = tgt2 + query_pos if self.pos_enc_at_attn else tgt2
62
+ tgt2 = self.self_attn(q, k, v=tgt2)
63
+ tgt = tgt + self.dropout1(tgt2)
64
+ return tgt
65
+
66
+ def _forward_ca(self, tgt, memory, query_pos, pos, num_k_exclude_rope=0):
67
+ kwds = {}
68
+ if num_k_exclude_rope > 0:
69
+ assert isinstance(self.cross_attn_image, RoPEAttention)
70
+ kwds = {"num_k_exclude_rope": num_k_exclude_rope}
71
+
72
+ # Cross-Attention
73
+ tgt2 = self.norm2(tgt)
74
+ tgt2 = self.cross_attn_image(
75
+ q=tgt2 + query_pos if self.pos_enc_at_cross_attn_queries else tgt2,
76
+ k=memory + pos if self.pos_enc_at_cross_attn_keys else memory,
77
+ v=memory,
78
+ **kwds,
79
+ )
80
+ tgt = tgt + self.dropout2(tgt2)
81
+ return tgt
82
+
83
+ def forward(
84
+ self,
85
+ tgt, # image embedding
86
+ memory, # memory features concatenated with object pointers
87
+ pos: Optional[Tensor] = None,
88
+ query_pos: Optional[Tensor] = None,
89
+ num_k_exclude_rope: int = 0, # number of object-pointer tokens (each 256-dim pointer split into four 64-dim tokens) to exclude from RoPE
90
+ ) -> torch.Tensor:
91
+
92
+ # Self-Attn, Cross-Attn
93
+ tgt = self._forward_sa(tgt, query_pos)
94
+ tgt = self._forward_ca(tgt, memory, query_pos, pos, num_k_exclude_rope)
95
+ # MLP
96
+ tgt2 = self.norm3(tgt)
97
+ tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
98
+ tgt = tgt + self.dropout3(tgt2)
99
+ return tgt
100
+
101
+
102
+ class MemoryAttention(nn.Module):
103
+ def __init__(
104
+ self,
105
+ d_model: int,
106
+ pos_enc_at_input: bool,
107
+ layer: nn.Module,
108
+ num_layers: int,
109
+ batch_first: bool = True, # Do layers expect batch first input?
110
+ ):
111
+ super().__init__()
112
+ self.d_model = d_model
113
+ self.layers = get_clones(layer, num_layers)
114
+ self.num_layers = num_layers
115
+ self.norm = nn.LayerNorm(d_model)
116
+ self.pos_enc_at_input = pos_enc_at_input
117
+ self.batch_first = batch_first
118
+
119
+ def forward(
120
+ self,
121
+ curr: torch.Tensor, # self-attention inputs (image embedding)
122
+ memory: torch.Tensor, # cross-attention inputs (memory features concatenated with object pointers)
123
+ curr_pos: Optional[Tensor] = None, # pos_enc for self-attention inputs
124
+ memory_pos: Optional[Tensor] = None, # pos_enc for cross-attention inputs
125
+ num_obj_ptr_tokens: int = 0, # number of object pointer *tokens*
126
+ ):
127
+ if isinstance(curr, list):
128
+ assert isinstance(curr_pos, list)
129
+ assert len(curr) == len(curr_pos) == 1
130
+ curr, curr_pos = (
131
+ curr[0],
132
+ curr_pos[0],
133
+ )
134
+
135
+ assert (
136
+ curr.shape[1] == memory.shape[1]
137
+ ), "Batch size must be the same for curr and memory"
138
+
139
+ output = curr # image embedding
140
+ if self.pos_enc_at_input and curr_pos is not None:
141
+ output = output + 0.1 * curr_pos # add (scaled) positional encoding
142
+
143
+ if self.batch_first:
144
+ # Convert to batch first
145
+ output = output.transpose(0, 1)
146
+ curr_pos = curr_pos.transpose(0, 1)
147
+ memory = memory.transpose(0, 1)
148
+ memory_pos = memory_pos.transpose(0, 1)
149
+
150
+ for layer in self.layers:
151
+ kwds = {}
152
+ if isinstance(layer.cross_attn_image, RoPEAttention):
153
+ kwds = {"num_k_exclude_rope": num_obj_ptr_tokens}
154
+
155
+ output = layer(
156
+ tgt=output,
157
+ memory=memory,
158
+ pos=memory_pos,
159
+ query_pos=curr_pos,
160
+ **kwds,
161
+ )
162
+ normed_output = self.norm(output)
163
+
164
+ if self.batch_first:
165
+ # Convert back to seq first
166
+ normed_output = normed_output.transpose(0, 1)
167
+ curr_pos = curr_pos.transpose(0, 1)
168
+
169
+ return normed_output
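Editor's sketch (not part of the uploaded file): the pre-norm self-attn -> cross-attn -> MLP residual flow of MemoryAttentionLayer can be exercised in isolation. Here the plain Attention class stands in for the RoPEAttention modules that the real configs wire in, and all sizes (including kv_in_dim=64 for the memory channel width) are illustrative assumptions.

import torch
from sam2.modeling.memory_attention import MemoryAttentionLayer
from sam2.modeling.sam.transformer import Attention

layer = MemoryAttentionLayer(
    activation="relu",
    cross_attention=Attention(embedding_dim=256, num_heads=8, kv_in_dim=64),
    d_model=256,
    dim_feedforward=2048,
    dropout=0.0,
    pos_enc_at_attn=False,
    pos_enc_at_cross_attn_keys=True,
    pos_enc_at_cross_attn_queries=False,
    self_attention=Attention(embedding_dim=256, num_heads=8),
)
tgt = torch.randn(1, 64, 256)      # current-frame tokens (B, HW, C)
memory = torch.randn(1, 256, 64)   # memory tokens from past frames (B, N_mem, C_mem)
out = layer(tgt, memory, pos=torch.zeros_like(memory), query_pos=torch.zeros_like(tgt))
print(out.shape)                   # torch.Size([1, 64, 256])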
SAM2/sam2/modeling/memory_encoder.py ADDED
@@ -0,0 +1,181 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import math
8
+ from typing import Tuple
9
+
10
+ import torch
11
+ import torch.nn as nn
12
+ import torch.nn.functional as F
13
+
14
+ from sam2.modeling.sam2_utils import DropPath, get_clones, LayerNorm2d
15
+
16
+
17
+ class MaskDownSampler(nn.Module):
18
+ """
19
+ Progressively downsample a mask by total_stride, each time by stride.
20
+ Note that LayerNorm is applied per *token*, like in ViT.
21
+
22
+ With each downsample (by a factor stride**2), channel capacity increases by the same factor.
23
+ In the end, we linearly project to embed_dim channels.
24
+ """
25
+
26
+ def __init__(
27
+ self,
28
+ embed_dim=256,
29
+ kernel_size=4,
30
+ stride=4,
31
+ padding=0,
32
+ total_stride=16,
33
+ activation=nn.GELU,
34
+ ):
35
+ super().__init__()
36
+ num_layers = int(math.log2(total_stride) // math.log2(stride))
37
+ assert stride**num_layers == total_stride
38
+ self.encoder = nn.Sequential()
39
+ mask_in_chans, mask_out_chans = 1, 1
40
+ for _ in range(num_layers):
41
+ mask_out_chans = mask_in_chans * (stride**2)
42
+ self.encoder.append(
43
+ nn.Conv2d(
44
+ mask_in_chans,
45
+ mask_out_chans,
46
+ kernel_size=kernel_size,
47
+ stride=stride,
48
+ padding=padding,
49
+ )
50
+ )
51
+ self.encoder.append(LayerNorm2d(mask_out_chans))
52
+ self.encoder.append(activation())
53
+ mask_in_chans = mask_out_chans
54
+
55
+ self.encoder.append(nn.Conv2d(mask_out_chans, embed_dim, kernel_size=1))
56
+
57
+ def forward(self, x):
58
+ return self.encoder(x)
59
+
60
+
61
+ # Lightly adapted from ConvNext (https://github.com/facebookresearch/ConvNeXt)
62
+ class CXBlock(nn.Module):
63
+ r"""ConvNeXt Block. There are two equivalent implementations:
64
+ (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W)
65
+ (2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back
66
+ We use (2) as we find it slightly faster in PyTorch
67
+
68
+ Args:
69
+ dim (int): Number of input channels.
70
+ drop_path (float): Stochastic depth rate. Default: 0.0
71
+ layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
72
+ """
73
+
74
+ def __init__(
75
+ self,
76
+ dim,
77
+ kernel_size=7,
78
+ padding=3,
79
+ drop_path=0.0,
80
+ layer_scale_init_value=1e-6,
81
+ use_dwconv=True,
82
+ ):
83
+ super().__init__()
84
+ self.dwconv = nn.Conv2d(
85
+ dim,
86
+ dim,
87
+ kernel_size=kernel_size,
88
+ padding=padding,
89
+ groups=dim if use_dwconv else 1,
90
+ ) # depthwise conv
91
+ self.norm = LayerNorm2d(dim, eps=1e-6)
92
+ self.pwconv1 = nn.Linear(
93
+ dim, 4 * dim
94
+ ) # pointwise/1x1 convs, implemented with linear layers
95
+ self.act = nn.GELU()
96
+ self.pwconv2 = nn.Linear(4 * dim, dim)
97
+ self.gamma = (
98
+ nn.Parameter(layer_scale_init_value * torch.ones((dim)), requires_grad=True)
99
+ if layer_scale_init_value > 0
100
+ else None
101
+ )
102
+ self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
103
+
104
+ def forward(self, x):
105
+ input = x
106
+ x = self.dwconv(x)
107
+ x = self.norm(x)
108
+ x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C)
109
+ x = self.pwconv1(x)
110
+ x = self.act(x)
111
+ x = self.pwconv2(x)
112
+ if self.gamma is not None:
113
+ x = self.gamma * x
114
+ x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W)
115
+
116
+ x = input + self.drop_path(x)
117
+ return x
118
+
119
+
120
+ class Fuser(nn.Module):
121
+ def __init__(self, layer, num_layers, dim=None, input_projection=False):
122
+ super().__init__()
123
+ self.proj = nn.Identity()
124
+ self.layers = get_clones(layer, num_layers)
125
+
126
+ if input_projection:
127
+ assert dim is not None
128
+ self.proj = nn.Conv2d(dim, dim, kernel_size=1)
129
+
130
+ def forward(self, x):
131
+ # normally x: (N, C, H, W)
132
+ x = self.proj(x)
133
+ for layer in self.layers:
134
+ x = layer(x)
135
+ return x
136
+
137
+
138
+ class MemoryEncoder(nn.Module):
139
+ def __init__(
140
+ self,
141
+ out_dim,
142
+ mask_downsampler,
143
+ fuser,
144
+ position_encoding,
145
+ in_dim=256, # in_dim of pix_feats
146
+ ):
147
+ super().__init__()
148
+
149
+ self.mask_downsampler = mask_downsampler
150
+
151
+ self.pix_feat_proj = nn.Conv2d(in_dim, in_dim, kernel_size=1)
152
+ self.fuser = fuser
153
+ self.position_encoding = position_encoding
154
+ self.out_proj = nn.Identity()
155
+ if out_dim != in_dim:
156
+ self.out_proj = nn.Conv2d(in_dim, out_dim, kernel_size=1)
157
+
158
+ def forward(
159
+ self,
160
+ pix_feat: torch.Tensor,
161
+ masks: torch.Tensor,
162
+ skip_mask_sigmoid: bool = False,
163
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
164
+ ## Process masks
165
+ # sigmoid, so that less domain shift from gt masks which are bool
166
+ if not skip_mask_sigmoid:
167
+ masks = F.sigmoid(masks)
168
+ masks = self.mask_downsampler(masks)
169
+
170
+ ## Fuse pix_feats and downsampled masks
171
+ # in case the visual features are on CPU, cast them to CUDA
172
+ pix_feat = pix_feat.to(masks.device)
173
+
174
+ x = self.pix_feat_proj(pix_feat)
175
+ x = x + masks
176
+ x = self.fuser(x)
177
+ x = self.out_proj(x)
178
+
179
+ pos = self.position_encoding(x).to(x.dtype)
180
+
181
+ return {"vision_features": x, "vision_pos_enc": [pos]}
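Editor's sketch (not part of the uploaded file): the mask branch of the memory encoder can be probed on its own. The kernel/stride/padding values below follow the usual SAM 2 configs but are assumptions here; only the total_stride=16 reduction matters for the shapes.

import torch
from sam2.modeling.memory_encoder import MaskDownSampler

downsampler = MaskDownSampler(embed_dim=64, kernel_size=3, stride=2, padding=1, total_stride=16)
mask_logits = torch.randn(1, 1, 1024, 1024)   # raw mask logits at image resolution
print(downsampler(mask_logits).shape)         # torch.Size([1, 64, 64, 64]) - stride-16 mask features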
SAM2/sam2/modeling/position_encoding.py ADDED
@@ -0,0 +1,216 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import math
8
+ from typing import Any, Optional, Tuple
9
+
10
+ import numpy as np
11
+
12
+ import torch
13
+ from torch import nn
14
+
15
+
16
+ class PositionEmbeddingSine(nn.Module):
17
+ """
18
+ This is a more standard version of the position embedding, very similar to the one
19
+ used by the Attention is all you need paper, generalized to work on images.
20
+ """
21
+
22
+ def __init__(
23
+ self,
24
+ num_pos_feats,
25
+ temperature: int = 10000,
26
+ normalize: bool = True,
27
+ scale: Optional[float] = None,
28
+ ):
29
+ super().__init__()
30
+ assert num_pos_feats % 2 == 0, "Expecting even model width"
31
+ self.num_pos_feats = num_pos_feats // 2
32
+ self.temperature = temperature
33
+ self.normalize = normalize
34
+ if scale is not None and normalize is False:
35
+ raise ValueError("normalize should be True if scale is passed")
36
+ if scale is None:
37
+ scale = 2 * math.pi
38
+ self.scale = scale
39
+
40
+ self.cache = {}
41
+
42
+ def _encode_xy(self, x, y):
43
+ # The positions are expected to be normalized
44
+ assert len(x) == len(y) and x.ndim == y.ndim == 1
45
+ x_embed = x * self.scale
46
+ y_embed = y * self.scale
47
+
48
+ dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
49
+ dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
50
+
51
+ pos_x = x_embed[:, None] / dim_t
52
+ pos_y = y_embed[:, None] / dim_t
53
+ pos_x = torch.stack(
54
+ (pos_x[:, 0::2].sin(), pos_x[:, 1::2].cos()), dim=2
55
+ ).flatten(1)
56
+ pos_y = torch.stack(
57
+ (pos_y[:, 0::2].sin(), pos_y[:, 1::2].cos()), dim=2
58
+ ).flatten(1)
59
+ return pos_x, pos_y
60
+
61
+ @torch.no_grad()
62
+ def encode_boxes(self, x, y, w, h):
63
+ pos_x, pos_y = self._encode_xy(x, y)
64
+ pos = torch.cat((pos_y, pos_x, h[:, None], w[:, None]), dim=1)
65
+ return pos
66
+
67
+ encode = encode_boxes # Backwards compatibility
68
+
69
+ @torch.no_grad()
70
+ def encode_points(self, x, y, labels):
71
+ (bx, nx), (by, ny), (bl, nl) = x.shape, y.shape, labels.shape
72
+ assert bx == by and nx == ny and bx == bl and nx == nl
73
+ pos_x, pos_y = self._encode_xy(x.flatten(), y.flatten())
74
+ pos_x, pos_y = pos_x.reshape(bx, nx, -1), pos_y.reshape(by, ny, -1)
75
+ pos = torch.cat((pos_y, pos_x, labels[:, :, None]), dim=2)
76
+ return pos
77
+
78
+ @torch.no_grad()
79
+ def forward(self, x: torch.Tensor):
80
+ cache_key = (x.shape[-2], x.shape[-1])
81
+ if cache_key in self.cache:
82
+ return self.cache[cache_key][None].repeat(x.shape[0], 1, 1, 1)
83
+ y_embed = (
84
+ torch.arange(1, x.shape[-2] + 1, dtype=torch.float32, device=x.device)
85
+ .view(1, -1, 1)
86
+ .repeat(x.shape[0], 1, x.shape[-1])
87
+ )
88
+ x_embed = (
89
+ torch.arange(1, x.shape[-1] + 1, dtype=torch.float32, device=x.device)
90
+ .view(1, 1, -1)
91
+ .repeat(x.shape[0], x.shape[-2], 1)
92
+ )
93
+
94
+ if self.normalize:
95
+ eps = 1e-6
96
+ y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
97
+ x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
98
+
99
+ dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
100
+ dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
101
+
102
+ pos_x = x_embed[:, :, :, None] / dim_t
103
+ pos_y = y_embed[:, :, :, None] / dim_t
104
+ pos_x = torch.stack(
105
+ (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4
106
+ ).flatten(3)
107
+ pos_y = torch.stack(
108
+ (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4
109
+ ).flatten(3)
110
+ pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
111
+ self.cache[cache_key] = pos[0]
112
+ return pos
113
+
114
+
115
+ class PositionEmbeddingRandom(nn.Module):
116
+ """
117
+ Positional encoding using random spatial frequencies.
118
+ """
119
+
120
+ def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None:
121
+ super().__init__()
122
+ if scale is None or scale <= 0.0:
123
+ scale = 1.0
124
+ self.register_buffer(
125
+ "positional_encoding_gaussian_matrix",
126
+ scale * torch.randn((2, num_pos_feats)),
127
+ )
128
+
129
+ def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor:
130
+ """Positionally encode points that are normalized to [0,1]."""
131
+ # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape
132
+ coords = 2 * coords - 1
133
+ coords = coords @ self.positional_encoding_gaussian_matrix
134
+ coords = 2 * np.pi * coords
135
+ # outputs d_1 x ... x d_n x C shape
136
+ return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1)
137
+
138
+ def forward(self, size: Tuple[int, int]) -> torch.Tensor:
139
+ """Generate positional encoding for a grid of the specified size."""
140
+ h, w = size
141
+ device: Any = self.positional_encoding_gaussian_matrix.device
142
+ grid = torch.ones((h, w), device=device, dtype=torch.float32)
143
+ y_embed = grid.cumsum(dim=0) - 0.5
144
+ x_embed = grid.cumsum(dim=1) - 0.5
145
+ y_embed = y_embed / h
146
+ x_embed = x_embed / w
147
+
148
+ pe = self._pe_encoding(torch.stack([x_embed, y_embed], dim=-1))
149
+ return pe.permute(2, 0, 1) # C x H x W
150
+
151
+ def forward_with_coords(
152
+ self, coords_input: torch.Tensor, image_size: Tuple[int, int]
153
+ ) -> torch.Tensor:
154
+ """Positionally encode points that are not normalized to [0,1]."""
155
+ coords = coords_input.clone()
156
+ coords[:, :, 0] = coords[:, :, 0] / image_size[1]
157
+ coords[:, :, 1] = coords[:, :, 1] / image_size[0]
158
+ return self._pe_encoding(coords.to(torch.float)) # B x N x C
159
+
160
+
161
+ # Rotary Positional Encoding, adapted from:
162
+ # 1. https://github.com/meta-llama/codellama/blob/main/llama/model.py
163
+ # 2. https://github.com/naver-ai/rope-vit
164
+ # 3. https://github.com/lucidrains/rotary-embedding-torch
165
+
166
+
167
+ def init_t_xy(end_x: int, end_y: int):
168
+ t = torch.arange(end_x * end_y, dtype=torch.float32)
169
+ t_x = (t % end_x).float()
170
+ t_y = torch.div(t, end_x, rounding_mode="floor").float()
171
+ return t_x, t_y
172
+
173
+
174
+ def compute_axial_cis(dim: int, end_x: int, end_y: int, theta: float = 10000.0):
175
+ freqs_x = 1.0 / (theta ** (torch.arange(0, dim, 4)[: (dim // 4)].float() / dim))
176
+ freqs_y = 1.0 / (theta ** (torch.arange(0, dim, 4)[: (dim // 4)].float() / dim))
177
+
178
+ t_x, t_y = init_t_xy(end_x, end_y)
179
+ freqs_x = torch.outer(t_x, freqs_x)
180
+ freqs_y = torch.outer(t_y, freqs_y)
181
+ freqs_cis_x = torch.polar(torch.ones_like(freqs_x), freqs_x)
182
+ freqs_cis_y = torch.polar(torch.ones_like(freqs_y), freqs_y)
183
+ return torch.cat([freqs_cis_x, freqs_cis_y], dim=-1)
184
+
185
+
186
+ def reshape_for_broadcast(freqs_cis: torch.Tensor, x: torch.Tensor):
187
+ ndim = x.ndim
188
+ assert 0 <= 1 < ndim
189
+ assert freqs_cis.shape == (x.shape[-2], x.shape[-1])
190
+ shape = [d if i >= ndim - 2 else 1 for i, d in enumerate(x.shape)]
191
+ return freqs_cis.view(*shape)
192
+
193
+
194
+ def apply_rotary_enc(
195
+ xq: torch.Tensor,
196
+ xk: torch.Tensor,
197
+ freqs_cis: torch.Tensor,
198
+ repeat_freqs_k: bool = False,
199
+ ):
200
+ xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
201
+ xk_ = (
202
+ torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))
203
+ if xk.shape[-2] != 0
204
+ else None
205
+ )
206
+ freqs_cis = reshape_for_broadcast(freqs_cis, xq_)
207
+ xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3)
208
+ if xk_ is None:
209
+ # no keys to rotate, due to dropout
210
+ return xq_out.type_as(xq).to(xq.device), xk
211
+ # repeat freqs along seq_len dim to match k seq_len
212
+ if repeat_freqs_k:
213
+ r = xk_.shape[-2] // xq_.shape[-2]
214
+ freqs_cis = freqs_cis.repeat(*([1] * (freqs_cis.ndim - 2)), r, 1)
215
+ xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3)
216
+ return xq_out.type_as(xq).to(xq.device), xk_out.type_as(xk).to(xk.device)
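Editor's sketch (not part of the uploaded file): the axial RoPE helpers above operate on (B, heads, tokens, head_dim) tensors, with one complex frequency table per grid position; the sizes below are illustrative.

import torch
from sam2.modeling.position_encoding import apply_rotary_enc, compute_axial_cis

head_dim, grid = 64, 8
freqs_cis = compute_axial_cis(dim=head_dim, end_x=grid, end_y=grid)  # (64 tokens, 32) complex
q = torch.randn(1, 2, grid * grid, head_dim)                         # (B, heads, tokens, head_dim)
k = torch.randn(1, 2, grid * grid, head_dim)
q_rot, k_rot = apply_rotary_enc(q, k, freqs_cis=freqs_cis)
print(q_rot.shape, k_rot.shape)                                      # shapes are unchanged, values rotated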
SAM2/sam2/modeling/sam/__init__.py ADDED
@@ -0,0 +1,5 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
SAM2/sam2/modeling/sam/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (177 Bytes). View file
 
SAM2/sam2/modeling/sam/__pycache__/mask_decoder.cpython-310.pyc ADDED
Binary file (7.82 kB). View file
 
SAM2/sam2/modeling/sam/__pycache__/prompt_encoder.cpython-310.pyc ADDED
Binary file (5.93 kB). View file
 
SAM2/sam2/modeling/sam/__pycache__/transformer.cpython-310.pyc ADDED
Binary file (8.87 kB). View file
 
SAM2/sam2/modeling/sam/mask_decoder.py ADDED
@@ -0,0 +1,295 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ from typing import List, Optional, Tuple, Type
8
+
9
+ import torch
10
+ from torch import nn
11
+
12
+ from sam2.modeling.sam2_utils import LayerNorm2d, MLP
13
+
14
+
15
+ class MaskDecoder(nn.Module):
16
+ def __init__(
17
+ self,
18
+ *,
19
+ transformer_dim: int,
20
+ transformer: nn.Module,
21
+ num_multimask_outputs: int = 3,
22
+ activation: Type[nn.Module] = nn.GELU,
23
+ iou_head_depth: int = 3,
24
+ iou_head_hidden_dim: int = 256,
25
+ use_high_res_features: bool = False,
26
+ iou_prediction_use_sigmoid=False,
27
+ dynamic_multimask_via_stability=False,
28
+ dynamic_multimask_stability_delta=0.05,
29
+ dynamic_multimask_stability_thresh=0.98,
30
+ pred_obj_scores: bool = False,
31
+ pred_obj_scores_mlp: bool = False,
32
+ use_multimask_token_for_obj_ptr: bool = False,
33
+ ) -> None:
34
+ """
35
+ Predicts masks given an image and prompt embeddings, using a
36
+ transformer architecture.
37
+
38
+ Arguments:
39
+ transformer_dim (int): the channel dimension of the transformer
40
+ transformer (nn.Module): the transformer used to predict masks
41
+ num_multimask_outputs (int): the number of masks to predict
42
+ when disambiguating masks
43
+ activation (nn.Module): the type of activation to use when
44
+ upscaling masks
45
+ iou_head_depth (int): the depth of the MLP used to predict
46
+ mask quality
47
+ iou_head_hidden_dim (int): the hidden dimension of the MLP
48
+ used to predict mask quality
49
+ """
50
+ super().__init__()
51
+ self.transformer_dim = transformer_dim
52
+ self.transformer = transformer
53
+
54
+ self.num_multimask_outputs = num_multimask_outputs
55
+
56
+ self.iou_token = nn.Embedding(1, transformer_dim)
57
+ self.num_mask_tokens = num_multimask_outputs + 1
58
+ self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)
59
+
60
+ self.pred_obj_scores = pred_obj_scores
61
+ if self.pred_obj_scores:
62
+ self.obj_score_token = nn.Embedding(1, transformer_dim)
63
+ self.use_multimask_token_for_obj_ptr = use_multimask_token_for_obj_ptr
64
+
65
+ self.output_upscaling = nn.Sequential(
66
+ nn.ConvTranspose2d(
67
+ transformer_dim, transformer_dim // 4, kernel_size=2, stride=2
68
+ ),
69
+ LayerNorm2d(transformer_dim // 4),
70
+ activation(),
71
+ nn.ConvTranspose2d(
72
+ transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2
73
+ ),
74
+ activation(),
75
+ )
76
+ self.use_high_res_features = use_high_res_features
77
+ if use_high_res_features:
78
+ self.conv_s0 = nn.Conv2d(
79
+ transformer_dim, transformer_dim // 8, kernel_size=1, stride=1
80
+ )
81
+ self.conv_s1 = nn.Conv2d(
82
+ transformer_dim, transformer_dim // 4, kernel_size=1, stride=1
83
+ )
84
+
85
+ self.output_hypernetworks_mlps = nn.ModuleList(
86
+ [
87
+ MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3)
88
+ for i in range(self.num_mask_tokens)
89
+ ]
90
+ )
91
+
92
+ self.iou_prediction_head = MLP(
93
+ transformer_dim,
94
+ iou_head_hidden_dim,
95
+ self.num_mask_tokens,
96
+ iou_head_depth,
97
+ sigmoid_output=iou_prediction_use_sigmoid,
98
+ )
99
+ if self.pred_obj_scores:
100
+ self.pred_obj_score_head = nn.Linear(transformer_dim, 1)
101
+ if pred_obj_scores_mlp:
102
+ self.pred_obj_score_head = MLP(transformer_dim, transformer_dim, 1, 3)
103
+
104
+ # When outputting a single mask, optionally we can dynamically fall back to the best
105
+ # multimask output token if the single mask output token gives low stability scores.
106
+ self.dynamic_multimask_via_stability = dynamic_multimask_via_stability
107
+ self.dynamic_multimask_stability_delta = dynamic_multimask_stability_delta
108
+ self.dynamic_multimask_stability_thresh = dynamic_multimask_stability_thresh
109
+
110
+ def forward(
111
+ self,
112
+ image_embeddings: torch.Tensor,
113
+ image_pe: torch.Tensor,
114
+ sparse_prompt_embeddings: torch.Tensor,
115
+ dense_prompt_embeddings: torch.Tensor,
116
+ multimask_output: bool,
117
+ repeat_image: bool,
118
+ high_res_features: Optional[List[torch.Tensor]] = None,
119
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
120
+ """
121
+ Predict masks given image and prompt embeddings.
122
+
123
+ Arguments:
124
+ image_embeddings (torch.Tensor): the embeddings from the image encoder
125
+ image_pe (torch.Tensor): positional encoding with the shape of image_embeddings
126
+ sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes
127
+ dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs
128
+ multimask_output (bool): Whether to return multiple masks or a single
129
+ mask.
130
+
131
+ Returns:
132
+ torch.Tensor: batched predicted masks
133
+ torch.Tensor: batched predictions of mask quality
134
+ torch.Tensor: batched SAM token for mask output
135
+ """
136
+ masks, iou_pred, mask_tokens_out, object_score_logits = self.predict_masks(
137
+ image_embeddings=image_embeddings,
138
+ image_pe=image_pe,
139
+ sparse_prompt_embeddings=sparse_prompt_embeddings,
140
+ dense_prompt_embeddings=dense_prompt_embeddings,
141
+ repeat_image=repeat_image,
142
+ high_res_features=high_res_features,
143
+ )
144
+
145
+ # Select the correct mask or masks for output
146
+ if multimask_output:
147
+ masks = masks[:, 1:, :, :]
148
+ iou_pred = iou_pred[:, 1:]
149
+ elif self.dynamic_multimask_via_stability and not self.training:
150
+ masks, iou_pred = self._dynamic_multimask_via_stability(masks, iou_pred)
151
+ else:
152
+ masks = masks[:, 0:1, :, :]
153
+ iou_pred = iou_pred[:, 0:1]
154
+
155
+ if multimask_output and self.use_multimask_token_for_obj_ptr:
156
+ sam_tokens_out = mask_tokens_out[:, 1:] # [b, 3, c] shape
157
+ else:
158
+ # Take the mask output token. Here we *always* use the token for single mask output.
159
+ # At test time, even if we track after 1-click (and using multimask_output=True),
160
+ # we still take the single mask token here. The rationale is that we always track
161
+ # after multiple clicks during training, so the past tokens seen during training
162
+ # are always the single mask token (and we'll let it be the object-memory token).
163
+ sam_tokens_out = mask_tokens_out[:, 0:1] # [b, 1, c] shape
164
+
165
+ # Prepare output
166
+ return masks, iou_pred, sam_tokens_out, object_score_logits
167
+
168
+ def predict_masks(
169
+ self,
170
+ image_embeddings: torch.Tensor,
171
+ image_pe: torch.Tensor,
172
+ sparse_prompt_embeddings: torch.Tensor,
173
+ dense_prompt_embeddings: torch.Tensor,
174
+ repeat_image: bool,
175
+ high_res_features: Optional[List[torch.Tensor]] = None,
176
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
177
+ """Predicts masks. See 'forward' for more details."""
178
+ # Concatenate output tokens
179
+ s = 0
180
+ if self.pred_obj_scores:
181
+ output_tokens = torch.cat(
182
+ [
183
+ self.obj_score_token.weight,
184
+ self.iou_token.weight,
185
+ self.mask_tokens.weight,
186
+ ],
187
+ dim=0,
188
+ )
189
+ s = 1
190
+ else:
191
+ output_tokens = torch.cat(
192
+ [self.iou_token.weight, self.mask_tokens.weight], dim=0
193
+ )
194
+ output_tokens = output_tokens.unsqueeze(0).expand(
195
+ sparse_prompt_embeddings.size(0), -1, -1
196
+ )
197
+ tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)
198
+
199
+ # Expand per-image data in batch direction to be per-mask
200
+ if repeat_image:
201
+ src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)
202
+ else:
203
+ assert image_embeddings.shape[0] == tokens.shape[0]
204
+ src = image_embeddings
205
+ src = src + dense_prompt_embeddings
206
+ assert (
207
+ image_pe.size(0) == 1
208
+ ), "image_pe should have size 1 in batch dim (from `get_dense_pe()`)"
209
+ pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)
210
+ b, c, h, w = src.shape
211
+
212
+ # Run the transformer
213
+ hs, src = self.transformer(src, pos_src, tokens) # run the two-way transformer of the mask decoder
214
+ iou_token_out = hs[:, s, :]
215
+ mask_tokens_out = hs[:, s + 1 : (s + 1 + self.num_mask_tokens), :]
216
+
217
+ # Upscale mask embeddings and predict masks using the mask tokens
218
+ src = src.transpose(1, 2).view(b, c, h, w)
219
+ if not self.use_high_res_features:
220
+ upscaled_embedding = self.output_upscaling(src)
221
+ else: # the [2x conv. trans.] upscaling path of the mask decoder, fused with high-res backbone features
222
+ dc1, ln1, act1, dc2, act2 = self.output_upscaling
223
+ feat_s0, feat_s1 = high_res_features
224
+ upscaled_embedding = act1(ln1(dc1(src) + feat_s1))
225
+ upscaled_embedding = act2(dc2(upscaled_embedding) + feat_s0)
226
+
227
+ hyper_in_list: List[torch.Tensor] = []
228
+ for i in range(self.num_mask_tokens):
229
+ hyper_in_list.append(
230
+ self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :])
231
+ )
232
+ hyper_in = torch.stack(hyper_in_list, dim=1)
233
+ b, c, h, w = upscaled_embedding.shape
234
+ masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w)
235
+
236
+ # Generate mask quality predictions
237
+ iou_pred = self.iou_prediction_head(iou_token_out)
238
+ if self.pred_obj_scores:
239
+ assert s == 1
240
+ object_score_logits = self.pred_obj_score_head(hs[:, 0, :])
241
+ else:
242
+ # Obj scores logits - default to 10.0, i.e. assuming the object is present, sigmoid(10)=1
243
+ object_score_logits = 10.0 * iou_pred.new_ones(iou_pred.shape[0], 1)
244
+
245
+ return masks, iou_pred, mask_tokens_out, object_score_logits
246
+
247
+ def _get_stability_scores(self, mask_logits):
248
+ """
249
+ Compute stability scores of the mask logits based on the IoU between upper and
250
+ lower thresholds, similar to https://github.com/fairinternal/onevision/pull/568.
251
+ """
252
+ mask_logits = mask_logits.flatten(-2)
253
+ stability_delta = self.dynamic_multimask_stability_delta
254
+ area_i = torch.sum(mask_logits > stability_delta, dim=-1).float()
255
+ area_u = torch.sum(mask_logits > -stability_delta, dim=-1).float()
256
+ stability_scores = torch.where(area_u > 0, area_i / area_u, 1.0)
257
+ return stability_scores
258
+
259
+ def _dynamic_multimask_via_stability(self, all_mask_logits, all_iou_scores):
260
+ """
261
+ When outputting a single mask, if the stability score from the current single-mask
262
+ output (based on output token 0) falls below a threshold, we instead select from
263
+ multi-mask outputs (based on output token 1~3) the mask with the highest predicted
264
+ IoU score. This is intended to ensure a valid mask for both clicking and tracking.
265
+ """
266
+ # The best mask from multimask output tokens (1~3)
267
+ multimask_logits = all_mask_logits[:, 1:, :, :]
268
+ multimask_iou_scores = all_iou_scores[:, 1:]
269
+ best_scores_inds = torch.argmax(multimask_iou_scores, dim=-1)
270
+ batch_inds = torch.arange(
271
+ multimask_iou_scores.size(0), device=all_iou_scores.device
272
+ )
273
+ best_multimask_logits = multimask_logits[batch_inds, best_scores_inds]
274
+ best_multimask_logits = best_multimask_logits.unsqueeze(1)
275
+ best_multimask_iou_scores = multimask_iou_scores[batch_inds, best_scores_inds]
276
+ best_multimask_iou_scores = best_multimask_iou_scores.unsqueeze(1)
277
+
278
+ # The mask from singlemask output token 0 and its stability score
279
+ singlemask_logits = all_mask_logits[:, 0:1, :, :]
280
+ singlemask_iou_scores = all_iou_scores[:, 0:1]
281
+ stability_scores = self._get_stability_scores(singlemask_logits)
282
+ is_stable = stability_scores >= self.dynamic_multimask_stability_thresh
283
+
284
+ # Dynamically fall back to best multimask output upon low stability scores.
285
+ mask_logits_out = torch.where(
286
+ is_stable[..., None, None].expand_as(singlemask_logits),
287
+ singlemask_logits,
288
+ best_multimask_logits,
289
+ )
290
+ iou_scores_out = torch.where(
291
+ is_stable.expand_as(singlemask_iou_scores),
292
+ singlemask_iou_scores,
293
+ best_multimask_iou_scores,
294
+ )
295
+ return mask_logits_out, iou_scores_out
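Editor's sketch (not part of the uploaded file): a minimal stand-alone run of the decoder with random stand-ins for the prompt-encoder outputs. The 64x64 embedding grid corresponds to a 1024-pixel input at stride 16, but every size here is an illustrative assumption.

import torch
from sam2.modeling.sam.mask_decoder import MaskDecoder
from sam2.modeling.sam.transformer import TwoWayTransformer

decoder = MaskDecoder(
    transformer_dim=256,
    transformer=TwoWayTransformer(depth=2, embedding_dim=256, num_heads=8, mlp_dim=2048),
    num_multimask_outputs=3,
)
img_emb = torch.randn(1, 256, 64, 64)    # image embedding (B, C, H, W)
img_pe = torch.randn(1, 256, 64, 64)     # dense positional encoding (batch dim must be 1)
sparse = torch.randn(1, 2, 256)          # stand-in for point/box prompt embeddings
dense = torch.randn(1, 256, 64, 64)      # stand-in for the "no mask" dense embedding
masks, iou_pred, sam_tokens, obj_logits = decoder(
    img_emb, img_pe, sparse, dense, multimask_output=True, repeat_image=False
)
print(masks.shape, iou_pred.shape)       # (1, 3, 256, 256) and (1, 3)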
SAM2/sam2/modeling/sam/prompt_encoder.py ADDED
@@ -0,0 +1,182 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ from typing import Optional, Tuple, Type
8
+
9
+ import torch
10
+ from torch import nn
11
+
12
+ from sam2.modeling.position_encoding import PositionEmbeddingRandom
13
+
14
+ from sam2.modeling.sam2_utils import LayerNorm2d
15
+
16
+
17
+ class PromptEncoder(nn.Module):
18
+ def __init__(
19
+ self,
20
+ embed_dim: int,
21
+ image_embedding_size: Tuple[int, int],
22
+ input_image_size: Tuple[int, int],
23
+ mask_in_chans: int,
24
+ activation: Type[nn.Module] = nn.GELU,
25
+ ) -> None:
26
+ """
27
+ Encodes prompts for input to SAM's mask decoder.
28
+
29
+ Arguments:
30
+ embed_dim (int): The prompts' embedding dimension
31
+ image_embedding_size (tuple(int, int)): The spatial size of the
32
+ image embedding, as (H, W).
33
+ input_image_size (int): The padded size of the image as input
34
+ to the image encoder, as (H, W).
35
+ mask_in_chans (int): The number of hidden channels used for
36
+ encoding input masks.
37
+ activation (nn.Module): The activation to use when encoding
38
+ input masks.
39
+ """
40
+ super().__init__()
41
+ self.embed_dim = embed_dim
42
+ self.input_image_size = input_image_size
43
+ self.image_embedding_size = image_embedding_size
44
+ self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)
45
+
46
+ self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners
47
+ point_embeddings = [
48
+ nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)
49
+ ]
50
+ self.point_embeddings = nn.ModuleList(point_embeddings)
51
+ self.not_a_point_embed = nn.Embedding(1, embed_dim)
52
+
53
+ self.mask_input_size = (
54
+ 4 * image_embedding_size[0],
55
+ 4 * image_embedding_size[1],
56
+ )
57
+ self.mask_downscaling = nn.Sequential(
58
+ nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),
59
+ LayerNorm2d(mask_in_chans // 4),
60
+ activation(),
61
+ nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),
62
+ LayerNorm2d(mask_in_chans),
63
+ activation(),
64
+ nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),
65
+ )
66
+ self.no_mask_embed = nn.Embedding(1, embed_dim)
67
+
68
+ def get_dense_pe(self) -> torch.Tensor:
69
+ """
70
+ Returns the positional encoding used to encode point prompts,
71
+ applied to a dense set of points the shape of the image encoding.
72
+
73
+ Returns:
74
+ torch.Tensor: Positional encoding with shape
75
+ 1x(embed_dim)x(embedding_h)x(embedding_w)
76
+ """
77
+ return self.pe_layer(self.image_embedding_size).unsqueeze(0)
78
+
79
+ def _embed_points(
80
+ self,
81
+ points: torch.Tensor,
82
+ labels: torch.Tensor,
83
+ pad: bool,
84
+ ) -> torch.Tensor:
85
+ """Embeds point prompts."""
86
+ points = points + 0.5 # Shift to center of pixel
87
+ if pad:
88
+ padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device)
89
+ padding_label = -torch.ones((labels.shape[0], 1), device=labels.device)
90
+ points = torch.cat([points, padding_point], dim=1)
91
+ labels = torch.cat([labels, padding_label], dim=1)
92
+ point_embedding = self.pe_layer.forward_with_coords(
93
+ points, self.input_image_size
94
+ )
95
+ point_embedding[labels == -1] = 0.0
96
+ point_embedding[labels == -1] += self.not_a_point_embed.weight
97
+ point_embedding[labels == 0] += self.point_embeddings[0].weight
98
+ point_embedding[labels == 1] += self.point_embeddings[1].weight
99
+ point_embedding[labels == 2] += self.point_embeddings[2].weight
100
+ point_embedding[labels == 3] += self.point_embeddings[3].weight
101
+ return point_embedding
102
+
103
+ def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
104
+ """Embeds box prompts."""
105
+ boxes = boxes + 0.5 # Shift to center of pixel
106
+ coords = boxes.reshape(-1, 2, 2)
107
+ corner_embedding = self.pe_layer.forward_with_coords(
108
+ coords, self.input_image_size
109
+ )
110
+ corner_embedding[:, 0, :] += self.point_embeddings[2].weight
111
+ corner_embedding[:, 1, :] += self.point_embeddings[3].weight
112
+ return corner_embedding
113
+
114
+ def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:
115
+ """Embeds mask inputs."""
116
+ mask_embedding = self.mask_downscaling(masks)
117
+ return mask_embedding
118
+
119
+ def _get_batch_size(
120
+ self,
121
+ points: Optional[Tuple[torch.Tensor, torch.Tensor]],
122
+ boxes: Optional[torch.Tensor],
123
+ masks: Optional[torch.Tensor],
124
+ ) -> int:
125
+ """
126
+ Gets the batch size of the output given the batch size of the input prompts.
127
+ """
128
+ if points is not None:
129
+ return points[0].shape[0]
130
+ elif boxes is not None:
131
+ return boxes.shape[0]
132
+ elif masks is not None:
133
+ return masks.shape[0]
134
+ else:
135
+ return 1
136
+
137
+ def _get_device(self) -> torch.device:
138
+ return self.point_embeddings[0].weight.device
139
+
140
+ def forward(
141
+ self,
142
+ points: Optional[Tuple[torch.Tensor, torch.Tensor]],
143
+ boxes: Optional[torch.Tensor],
144
+ masks: Optional[torch.Tensor],
145
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
146
+ """
147
+ Embeds different types of prompts, returning both sparse and dense
148
+ embeddings.
149
+
150
+ Arguments:
151
+ points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates
152
+ and labels to embed.
153
+ boxes (torch.Tensor or none): boxes to embed
154
+ masks (torch.Tensor or none): masks to embed
155
+
156
+ Returns:
157
+ torch.Tensor: sparse embeddings for the points and boxes, with shape
158
+ BxNx(embed_dim), where N is determined by the number of input points
159
+ and boxes.
160
+ torch.Tensor: dense embeddings for the masks, in the shape
161
+ Bx(embed_dim)x(embed_H)x(embed_W)
162
+ """
163
+ bs = self._get_batch_size(points, boxes, masks)
164
+ sparse_embeddings = torch.empty(
165
+ (bs, 0, self.embed_dim), device=self._get_device()
166
+ )
167
+ if points is not None:
168
+ coords, labels = points
169
+ point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))
170
+ sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)
171
+ if boxes is not None:
172
+ box_embeddings = self._embed_boxes(boxes)
173
+ sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)
174
+
175
+ if masks is not None:
176
+ dense_embeddings = self._embed_masks(masks)
177
+ else:
178
+ dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(
179
+ bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]
180
+ )
181
+
182
+ return sparse_embeddings, dense_embeddings
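Editor's sketch (not part of the uploaded file): encoding a single positive click. The sparse output has two tokens because a padding point is appended when no box is given; the 1024-pixel input and 64x64 embedding sizes are illustrative assumptions.

import torch
from sam2.modeling.sam.prompt_encoder import PromptEncoder

encoder = PromptEncoder(
    embed_dim=256,
    image_embedding_size=(64, 64),
    input_image_size=(1024, 1024),
    mask_in_chans=16,
)
coords = torch.tensor([[[512.0, 384.0]]])   # (B=1, N=1, 2) point in input-image pixels
labels = torch.ones(1, 1)                   # 1 = positive click, 0 = negative
sparse, dense = encoder(points=(coords, labels), boxes=None, masks=None)
print(sparse.shape, dense.shape)            # (1, 2, 256) and (1, 256, 64, 64)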
SAM2/sam2/modeling/sam/transformer.py ADDED
@@ -0,0 +1,330 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import math
8
+ import warnings
9
+ from functools import partial
10
+ from typing import Tuple, Type
11
+
12
+ import torch
13
+ import torch.nn.functional as F
14
+ from torch import nn, Tensor
15
+
16
+ from sam2.modeling.position_encoding import apply_rotary_enc, compute_axial_cis
17
+
18
+ from sam2.modeling.sam2_utils import MLP
19
+ from sam2.utils.misc import get_sdpa_settings
20
+
21
+ warnings.simplefilter(action="ignore", category=FutureWarning)
22
+ OLD_GPU, USE_FLASH_ATTN, MATH_KERNEL_ON = get_sdpa_settings()
23
+ USE_FLASH_ATTN = False
24
+ MATH_KERNEL_ON = True
25
+ OLD_GPU = True
26
+
27
+
28
+ class TwoWayTransformer(nn.Module):
29
+ def __init__(
30
+ self,
31
+ depth: int,
32
+ embedding_dim: int,
33
+ num_heads: int,
34
+ mlp_dim: int,
35
+ activation: Type[nn.Module] = nn.ReLU,
36
+ attention_downsample_rate: int = 2,
37
+ ) -> None:
38
+ """
39
+ A transformer decoder that attends to an input image using
40
+ queries whose positional embedding is supplied.
41
+
42
+ Args:
43
+ depth (int): number of layers in the transformer
44
+ embedding_dim (int): the channel dimension for the input embeddings
45
+ num_heads (int): the number of heads for multihead attention. Must
46
+ divide embedding_dim
47
+ mlp_dim (int): the channel dimension internal to the MLP block
48
+ activation (nn.Module): the activation to use in the MLP block
49
+ """
50
+ super().__init__()
51
+ self.depth = depth
52
+ self.embedding_dim = embedding_dim
53
+ self.num_heads = num_heads
54
+ self.mlp_dim = mlp_dim
55
+ self.layers = nn.ModuleList()
56
+
57
+ for i in range(depth):
58
+ self.layers.append(
59
+ TwoWayAttentionBlock(
60
+ embedding_dim=embedding_dim,
61
+ num_heads=num_heads,
62
+ mlp_dim=mlp_dim,
63
+ activation=activation,
64
+ attention_downsample_rate=attention_downsample_rate,
65
+ skip_first_layer_pe=(i == 0),
66
+ )
67
+ )
68
+
69
+ self.final_attn_token_to_image = Attention(
70
+ embedding_dim, num_heads, downsample_rate=attention_downsample_rate
71
+ )
72
+ self.norm_final_attn = nn.LayerNorm(embedding_dim)
73
+
74
+ def forward(
75
+ self,
76
+ image_embedding: Tensor,
77
+ image_pe: Tensor,
78
+ point_embedding: Tensor,
79
+ ) -> Tuple[Tensor, Tensor]:
80
+ """
81
+ Args:
82
+ image_embedding (torch.Tensor): image to attend to. Should be shape
83
+ B x embedding_dim x h x w for any h and w.
84
+ image_pe (torch.Tensor): the positional encoding to add to the image. Must
85
+ have the same shape as image_embedding.
86
+ point_embedding (torch.Tensor): the embedding to add to the query points.
87
+ Must have shape B x N_points x embedding_dim for any N_points.
88
+
89
+ Returns:
90
+ torch.Tensor: the processed point_embedding
91
+ torch.Tensor: the processed image_embedding
92
+ """
93
+ # BxCxHxW -> BxHWxC == B x N_image_tokens x C
94
+ bs, c, h, w = image_embedding.shape
95
+ image_embedding = image_embedding.flatten(2).permute(0, 2, 1)
96
+ image_pe = image_pe.flatten(2).permute(0, 2, 1)
97
+
98
+ # Prepare queries
99
+ queries = point_embedding
100
+ keys = image_embedding
101
+
102
+ # Apply transformer blocks and final layernorm
103
+ for layer in self.layers:
104
+ queries, keys = layer(
105
+ queries=queries,
106
+ keys=keys,
107
+ query_pe=point_embedding,
108
+ key_pe=image_pe,
109
+ )
110
+
111
+ # Apply the final attention layer from the points to the image
112
+ q = queries + point_embedding
113
+ k = keys + image_pe
114
+ attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys)
115
+ queries = queries + attn_out
116
+ queries = self.norm_final_attn(queries)
117
+
118
+ return queries, keys
119
+
120
+
121
+ class TwoWayAttentionBlock(nn.Module):
122
+ def __init__(
123
+ self,
124
+ embedding_dim: int,
125
+ num_heads: int,
126
+ mlp_dim: int = 2048,
127
+ activation: Type[nn.Module] = nn.ReLU,
128
+ attention_downsample_rate: int = 2,
129
+ skip_first_layer_pe: bool = False,
130
+ ) -> None:
131
+ """
132
+ A transformer block with four layers: (1) self-attention of sparse
133
+ inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp
134
+ block on sparse inputs, and (4) cross attention of dense inputs to sparse
135
+ inputs.
136
+
137
+ Arguments:
138
+ embedding_dim (int): the channel dimension of the embeddings
139
+ num_heads (int): the number of heads in the attention layers
140
+ mlp_dim (int): the hidden dimension of the mlp block
141
+ activation (nn.Module): the activation of the mlp block
142
+ skip_first_layer_pe (bool): skip the PE on the first layer
143
+ """
144
+ super().__init__()
145
+ self.self_attn = Attention(embedding_dim, num_heads)
146
+ self.norm1 = nn.LayerNorm(embedding_dim)
147
+
148
+ self.cross_attn_token_to_image = Attention(
149
+ embedding_dim, num_heads, downsample_rate=attention_downsample_rate
150
+ )
151
+ self.norm2 = nn.LayerNorm(embedding_dim)
152
+
153
+ self.mlp = MLP(
154
+ embedding_dim, mlp_dim, embedding_dim, num_layers=2, activation=activation
155
+ )
156
+ self.norm3 = nn.LayerNorm(embedding_dim)
157
+
158
+ self.norm4 = nn.LayerNorm(embedding_dim)
159
+ self.cross_attn_image_to_token = Attention(
160
+ embedding_dim, num_heads, downsample_rate=attention_downsample_rate
161
+ )
162
+
163
+ self.skip_first_layer_pe = skip_first_layer_pe
164
+
165
+ def forward(
166
+ self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor
167
+ ) -> Tuple[Tensor, Tensor]:
168
+ # Self attention block
169
+ if self.skip_first_layer_pe:
170
+ queries = self.self_attn(q=queries, k=queries, v=queries)
171
+ else:
172
+ q = queries + query_pe
173
+ attn_out = self.self_attn(q=q, k=q, v=queries)
174
+ queries = queries + attn_out
175
+ queries = self.norm1(queries)
176
+
177
+ # Cross attention block, tokens attending to image embedding
178
+ q = queries + query_pe
179
+ k = keys + key_pe
180
+ attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys)
181
+ queries = queries + attn_out
182
+ queries = self.norm2(queries)
183
+
184
+ # MLP block
185
+ mlp_out = self.mlp(queries)
186
+ queries = queries + mlp_out
187
+ queries = self.norm3(queries)
188
+
189
+ # Cross attention block, image embedding attending to tokens
190
+ q = queries + query_pe
191
+ k = keys + key_pe
192
+ attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries)
193
+ keys = keys + attn_out
194
+ keys = self.norm4(keys)
195
+
196
+ return queries, keys
197
+
198
+
199
+ class Attention(nn.Module):
200
+ """
201
+ An attention layer that allows for downscaling the size of the embedding
202
+ after projection to queries, keys, and values.
203
+ """
204
+
205
+ def __init__(
206
+ self,
207
+ embedding_dim: int,
208
+ num_heads: int,
209
+ downsample_rate: int = 1,
210
+ dropout: float = 0.0,
211
+ kv_in_dim: int = None,
212
+ ) -> None:
213
+ super().__init__()
214
+ self.embedding_dim = embedding_dim
215
+ self.kv_in_dim = kv_in_dim if kv_in_dim is not None else embedding_dim
216
+ self.internal_dim = embedding_dim // downsample_rate
217
+ self.num_heads = num_heads
218
+ assert (
219
+ self.internal_dim % num_heads == 0
220
+ ), "num_heads must divide embedding_dim."
221
+
222
+ self.q_proj = nn.Linear(embedding_dim, self.internal_dim)
223
+ self.k_proj = nn.Linear(self.kv_in_dim, self.internal_dim)
224
+ self.v_proj = nn.Linear(self.kv_in_dim, self.internal_dim)
225
+ self.out_proj = nn.Linear(self.internal_dim, embedding_dim)
226
+
227
+ self.dropout_p = dropout
228
+
229
+ def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor:
230
+ b, n, c = x.shape
231
+ x = x.reshape(b, n, num_heads, c // num_heads)
232
+ return x.transpose(1, 2) # B x N_heads x N_tokens x C_per_head
233
+
234
+ def _recombine_heads(self, x: Tensor) -> Tensor:
235
+ b, n_heads, n_tokens, c_per_head = x.shape
236
+ x = x.transpose(1, 2)
237
+ return x.reshape(b, n_tokens, n_heads * c_per_head) # B x N_tokens x C
238
+
239
+ def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor:
240
+ # Input projections
241
+ q = self.q_proj(q)
242
+ k = self.k_proj(k)
243
+ v = self.v_proj(v)
244
+
245
+ # Separate into heads
246
+ q = self._separate_heads(q, self.num_heads)
247
+ k = self._separate_heads(k, self.num_heads)
248
+ v = self._separate_heads(v, self.num_heads)
249
+
250
+ dropout_p = self.dropout_p if self.training else 0.0
251
+ # Attention
252
+ with torch.backends.cuda.sdp_kernel(
253
+ enable_flash=USE_FLASH_ATTN,
254
+ # if Flash attention kernel is off, then math kernel needs to be enabled
255
+ enable_math=(OLD_GPU and dropout_p > 0.0) or MATH_KERNEL_ON,
256
+ enable_mem_efficient=OLD_GPU,
257
+ ):
258
+ out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p)
259
+
260
+ out = self._recombine_heads(out)
261
+ out = self.out_proj(out)
262
+
263
+ return out
264
+
265
+
266
+ class RoPEAttention(Attention):
267
+ """Attention with rotary position encoding."""
268
+
269
+ def __init__(
270
+ self,
271
+ *args,
272
+ rope_theta=10000.0,
273
+ # whether to repeat q rope to match k length
274
+ # this is needed for cross-attention to memories
275
+ rope_k_repeat=False,
276
+ feat_sizes=(32, 32), # [w, h] for stride 16 feats at 512 resolution
277
+ **kwargs,
278
+ ):
279
+ super().__init__(*args, **kwargs)
280
+
281
+ self.compute_cis = partial(
282
+ compute_axial_cis, dim=self.internal_dim // self.num_heads, theta=rope_theta
283
+ )
284
+ freqs_cis = self.compute_cis(end_x=feat_sizes[0], end_y=feat_sizes[1])
285
+ self.freqs_cis = freqs_cis
286
+ self.rope_k_repeat = rope_k_repeat
287
+
288
+ def forward(
289
+ self, q: Tensor, k: Tensor, v: Tensor, num_k_exclude_rope: int = 0
290
+ ) -> Tensor:
291
+ # Input projections
292
+ q = self.q_proj(q)
293
+ k = self.k_proj(k)
294
+ v = self.v_proj(v)
295
+
296
+ # Separate into heads
297
+ q = self._separate_heads(q, self.num_heads)
298
+ k = self._separate_heads(k, self.num_heads)
299
+ v = self._separate_heads(v, self.num_heads)
300
+
301
+ # Apply rotary position encoding
302
+ w = h = math.sqrt(q.shape[-2])
303
+ self.freqs_cis = self.freqs_cis.to(q.device)
304
+ if self.freqs_cis.shape[0] != q.shape[-2]:
305
+ self.freqs_cis = self.compute_cis(end_x=w, end_y=h).to(q.device)
306
+ if q.shape[-2] != k.shape[-2]:
307
+ assert self.rope_k_repeat
308
+
309
+ num_k_rope = k.size(-2) - num_k_exclude_rope
310
+ q, k[:, :, :num_k_rope] = apply_rotary_enc(
311
+ q,
312
+ k[:, :, :num_k_rope],
313
+ freqs_cis=self.freqs_cis,
314
+ repeat_freqs_k=self.rope_k_repeat,
315
+ )
316
+
317
+ dropout_p = self.dropout_p if self.training else 0.0
318
+ # Attention
319
+ with torch.backends.cuda.sdp_kernel(
320
+ enable_flash=USE_FLASH_ATTN,
321
+ # if Flash attention kernel is off, then math kernel needs to be enabled
322
+ enable_math=(OLD_GPU and dropout_p > 0.0) or MATH_KERNEL_ON,
323
+ enable_mem_efficient=OLD_GPU,
324
+ ):
325
+ out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p)
326
+
327
+ out = self._recombine_heads(out)
328
+ out = self.out_proj(out)
329
+
330
+ return out
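Editor's sketch (not part of the uploaded file): the token-to-image attention used throughout the two-way transformer above, shown with downsample_rate=2 as in the cross-attention blocks; token counts are illustrative.

import torch
from sam2.modeling.sam.transformer import Attention

attn = Attention(embedding_dim=256, num_heads=8, downsample_rate=2)  # projects 256 -> 128 internally
tokens = torch.randn(1, 7, 256)        # e.g. output tokens + prompt tokens
image = torch.randn(1, 64 * 64, 256)   # flattened image embedding
out = attn(q=tokens, k=image, v=image)
print(out.shape)                       # torch.Size([1, 7, 256]) - projected back to embedding_dim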
SAM2/sam2/modeling/sam2_base.py ADDED
@@ -0,0 +1,831 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import torch
8
+ import torch.distributed
9
+ import torch.nn.functional as F
10
+
11
+ from torch.nn.init import trunc_normal_
12
+
13
+ from sam2.modeling.sam.mask_decoder import MaskDecoder
14
+ from sam2.modeling.sam.prompt_encoder import PromptEncoder
15
+ from sam2.modeling.sam.transformer import TwoWayTransformer
16
+ from sam2.modeling.sam2_utils import get_1d_sine_pe, MLP, select_closest_cond_frames
17
+
18
+
19
+ # a large negative value as a placeholder score for missing objects
20
+ NO_OBJ_SCORE = -1024.0
21
+
22
+
23
+ class SAM2Base(torch.nn.Module):
24
+ def __init__(
25
+ self,
26
+ image_encoder,
27
+ memory_attention,
28
+ memory_encoder,
29
+ num_maskmem=7, # default 1 input frame + 6 previous frames
30
+ image_size=512,
31
+ backbone_stride=16, # stride of the image backbone output
32
+ sigmoid_scale_for_mem_enc=1.0, # scale factor for mask sigmoid prob
33
+ sigmoid_bias_for_mem_enc=0.0, # bias factor for mask sigmoid prob
34
+ # During evaluation, whether to binarize the sigmoid mask logits on interacted frames with clicks
35
+ binarize_mask_from_pts_for_mem_enc=False,
36
+ use_mask_input_as_output_without_sam=False, # on frames with mask input, whether to directly output the input mask without using a SAM prompt encoder + mask decoder
37
+ # The maximum number of conditioning frames to participate in the memory attention (-1 means no limit; if there are more conditioning frames than this limit,
38
+ # we only cross-attend to the temporally closest `max_cond_frames_in_attn` conditioning frames in the encoder when tracking each frame). This gives the model
39
+ # a temporal locality when handling a large number of annotated frames (since closer frames should be more important) and also avoids GPU OOM.
40
+ max_cond_frames_in_attn=-1, # number of conditioning frames taken from the memory bank and fed into memory attention; -1 means all of them are used
41
+ # on the first frame, whether to directly add the no-memory embedding to the image feature
42
+ # (instead of using the transformer encoder)
43
+ directly_add_no_mem_embed=False,
44
+ # whether to use high-resolution feature maps in the SAM mask decoder
45
+ use_high_res_features_in_sam=False,
46
+ # whether to output multiple (3) masks for the first click on initial conditioning frames
47
+ multimask_output_in_sam=False,
48
+ # the minimum and maximum number of clicks to use multimask_output_in_sam (only relevant when `multimask_output_in_sam=True`;
49
+ # default is 1 for both, meaning that only the first click gives multimask output; also note that a box counts as two points)
50
+ multimask_min_pt_num=1,
51
+ multimask_max_pt_num=1,
52
+ # whether to also use multimask output for tracking (not just for the first click on initial conditioning frames; only relevant when `multimask_output_in_sam=True`)
53
+ multimask_output_for_tracking=False,
54
+ # Whether to use multimask tokens for obj ptr; Only relevant when both
55
+ # use_obj_ptrs_in_encoder=True and multimask_output_for_tracking=True
56
+ use_multimask_token_for_obj_ptr: bool = False,
57
+ # whether to use sigmoid to restrict ious prediction to [0-1]
58
+ iou_prediction_use_sigmoid=False,
59
+ # The memory bank's temporal stride during evaluation (i.e. the `r` parameter in XMem and Cutie; XMem and Cutie use r=5).
60
+ # For r>1, the (self.num_maskmem - 1) non-conditioning memory frames consist of
61
+ # (self.num_maskmem - 2) nearest frames from every r-th frames, plus the last frame.
62
+ memory_temporal_stride_for_eval=1,
63
+ # if `add_all_frames_to_correct_as_cond` is True, we also append to the conditioning frame list any frame that receives a later correction click
64
+ # if `add_all_frames_to_correct_as_cond` is False, we restrict the conditioning frame list to only the initial conditioning frames
65
+ add_all_frames_to_correct_as_cond=False,
66
+ # whether to apply non-overlapping constraints on the object masks in the memory encoder during evaluation (to avoid/alleviate superposing masks)
67
+ non_overlap_masks_for_mem_enc=False,
68
+ # whether to cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
69
+ use_obj_ptrs_in_encoder=False,
70
+ # the maximum number of object pointers from other frames in encoder cross attention (only relevant when `use_obj_ptrs_in_encoder=True`)
71
+ max_obj_ptrs_in_encoder=16,
72
+ # whether to add temporal positional encoding to the object pointers in the encoder (only relevant when `use_obj_ptrs_in_encoder=True`)
73
+ add_tpos_enc_to_obj_ptrs=True,
74
+ # whether to add an extra linear projection layer for the temporal positional encoding in the object pointers to avoid potential interference
75
+ # with spatial positional encoding (only relevant when both `use_obj_ptrs_in_encoder=True` and `add_tpos_enc_to_obj_ptrs=True`)
76
+ proj_tpos_enc_in_obj_ptrs=False,
77
+ # whether to only attend to object pointers in the past (before the current frame) in the encoder during evaluation
78
+ # (only relevant when `use_obj_ptrs_in_encoder=True`; this might avoid pointer information too far in the future to distract the initial tracking)
79
+ only_obj_ptrs_in_the_past_for_eval=False,
80
+ # Whether to predict if there is an object in the frame
81
+ pred_obj_scores: bool = False,
82
+ # Whether to use an MLP to predict object scores
83
+ pred_obj_scores_mlp: bool = False,
84
+ # Only relevant if pred_obj_scores=True and use_obj_ptrs_in_encoder=True;
85
+ # Whether to have a fixed no obj pointer when there is no object present
86
+ # or to use it as an additive embedding with obj_ptr produced by decoder
87
+ fixed_no_obj_ptr: bool = False,
88
+ # Soft no object, i.e. mix in no_obj_ptr softly,
89
+ # hoping to make recovery easier if there is a mistake and to mitigate accumulation of errors
90
+ soft_no_obj_ptr: bool = False,
91
+ use_mlp_for_obj_ptr_proj: bool = False,
92
+ # extra arguments used to construct the SAM mask decoder; if not None, it should be a dict of kwargs to be passed into `MaskDecoder` class.
93
+ sam_mask_decoder_extra_args=None,
94
+ compile_image_encoder: bool = False,
95
+ ):
96
+ super().__init__()
97
+
98
+ # Part 1: the image backbone
99
+ self.image_encoder = image_encoder
100
+ # Use level 0, 1, 2 for high-res setting, or just level 2 for the default setting
101
+ self.use_high_res_features_in_sam = use_high_res_features_in_sam
102
+ self.num_feature_levels = 3 if use_high_res_features_in_sam else 1
103
+ self.use_obj_ptrs_in_encoder = use_obj_ptrs_in_encoder
104
+ self.max_obj_ptrs_in_encoder = max_obj_ptrs_in_encoder
105
+ if use_obj_ptrs_in_encoder:
106
+ # A conv layer to downsample the mask prompt to stride 4 (the same stride as
107
+ # low-res SAM mask logits) and to change its scales from 0~1 to SAM logit scale,
108
+ # so that it can be fed into the SAM mask decoder to generate a pointer.
109
+ self.mask_downsample = torch.nn.Conv2d(1, 1, kernel_size=4, stride=4)
110
+ self.add_tpos_enc_to_obj_ptrs = add_tpos_enc_to_obj_ptrs
111
+ if proj_tpos_enc_in_obj_ptrs:
112
+ assert add_tpos_enc_to_obj_ptrs # these options need to be used together
113
+ self.proj_tpos_enc_in_obj_ptrs = proj_tpos_enc_in_obj_ptrs
114
+ self.only_obj_ptrs_in_the_past_for_eval = only_obj_ptrs_in_the_past_for_eval
115
+
116
+ # Part 2: memory attention to condition current frame's visual features
117
+ # with memories (and obj ptrs) from past frames
118
+ self.memory_attention = memory_attention
119
+ self.hidden_dim = memory_attention.d_model
120
+
121
+ # Part 3: memory encoder for the previous frame's outputs
122
+ self.memory_encoder = memory_encoder
123
+ self.mem_dim = self.hidden_dim
124
+ if hasattr(self.memory_encoder, "out_proj") and hasattr(
125
+ self.memory_encoder.out_proj, "weight"
126
+ ):
127
+ # if there is compression of memories along channel dim
128
+ self.mem_dim = self.memory_encoder.out_proj.weight.shape[0]
129
+ self.num_maskmem = num_maskmem # Number of memories accessible
130
+ # Temporal encoding of the memories
131
+ self.maskmem_tpos_enc = torch.nn.Parameter(
132
+ torch.zeros(num_maskmem, 1, 1, self.mem_dim)
133
+ )
134
+ trunc_normal_(self.maskmem_tpos_enc, std=0.02)
135
+ # a single token to indicate no memory embedding from previous frames
136
+ self.no_mem_embed = torch.nn.Parameter(torch.zeros(1, 1, self.hidden_dim))
137
+ self.no_mem_pos_enc = torch.nn.Parameter(torch.zeros(1, 1, self.hidden_dim))
138
+ trunc_normal_(self.no_mem_embed, std=0.02)
139
+ trunc_normal_(self.no_mem_pos_enc, std=0.02)
140
+ self.directly_add_no_mem_embed = directly_add_no_mem_embed
141
+ # Apply sigmoid to the output raw mask logits (to turn them from
142
+ # range (-inf, +inf) to range (0, 1)) before feeding them into the memory encoder
143
+ self.sigmoid_scale_for_mem_enc = sigmoid_scale_for_mem_enc
144
+ self.sigmoid_bias_for_mem_enc = sigmoid_bias_for_mem_enc
145
+ self.binarize_mask_from_pts_for_mem_enc = binarize_mask_from_pts_for_mem_enc
146
+ self.non_overlap_masks_for_mem_enc = non_overlap_masks_for_mem_enc
147
+ self.memory_temporal_stride_for_eval = memory_temporal_stride_for_eval
148
+ # On frames with mask input, whether to directly output the input mask without
149
+ # using a SAM prompt encoder + mask decoder
150
+ self.use_mask_input_as_output_without_sam = use_mask_input_as_output_without_sam
151
+ self.multimask_output_in_sam = multimask_output_in_sam
152
+ self.multimask_min_pt_num = multimask_min_pt_num
153
+ self.multimask_max_pt_num = multimask_max_pt_num
154
+ self.multimask_output_for_tracking = multimask_output_for_tracking
155
+ self.use_multimask_token_for_obj_ptr = use_multimask_token_for_obj_ptr
156
+ self.iou_prediction_use_sigmoid = iou_prediction_use_sigmoid
157
+
158
+ # Part 4: SAM-style prompt encoder (for both mask and point inputs)
159
+ # and SAM-style mask decoder for the final mask output
160
+ self.image_size = image_size
161
+ self.backbone_stride = backbone_stride
162
+ self.sam_mask_decoder_extra_args = sam_mask_decoder_extra_args
163
+ self.pred_obj_scores = pred_obj_scores
164
+ self.pred_obj_scores_mlp = pred_obj_scores_mlp
165
+ self.fixed_no_obj_ptr = fixed_no_obj_ptr
166
+ self.soft_no_obj_ptr = soft_no_obj_ptr
167
+ if self.fixed_no_obj_ptr:
168
+ assert self.pred_obj_scores
169
+ assert self.use_obj_ptrs_in_encoder
170
+ if self.pred_obj_scores and self.use_obj_ptrs_in_encoder:
171
+ self.no_obj_ptr = torch.nn.Parameter(torch.zeros(1, self.hidden_dim))
172
+ trunc_normal_(self.no_obj_ptr, std=0.02)
173
+ self.use_mlp_for_obj_ptr_proj = use_mlp_for_obj_ptr_proj
174
+
175
+ self._build_sam_heads()
176
+ self.add_all_frames_to_correct_as_cond = add_all_frames_to_correct_as_cond
177
+ self.max_cond_frames_in_attn = max_cond_frames_in_attn
178
+
179
+ # Model compilation
180
+ if compile_image_encoder:
181
+ # Compile the forward function (not the full module) to allow loading checkpoints.
182
+ print(
183
+ "Image encoder compilation is enabled. First forward pass will be slow."
184
+ )
185
+ self.image_encoder.forward = torch.compile(
186
+ self.image_encoder.forward,
187
+ mode="max-autotune",
188
+ fullgraph=True,
189
+ dynamic=False,
190
+ )
191
+
192
+ @property
193
+ def device(self):
194
+ return next(self.parameters()).device
195
+
196
+ def forward(self, *args, **kwargs):
197
+ raise NotImplementedError(
198
+ "Please use the corresponding methods in SAM2VideoPredictor for inference."
199
+ "See notebooks/video_predictor_example.ipynb for an example."
200
+ )
201
+
202
+ def _build_sam_heads(self):
203
+ """Build SAM-style prompt encoder and mask decoder."""
204
+ self.sam_prompt_embed_dim = self.hidden_dim
205
+ self.sam_image_embedding_size = self.image_size // self.backbone_stride
206
+
207
+ # build PromptEncoder and MaskDecoder from SAM
208
+ # (their hyperparameters like `mask_in_chans=16` are from SAM code)
209
+ self.sam_prompt_encoder = PromptEncoder(
210
+ embed_dim=self.sam_prompt_embed_dim,
211
+ image_embedding_size=(
212
+ self.sam_image_embedding_size,
213
+ self.sam_image_embedding_size,
214
+ ),
215
+ input_image_size=(self.image_size, self.image_size),
216
+ mask_in_chans=16,
217
+ )
218
+ self.sam_mask_decoder = MaskDecoder(
219
+ num_multimask_outputs=3,
220
+ transformer=TwoWayTransformer(
221
+ depth=2,
222
+ embedding_dim=self.sam_prompt_embed_dim,
223
+ mlp_dim=2048,
224
+ num_heads=8,
225
+ ),
226
+ transformer_dim=self.sam_prompt_embed_dim,
227
+ iou_head_depth=3,
228
+ iou_head_hidden_dim=256,
229
+ use_high_res_features=self.use_high_res_features_in_sam,
230
+ iou_prediction_use_sigmoid=self.iou_prediction_use_sigmoid,
231
+ pred_obj_scores=self.pred_obj_scores,
232
+ pred_obj_scores_mlp=self.pred_obj_scores_mlp,
233
+ use_multimask_token_for_obj_ptr=self.use_multimask_token_for_obj_ptr,
234
+ **(self.sam_mask_decoder_extra_args or {}),
235
+ )
236
+ if self.use_obj_ptrs_in_encoder:
237
+ # a linear projection on SAM output tokens to turn them into object pointers
238
+ self.obj_ptr_proj = torch.nn.Linear(self.hidden_dim, self.hidden_dim)
239
+ if self.use_mlp_for_obj_ptr_proj:
240
+ self.obj_ptr_proj = MLP(
241
+ self.hidden_dim, self.hidden_dim, self.hidden_dim, 3
242
+ )
243
+ else:
244
+ self.obj_ptr_proj = torch.nn.Identity()
245
+ if self.proj_tpos_enc_in_obj_ptrs:
246
+ # a linear projection on temporal positional encoding in object pointers to
247
+ # avoid potential interference with spatial positional encoding
248
+ self.obj_ptr_tpos_proj = torch.nn.Linear(self.hidden_dim, self.mem_dim)
249
+ else:
250
+ self.obj_ptr_tpos_proj = torch.nn.Identity()
251
+
252
+ def _forward_sam_heads(
253
+ self,
254
+ backbone_features,
255
+ point_inputs=None,
256
+ mask_inputs=None,
257
+ high_res_features=None,
258
+ multimask_output=False,
259
+ ):
260
+ """
261
+ Forward SAM prompt encoders and mask heads.
262
+
263
+ Inputs:
264
+ - backbone_features: image features of [B, C, H, W] shape
265
+ - point_inputs: a dictionary with "point_coords" and "point_labels", where
266
+ 1) "point_coords" has [B, P, 2] shape and float32 dtype and contains the
267
+ absolute pixel-unit coordinate in (x, y) format of the P input points
268
+ 2) "point_labels" has shape [B, P] and int32 dtype, where 1 means
269
+ positive clicks, 0 means negative clicks, and -1 means padding
270
+ - mask_inputs: a mask of [B, 1, H*16, W*16] shape, float or bool, with the
271
+ same spatial size as the image.
272
+ - high_res_features: either 1) None or 2) a list of length 2 containing
273
+ two feature maps of [B, C, 4*H, 4*W] and [B, C, 2*H, 2*W] shapes respectively,
274
+ which will be used as high-resolution feature maps for SAM decoder.
275
+ - multimask_output: if it's True, we output 3 candidate masks and their 3
276
+ corresponding IoU estimates, and if it's False, we output only 1 mask and
277
+ its corresponding IoU estimate.
278
+
279
+ Outputs:
280
+ - low_res_multimasks: [B, M, H*4, W*4] shape (where M = 3 if
281
+ `multimask_output=True` and M = 1 if `multimask_output=False`), the SAM
282
+ output mask logits (before sigmoid) for the low-resolution masks, with 4x
283
+ the resolution (1/4 stride) of the input backbone_features.
284
+ - high_res_multimasks: [B, M, H*16, W*16] shape (where M = 3
285
+ if `multimask_output=True` and M = 1 if `multimask_output=False`),
286
+ upsampled from the low-resolution masks, with the same spatial size as the image
287
+ (stride is 1 pixel).
288
+ - ious: [B, M] shape (where M = 3 if `multimask_output=True` and M = 1
289
+ if `multimask_output=False`), the estimated IoU of each output mask.
290
+ - low_res_masks: [B, 1, H*4, W*4] shape, the best mask in `low_res_multimasks`.
291
+ If `multimask_output=True`, it's the mask with the highest IoU estimate.
292
+ If `multimask_output=False`, it's the same as `low_res_multimasks`.
293
+ - high_res_masks: [B, 1, H*16, W*16] shape, the best mask in `high_res_multimasks`.
294
+ If `multimask_output=True`, it's the mask with the highest IoU estimate.
295
+ If `multimask_output=False`, it's the same as `high_res_multimasks`.
296
+ - obj_ptr: [B, C] shape, the object pointer vector for the output mask, extracted
297
+ based on the output token from the SAM mask decoder.
298
+ """
299
+ B = backbone_features.size(0)
300
+ device = backbone_features.device
301
+ assert backbone_features.size(1) == self.sam_prompt_embed_dim
302
+ assert backbone_features.size(2) == self.sam_image_embedding_size
303
+ assert backbone_features.size(3) == self.sam_image_embedding_size
304
+
305
+ # a) Handle point prompts
306
+ if point_inputs is not None:
307
+ sam_point_coords = point_inputs["point_coords"]
308
+ sam_point_labels = point_inputs["point_labels"]
309
+ assert sam_point_coords.size(0) == B and sam_point_labels.size(0) == B
310
+ else:
311
+ # If no points are provided, pad with an empty point (with label -1)
312
+ sam_point_coords = torch.zeros(B, 1, 2, device=device)
313
+ sam_point_labels = -torch.ones(B, 1, dtype=torch.int32, device=device)
314
+
315
+ # b) Handle mask prompts
316
+ if mask_inputs is not None:
317
+ # If mask_inputs is provided, downsize it into low-res mask input if needed
318
+ # and feed it as a dense mask prompt into the SAM mask encoder
319
+ assert len(mask_inputs.shape) == 4 and mask_inputs.shape[:2] == (B, 1)
320
+ if mask_inputs.shape[-2:] != self.sam_prompt_encoder.mask_input_size:
321
+ sam_mask_prompt = F.interpolate(
322
+ mask_inputs.float(),
323
+ size=self.sam_prompt_encoder.mask_input_size,
324
+ align_corners=False,
325
+ mode="bilinear",
326
+ antialias=True, # use antialias for downsampling
327
+ )
328
+ else:
329
+ sam_mask_prompt = mask_inputs
330
+ else:
331
+ # Otherwise, simply feed None (and SAM's prompt encoder will add
332
+ # a learned `no_mask_embed` to indicate no mask input in this case).
333
+ sam_mask_prompt = None
334
+
335
+ sparse_embeddings, dense_embeddings = self.sam_prompt_encoder(
336
+ points=(sam_point_coords, sam_point_labels),
337
+ boxes=None,
338
+ masks=sam_mask_prompt,
339
+ )
340
+ (
341
+ low_res_multimasks,
342
+ ious,
343
+ sam_output_tokens,
344
+ object_score_logits,
345
+ ) = self.sam_mask_decoder(
346
+ image_embeddings=backbone_features, # from memory attention
347
+ image_pe=self.sam_prompt_encoder.get_dense_pe(),
348
+ sparse_prompt_embeddings=sparse_embeddings,
349
+ dense_prompt_embeddings=dense_embeddings,
350
+ multimask_output=multimask_output,
351
+ repeat_image=False, # the image is already batched
352
+ high_res_features=high_res_features,
353
+ )
354
+ if self.pred_obj_scores:
355
+ is_obj_appearing = object_score_logits > 0
356
+
357
+ # Mask used for spatial memories is always a *hard* choice between obj and no obj,
358
+ # consistent with the actual mask prediction
359
+ low_res_multimasks = torch.where(
360
+ is_obj_appearing[:, None, None],
361
+ low_res_multimasks,
362
+ NO_OBJ_SCORE,
363
+ )
364
+
365
+ # convert masks from possibly bfloat16 (or float16) to float32
366
+ # (older PyTorch versions before 2.1 don't support `interpolate` on bf16)
367
+ low_res_multimasks = low_res_multimasks.float()
368
+ high_res_multimasks = F.interpolate(
369
+ low_res_multimasks,
370
+ size=(self.image_size, self.image_size),
371
+ mode="bilinear",
372
+ align_corners=False,
373
+ )
374
+
375
+ sam_output_token = sam_output_tokens[:, 0]
376
+ if multimask_output:
377
+ # take the best mask prediction (with the highest IoU estimation)
378
+ best_iou_inds = torch.argmax(ious, dim=-1)
379
+ batch_inds = torch.arange(B, device=device)
380
+ low_res_masks = low_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1)
381
+ high_res_masks = high_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1)
382
+ if sam_output_tokens.size(1) > 1:
383
+ sam_output_token = sam_output_tokens[batch_inds, best_iou_inds]
384
+ else:
385
+ low_res_masks, high_res_masks = low_res_multimasks, high_res_multimasks
386
+
387
+ # Extract object pointer from the SAM output token (with occlusion handling)
388
+ obj_ptr = self.obj_ptr_proj(sam_output_token)
389
+ if self.pred_obj_scores:
390
+ # Allow *soft* no obj ptr, unlike for masks
391
+ if self.soft_no_obj_ptr:
392
+ # Only hard possible with gt
393
+ assert not self.teacher_force_obj_scores_for_mem
394
+ lambda_is_obj_appearing = object_score_logits.sigmoid()
395
+ else:
396
+ lambda_is_obj_appearing = is_obj_appearing.float()
397
+
398
+ if self.fixed_no_obj_ptr:
399
+ obj_ptr = lambda_is_obj_appearing * obj_ptr
400
+ obj_ptr = obj_ptr + (1 - lambda_is_obj_appearing) * self.no_obj_ptr
401
+
402
+ return (
403
+ low_res_multimasks,
404
+ high_res_multimasks,
405
+ ious,
406
+ low_res_masks,
407
+ high_res_masks,
408
+ obj_ptr,
409
+ object_score_logits,
410
+ )
411
+
412
+ def _use_mask_as_output(self, backbone_features, high_res_features, mask_inputs):
413
+ """
414
+ Directly turn binary `mask_inputs` into output mask logits without using SAM.
415
+ (same input and output shapes as in _forward_sam_heads above).
416
+ """
417
+ # Use -10/+10 as logits for neg/pos pixels (very close to 0/1 in prob after sigmoid).
418
+ out_scale, out_bias = 20.0, -10.0 # sigmoid(-10.0)=4.5398e-05
419
+ mask_inputs_float = mask_inputs.float()
420
+ high_res_masks = mask_inputs_float * out_scale + out_bias
421
+ low_res_masks = F.interpolate(
422
+ high_res_masks,
423
+ size=(high_res_masks.size(-2) // 4, high_res_masks.size(-1) // 4),
424
+ align_corners=False,
425
+ mode="bilinear",
426
+ antialias=True, # use antialias for downsampling
427
+ )
428
+ # a dummy IoU prediction of all 1's under mask input
429
+ ious = mask_inputs.new_ones(mask_inputs.size(0), 1).float()
430
+ if not self.use_obj_ptrs_in_encoder:
431
+ # all zeros as a dummy object pointer (of shape [B, C])
432
+ obj_ptr = torch.zeros(
433
+ mask_inputs.size(0), self.hidden_dim, device=mask_inputs.device
434
+ )
435
+ else:
436
+ # produce an object pointer using the SAM decoder from the mask input
437
+ _, _, _, _, _, obj_ptr, _ = self._forward_sam_heads(
438
+ backbone_features=backbone_features,
439
+ mask_inputs=self.mask_downsample(mask_inputs_float),
440
+ high_res_features=high_res_features,
441
+ )
442
+ # In this method, we are treating mask_input as output, e.g. using it directly to create spatial mem;
443
+ # Below, we follow the same design axiom to use mask_input to decide if obj appears or not instead of relying
444
+ # on the object_scores from the SAM decoder.
445
+ is_obj_appearing = torch.any(mask_inputs.flatten(1).float() > 0.0, dim=1)
446
+ is_obj_appearing = is_obj_appearing[..., None]
447
+ lambda_is_obj_appearing = is_obj_appearing.float()
448
+ object_score_logits = out_scale * lambda_is_obj_appearing + out_bias
449
+ if self.pred_obj_scores:
450
+ if self.fixed_no_obj_ptr:
451
+ obj_ptr = lambda_is_obj_appearing * obj_ptr
452
+ obj_ptr = obj_ptr + (1 - lambda_is_obj_appearing) * self.no_obj_ptr
453
+
454
+ return (
455
+ low_res_masks,
456
+ high_res_masks,
457
+ ious,
458
+ low_res_masks,
459
+ high_res_masks,
460
+ obj_ptr,
461
+ object_score_logits,
462
+ )
463
+
464
+ def forward_image(self, img_batch: torch.Tensor):
465
+ """Get the image feature on the input batch."""
466
+ backbone_out = self.image_encoder(img_batch)
467
+ if self.use_high_res_features_in_sam:
468
+ # precompute projected level 0 and level 1 features in SAM decoder
469
+ # to avoid running it again on every SAM click
470
+ backbone_out["backbone_fpn"][0] = self.sam_mask_decoder.conv_s0(
471
+ backbone_out["backbone_fpn"][0]
472
+ )
473
+ backbone_out["backbone_fpn"][1] = self.sam_mask_decoder.conv_s1(
474
+ backbone_out["backbone_fpn"][1]
475
+ )
476
+ return backbone_out
477
+
478
+ def _prepare_backbone_features(self, backbone_out):
479
+ """Prepare and flatten visual features."""
480
+ backbone_out = backbone_out.copy()
481
+ assert len(backbone_out["backbone_fpn"]) == len(backbone_out["vision_pos_enc"])
482
+ assert len(backbone_out["backbone_fpn"]) >= self.num_feature_levels
483
+
484
+ feature_maps = backbone_out["backbone_fpn"][-self.num_feature_levels :]
485
+ vision_pos_embeds = backbone_out["vision_pos_enc"][-self.num_feature_levels :]
486
+
487
+ feat_sizes = [(x.shape[-2], x.shape[-1]) for x in vision_pos_embeds]
488
+ # flatten NxCxHxW to HWxNxC
489
+ vision_feats = [x.flatten(2).permute(2, 0, 1) for x in feature_maps]
490
+ vision_pos_embeds = [x.flatten(2).permute(2, 0, 1) for x in vision_pos_embeds]
491
+
492
+ return backbone_out, vision_feats, vision_pos_embeds, feat_sizes
493
+
494
+ def _prepare_memory_conditioned_features( # calls Memory Attention to fuse the image embedding with memories from the memory bank
495
+ self,
496
+ frame_idx,
497
+ is_init_cond_frame,
498
+ current_vision_feats,
499
+ current_vision_pos_embeds,
500
+ feat_sizes,
501
+ output_dict,
502
+ num_frames,
503
+ track_in_reverse=False, # tracking in reverse time order (for demo usage)
504
+ ):
505
+ """Fuse the current frame's visual feature map with previous memory."""
506
+ B = current_vision_feats[-1].size(1) # batch size on this frame
507
+ C = self.hidden_dim
508
+ H, W = feat_sizes[-1] # top-level (lowest-resolution) feature size
509
+ device = current_vision_feats[-1].device
510
+ # The case of `self.num_maskmem == 0` below is primarily used for reproducing SAM on images.
511
+ # In this case, we skip the fusion with any memory.
512
+ if self.num_maskmem == 0: # Disable memory and skip fusion, i.e. do not use the memory bank and degenerate to plain SAM; self.num_maskmem is the size of the memory bank
513
+ pix_feat = current_vision_feats[-1].permute(1, 2, 0).view(B, C, H, W)
514
+ return pix_feat
515
+ # ############################# use the memory bank #############################
516
+ num_obj_ptr_tokens = 0
517
+ # Step 1: condition the visual features of the current frame on previous memories
518
+ if not is_init_cond_frame:
519
+ # Retrieve the memories encoded with the maskmem backbone
520
+ to_cat_memory, to_cat_memory_pos_embed = [], []
521
+ # Add conditioning frames's output first (all cond frames have t_pos=0 for
522
+ # when getting temporal positional embedding below)
523
+ assert len(output_dict["cond_frame_outputs"]) > 0
524
+ # Select a maximum number of temporally closest cond frames for cross attention
525
+ cond_outputs = output_dict["cond_frame_outputs"]
526
+ selected_cond_outputs, unselected_cond_outputs = select_closest_cond_frames( # select the max_cond_frames_in_attn conditioning memories closest to the current frame_idx from the memory bank
527
+ frame_idx, cond_outputs, self.max_cond_frames_in_attn # self.max_cond_frames_in_attn=-1 means all conditioning memories are taken from the memory bank and fed into memory attention
528
+ ) # selected_cond_outputs and unselected_cond_outputs are the memories chosen from the memory bank and the remaining ones, respectively
529
+ t_pos_and_prevs = [(0, out) for out in selected_cond_outputs.values()] # first collect all conditioning frames (i.e. frames that received prompts); the for loop below then adds non-conditioning frames
530
+ # Add last (self.num_maskmem - 1) frames before current frame for non-conditioning memory
531
+ # the earliest one has t_pos=1 and the latest one has t_pos=self.num_maskmem-1
532
+ # We also allow taking the memory frame non-consecutively (with r>1), in which case
533
+ # we take (self.num_maskmem - 2) frames among every r-th frames plus the last frame.
534
+ r = self.memory_temporal_stride_for_eval # temporal stride; r=1 means consecutive frame memories are taken
535
+ for t_pos in range(1, self.num_maskmem): # self.num_maskmem = 7 is the number of memories taken from the memory bank; t_pos indicates the position of the frame being processed within this num_maskmem window
536
+ t_rel = self.num_maskmem - t_pos # how many frames before current frame
537
+ if t_rel == 1: # t_rel is the relative distance (in frames) to the current frame
538
+ # for t_rel == 1, we take the last frame (regardless of r)
539
+ if not track_in_reverse:
540
+ # the frame immediately before this frame (i.e. frame_idx - 1)
541
+ prev_frame_idx = frame_idx - t_rel
542
+ else:
543
+ # the frame immediately after this frame (i.e. frame_idx + 1)
544
+ prev_frame_idx = frame_idx + t_rel
545
+ else:
546
+ # for t_rel >= 2, we take the memory frame from every r-th frames
547
+ if not track_in_reverse:
548
+ # first find the nearest frame among every r-th frames before this frame
549
+ # for r=1, this would be (frame_idx - 2)
550
+ prev_frame_idx = ((frame_idx - 2) // r) * r # (frame_idx - 2) first steps back two frames from the current frame
551
+ # then seek further among every r-th frames
552
+ prev_frame_idx = prev_frame_idx - (t_rel - 2) * r # then step back another (t_rel - 2) frames, each step of stride r (see the illustrative example below)
553
+ else:
554
+ # first find the nearest frame among every r-th frames after this frame
555
+ # for r=1, this would be (frame_idx + 2)
556
+ prev_frame_idx = -(-(frame_idx + 2) // r) * r
557
+ # then seek further among every r-th frames
558
+ prev_frame_idx = prev_frame_idx + (t_rel - 2) * r
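+ # Illustrative example (hypothetical numbers): with frame_idx=100, r=5 and num_maskmem=7
+ # in forward tracking, the non-conditioning memories come from frame 99 (t_rel=1) and
+ # frames 95, 90, 85, 80, 75 (t_rel=2..6).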
559
+ out = output_dict["non_cond_frame_outputs"].get(prev_frame_idx, None) # returns None if the key is not in the dict
560
+ if out is None:
561
+ # If an unselected conditioning frame is among the last (self.num_maskmem - 1)
562
+ # frames, we still attend to it as if it's a non-conditioning frame.
563
+ out = unselected_cond_outputs.get(prev_frame_idx, None)
564
+ t_pos_and_prevs.append((t_pos, out))
565
+
566
+ for t_pos, prev in t_pos_and_prevs:
567
+ if prev is None:
568
+ continue # skip padding frames
569
+ # "maskmem_features" might have been offloaded to CPU in demo use cases,
570
+ # so we load it back to GPU (it's a no-op if it's already on GPU).
571
+ feats = prev["maskmem_features"].cuda(non_blocking=True) # memory feature(1,64,64,64)
572
+ to_cat_memory.append(feats.flatten(2).permute(2, 0, 1)) # collect the memory features to be concatenated
573
+ # Spatial positional encoding (it might have been offloaded to CPU in eval)
574
+ maskmem_enc = prev["maskmem_pos_enc"][-1].cuda() # memory position
575
+ maskmem_enc = maskmem_enc.flatten(2).permute(2, 0, 1)
576
+ # Temporal positional encoding
577
+ maskmem_enc = (
578
+ maskmem_enc + self.maskmem_tpos_enc[self.num_maskmem - t_pos - 1]
579
+ )
580
+ to_cat_memory_pos_embed.append(maskmem_enc) # collect the memory positional encodings to be concatenated
581
+
582
+ # Construct the list of past object pointers
583
+ if self.use_obj_ptrs_in_encoder:
584
+ max_obj_ptrs_in_encoder = min(num_frames, self.max_obj_ptrs_in_encoder)
585
+ # First add those object pointers from selected conditioning frames
586
+ # (optionally, only include object pointers in the past during evaluation)
587
+ if not self.training and self.only_obj_ptrs_in_the_past_for_eval:
588
+ ptr_cond_outputs = {
589
+ t: out
590
+ for t, out in selected_cond_outputs.items()
591
+ if (t >= frame_idx if track_in_reverse else t <= frame_idx) # when track_in_reverse is False, this condition is equivalent to (t <= frame_idx)
592
+ }
593
+ else:
594
+ ptr_cond_outputs = selected_cond_outputs
595
+ pos_and_ptrs = [
596
+ # Temporal pos encoding contains how far away each pointer is from current frame
597
+ (abs(frame_idx - t), out["obj_ptr"])
598
+ for t, out in ptr_cond_outputs.items()
599
+ ]
600
+ # Add up to (max_obj_ptrs_in_encoder - 1) non-conditioning frames before current frame
601
+ for t_diff in range(1, max_obj_ptrs_in_encoder):
602
+ t = frame_idx + t_diff if track_in_reverse else frame_idx - t_diff # t is the index of a frame before the current frame
603
+ if t < 0 or (num_frames is not None and t >= num_frames):
604
+ break
605
+ out = output_dict["non_cond_frame_outputs"].get(
606
+ t, unselected_cond_outputs.get(t, None)
607
+ )
608
+ if out is not None:
609
+ pos_and_ptrs.append((t_diff, out["obj_ptr"]))
610
+ # If we have at least one object pointer, add them to the cross attention
611
+ if len(pos_and_ptrs) > 0:
612
+ pos_list, ptrs_list = zip(*pos_and_ptrs)
613
+ # stack object pointers along dim=0 into [ptr_seq_len, B, C] shape
614
+ obj_ptrs = torch.stack(ptrs_list, dim=0)
615
+ # a temporal positional embedding based on how far each object pointer is from
616
+ # the current frame (sine embedding normalized by the max pointer num).
617
+ if self.add_tpos_enc_to_obj_ptrs:
618
+ t_diff_max = max_obj_ptrs_in_encoder - 1
619
+ tpos_dim = C if self.proj_tpos_enc_in_obj_ptrs else self.mem_dim
620
+ obj_pos = torch.tensor(pos_list, device=device)
621
+ obj_pos = get_1d_sine_pe(obj_pos / t_diff_max, dim=tpos_dim)
622
+ obj_pos = self.obj_ptr_tpos_proj(obj_pos)
623
+ obj_pos = obj_pos.unsqueeze(1).expand(-1, B, self.mem_dim)
624
+ else:
625
+ obj_pos = obj_ptrs.new_zeros(len(pos_list), B, self.mem_dim)
626
+ if self.mem_dim < C:
627
+ # split a pointer into (C // self.mem_dim) tokens for self.mem_dim < C
628
+ obj_ptrs = obj_ptrs.reshape( # split the 256-dim object pointer into 4 tokens of 64-dim
629
+ -1, B, C // self.mem_dim, self.mem_dim
630
+ )
631
+ obj_ptrs = obj_ptrs.permute(0, 2, 1, 3).flatten(0, 1) # object pointer tokens
632
+ obj_pos = obj_pos.repeat_interleave(C // self.mem_dim, dim=0)
633
+ to_cat_memory.append(obj_ptrs) # append the object pointer tokens to the list of memory features as well
634
+ to_cat_memory_pos_embed.append(obj_pos) # and likewise their positional encodings
635
+ num_obj_ptr_tokens = obj_ptrs.shape[0]
636
+ else:
637
+ num_obj_ptr_tokens = 0
638
+ else:
639
+ # for initial conditioning frames, encode them without using any previous memory
640
+ if self.directly_add_no_mem_embed:
641
+ # directly add no-mem embedding (instead of using the transformer encoder)
642
+ pix_feat_with_mem = current_vision_feats[-1] + self.no_mem_embed
643
+ pix_feat_with_mem = pix_feat_with_mem.permute(1, 2, 0).view(B, C, H, W)
644
+ return pix_feat_with_mem
645
+
646
+ # Use a dummy token on the first frame (to avoid empty memory input to the transformer encoder)
647
+ to_cat_memory = [self.no_mem_embed.expand(1, B, self.mem_dim)]
648
+ to_cat_memory_pos_embed = [self.no_mem_pos_enc.expand(1, B, self.mem_dim)]
649
+
650
+ # Step 2: Concatenate the memories and forward through the transformer encoder
651
+ memory = torch.cat(to_cat_memory, dim=0) # concatenate the memory features selected before the current frame and the split object pointer tokens into a single N x 1 x 64 embedding
652
+ memory_pos_embed = torch.cat(to_cat_memory_pos_embed, dim=0)
653
+
654
+ # ############################ run Memory Attention #########################
655
+ pix_feat_with_mem = self.memory_attention( # Memory Attention: fuse current_vision_feats with the memory via cross attention
656
+ curr=current_vision_feats,
657
+ curr_pos=current_vision_pos_embeds,
658
+ memory=memory,
659
+ memory_pos=memory_pos_embed,
660
+ num_obj_ptr_tokens=num_obj_ptr_tokens,
661
+ )
662
+ # reshape the output (HW)BC => BCHW
663
+ pix_feat_with_mem = pix_feat_with_mem.permute(1, 2, 0).view(B, C, H, W)
664
+ return pix_feat_with_mem
665
+
666
+ def _encode_new_memory(
667
+ self,
668
+ current_vision_feats,
669
+ feat_sizes,
670
+ pred_masks_high_res,
671
+ is_mask_from_pts,
672
+ ):
673
+ """Encode the current image and its prediction into a memory feature."""
674
+ B = current_vision_feats[-1].size(1) # batch size on this frame
675
+ C = self.hidden_dim
676
+ H, W = feat_sizes[-1] # top-level (lowest-resolution) feature size
677
+ # top-level feature, (HW)BC => BCHW
678
+ pix_feat = current_vision_feats[-1].permute(1, 2, 0).view(B, C, H, W)
679
+ if self.non_overlap_masks_for_mem_enc and not self.training:
680
+ # optionally, apply non-overlapping constraints to the masks (it's applied
681
+ # in the batch dimension and should only be used during eval, where all
682
+ # the objects come from the same video under batch size 1).
683
+ pred_masks_high_res = self._apply_non_overlapping_constraints(
684
+ pred_masks_high_res
685
+ )
686
+ # scale the raw mask logits with a temperature before applying sigmoid
687
+ binarize = self.binarize_mask_from_pts_for_mem_enc and is_mask_from_pts
688
+ if binarize and not self.training:
689
+ mask_for_mem = (pred_masks_high_res > 0).float()
690
+ else:
691
+ # apply sigmoid on the raw mask logits to turn them into range (0, 1)
692
+ mask_for_mem = torch.sigmoid(pred_masks_high_res)
693
+ # apply scale and bias terms to the sigmoid probabilities
694
+ if self.sigmoid_scale_for_mem_enc != 1.0:
695
+ mask_for_mem = mask_for_mem * self.sigmoid_scale_for_mem_enc
696
+ if self.sigmoid_bias_for_mem_enc != 0.0:
697
+ mask_for_mem = mask_for_mem + self.sigmoid_bias_for_mem_enc
698
+ maskmem_out = self.memory_encoder( ###################### run the memory encoder
699
+ pix_feat, mask_for_mem, skip_mask_sigmoid=True # sigmoid already applied
700
+ )
701
+ maskmem_features = maskmem_out["vision_features"]
702
+ maskmem_pos_enc = maskmem_out["vision_pos_enc"]
703
+
704
+ return maskmem_features, maskmem_pos_enc
705
+
706
+ def track_step(
707
+ self,
708
+ frame_idx,
709
+ is_init_cond_frame,
710
+ current_vision_feats,
711
+ current_vision_pos_embeds,
712
+ feat_sizes,
713
+ point_inputs,
714
+ mask_inputs,
715
+ output_dict,
716
+ num_frames,
717
+ track_in_reverse=False, # tracking in reverse time order (for demo usage)
718
+ # Whether to run the memory encoder on the predicted masks. Sometimes we might want
719
+ # to skip the memory encoder with `run_mem_encoder=False`. For example,
720
+ # in demo we might call `track_step` multiple times for each user click,
721
+ # and only encode the memory when the user finalizes their clicks. And in ablation
722
+ # settings like SAM training on static images, we don't need the memory encoder.
723
+ run_mem_encoder=True,
724
+ # The previously predicted SAM mask logits (which can be fed together with new clicks in demo).
725
+ prev_sam_mask_logits=None,
726
+ ):
727
+ current_out = {"point_inputs": point_inputs, "mask_inputs": mask_inputs}
728
+ # High-resolution feature maps for the SAM head, reshape (HW)BC => BCHW
729
+ if len(current_vision_feats) > 1:
730
+ high_res_features = [
731
+ x.permute(1, 2, 0).view(x.size(1), x.size(2), *s)
732
+ for x, s in zip(current_vision_feats[:-1], feat_sizes[:-1])
733
+ ]
734
+ else:
735
+ high_res_features = None
736
+ if mask_inputs is not None and self.use_mask_input_as_output_without_sam:
737
+ # When use_mask_input_as_output_without_sam=True, we directly output the mask input
738
+ # (see it as a GT mask) without using a SAM prompt encoder + mask decoder.
739
+ pix_feat = current_vision_feats[-1].permute(1, 2, 0)
740
+ pix_feat = pix_feat.view(-1, self.hidden_dim, *feat_sizes[-1])
741
+ sam_outputs = self._use_mask_as_output(
742
+ pix_feat, high_res_features, mask_inputs
743
+ )
744
+ else:
745
+ # fuse the visual feature with previous memory features from the memory bank
746
+ pix_feat_with_mem = self._prepare_memory_conditioned_features( # internally runs Memory Attention to fuse the image embedding with memories from the memory bank
747
+ frame_idx=frame_idx,
748
+ is_init_cond_frame=is_init_cond_frame,
749
+ current_vision_feats=current_vision_feats[-1:],
750
+ current_vision_pos_embeds=current_vision_pos_embeds[-1:],
751
+ feat_sizes=feat_sizes[-1:],
752
+ output_dict=output_dict,
753
+ num_frames=num_frames,
754
+ track_in_reverse=track_in_reverse,
755
+ )
756
+ # apply SAM-style segmentation head
757
+ # here we might feed previously predicted low-res SAM mask logits into the SAM mask decoder,
758
+ # e.g. in demo where such logits come from earlier interaction instead of correction sampling
759
+ # (in this case, any `mask_inputs` shouldn't reach here as they are sent to _use_mask_as_output instead)
760
+ if prev_sam_mask_logits is not None:
761
+ assert point_inputs is not None and mask_inputs is None
762
+ mask_inputs = prev_sam_mask_logits
763
+ multimask_output = self._use_multimask(is_init_cond_frame, point_inputs)
764
+ sam_outputs = self._forward_sam_heads( # run the mask decoder
765
+ backbone_features=pix_feat_with_mem, # pix_feat_with_mem: the features after memory attention has fused the image embedding with the memories from the memory bank
766
+ point_inputs=point_inputs,
767
+ mask_inputs=mask_inputs,
768
+ high_res_features=high_res_features,
769
+ multimask_output=multimask_output,
770
+ )
771
+ (
772
+ _,
773
+ _,
774
+ _,
775
+ low_res_masks,
776
+ high_res_masks,
777
+ obj_ptr,
778
+ _,
779
+ ) = sam_outputs
780
+
781
+ current_out["pred_masks"] = low_res_masks
782
+ current_out["pred_masks_high_res"] = high_res_masks
783
+ current_out["obj_ptr"] = obj_ptr
784
+
785
+ # Finally run the memory encoder on the predicted mask to encode
786
+ # it into a new memory feature (that can be used in future frames)
787
+ if run_mem_encoder and self.num_maskmem > 0:
788
+ high_res_masks_for_mem_enc = high_res_masks
789
+ maskmem_features, maskmem_pos_enc = self._encode_new_memory( # Memory Encoder
790
+ current_vision_feats=current_vision_feats, # image embedding output by the image encoder
791
+ feat_sizes=feat_sizes,
792
+ pred_masks_high_res=high_res_masks_for_mem_enc,
793
+ is_mask_from_pts=(point_inputs is not None),
794
+ )
795
+ current_out["maskmem_features"] = maskmem_features
796
+ current_out["maskmem_pos_enc"] = maskmem_pos_enc
797
+ else:
798
+ current_out["maskmem_features"] = None
799
+ current_out["maskmem_pos_enc"] = None
800
+
801
+ return current_out
802
+
803
+ def _use_multimask(self, is_init_cond_frame, point_inputs):
804
+ """Whether to use multimask output in the SAM head."""
805
+ num_pts = 0 if point_inputs is None else point_inputs["point_labels"].size(1)
806
+ multimask_output = (
807
+ self.multimask_output_in_sam
808
+ and (is_init_cond_frame or self.multimask_output_for_tracking)
809
+ and (self.multimask_min_pt_num <= num_pts <= self.multimask_max_pt_num)
810
+ )
811
+ return multimask_output
812
+
813
+ def _apply_non_overlapping_constraints(self, pred_masks):
814
+ """
815
+ Apply non-overlapping constraints to the object scores in pred_masks. Here we
816
+ keep only the highest scoring object at each spatial location in pred_masks.
817
+ """
818
+ batch_size = pred_masks.size(0)
819
+ if batch_size == 1:
820
+ return pred_masks
821
+
822
+ device = pred_masks.device
823
+ # "max_obj_inds": object index of the object with the highest score at each location
824
+ max_obj_inds = torch.argmax(pred_masks, dim=0, keepdim=True)
825
+ # "batch_obj_inds": object index of each object slice (along dim 0) in `pred_masks`
826
+ batch_obj_inds = torch.arange(batch_size, device=device)[:, None, None, None]
827
+ keep = max_obj_inds == batch_obj_inds
828
+ # suppress overlapping regions' scores below -10.0 so that the foreground regions
829
+ # don't overlap (here sigmoid(-10.0)=4.5398e-05)
830
+ pred_masks = torch.where(keep, pred_masks, torch.clamp(pred_masks, max=-10.0))
831
+ return pred_masks
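A quick aside on the pointer-token bookkeeping in `_prepare_memory_conditioned_features` above: as the in-line comment notes, when `mem_dim` (64) is smaller than the hidden dimension `C` (256), each object pointer is split into `C // mem_dim` tokens before being appended to the memory sequence. A minimal standalone sketch with hypothetical sizes (not part of the uploaded files):

    import torch

    P, B, C, mem_dim = 3, 1, 256, 64   # hypothetical: 3 pointers, batch size 1
    obj_ptrs = torch.randn(P, B, C)
    obj_ptrs = obj_ptrs.reshape(-1, B, C // mem_dim, mem_dim)  # [P, B, 4, 64]
    obj_ptrs = obj_ptrs.permute(0, 2, 1, 3).flatten(0, 1)      # [P * 4, B, 64]
    print(obj_ptrs.shape)  # torch.Size([12, 1, 64])

Each 256-dim pointer thus contributes four 64-dim tokens, which is the `num_obj_ptr_tokens` count passed on to memory attention.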
SAM2/sam2/modeling/sam2_utils.py ADDED
@@ -0,0 +1,149 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+
8
+ import copy
9
+
10
+ import torch
11
+ import torch.nn as nn
12
+ import torch.nn.functional as F
13
+
14
+
15
+ def select_closest_cond_frames(frame_idx, cond_frame_outputs, max_cond_frame_num):
16
+ """
17
+ Select up to `max_cond_frame_num` conditioning frames from `cond_frame_outputs`
18
+ that are temporally closest to the current frame at `frame_idx`. Here, we take
19
+ - a) the closest conditioning frame before `frame_idx` (if any);
20
+ - b) the closest conditioning frame after `frame_idx` (if any);
21
+ - c) any other temporally closest conditioning frames until reaching a total
22
+ of `max_cond_frame_num` conditioning frames.
23
+
24
+ Outputs:
25
+ - selected_outputs: selected items (keys & values) from `cond_frame_outputs`.
26
+ - unselected_outputs: items (keys & values) not selected in `cond_frame_outputs`.
27
+ """
28
+ if max_cond_frame_num == -1 or len(cond_frame_outputs) <= max_cond_frame_num:
29
+ selected_outputs = cond_frame_outputs
30
+ unselected_outputs = {}
31
+ else:
32
+ assert max_cond_frame_num >= 2, "we should allow using 2+ conditioning frames"
33
+ selected_outputs = {}
34
+
35
+ # the closest conditioning frame before `frame_idx` (if any)
36
+ idx_before = max((t for t in cond_frame_outputs if t < frame_idx), default=None)
37
+ if idx_before is not None:
38
+ selected_outputs[idx_before] = cond_frame_outputs[idx_before]
39
+
40
+ # the closest conditioning frame after `frame_idx` (if any)
41
+ idx_after = min((t for t in cond_frame_outputs if t >= frame_idx), default=None)
42
+ if idx_after is not None:
43
+ selected_outputs[idx_after] = cond_frame_outputs[idx_after]
44
+
45
+ # add other temporally closest conditioning frames until reaching a total
46
+ # of `max_cond_frame_num` conditioning frames.
47
+ num_remain = max_cond_frame_num - len(selected_outputs)
48
+ inds_remain = sorted(
49
+ (t for t in cond_frame_outputs if t not in selected_outputs),
50
+ key=lambda x: abs(x - frame_idx),
51
+ )[:num_remain]
52
+ selected_outputs.update((t, cond_frame_outputs[t]) for t in inds_remain)
53
+ unselected_outputs = {
54
+ t: v for t, v in cond_frame_outputs.items() if t not in selected_outputs
55
+ }
56
+
57
+ return selected_outputs, unselected_outputs
58
+
59
+
60
+ def get_1d_sine_pe(pos_inds, dim, temperature=10000):
61
+ """
62
+ Get 1D sine positional embedding as in the original Transformer paper.
63
+ """
64
+ pe_dim = dim // 2
65
+ dim_t = torch.arange(pe_dim, dtype=torch.float32, device=pos_inds.device)
66
+ dim_t = temperature ** (2 * (dim_t // 2) / pe_dim)
67
+
68
+ pos_embed = pos_inds.unsqueeze(-1) / dim_t
69
+ pos_embed = torch.cat([pos_embed.sin(), pos_embed.cos()], dim=-1)
70
+ return pos_embed
71
+
72
+
73
+ def get_activation_fn(activation):
74
+ """Return an activation function given a string"""
75
+ if activation == "relu":
76
+ return F.relu
77
+ if activation == "gelu":
78
+ return F.gelu
79
+ if activation == "glu":
80
+ return F.glu
81
+ raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.")
82
+
83
+
84
+ def get_clones(module, N):
85
+ return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
86
+
87
+
88
+ class DropPath(nn.Module):
89
+ # adapted from https://github.com/huggingface/pytorch-image-models/blob/main/timm/layers/drop.py
90
+ def __init__(self, drop_prob=0.0, scale_by_keep=True):
91
+ super(DropPath, self).__init__()
92
+ self.drop_prob = drop_prob
93
+ self.scale_by_keep = scale_by_keep
94
+
95
+ def forward(self, x):
96
+ if self.drop_prob == 0.0 or not self.training:
97
+ return x
98
+ keep_prob = 1 - self.drop_prob
99
+ shape = (x.shape[0],) + (1,) * (x.ndim - 1)
100
+ random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
101
+ if keep_prob > 0.0 and self.scale_by_keep:
102
+ random_tensor.div_(keep_prob)
103
+ return x * random_tensor
104
+
105
+
106
+ # Lightly adapted from
107
+ # https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py # noqa
108
+ class MLP(nn.Module):
109
+ def __init__(
110
+ self,
111
+ input_dim: int,
112
+ hidden_dim: int,
113
+ output_dim: int,
114
+ num_layers: int,
115
+ activation: nn.Module = nn.ReLU,
116
+ sigmoid_output: bool = False,
117
+ ) -> None:
118
+ super().__init__()
119
+ self.num_layers = num_layers
120
+ h = [hidden_dim] * (num_layers - 1)
121
+ self.layers = nn.ModuleList(
122
+ nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])
123
+ )
124
+ self.sigmoid_output = sigmoid_output
125
+ self.act = activation()
126
+
127
+ def forward(self, x):
128
+ for i, layer in enumerate(self.layers):
129
+ x = self.act(layer(x)) if i < self.num_layers - 1 else layer(x)
130
+ if self.sigmoid_output:
131
+ x = F.sigmoid(x)
132
+ return x
133
+
134
+
135
+ # From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa
136
+ # Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa
137
+ class LayerNorm2d(nn.Module):
138
+ def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
139
+ super().__init__()
140
+ self.weight = nn.Parameter(torch.ones(num_channels))
141
+ self.bias = nn.Parameter(torch.zeros(num_channels))
142
+ self.eps = eps
143
+
144
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
145
+ u = x.mean(1, keepdim=True)
146
+ s = (x - u).pow(2).mean(1, keepdim=True)
147
+ x = (x - u) / torch.sqrt(s + self.eps)
148
+ x = self.weight[:, None, None] * x + self.bias[:, None, None]
149
+ return x
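For reference, a small usage sketch of two of the helpers above, assuming the package is importable as `sam2` (matching the imports used by the predictor file that follows); the frame indices and dict values are made up for illustration:

    import torch
    from sam2.modeling.sam2_utils import get_1d_sine_pe, select_closest_cond_frames

    # Toy conditioning-frame outputs keyed by frame index.
    cond = {0: "out0", 10: "out10", 20: "out20", 30: "out30"}
    selected, unselected = select_closest_cond_frames(
        frame_idx=12, cond_frame_outputs=cond, max_cond_frame_num=2
    )
    print(sorted(selected))    # [10, 20] -- the closest frames before and after frame 12
    print(sorted(unselected))  # [0, 30]

    # 1D sine positional embedding for a normalized temporal position.
    pe = get_1d_sine_pe(torch.tensor([0.5]), dim=64)
    print(pe.shape)  # torch.Size([1, 64])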
SAM2/sam2/sam2_image_predictor.py ADDED
@@ -0,0 +1,446 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import logging
8
+
9
+ from typing import List, Optional, Tuple, Union
10
+
11
+ import numpy as np
12
+ import torch
13
+ from PIL.Image import Image
14
+
15
+ from sam2.modeling.sam2_base import SAM2Base
16
+
17
+ from sam2.utils.transforms import SAM2Transforms
18
+
19
+
20
+ class SAM2ImagePredictor:
21
+ def __init__(
22
+ self,
23
+ sam_model: SAM2Base,
24
+ mask_threshold=0.0,
25
+ max_hole_area=0.0,
26
+ max_sprinkle_area=0.0,
27
+ ) -> None:
28
+ """
29
+ Uses SAM-2 to calculate the image embedding for an image, and then
30
+ allow repeated, efficient mask prediction given prompts.
31
+
32
+ Arguments:
33
+ sam_model (Sam-2): The model to use for mask prediction.
34
+ mask_threshold (float): The threshold to use when converting mask logits
35
+ to binary masks. Masks are thresholded at 0 by default.
36
+ fill_hole_area (int): If fill_hole_area > 0, we fill small holes in up to
37
+ the maximum area of fill_hole_area in low_res_masks.
38
+ """
39
+ super().__init__()
40
+ self.model = sam_model
41
+ self._transforms = SAM2Transforms(
42
+ resolution=self.model.image_size,
43
+ mask_threshold=mask_threshold,
44
+ max_hole_area=max_hole_area,
45
+ max_sprinkle_area=max_sprinkle_area,
46
+ )
47
+
48
+ # Predictor state
49
+ self._is_image_set = False
50
+ self._features = None
51
+ self._orig_hw = None
52
+ # Whether the predictor is set for single image or a batch of images
53
+ self._is_batch = False
54
+
55
+ # Predictor config
56
+ self.mask_threshold = mask_threshold
57
+
58
+ # Spatial dim for backbone feature maps
59
+ self._bb_feat_sizes = [
60
+ (256, 256),
61
+ (128, 128),
62
+ (64, 64),
63
+ ]
64
+
65
+ @torch.no_grad()
66
+ def set_image(
67
+ self,
68
+ image: Union[np.ndarray, Image],
69
+ ) -> None:
70
+ """
71
+ Calculates the image embeddings for the provided image, allowing
72
+ masks to be predicted with the 'predict' method.
73
+
74
+ Arguments:
75
+ image (np.ndarray or PIL Image): The input image to embed in RGB format. The image should be in HWC format if np.ndarray, or WHC format if PIL Image
76
+ with pixel values in [0, 255].
77
+ image_format (str): The color format of the image, in ['RGB', 'BGR'].
78
+ """
79
+ self.reset_predictor()
80
+ # Transform the image to the form expected by the model
81
+ if isinstance(image, np.ndarray):
82
+ logging.info("For numpy array image, we assume (HxWxC) format")
83
+ self._orig_hw = [image.shape[:2]]
84
+ elif isinstance(image, Image):
85
+ w, h = image.size
86
+ self._orig_hw = [(h, w)]
87
+ else:
88
+ raise NotImplementedError("Image format not supported")
89
+
90
+ input_image = self._transforms(image)
91
+ input_image = input_image[None, ...].to(self.device)
92
+
93
+ assert (
94
+ len(input_image.shape) == 4 and input_image.shape[1] == 3
95
+ ), f"input_image must be of size 1x3xHxW, got {input_image.shape}"
96
+ logging.info("Computing image embeddings for the provided image...")
97
+ backbone_out = self.model.forward_image(input_image)
98
+ _, vision_feats, _, _ = self.model._prepare_backbone_features(backbone_out)
99
+ # Add no_mem_embed, which is added to the lowest rest feat. map during training on videos
100
+ if self.model.directly_add_no_mem_embed:
101
+ vision_feats[-1] = vision_feats[-1] + self.model.no_mem_embed
102
+
103
+ feats = [
104
+ feat.permute(1, 2, 0).view(1, -1, *feat_size)
105
+ for feat, feat_size in zip(vision_feats[::-1], self._bb_feat_sizes[::-1])
106
+ ][::-1]
107
+ self._features = {"image_embed": feats[-1], "high_res_feats": feats[:-1]}
108
+ self._is_image_set = True
109
+ logging.info("Image embeddings computed.")
110
+
111
+ @torch.no_grad()
112
+ def set_image_batch(
113
+ self,
114
+ image_list: List[Union[np.ndarray]],
115
+ ) -> None:
116
+ """
117
+ Calculates the image embeddings for the provided image batch, allowing
118
+ masks to be predicted with the 'predict_batch' method.
119
+
120
+ Arguments:
121
+ image_list (List[np.ndarray]): The input images to embed in RGB format. The image should be in HWC format if np.ndarray
122
+ with pixel values in [0, 255].
123
+ """
124
+ self.reset_predictor()
125
+ assert isinstance(image_list, list)
126
+ self._orig_hw = []
127
+ for image in image_list:
128
+ assert isinstance(
129
+ image, np.ndarray
130
+ ), "Images are expected to be an np.ndarray in RGB format, and of shape HWC"
131
+ self._orig_hw.append(image.shape[:2])
132
+ # Transform the image to the form expected by the model
133
+ img_batch = self._transforms.forward_batch(image_list)
134
+ img_batch = img_batch.to(self.device)
135
+ batch_size = img_batch.shape[0]
136
+ assert (
137
+ len(img_batch.shape) == 4 and img_batch.shape[1] == 3
138
+ ), f"img_batch must be of size Bx3xHxW, got {img_batch.shape}"
139
+ logging.info("Computing image embeddings for the provided images...")
140
+ backbone_out = self.model.forward_image(img_batch)
141
+ _, vision_feats, _, _ = self.model._prepare_backbone_features(backbone_out)
142
+ # Add no_mem_embed, which is added to the lowest rest feat. map during training on videos
143
+ if self.model.directly_add_no_mem_embed:
144
+ vision_feats[-1] = vision_feats[-1] + self.model.no_mem_embed
145
+
146
+ feats = [
147
+ feat.permute(1, 2, 0).view(batch_size, -1, *feat_size)
148
+ for feat, feat_size in zip(vision_feats[::-1], self._bb_feat_sizes[::-1])
149
+ ][::-1]
150
+ self._features = {"image_embed": feats[-1], "high_res_feats": feats[:-1]}
151
+ self._is_image_set = True
152
+ self._is_batch = True
153
+ logging.info("Image embeddings computed.")
154
+
155
+ def predict_batch(
156
+ self,
157
+ point_coords_batch: List[np.ndarray] = None,
158
+ point_labels_batch: List[np.ndarray] = None,
159
+ box_batch: List[np.ndarray] = None,
160
+ mask_input_batch: List[np.ndarray] = None,
161
+ multimask_output: bool = True,
162
+ return_logits: bool = False,
163
+ normalize_coords=True,
164
+ ) -> Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray]]:
165
+ """This function is very similar to predict(...), however it is used for batched mode, when the model is expected to generate predictions on multiple images.
166
+ It returns a tupele of lists of masks, ious, and low_res_masks_logits.
167
+ """
168
+ assert self._is_batch, "This function should only be used when in batched mode"
169
+ if not self._is_image_set:
170
+ raise RuntimeError(
171
+ "An image must be set with .set_image_batch(...) before mask prediction."
172
+ )
173
+ num_images = len(self._features["image_embed"])
174
+ all_masks = []
175
+ all_ious = []
176
+ all_low_res_masks = []
177
+ for img_idx in range(num_images):
178
+ # Transform input prompts
179
+ point_coords = (
180
+ point_coords_batch[img_idx] if point_coords_batch is not None else None
181
+ )
182
+ point_labels = (
183
+ point_labels_batch[img_idx] if point_labels_batch is not None else None
184
+ )
185
+ box = box_batch[img_idx] if box_batch is not None else None
186
+ mask_input = (
187
+ mask_input_batch[img_idx] if mask_input_batch is not None else None
188
+ )
189
+ mask_input, unnorm_coords, labels, unnorm_box = self._prep_prompts(
190
+ point_coords,
191
+ point_labels,
192
+ box,
193
+ mask_input,
194
+ normalize_coords,
195
+ img_idx=img_idx,
196
+ )
197
+ masks, iou_predictions, low_res_masks = self._predict(
198
+ unnorm_coords,
199
+ labels,
200
+ unnorm_box,
201
+ mask_input,
202
+ multimask_output,
203
+ return_logits=return_logits,
204
+ img_idx=img_idx,
205
+ )
206
+ masks_np = masks.squeeze(0).float().detach().cpu().numpy()
207
+ iou_predictions_np = (
208
+ iou_predictions.squeeze(0).float().detach().cpu().numpy()
209
+ )
210
+ low_res_masks_np = low_res_masks.squeeze(0).float().detach().cpu().numpy()
211
+ all_masks.append(masks_np)
212
+ all_ious.append(iou_predictions_np)
213
+ all_low_res_masks.append(low_res_masks_np)
214
+
215
+ return all_masks, all_ious, all_low_res_masks
216
+
217
+ def predict(
218
+ self,
219
+ point_coords: Optional[np.ndarray] = None,
220
+ point_labels: Optional[np.ndarray] = None,
221
+ box: Optional[np.ndarray] = None,
222
+ mask_input: Optional[np.ndarray] = None,
223
+ multimask_output: bool = True,
224
+ return_logits: bool = False,
225
+ normalize_coords=True,
226
+ ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
227
+ """
228
+ Predict masks for the given input prompts, using the currently set image.
229
+
230
+ Arguments:
231
+ point_coords (np.ndarray or None): A Nx2 array of point prompts to the
232
+ model. Each point is in (X,Y) in pixels.
233
+ point_labels (np.ndarray or None): A length N array of labels for the
234
+ point prompts. 1 indicates a foreground point and 0 indicates a
235
+ background point.
236
+ box (np.ndarray or None): A length 4 array given a box prompt to the
237
+ model, in XYXY format.
238
+ mask_input (np.ndarray): A low resolution mask input to the model, typically
239
+ coming from a previous prediction iteration. Has form 1xHxW, where
240
+ for SAM, H=W=256.
241
+ multimask_output (bool): If true, the model will return three masks.
242
+ For ambiguous input prompts (such as a single click), this will often
243
+ produce better masks than a single prediction. If only a single
244
+ mask is needed, the model's predicted quality score can be used
245
+ to select the best mask. For non-ambiguous prompts, such as multiple
246
+ input prompts, multimask_output=False can give better results.
247
+ return_logits (bool): If true, returns un-thresholded masks logits
248
+ instead of a binary mask.
249
+ normalize_coords (bool): If true, the point coordinates will be normalized to the range [0,1] and point_coords is expected to be wrt. image dimensions.
250
+
251
+ Returns:
252
+ (np.ndarray): The output masks in CxHxW format, where C is the
253
+ number of masks, and (H, W) is the original image size.
254
+ (np.ndarray): An array of length C containing the model's
255
+ predictions for the quality of each mask.
256
+ (np.ndarray): An array of shape CxHxW, where C is the number
257
+ of masks and H=W=256. These low resolution logits can be passed to
258
+ a subsequent iteration as mask input.
259
+ """
260
+ if not self._is_image_set:
261
+ raise RuntimeError(
262
+ "An image must be set with .set_image(...) before mask prediction."
263
+ )
264
+
265
+ # Transform input prompts
266
+
267
+ mask_input, unnorm_coords, labels, unnorm_box = self._prep_prompts(
268
+ point_coords, point_labels, box, mask_input, normalize_coords
269
+ )
270
+
271
+ masks, iou_predictions, low_res_masks = self._predict(
272
+ unnorm_coords,
273
+ labels,
274
+ unnorm_box,
275
+ mask_input,
276
+ multimask_output,
277
+ return_logits=return_logits,
278
+ )
279
+
280
+ masks_np = masks.squeeze(0).float().detach().cpu().numpy()
281
+ iou_predictions_np = iou_predictions.squeeze(0).float().detach().cpu().numpy()
282
+ low_res_masks_np = low_res_masks.squeeze(0).float().detach().cpu().numpy()
283
+ return masks_np, iou_predictions_np, low_res_masks_np
284
+
285
+ def _prep_prompts(
286
+ self, point_coords, point_labels, box, mask_logits, normalize_coords, img_idx=-1
287
+ ):
288
+
289
+ unnorm_coords, labels, unnorm_box, mask_input = None, None, None, None
290
+ if point_coords is not None:
291
+ assert (
292
+ point_labels is not None
293
+ ), "point_labels must be supplied if point_coords is supplied."
294
+ point_coords = torch.as_tensor(
295
+ point_coords, dtype=torch.float, device=self.device
296
+ )
297
+ unnorm_coords = self._transforms.transform_coords(
298
+ point_coords, normalize=normalize_coords, orig_hw=self._orig_hw[img_idx]
299
+ )
300
+ labels = torch.as_tensor(point_labels, dtype=torch.int, device=self.device)
301
+ if len(unnorm_coords.shape) == 2:
302
+ unnorm_coords, labels = unnorm_coords[None, ...], labels[None, ...]
303
+ if box is not None:
304
+ box = torch.as_tensor(box, dtype=torch.float, device=self.device)
305
+ unnorm_box = self._transforms.transform_boxes(
306
+ box, normalize=normalize_coords, orig_hw=self._orig_hw[img_idx]
307
+ ) # Bx2x2
308
+ if mask_logits is not None:
309
+ mask_input = torch.as_tensor(
310
+ mask_logits, dtype=torch.float, device=self.device
311
+ )
312
+ if len(mask_input.shape) == 3:
313
+ mask_input = mask_input[None, :, :, :]
314
+ return mask_input, unnorm_coords, labels, unnorm_box
315
+
316
+ @torch.no_grad()
317
+ def _predict(
318
+ self,
319
+ point_coords: Optional[torch.Tensor],
320
+ point_labels: Optional[torch.Tensor],
321
+ boxes: Optional[torch.Tensor] = None,
322
+ mask_input: Optional[torch.Tensor] = None,
323
+ multimask_output: bool = True,
324
+ return_logits: bool = False,
325
+ img_idx: int = -1,
326
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
327
+ """
328
+ Predict masks for the given input prompts, using the currently set image.
329
+ Input prompts are batched torch tensors and are expected to already be
330
+ transformed to the input frame using SAM2Transforms.
331
+
332
+ Arguments:
333
+ point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the
334
+ model. Each point is in (X,Y) in pixels.
335
+ point_labels (torch.Tensor or None): A BxN array of labels for the
336
+ point prompts. 1 indicates a foreground point and 0 indicates a
337
+ background point.
338
+ boxes (torch.Tensor or None): A Bx4 array giving a box prompt to the
339
+ model, in XYXY format.
340
+ mask_input (torch.Tensor): A low resolution mask input to the model, typically
341
+ coming from a previous prediction iteration. Has form Bx1xHxW, where
342
+ for SAM, H=W=256. Masks returned by a previous iteration of the
343
+ predict method do not need further transformation.
344
+ multimask_output (bool): If true, the model will return three masks.
345
+ For ambiguous input prompts (such as a single click), this will often
346
+ produce better masks than a single prediction. If only a single
347
+ mask is needed, the model's predicted quality score can be used
348
+ to select the best mask. For non-ambiguous prompts, such as multiple
349
+ input prompts, multimask_output=False can give better results.
350
+ return_logits (bool): If true, returns un-thresholded mask logits
351
+ instead of a binary mask.
352
+
353
+ Returns:
354
+ (torch.Tensor): The output masks in BxCxHxW format, where C is the
355
+ number of masks, and (H, W) is the original image size.
356
+ (torch.Tensor): An array of shape BxC containing the model's
357
+ predictions for the quality of each mask.
358
+ (torch.Tensor): An array of shape BxCxHxW, where C is the number
359
+ of masks and H=W=256. These low res logits can be passed to
360
+ a subsequent iteration as mask input.
361
+ """
362
+ if not self._is_image_set:
363
+ raise RuntimeError(
364
+ "An image must be set with .set_image(...) before mask prediction."
365
+ )
366
+
367
+ if point_coords is not None:
368
+ concat_points = (point_coords, point_labels)
369
+ else:
370
+ concat_points = None
371
+
372
+ # Embed prompts
373
+ if boxes is not None:
374
+ box_coords = boxes.reshape(-1, 2, 2)
375
+ box_labels = torch.tensor([[2, 3]], dtype=torch.int, device=boxes.device)
376
+ box_labels = box_labels.repeat(boxes.size(0), 1)
377
+ # we merge "boxes" and "points" into a single "concat_points" input (where
378
+ # boxes are added at the beginning) to sam_prompt_encoder
379
+ if concat_points is not None:
380
+ concat_coords = torch.cat([box_coords, concat_points[0]], dim=1)
381
+ concat_labels = torch.cat([box_labels, concat_points[1]], dim=1)
382
+ concat_points = (concat_coords, concat_labels)
383
+ else:
384
+ concat_points = (box_coords, box_labels)
385
+
386
+ sparse_embeddings, dense_embeddings = self.model.sam_prompt_encoder(
387
+ points=concat_points,
388
+ boxes=None,
389
+ masks=mask_input,
390
+ )
391
+
392
+ # Predict masks
393
+ batched_mode = (
394
+ concat_points is not None and concat_points[0].shape[0] > 1
395
+ ) # multi object prediction
396
+ high_res_features = [
397
+ feat_level[img_idx].unsqueeze(0)
398
+ for feat_level in self._features["high_res_feats"]
399
+ ]
400
+ low_res_masks, iou_predictions, _, _ = self.model.sam_mask_decoder(
401
+ image_embeddings=self._features["image_embed"][img_idx].unsqueeze(0),
402
+ image_pe=self.model.sam_prompt_encoder.get_dense_pe(),
403
+ sparse_prompt_embeddings=sparse_embeddings,
404
+ dense_prompt_embeddings=dense_embeddings,
405
+ multimask_output=multimask_output,
406
+ repeat_image=batched_mode,
407
+ high_res_features=high_res_features,
408
+ )
409
+
410
+ # Upscale the masks to the original image resolution
411
+ masks = self._transforms.postprocess_masks(
412
+ low_res_masks, self._orig_hw[img_idx]
413
+ )
414
+ low_res_masks = torch.clamp(low_res_masks, -32.0, 32.0)
415
+ if not return_logits:
416
+ masks = masks > self.mask_threshold
417
+
418
+ return masks, iou_predictions, low_res_masks
419
+
420
+ def get_image_embedding(self) -> torch.Tensor:
421
+ """
422
+ Returns the image embeddings for the currently set image, with
423
+ shape 1xCxHxW, where C is the embedding dimension and (H,W) are
424
+ the embedding spatial dimension of SAM (typically C=256, H=W=64).
425
+ """
426
+ if not self._is_image_set:
427
+ raise RuntimeError(
428
+ "An image must be set with .set_image(...) to generate an embedding."
429
+ )
430
+ assert (
431
+ self._features is not None
432
+ ), "Features must exist if an image has been set."
433
+ return self._features["image_embed"]
434
+
435
+ @property
436
+ def device(self) -> torch.device:
437
+ return self.model.device
438
+
439
+ def reset_predictor(self) -> None:
440
+ """
441
+ Resets the image embeddings and other state variables.
442
+ """
443
+ self._is_image_set = False
444
+ self._features = None
445
+ self._orig_hw = None
446
+ self._is_batch = False
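A minimal usage sketch of the image predictor above (the example image path and click coordinates are placeholders; the config/checkpoint names follow the ones used elsewhere in this repo):

import numpy as np
from PIL import Image
from sam2.build_sam import build_sam2
from sam2.sam2_image_predictor import SAM2ImagePredictor

# build the SAM2 image model and wrap it in the predictor
sam2_model = build_sam2("sam2_hiera_l.yaml", "SAM2/checkpoints/sam2_hiera_large.pt")
predictor = SAM2ImagePredictor(sam2_model)

# set the image once, then prompt it with a single foreground click
image = np.array(Image.open("example.jpg").convert("RGB"))
predictor.set_image(image)
masks, scores, low_res_logits = predictor.predict(
    point_coords=np.array([[200, 150]], dtype=np.float32),  # (X, Y) in pixels
    point_labels=np.array([1], dtype=np.int32),             # 1 = foreground point
    multimask_output=True,                                   # return three candidate masks
)
best_mask = masks[np.argmax(scores)]  # pick the highest-scoring candidate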
SAM2/sam2/sam2_to_dust3r.py ADDED
@@ -0,0 +1,161 @@
1
+ import os
2
+ import torch
3
+ import numpy as np
4
+ import matplotlib.pyplot as plt
5
+ from PIL import Image
6
+ from sam2.build_sam import build_sam2_video_predictor
7
+ import json
8
+
9
+ def build_sam2(cfg, checkpoints):
10
+ return build_sam2_video_predictor(cfg, checkpoints)
11
+
12
+
13
+ def show_mask(mask, ax, obj_id=None, random_color=False):
14
+ if random_color:
15
+ color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)
16
+ else:
17
+ cmap = plt.get_cmap("tab10")
18
+ cmap_idx = 0 if obj_id is None else obj_id
19
+ color = np.array([*cmap(cmap_idx)[:3], 0.6])
20
+ h, w = mask.shape[-2:]
21
+ mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)
22
+ ax.imshow(mask_image)
23
+
24
+
25
+
26
+ def show_points(coords, labels, ax, marker_size=200):
27
+ pos_points = coords[labels==1]
28
+ neg_points = coords[labels==0]
29
+ ax.scatter(pos_points[:, 0], pos_points[:, 1], color='green', marker='*', s=marker_size, edgecolor='white', linewidth=1.25)
30
+ ax.scatter(neg_points[:, 0], neg_points[:, 1], color='red', marker='*', s=marker_size, edgecolor='white', linewidth=1.25)
31
+
32
+ # Add point prompts to a frame
33
+ # ann_frame_idx: the frame index we interact with
34
+ # ann_obj_id: give a unique id to each object we interact with (it can be any integers)
35
+ def add_new_points(predictor, inference_state, ann_frame_idx, ann_obj_id, points, labels):
36
+ _, out_obj_ids, out_mask_logits = predictor.add_new_points(
37
+ inference_state=inference_state,
38
+ frame_idx=ann_frame_idx,
39
+ obj_id=ann_obj_id,
40
+ points=points,
41
+ labels=labels,
42
+ )
43
+ return out_obj_ids, out_mask_logits
44
+
45
+ # Get the segmentation results of all frames
46
+ def all_frames_masks(predictor, inference_state):
47
+ video_segments = {} # video_segments contains the per-frame segmentation results
48
+ for out_frame_idx, out_obj_ids, out_mask_logits in predictor.propagate_in_video(inference_state):
49
+ video_segments[out_frame_idx] = {
50
+ out_obj_id: (out_mask_logits[i] > 0.0).cpu().numpy()
51
+ for i, out_obj_id in enumerate(out_obj_ids)
52
+ }
53
+ return video_segments
54
+
55
+ def resize_mask_to_img(masks, target_width, target_height):
56
+ frame_mask = []
57
+ origin_size = masks[0][1].shape  # 1 is the object id
58
+ for frame, objects_mask in masks.items():  # each frame and its segmentation results
59
+ # each frame may contain masks for multiple objects
60
+ masks_in_frame = list(objects_mask.values())
61
+ if not masks_in_frame:  # no masks, i.e. the current frame contains no object
62
+ frame_mask.append(np.ones(origin_size, dtype=bool))
63
+ else:  # take the union of the masks of all objects in the current frame
64
+ union_mask = masks_in_frame[0]
65
+ for mask in masks_in_frame[1:]:
66
+ union_mask = np.logical_or(union_mask, mask)
67
+ frame_mask.append(union_mask)
68
+ resized_mask = []
69
+ for mask in frame_mask:
70
+ mask_image = Image.fromarray(mask.squeeze(0).astype(np.uint8) * 255)
71
+ resized_mask_image = mask_image.resize((target_width, target_height), Image.NEAREST)
72
+ resized_mask.append(np.array(resized_mask_image) > 0)
73
+
74
+ return resized_mask
75
+
76
+ def sava_mask(output_folder, mask):
77
+
78
+
79
+ # Convert to a PIL Image
80
+ binary_image = Image.fromarray(mask.squeeze(0).astype(np.uint8) * 255, 'L')  # 'L' means grayscale mode
81
+
82
+ new_file_path = os.path.join(output_folder, "binary_mask.jpg")
83
+
84
+ # Save the new image
85
+ binary_image.save(new_file_path)
86
+ print(f"sava mask to {new_file_path} .")
87
+
88
+ # Run SAM2 to obtain the segmentation results of all frames
89
+ def get_masks_from_sam2(dataset_name, scene_name, img_shape, h, w, target_ind):
90
+ # Load the model
91
+ sam2_checkpoint = r"D:\XMU\mac\hujie\3D\DUST3RwithSAM2\dust3rWithSam2\SAM2\checkpoints\sam2_hiera_large.pt"  # local checkpoint path (raw string keeps the backslashes literal)
92
+ model_cfg = "sam2_hiera_l.yaml"
93
+
94
+ predictor = build_sam2(model_cfg, sam2_checkpoint)
95
+
96
+ # Directory containing the video frames
97
+ video_dir = os.path.join("data", dataset_name, scene_name, "images_8")
98
+
99
+ # Read the frame images
100
+ frame_names = [
101
+ p for p in sorted(os.listdir(video_dir))
102
+ if os.path.splitext(p)[-1] in [".jpg", ".jpeg", ".JPG", ".JPEG", ".png"]
103
+ ]
104
+
105
+ inference_state = predictor.init_state(video_path=video_dir)
106
+ predictor.reset_state(inference_state)
107
+
108
+
109
+ # Add points to one frame
110
+ # Read prompts.json
111
+ json_dir = os.path.join("data", dataset_name, "prompts.json")
112
+ with open(json_dir, 'r') as file:
113
+ data = json.load(file)
114
+ # Parse the prompts
115
+ prompts = data[scene_name]
116
+ points = np.array(prompts['points'], dtype=np.float32)
117
+ labels = np.array(prompts['labels'], dtype=np.int32)
118
+
119
+
120
+
121
+ out_obj_ids, out_mask_logits = add_new_points(predictor, inference_state, 0, 1, points, labels)
122
+
123
+ # Run SAM2 to get the segmentation results of all frames
124
+ video_segments = all_frames_masks(predictor, inference_state)
125
+
126
+ # Render and display the results
127
+ vis_frame_stride = 3
128
+ plt.close("all")
129
+ for out_frame_idx in range(0, len(frame_names), vis_frame_stride):
130
+ plt.figure(figsize=(6, 4))
131
+ plt.title(f"frame {out_frame_idx}")
132
+ plt.imshow(Image.open(os.path.join(video_dir, frame_names[out_frame_idx])))
133
+ for out_obj_id, out_mask in video_segments[out_frame_idx].items():
134
+ show_mask(out_mask, plt.gca(), obj_id=out_obj_id)
135
+ if out_frame_idx == 0:
136
+ # Show the point prompts
137
+ show_points(points, labels, plt.gca())
138
+
139
+
140
+ plt.title(f"Frame {out_frame_idx}")
141
+ plt.axis('off')  # optional: hide the axes
142
+ plt.show()
143
+
144
+ # Save the SAM2 output mask of the target_ind view as the ground-truth mask for computing IoU and Acc
145
+ mask_dir = os.path.join("data", dataset_name, "masks", scene_name)
146
+ sava_mask(mask_dir, video_segments[target_ind][1])
147
+ # Resize the SAM2 masks to the size required by DUST3R
148
+ resize_mask = resize_mask_to_img(video_segments, w, h)
149
+ return resize_mask
150
+
151
+
152
+ def array_to_tensor_masks(masks_list):
153
+ # Stack the list into a single ndarray of shape (n, H, W)
154
+ masks_array = np.stack(masks_list)
155
+
156
+ # Reshape it to (n, H*W)
157
+ masks_array = masks_array.reshape(masks_array.shape[0], -1)
158
+
159
+ # Convert to a bool Tensor
160
+ masks_tensor = torch.tensor(masks_array, dtype=torch.bool)
161
+ return masks_tensor
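Downstream, DUSt3R consumes the per-frame masks as a single flattened boolean tensor. A minimal sketch of how the two helpers above chain together (the dataset/scene names and target resolution are placeholders; the expected layout data/<dataset_name>/<scene_name>/images_8 and data/<dataset_name>/prompts.json follows the code above, and img_shape is not used by the current implementation):

# per-frame boolean masks resized to DUSt3R's working resolution (w x h)
frame_masks = get_masks_from_sam2(
    dataset_name="my_dataset",   # placeholder folder under data/
    scene_name="my_scene",       # placeholder scene with an entry in prompts.json
    img_shape=None,
    h=384, w=512,                # placeholder target size
    target_ind=0,                # view whose SAM2 mask is saved as the ground-truth mask
)
masks_tensor = array_to_tensor_masks(frame_masks)  # shape (n_frames, H*W), dtype torch.bool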
SAM2/sam2/sam2_video_predictor.py ADDED
@@ -0,0 +1,1042 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import warnings
8
+ from collections import OrderedDict
9
+
10
+ import torch
11
+
12
+ from tqdm import tqdm
13
+
14
+ from sam2.modeling.sam2_base import NO_OBJ_SCORE, SAM2Base
15
+ from sam2.utils.misc import concat_points, fill_holes_in_mask_scores, load_video_frames
16
+
17
+
18
+ class SAM2VideoPredictor(SAM2Base):
19
+ """The predictor class to handle user interactions and manage inference states."""
20
+
21
+ def __init__(
22
+ self,
23
+ fill_hole_area=0,
24
+ # whether to apply non-overlapping constraints on the output object masks
25
+ non_overlap_masks=False,
26
+ # whether to clear non-conditioning memory of the surrounding frames (which may contain outdated information) after adding correction clicks;
27
+ # note that this would only apply to *single-object tracking* unless `clear_non_cond_mem_for_multi_obj` is also set to True
28
+ clear_non_cond_mem_around_input=False,
29
+ # whether to also clear non-conditioning memory of the surrounding frames (only effective when `clear_non_cond_mem_around_input` is True).
30
+ clear_non_cond_mem_for_multi_obj=False,
31
+ **kwargs,
32
+ ):
33
+ super().__init__(**kwargs)
34
+ self.fill_hole_area = fill_hole_area
35
+ self.non_overlap_masks = non_overlap_masks
36
+ self.clear_non_cond_mem_around_input = clear_non_cond_mem_around_input
37
+ self.clear_non_cond_mem_for_multi_obj = clear_non_cond_mem_for_multi_obj
38
+
39
+ @torch.inference_mode()
40
+ def init_state(
41
+ self,
42
+ video_path,
43
+ offload_video_to_cpu=False,
44
+ offload_state_to_cpu=False,
45
+ async_loading_frames=False,
46
+ ):
47
+ """Initialize a inference state."""
48
+ images, video_height, video_width = load_video_frames(
49
+ video_path=video_path,
50
+ image_size=self.image_size,
51
+ offload_video_to_cpu=offload_video_to_cpu,
52
+ async_loading_frames=async_loading_frames,
53
+ )
54
+ inference_state = {}
55
+ inference_state["images"] = images
56
+ inference_state["num_frames"] = len(images)
57
+ # whether to offload the video frames to CPU memory
58
+ # turning on this option saves the GPU memory with only a very small overhead
59
+ inference_state["offload_video_to_cpu"] = offload_video_to_cpu
60
+ # whether to offload the inference state to CPU memory
61
+ # turning on this option saves the GPU memory at the cost of a lower tracking fps
62
+ # (e.g. in a test case of 768x768 model, fps dropped from 27 to 24 when tracking one object
63
+ # and from 24 to 21 when tracking two objects)
64
+ inference_state["offload_state_to_cpu"] = offload_state_to_cpu
65
+ # the original video height and width, used for resizing final output scores
66
+ inference_state["video_height"] = video_height
67
+ inference_state["video_width"] = video_width
68
+ inference_state["device"] = torch.device("cuda")
69
+ if offload_state_to_cpu:
70
+ inference_state["storage_device"] = torch.device("cpu")
71
+ else:
72
+ inference_state["storage_device"] = torch.device("cuda")
73
+ # inputs on each frame
74
+ inference_state["point_inputs_per_obj"] = {}
75
+ inference_state["mask_inputs_per_obj"] = {}
76
+ # visual features on a small number of recently visited frames for quick interactions
77
+ inference_state["cached_features"] = {}
78
+ # values that don't change across frames (so we only need to hold one copy of them)
79
+ inference_state["constants"] = {}
80
+ # mapping between client-side object id and model-side object index
81
+ inference_state["obj_id_to_idx"] = OrderedDict()
82
+ inference_state["obj_idx_to_id"] = OrderedDict()
83
+ inference_state["obj_ids"] = []
84
+ # A storage to hold the model's tracking results and states on each frame
85
+ inference_state["output_dict"] = {
86
+ "cond_frame_outputs": {}, # dict containing {frame_idx: <out>}
87
+ "non_cond_frame_outputs": {}, # dict containing {frame_idx: <out>}
88
+ }
89
+ # Slice (view) of each object tracking results, sharing the same memory with "output_dict"
90
+ inference_state["output_dict_per_obj"] = {}
91
+ # A temporary storage to hold new outputs when user interact with a frame
92
+ # to add clicks or mask (it's merged into "output_dict" before propagation starts)
93
+ inference_state["temp_output_dict_per_obj"] = {}
94
+ # Frames that already holds consolidated outputs from click or mask inputs
95
+ # (we directly use their consolidated outputs during tracking)
96
+ inference_state["consolidated_frame_inds"] = {
97
+ "cond_frame_outputs": set(), # set containing frame indices
98
+ "non_cond_frame_outputs": set(), # set containing frame indices
99
+ }
100
+ # metadata for each tracking frame (e.g. which direction it's tracked)
101
+ inference_state["tracking_has_started"] = False
102
+ inference_state["frames_already_tracked"] = {}
103
+ # Warm up the visual backbone and cache the image feature on frame 0
104
+ self._get_image_feature(inference_state, frame_idx=0, batch_size=1)
105
+ return inference_state
106
+
107
+ def _obj_id_to_idx(self, inference_state, obj_id):
108
+ """Map client-side object id to model-side object index."""
109
+ obj_idx = inference_state["obj_id_to_idx"].get(obj_id, None)
110
+ if obj_idx is not None:
111
+ return obj_idx
112
+
113
+ # This is a new object id not sent to the server before. We only allow adding
114
+ # new objects *before* the tracking starts.
115
+ allow_new_object = not inference_state["tracking_has_started"]
116
+ if allow_new_object:
117
+ # get the next object slot
118
+ obj_idx = len(inference_state["obj_id_to_idx"])
119
+ inference_state["obj_id_to_idx"][obj_id] = obj_idx
120
+ inference_state["obj_idx_to_id"][obj_idx] = obj_id
121
+ inference_state["obj_ids"] = list(inference_state["obj_id_to_idx"])
122
+ # set up input and output structures for this object
123
+ inference_state["point_inputs_per_obj"][obj_idx] = {}
124
+ inference_state["mask_inputs_per_obj"][obj_idx] = {}
125
+ inference_state["output_dict_per_obj"][obj_idx] = {
126
+ "cond_frame_outputs": {}, # dict containing {frame_idx: <out>}
127
+ "non_cond_frame_outputs": {}, # dict containing {frame_idx: <out>}
128
+ }
129
+ inference_state["temp_output_dict_per_obj"][obj_idx] = {
130
+ "cond_frame_outputs": {}, # dict containing {frame_idx: <out>}
131
+ "non_cond_frame_outputs": {}, # dict containing {frame_idx: <out>}
132
+ }
133
+ return obj_idx
134
+ else:
135
+ raise RuntimeError(
136
+ f"Cannot add new object id {obj_id} after tracking starts. "
137
+ f"All existing object ids: {inference_state['obj_ids']}. "
138
+ f"Please call 'reset_state' to restart from scratch."
139
+ )
140
+
141
+ def _obj_idx_to_id(self, inference_state, obj_idx):
142
+ """Map model-side object index to client-side object id."""
143
+ return inference_state["obj_idx_to_id"][obj_idx]
144
+
145
+ def _get_obj_num(self, inference_state):
146
+ """Get the total number of unique object ids received so far in this session."""
147
+ return len(inference_state["obj_idx_to_id"])
148
+
149
+ @torch.inference_mode()
150
+ def add_new_points_or_box(
151
+ self,
152
+ inference_state,
153
+ frame_idx,
154
+ obj_id,
155
+ points=None,
156
+ labels=None,
157
+ clear_old_points=True,
158
+ normalize_coords=True,
159
+ box=None,
160
+ ):
161
+ """Add new points to a frame."""
162
+ obj_idx = self._obj_id_to_idx(inference_state, obj_id)
163
+ point_inputs_per_frame = inference_state["point_inputs_per_obj"][obj_idx]
164
+ mask_inputs_per_frame = inference_state["mask_inputs_per_obj"][obj_idx]
165
+
166
+ if (points is not None) != (labels is not None):
167
+ raise ValueError("points and labels must be provided together")
168
+ if points is None and box is None:
169
+ raise ValueError("at least one of points or box must be provided as input")
170
+
171
+ if points is None:
172
+ points = torch.zeros(0, 2, dtype=torch.float32, device=self.device)
173
+ elif not isinstance(points, torch.Tensor):
174
+ points = torch.tensor(points, dtype=torch.float32, device=self.device)
175
+ if labels is None:
176
+ labels = torch.zeros(0, dtype=torch.int32, device=self.device)
177
+ elif not isinstance(labels, torch.Tensor):
178
+ labels = torch.tensor(labels, dtype=torch.int32, device=self.device)
179
+ if points.dim() == 2:
180
+ points = points.unsqueeze(0) # add batch dimension
181
+ if labels.dim() == 1:
182
+ labels = labels.unsqueeze(0) # add batch dimension
183
+
184
+ # If `box` is provided, we add it as the first two points with labels 2 and 3
185
+ # along with the user-provided points (consistent with how SAM 2 is trained).
186
+ if box is not None:
187
+ if not clear_old_points:
188
+ raise ValueError(
189
+ "cannot add box without clearing old points, since "
190
+ "box prompt must be provided before any point prompt "
191
+ "(please use clear_old_points=True instead)"
192
+ )
193
+ if inference_state["tracking_has_started"]:
194
+ warnings.warn(
195
+ "You are adding a box after tracking starts. SAM 2 may not always be "
196
+ "able to incorporate a box prompt for *refinement*. If you intend to "
197
+ "use box prompt as an *initial* input before tracking, please call "
198
+ "'reset_state' on the inference state to restart from scratch.",
199
+ category=UserWarning,
200
+ stacklevel=2,
201
+ )
202
+ if not isinstance(box, torch.Tensor):
203
+ box = torch.tensor(box, dtype=torch.float32, device=self.device)
204
+ box_coords = box.reshape(1, 2, 2)
205
+ box_labels = torch.tensor([2, 3], dtype=torch.int32, device=self.device)
206
+ box_labels = box_labels.reshape(1, 2)
207
+ points = torch.cat([box_coords, points], dim=1)
208
+ labels = torch.cat([box_labels, labels], dim=1)
209
+
210
+ if normalize_coords:
211
+ video_H = inference_state["video_height"]
212
+ video_W = inference_state["video_width"]
213
+ points = points / torch.tensor([video_W, video_H]).to(points.device)
214
+ # scale the (normalized) coordinates by the model's internal image size
215
+ points = points * self.image_size
216
+ points = points.to(inference_state["device"])
217
+ labels = labels.to(inference_state["device"])
218
+
219
+ if not clear_old_points:
220
+ point_inputs = point_inputs_per_frame.get(frame_idx, None)
221
+ else:
222
+ point_inputs = None
223
+ point_inputs = concat_points(point_inputs, points, labels)
224
+
225
+ point_inputs_per_frame[frame_idx] = point_inputs
226
+ mask_inputs_per_frame.pop(frame_idx, None)
227
+ # If this frame hasn't been tracked before, we treat it as an initial conditioning
228
+ # frame, meaning that the inputs points are to generate segments on this frame without
229
+ # using any memory from other frames, like in SAM. Otherwise (if it has been tracked),
230
+ # the input points will be used to correct the already tracked masks.
231
+ is_init_cond_frame = frame_idx not in inference_state["frames_already_tracked"]
232
+ # whether to track in reverse time order
233
+ if is_init_cond_frame:
234
+ reverse = False
235
+ else:
236
+ reverse = inference_state["frames_already_tracked"][frame_idx]["reverse"]
237
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
238
+ obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx]
239
+ # Add a frame to conditioning output if it's an initial conditioning frame or
240
+ # if the model sees all frames receiving clicks/mask as conditioning frames.
241
+ is_cond = is_init_cond_frame or self.add_all_frames_to_correct_as_cond
242
+ storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs"
243
+
244
+ # Get any previously predicted mask logits on this object and feed it along with
245
+ # the new clicks into the SAM mask decoder.
246
+ prev_sam_mask_logits = None
247
+ # lookup temporary output dict first, which contains the most recent output
248
+ # (if not found, then lookup conditioning and non-conditioning frame output)
249
+ prev_out = obj_temp_output_dict[storage_key].get(frame_idx)
250
+ if prev_out is None:
251
+ prev_out = obj_output_dict["cond_frame_outputs"].get(frame_idx)
252
+ if prev_out is None:
253
+ prev_out = obj_output_dict["non_cond_frame_outputs"].get(frame_idx)
254
+
255
+ if prev_out is not None and prev_out["pred_masks"] is not None:
256
+ prev_sam_mask_logits = prev_out["pred_masks"].cuda(non_blocking=True)
257
+ # Clamp the scale of prev_sam_mask_logits to avoid rare numerical issues.
258
+ prev_sam_mask_logits = torch.clamp(prev_sam_mask_logits, -32.0, 32.0)
259
+ current_out, _ = self._run_single_frame_inference(
260
+ inference_state=inference_state,
261
+ output_dict=obj_output_dict, # run on the slice of a single object
262
+ frame_idx=frame_idx,
263
+ batch_size=1, # run on the slice of a single object
264
+ is_init_cond_frame=is_init_cond_frame,
265
+ point_inputs=point_inputs,
266
+ mask_inputs=None,
267
+ reverse=reverse,
268
+ # Skip the memory encoder when adding clicks or mask. We execute the memory encoder
269
+ # at the beginning of `propagate_in_video` (after user finalize their clicks). This
270
+ # allows us to enforce non-overlapping constraints on all objects before encoding
271
+ # them into memory.
272
+ run_mem_encoder=False,
273
+ prev_sam_mask_logits=prev_sam_mask_logits,
274
+ )
275
+ # Add the output to the output dict (to be used as future memory)
276
+ obj_temp_output_dict[storage_key][frame_idx] = current_out
277
+
278
+ # Resize the output mask to the original video resolution
279
+ obj_ids = inference_state["obj_ids"]
280
+ consolidated_out = self._consolidate_temp_output_across_obj(
281
+ inference_state,
282
+ frame_idx,
283
+ is_cond=is_cond,
284
+ run_mem_encoder=False,
285
+ consolidate_at_video_res=True,
286
+ )
287
+ _, video_res_masks = self._get_orig_video_res_output(
288
+ inference_state, consolidated_out["pred_masks_video_res"]
289
+ )
290
+ return frame_idx, obj_ids, video_res_masks
291
+
292
+ @torch.inference_mode()
293
+ def add_new_points(
294
+ self,
295
+ inference_state,
296
+ frame_idx,
297
+ obj_id,
298
+ points,
299
+ labels,
300
+ clear_old_points=True,
301
+ normalize_coords=True,
302
+ ):
303
+ """Add new points to a frame."""
304
+ obj_idx = self._obj_id_to_idx(inference_state, obj_id)
305
+ point_inputs_per_frame = inference_state["point_inputs_per_obj"][obj_idx]
306
+ mask_inputs_per_frame = inference_state["mask_inputs_per_obj"][obj_idx]
307
+
308
+ if not isinstance(points, torch.Tensor):
309
+ points = torch.tensor(points, dtype=torch.float32)
310
+ if not isinstance(labels, torch.Tensor):
311
+ labels = torch.tensor(labels, dtype=torch.int32)
312
+ if points.dim() == 2:
313
+ points = points.unsqueeze(0) # add batch dimension
314
+ if labels.dim() == 1:
315
+ labels = labels.unsqueeze(0) # add batch dimension
316
+ if normalize_coords:
317
+ video_H = inference_state["video_height"]
318
+ video_W = inference_state["video_width"]
319
+ points = points / torch.tensor([video_W, video_H]).to(points.device)
320
+ # scale the (normalized) coordinates by the model's internal image size
321
+ points = points * self.image_size
322
+ points = points.to(inference_state["device"])
323
+ labels = labels.to(inference_state["device"])
324
+
325
+ if not clear_old_points:
326
+ point_inputs = point_inputs_per_frame.get(frame_idx, None)
327
+ else:
328
+ point_inputs = None
329
+ point_inputs = concat_points(point_inputs, points, labels)
330
+
331
+ point_inputs_per_frame[frame_idx] = point_inputs
332
+ mask_inputs_per_frame.pop(frame_idx, None)
333
+ # If this frame hasn't been tracked before, we treat it as an initial conditioning
334
+ # frame, meaning that the inputs points are to generate segments on this frame without
335
+ # using any memory from other frames, like in SAM. Otherwise (if it has been tracked),
336
+ # the input points will be used to correct the already tracked masks.
337
+ is_init_cond_frame = frame_idx not in inference_state["frames_already_tracked"]
338
+ # whether to track in reverse time order
339
+ if is_init_cond_frame:
340
+ reverse = False
341
+ else:
342
+ reverse = inference_state["frames_already_tracked"][frame_idx]["reverse"]
343
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
344
+ obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx]
345
+ # Add a frame to conditioning output if it's an initial conditioning frame or
346
+ # if the model sees all frames receiving clicks/mask as conditioning frames.
347
+ is_cond = is_init_cond_frame or self.add_all_frames_to_correct_as_cond
348
+ storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs"
349
+
350
+ # Get any previously predicted mask logits on this object and feed it along with
351
+ # the new clicks into the SAM mask decoder.
352
+ prev_sam_mask_logits = None
353
+ # lookup temporary output dict first, which contains the most recent output
354
+ # (if not found, then lookup conditioning and non-conditioning frame output)
355
+ prev_out = obj_temp_output_dict[storage_key].get(frame_idx)
356
+ if prev_out is None:
357
+ prev_out = obj_output_dict["cond_frame_outputs"].get(frame_idx)
358
+ if prev_out is None:
359
+ prev_out = obj_output_dict["non_cond_frame_outputs"].get(frame_idx)
360
+
361
+ if prev_out is not None and prev_out["pred_masks"] is not None:
362
+ prev_sam_mask_logits = prev_out["pred_masks"].cuda(non_blocking=True)
363
+ # Clamp the scale of prev_sam_mask_logits to avoid rare numerical issues.
364
+ prev_sam_mask_logits = torch.clamp(prev_sam_mask_logits, -32.0, 32.0)
365
+ current_out, _ = self._run_single_frame_inference(
366
+ inference_state=inference_state,
367
+ output_dict=obj_output_dict, # run on the slice of a single object
368
+ frame_idx=frame_idx,
369
+ batch_size=1, # run on the slice of a single object
370
+ is_init_cond_frame=is_init_cond_frame,
371
+ point_inputs=point_inputs,
372
+ mask_inputs=None,
373
+ reverse=reverse,
374
+ # Skip the memory encoder when adding clicks or mask. We execute the memory encoder
375
+ # at the beginning of `propagate_in_video` (after user finalize their clicks). This
376
+ # allows us to enforce non-overlapping constraints on all objects before encoding
377
+ # them into memory.
378
+ run_mem_encoder=False,
379
+ prev_sam_mask_logits=prev_sam_mask_logits,
380
+ )
381
+ # Add the output to the output dict (to be used as future memory)
382
+ obj_temp_output_dict[storage_key][frame_idx] = current_out
383
+
384
+ # Resize the output mask to the original video resolution
385
+ obj_ids = inference_state["obj_ids"]
386
+ consolidated_out = self._consolidate_temp_output_across_obj(
387
+ inference_state,
388
+ frame_idx,
389
+ is_cond=is_cond,
390
+ run_mem_encoder=False,
391
+ consolidate_at_video_res=True,
392
+ )
393
+ _, video_res_masks = self._get_orig_video_res_output(
394
+ inference_state, consolidated_out["pred_masks_video_res"]
395
+ )
396
+ return frame_idx, obj_ids, video_res_masks
397
+
398
+ @torch.inference_mode()
399
+ def add_new_mask(
400
+ self,
401
+ inference_state,
402
+ frame_idx,
403
+ obj_id,
404
+ mask,
405
+ ):
406
+ """Add new mask to a frame."""
407
+ obj_idx = self._obj_id_to_idx(inference_state, obj_id)
408
+ point_inputs_per_frame = inference_state["point_inputs_per_obj"][obj_idx]
409
+ mask_inputs_per_frame = inference_state["mask_inputs_per_obj"][obj_idx]
410
+
411
+ if not isinstance(mask, torch.Tensor):
412
+ mask = torch.tensor(mask, dtype=torch.bool)
413
+ assert mask.dim() == 2
414
+ mask_H, mask_W = mask.shape
415
+ mask_inputs_orig = mask[None, None] # add batch and channel dimension
416
+ mask_inputs_orig = mask_inputs_orig.float().to(inference_state["device"])
417
+
418
+ # resize the mask if it doesn't match the model's image size
419
+ if mask_H != self.image_size or mask_W != self.image_size:
420
+ mask_inputs = torch.nn.functional.interpolate(
421
+ mask_inputs_orig,
422
+ size=(self.image_size, self.image_size),
423
+ align_corners=False,
424
+ mode="bilinear",
425
+ antialias=True, # use antialias for downsampling
426
+ )
427
+ mask_inputs = (mask_inputs >= 0.5).float()
428
+ else:
429
+ mask_inputs = mask_inputs_orig
430
+
431
+ mask_inputs_per_frame[frame_idx] = mask_inputs
432
+ point_inputs_per_frame.pop(frame_idx, None)
433
+ # If this frame hasn't been tracked before, we treat it as an initial conditioning
434
+ # frame, meaning that the inputs points are to generate segments on this frame without
435
+ # using any memory from other frames, like in SAM. Otherwise (if it has been tracked),
436
+ # the input points will be used to correct the already tracked masks.
437
+ is_init_cond_frame = frame_idx not in inference_state["frames_already_tracked"]
438
+ # whether to track in reverse time order
439
+ if is_init_cond_frame:
440
+ reverse = False
441
+ else:
442
+ reverse = inference_state["frames_already_tracked"][frame_idx]["reverse"]
443
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
444
+ obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx]
445
+ # Add a frame to conditioning output if it's an initial conditioning frame or
446
+ # if the model sees all frames receiving clicks/mask as conditioning frames.
447
+ is_cond = is_init_cond_frame or self.add_all_frames_to_correct_as_cond
448
+ storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs"
449
+
450
+ current_out, _ = self._run_single_frame_inference(
451
+ inference_state=inference_state,
452
+ output_dict=obj_output_dict, # run on the slice of a single object
453
+ frame_idx=frame_idx,
454
+ batch_size=1, # run on the slice of a single object
455
+ is_init_cond_frame=is_init_cond_frame,
456
+ point_inputs=None,
457
+ mask_inputs=mask_inputs,
458
+ reverse=reverse,
459
+ # Skip the memory encoder when adding clicks or mask. We execute the memory encoder
460
+ # at the beginning of `propagate_in_video` (after user finalize their clicks). This
461
+ # allows us to enforce non-overlapping constraints on all objects before encoding
462
+ # them into memory.
463
+ run_mem_encoder=False,
464
+ )
465
+ # Add the output to the output dict (to be used as future memory)
466
+ obj_temp_output_dict[storage_key][frame_idx] = current_out
467
+
468
+ # Resize the output mask to the original video resolution
469
+ obj_ids = inference_state["obj_ids"]
470
+ consolidated_out = self._consolidate_temp_output_across_obj(
471
+ inference_state,
472
+ frame_idx,
473
+ is_cond=is_cond,
474
+ run_mem_encoder=False,
475
+ consolidate_at_video_res=True,
476
+ )
477
+ _, video_res_masks = self._get_orig_video_res_output(
478
+ inference_state, consolidated_out["pred_masks_video_res"]
479
+ )
480
+ return frame_idx, obj_ids, video_res_masks
481
+
482
+ def _get_orig_video_res_output(self, inference_state, any_res_masks):
483
+ """
484
+ Resize the object scores to the original video resolution (video_res_masks)
485
+ and apply non-overlapping constraints for final output.
486
+ """
487
+ device = inference_state["device"]
488
+ video_H = inference_state["video_height"]
489
+ video_W = inference_state["video_width"]
490
+ any_res_masks = any_res_masks.to(device, non_blocking=True)
491
+ if any_res_masks.shape[-2:] == (video_H, video_W):
492
+ video_res_masks = any_res_masks
493
+ else:
494
+ video_res_masks = torch.nn.functional.interpolate(
495
+ any_res_masks,
496
+ size=(video_H, video_W),
497
+ mode="bilinear",
498
+ align_corners=False,
499
+ )
500
+ if self.non_overlap_masks:
501
+ video_res_masks = self._apply_non_overlapping_constraints(video_res_masks)
502
+ return any_res_masks, video_res_masks
503
+
504
+ def _consolidate_temp_output_across_obj(
505
+ self,
506
+ inference_state,
507
+ frame_idx,
508
+ is_cond,
509
+ run_mem_encoder,
510
+ consolidate_at_video_res=False,
511
+ ):
512
+ """
513
+ Consolidate the per-object temporary outputs in `temp_output_dict_per_obj` on
514
+ a frame into a single output for all objects, including
515
+ 1) fill any missing objects either from `output_dict_per_obj` (if they exist in
516
+ `output_dict_per_obj` for this frame) or leave them as placeholder values
517
+ (if they don't exist in `output_dict_per_obj` for this frame);
518
+ 2) if specified, rerun memory encoder after apply non-overlapping constraints
519
+ on the object scores.
520
+ """
521
+ batch_size = self._get_obj_num(inference_state)
522
+ storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs"
523
+ # Optionally, we allow consolidating the temporary outputs at the original
524
+ # video resolution (to provide a better editing experience for mask prompts).
525
+ if consolidate_at_video_res:
526
+ assert not run_mem_encoder, "memory encoder cannot run at video resolution"
527
+ consolidated_H = inference_state["video_height"]
528
+ consolidated_W = inference_state["video_width"]
529
+ consolidated_mask_key = "pred_masks_video_res"
530
+ else:
531
+ consolidated_H = consolidated_W = self.image_size // 4
532
+ consolidated_mask_key = "pred_masks"
533
+
534
+ # Initialize `consolidated_out`. Its "maskmem_features" and "maskmem_pos_enc"
535
+ # will be added when rerunning the memory encoder after applying non-overlapping
536
+ # constraints to object scores. Its "pred_masks" are prefilled with a large
537
+ # negative value (NO_OBJ_SCORE) to represent missing objects.
538
+ consolidated_out = {
539
+ "maskmem_features": None,
540
+ "maskmem_pos_enc": None,
541
+ consolidated_mask_key: torch.full(
542
+ size=(batch_size, 1, consolidated_H, consolidated_W),
543
+ fill_value=NO_OBJ_SCORE,
544
+ dtype=torch.float32,
545
+ device=inference_state["storage_device"],
546
+ ),
547
+ "obj_ptr": torch.full(
548
+ size=(batch_size, self.hidden_dim),
549
+ fill_value=NO_OBJ_SCORE,
550
+ dtype=torch.float32,
551
+ device=inference_state["device"],
552
+ ),
553
+ }
554
+ empty_mask_ptr = None
555
+ for obj_idx in range(batch_size):
556
+ obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx]
557
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
558
+ out = obj_temp_output_dict[storage_key].get(frame_idx, None)
559
+ # If the object doesn't appear in "temp_output_dict_per_obj" on this frame,
560
+ # we fall back and look up its previous output in "output_dict_per_obj".
561
+ # We look up both "cond_frame_outputs" and "non_cond_frame_outputs" in
562
+ # "output_dict_per_obj" to find a previous output for this object.
563
+ if out is None:
564
+ out = obj_output_dict["cond_frame_outputs"].get(frame_idx, None)
565
+ if out is None:
566
+ out = obj_output_dict["non_cond_frame_outputs"].get(frame_idx, None)
567
+ # If the object doesn't appear in "output_dict_per_obj" either, we skip it
568
+ # and leave its mask scores to the default scores (i.e. the NO_OBJ_SCORE
569
+ # placeholder above) and set its object pointer to be a dummy pointer.
570
+ if out is None:
571
+ # Fill in dummy object pointers for those objects without any inputs or
572
+ # tracking outcomes on this frame (only do it under `run_mem_encoder=True`,
573
+ # i.e. when we need to build the memory for tracking).
574
+ if run_mem_encoder:
575
+ if empty_mask_ptr is None:
576
+ empty_mask_ptr = self._get_empty_mask_ptr(
577
+ inference_state, frame_idx
578
+ )
579
+ # fill object pointer with a dummy pointer (based on an empty mask)
580
+ consolidated_out["obj_ptr"][obj_idx : obj_idx + 1] = empty_mask_ptr
581
+ continue
582
+ # Add the temporary object output mask to consolidated output mask
583
+ obj_mask = out["pred_masks"]
584
+ consolidated_pred_masks = consolidated_out[consolidated_mask_key]
585
+ if obj_mask.shape[-2:] == consolidated_pred_masks.shape[-2:]:
586
+ consolidated_pred_masks[obj_idx : obj_idx + 1] = obj_mask
587
+ else:
588
+ # Resize first if temporary object mask has a different resolution
589
+ resized_obj_mask = torch.nn.functional.interpolate(
590
+ obj_mask,
591
+ size=consolidated_pred_masks.shape[-2:],
592
+ mode="bilinear",
593
+ align_corners=False,
594
+ )
595
+ consolidated_pred_masks[obj_idx : obj_idx + 1] = resized_obj_mask
596
+ consolidated_out["obj_ptr"][obj_idx : obj_idx + 1] = out["obj_ptr"]
597
+
598
+ # Optionally, apply non-overlapping constraints on the consolidated scores
599
+ # and rerun the memory encoder
600
+ if run_mem_encoder:
601
+ device = inference_state["device"]
602
+ high_res_masks = torch.nn.functional.interpolate(
603
+ consolidated_out["pred_masks"].to(device, non_blocking=True),
604
+ size=(self.image_size, self.image_size),
605
+ mode="bilinear",
606
+ align_corners=False,
607
+ )
608
+ if self.non_overlap_masks_for_mem_enc:
609
+ high_res_masks = self._apply_non_overlapping_constraints(high_res_masks)
610
+ maskmem_features, maskmem_pos_enc = self._run_memory_encoder(
611
+ inference_state=inference_state,
612
+ frame_idx=frame_idx,
613
+ batch_size=batch_size,
614
+ high_res_masks=high_res_masks,
615
+ is_mask_from_pts=True, # these frames are what the user interacted with
616
+ )
617
+ consolidated_out["maskmem_features"] = maskmem_features
618
+ consolidated_out["maskmem_pos_enc"] = maskmem_pos_enc
619
+
620
+ return consolidated_out
621
+
622
+ def _get_empty_mask_ptr(self, inference_state, frame_idx):
623
+ """Get a dummy object pointer based on an empty mask on the current frame."""
624
+ # A dummy (empty) mask with a single object
625
+ batch_size = 1
626
+ mask_inputs = torch.zeros(
627
+ (batch_size, 1, self.image_size, self.image_size),
628
+ dtype=torch.float32,
629
+ device=inference_state["device"],
630
+ )
631
+
632
+ # Retrieve correct image features
633
+ (
634
+ _,
635
+ _,
636
+ current_vision_feats,
637
+ current_vision_pos_embeds,
638
+ feat_sizes,
639
+ ) = self._get_image_feature(inference_state, frame_idx, batch_size)
640
+
641
+ # Feed the empty mask and image feature above to get a dummy object pointer
642
+ current_out = self.track_step(
643
+ frame_idx=frame_idx,
644
+ is_init_cond_frame=True,
645
+ current_vision_feats=current_vision_feats,
646
+ current_vision_pos_embeds=current_vision_pos_embeds,
647
+ feat_sizes=feat_sizes,
648
+ point_inputs=None,
649
+ mask_inputs=mask_inputs,
650
+ output_dict={},
651
+ num_frames=inference_state["num_frames"],
652
+ track_in_reverse=False,
653
+ run_mem_encoder=False,
654
+ prev_sam_mask_logits=None,
655
+ )
656
+ return current_out["obj_ptr"]
657
+
658
+ @torch.inference_mode()
659
+ def propagate_in_video_preflight(self, inference_state):
660
+ """Prepare inference_state and consolidate temporary outputs before tracking."""
661
+ # Tracking has started and we don't allow adding new objects until session is reset.
662
+ inference_state["tracking_has_started"] = True
663
+ batch_size = self._get_obj_num(inference_state)
664
+
665
+ # Consolidate per-object temporary outputs in "temp_output_dict_per_obj" and
666
+ # add them into "output_dict".
667
+ temp_output_dict_per_obj = inference_state["temp_output_dict_per_obj"]
668
+ output_dict = inference_state["output_dict"]
669
+ # "consolidated_frame_inds" contains indices of those frames where consolidated
670
+ # temporary outputs have been added (either in this call or any previous calls
671
+ # to `propagate_in_video_preflight`).
672
+ consolidated_frame_inds = inference_state["consolidated_frame_inds"]
673
+ for is_cond in [False, True]:
674
+ # Separately consolidate conditioning and non-conditioning temp outputs
675
+ storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs"
676
+ # Find all the frames that contain temporary outputs for any objects
677
+ # (these should be the frames that have just received clicks for mask inputs
678
+ # via `add_new_points` or `add_new_mask`)
679
+ temp_frame_inds = set()
680
+ for obj_temp_output_dict in temp_output_dict_per_obj.values():
681
+ temp_frame_inds.update(obj_temp_output_dict[storage_key].keys())
682
+ consolidated_frame_inds[storage_key].update(temp_frame_inds)
683
+ # consolidate the temporary output across all objects on this frame
684
+ for frame_idx in temp_frame_inds:
685
+ consolidated_out = self._consolidate_temp_output_across_obj(
686
+ inference_state, frame_idx, is_cond=is_cond, run_mem_encoder=True
687
+ )
688
+ # merge them into "output_dict" and also create per-object slices
689
+ output_dict[storage_key][frame_idx] = consolidated_out
690
+ self._add_output_per_object(
691
+ inference_state, frame_idx, consolidated_out, storage_key
692
+ )
693
+ clear_non_cond_mem = self.clear_non_cond_mem_around_input and (
694
+ self.clear_non_cond_mem_for_multi_obj or batch_size <= 1
695
+ )
696
+ if clear_non_cond_mem:
697
+ # clear non-conditioning memory of the surrounding frames
698
+ self._clear_non_cond_mem_around_input(inference_state, frame_idx)
699
+
700
+ # clear temporary outputs in `temp_output_dict_per_obj`
701
+ for obj_temp_output_dict in temp_output_dict_per_obj.values():
702
+ obj_temp_output_dict[storage_key].clear()
703
+
704
+ # edge case: if an output is added to "cond_frame_outputs", we remove any prior
705
+ # output on the same frame in "non_cond_frame_outputs"
706
+ for frame_idx in output_dict["cond_frame_outputs"]:
707
+ output_dict["non_cond_frame_outputs"].pop(frame_idx, None)
708
+ for obj_output_dict in inference_state["output_dict_per_obj"].values():
709
+ for frame_idx in obj_output_dict["cond_frame_outputs"]:
710
+ obj_output_dict["non_cond_frame_outputs"].pop(frame_idx, None)
711
+ for frame_idx in consolidated_frame_inds["cond_frame_outputs"]:
712
+ assert frame_idx in output_dict["cond_frame_outputs"]
713
+ consolidated_frame_inds["non_cond_frame_outputs"].discard(frame_idx)
714
+
715
+ # Make sure that the frame indices in "consolidated_frame_inds" are exactly those frames
716
+ # with either points or mask inputs (which should be true under a correct workflow).
717
+ all_consolidated_frame_inds = (
718
+ consolidated_frame_inds["cond_frame_outputs"]
719
+ | consolidated_frame_inds["non_cond_frame_outputs"]
720
+ )
721
+ input_frames_inds = set()
722
+ for point_inputs_per_frame in inference_state["point_inputs_per_obj"].values():
723
+ input_frames_inds.update(point_inputs_per_frame.keys())
724
+ for mask_inputs_per_frame in inference_state["mask_inputs_per_obj"].values():
725
+ input_frames_inds.update(mask_inputs_per_frame.keys())
726
+ assert all_consolidated_frame_inds == input_frames_inds
727
+
728
+ @torch.inference_mode()
729
+ def propagate_in_video(
730
+ self,
731
+ inference_state,
732
+ start_frame_idx=None,
733
+ max_frame_num_to_track=None,
734
+ reverse=False,
735
+ ):
736
+ """Propagate the input points across frames to track in the entire video."""
737
+ self.propagate_in_video_preflight(inference_state)
738
+
739
+ output_dict = inference_state["output_dict"]
740
+ consolidated_frame_inds = inference_state["consolidated_frame_inds"]
741
+ obj_ids = inference_state["obj_ids"]
742
+ num_frames = inference_state["num_frames"]
743
+ batch_size = self._get_obj_num(inference_state)
744
+ if len(output_dict["cond_frame_outputs"]) == 0:
745
+ raise RuntimeError("No points are provided; please add points first")
746
+ clear_non_cond_mem = self.clear_non_cond_mem_around_input and (
747
+ self.clear_non_cond_mem_for_multi_obj or batch_size <= 1
748
+ )
749
+
750
+ # set start index, end index, and processing order
751
+ if start_frame_idx is None:
752
+ # default: start from the earliest frame with input points
753
+ start_frame_idx = min(output_dict["cond_frame_outputs"])
754
+ if max_frame_num_to_track is None:
755
+ # default: track all the frames in the video
756
+ max_frame_num_to_track = num_frames
757
+ if reverse:
758
+ end_frame_idx = max(start_frame_idx - max_frame_num_to_track, 0)
759
+ if start_frame_idx > 0:
760
+ processing_order = range(start_frame_idx, end_frame_idx - 1, -1)
761
+ else:
762
+ processing_order = [] # skip reverse tracking if starting from frame 0
763
+ else:
764
+ end_frame_idx = min(
765
+ start_frame_idx + max_frame_num_to_track, num_frames - 1
766
+ )
767
+ processing_order = range(start_frame_idx, end_frame_idx + 1)
768
+
769
+ for frame_idx in tqdm(processing_order, desc="propagate in video"):
770
+ # We skip those frames already in consolidated outputs (these are frames
771
+ # that received input clicks or mask). Note that we cannot directly run
772
+ # batched forward on them via `_run_single_frame_inference` because the
773
+ # number of clicks on each object might be different.
774
+ if frame_idx in consolidated_frame_inds["cond_frame_outputs"]:  # prompted frames already produced masks in add_new_points, so no need to run them again
775
+ storage_key = "cond_frame_outputs"
776
+ current_out = output_dict[storage_key][frame_idx]
777
+ pred_masks = current_out["pred_masks"]
778
+ if clear_non_cond_mem:
779
+ # clear non-conditioning memory of the surrounding frames
780
+ self._clear_non_cond_mem_around_input(inference_state, frame_idx)
781
+ elif frame_idx in consolidated_frame_inds["non_cond_frame_outputs"]:
782
+ storage_key = "non_cond_frame_outputs"
783
+ current_out = output_dict[storage_key][frame_idx]
784
+ pred_masks = current_out["pred_masks"]
785
+ else:  # compute masks for frames without prompts
786
+ storage_key = "non_cond_frame_outputs"
787
+ current_out, pred_masks = self._run_single_frame_inference(
788
+ inference_state=inference_state,
789
+ output_dict=output_dict,
790
+ frame_idx=frame_idx,
791
+ batch_size=batch_size,
792
+ is_init_cond_frame=False,
793
+ point_inputs=None,
794
+ mask_inputs=None,
795
+ reverse=reverse,
796
+ run_mem_encoder=True,
797
+ )
798
+ output_dict[storage_key][frame_idx] = current_out
799
+ # Create slices of per-object outputs for subsequent interaction with each
800
+ # individual object after tracking.
801
+ self._add_output_per_object(
802
+ inference_state, frame_idx, current_out, storage_key
803
+ )
804
+ inference_state["frames_already_tracked"][frame_idx] = {"reverse": reverse}
805
+
806
+ # Resize the output mask to the original video resolution (we directly use
807
+ # the mask scores on GPU for output to avoid any CPU conversion in between)
808
+ _, video_res_masks = self._get_orig_video_res_output(
809
+ inference_state, pred_masks
810
+ )
811
+ yield frame_idx, obj_ids, video_res_masks
812
+
813
+ def _add_output_per_object(
814
+ self, inference_state, frame_idx, current_out, storage_key
815
+ ):
816
+ """
817
+ Split a multi-object output into per-object output slices and add them into
818
+ `output_dict_per_obj`. The resulting slices share the same tensor storage.
819
+ """
820
+ maskmem_features = current_out["maskmem_features"]
821
+ assert maskmem_features is None or isinstance(maskmem_features, torch.Tensor)
822
+
823
+ maskmem_pos_enc = current_out["maskmem_pos_enc"]
824
+ assert maskmem_pos_enc is None or isinstance(maskmem_pos_enc, list)
825
+
826
+ output_dict_per_obj = inference_state["output_dict_per_obj"]
827
+ for obj_idx, obj_output_dict in output_dict_per_obj.items():
828
+ obj_slice = slice(obj_idx, obj_idx + 1)
829
+ obj_out = {
830
+ "maskmem_features": None,
831
+ "maskmem_pos_enc": None,
832
+ "pred_masks": current_out["pred_masks"][obj_slice],
833
+ "obj_ptr": current_out["obj_ptr"][obj_slice],
834
+ }
835
+ if maskmem_features is not None:
836
+ obj_out["maskmem_features"] = maskmem_features[obj_slice]
837
+ if maskmem_pos_enc is not None:
838
+ obj_out["maskmem_pos_enc"] = [x[obj_slice] for x in maskmem_pos_enc]
839
+ obj_output_dict[storage_key][frame_idx] = obj_out
840
+
841
+ @torch.inference_mode()
842
+ def reset_state(self, inference_state):
843
+ """Remove all input points or mask in all frames throughout the video."""
844
+ self._reset_tracking_results(inference_state)
845
+ # Remove all object ids
846
+ inference_state["obj_id_to_idx"].clear()
847
+ inference_state["obj_idx_to_id"].clear()
848
+ inference_state["obj_ids"].clear()
849
+ inference_state["point_inputs_per_obj"].clear()
850
+ inference_state["mask_inputs_per_obj"].clear()
851
+ inference_state["output_dict_per_obj"].clear()
852
+ inference_state["temp_output_dict_per_obj"].clear()
853
+
854
+ def _reset_tracking_results(self, inference_state):
855
+ """Reset all tracking inputs and results across the videos."""
856
+ for v in inference_state["point_inputs_per_obj"].values():
857
+ v.clear()
858
+ for v in inference_state["mask_inputs_per_obj"].values():
859
+ v.clear()
860
+ for v in inference_state["output_dict_per_obj"].values():
861
+ v["cond_frame_outputs"].clear()
862
+ v["non_cond_frame_outputs"].clear()
863
+ for v in inference_state["temp_output_dict_per_obj"].values():
864
+ v["cond_frame_outputs"].clear()
865
+ v["non_cond_frame_outputs"].clear()
866
+ inference_state["output_dict"]["cond_frame_outputs"].clear()
867
+ inference_state["output_dict"]["non_cond_frame_outputs"].clear()
868
+ inference_state["consolidated_frame_inds"]["cond_frame_outputs"].clear()
869
+ inference_state["consolidated_frame_inds"]["non_cond_frame_outputs"].clear()
870
+ inference_state["tracking_has_started"] = False
871
+ inference_state["frames_already_tracked"].clear()
872
+
873
+ def _get_image_feature(self, inference_state, frame_idx, batch_size):
874
+ """Compute the image features on a given frame."""
875
+ # Look up in the cache first
876
+ image, backbone_out = inference_state["cached_features"].get(
877
+ frame_idx, (None, None)
878
+ )
879
+ if backbone_out is None:
880
+ # Cache miss -- we will run inference on a single image
881
+ image = inference_state["images"][frame_idx].cuda().float().unsqueeze(0)
882
+ backbone_out = self.forward_image(image)
883
+ # Cache the most recent frame's feature (for repeated interactions with
884
+ # a frame; we can use an LRU cache for more frames in the future).
885
+ inference_state["cached_features"] = {frame_idx: (image, backbone_out)}
886
+
887
+ # expand the features to have the same dimension as the number of objects
888
+ expanded_image = image.expand(batch_size, -1, -1, -1)  # batch_size is the number of objects
889
+ expanded_backbone_out = {
890
+ "backbone_fpn": backbone_out["backbone_fpn"].copy(),
891
+ "vision_pos_enc": backbone_out["vision_pos_enc"].copy(),
892
+ }
893
+ for i, feat in enumerate(expanded_backbone_out["backbone_fpn"]):
894
+ expanded_backbone_out["backbone_fpn"][i] = feat.expand(
895
+ batch_size, -1, -1, -1
896
+ )
897
+ for i, pos in enumerate(expanded_backbone_out["vision_pos_enc"]):
898
+ pos = pos.expand(batch_size, -1, -1, -1)
899
+ expanded_backbone_out["vision_pos_enc"][i] = pos
900
+
901
+ features = self._prepare_backbone_features(expanded_backbone_out)
902
+ features = (expanded_image,) + features  # pack everything into a single tuple
903
+ return features
904
+
905
+ def _run_single_frame_inference(
906
+ self,
907
+ inference_state,
908
+ output_dict,
909
+ frame_idx,
910
+ batch_size,
911
+ is_init_cond_frame,
912
+ point_inputs,
913
+ mask_inputs,
914
+ reverse,
915
+ run_mem_encoder,
916
+ prev_sam_mask_logits=None,
917
+ ):
918
+ """Run tracking on a single frame based on current inputs and previous memory."""
919
+ # Retrieve correct image features
920
+ (
921
+ _,
922
+ _,
923
+ current_vision_feats,
924
+ current_vision_pos_embeds,
925
+ feat_sizes,
926
+ ) = self._get_image_feature(inference_state, frame_idx, batch_size)  # run the image encoder
927
+
928
+ # point and mask should not appear as input simultaneously on the same frame
929
+ assert point_inputs is None or mask_inputs is None
930
+ current_out = self.track_step(
931
+ frame_idx=frame_idx,
932
+ is_init_cond_frame=is_init_cond_frame,
933
+ current_vision_feats=current_vision_feats,
934
+ current_vision_pos_embeds=current_vision_pos_embeds,
935
+ feat_sizes=feat_sizes,
936
+ point_inputs=point_inputs,
937
+ mask_inputs=mask_inputs,
938
+ output_dict=output_dict,
939
+ num_frames=inference_state["num_frames"],
940
+ track_in_reverse=reverse,
941
+ run_mem_encoder=run_mem_encoder,  # whether to run the memory encoder on this frame's mask outputs
942
+ prev_sam_mask_logits=prev_sam_mask_logits,
943
+ )
944
+
945
+ # optionally offload the output to CPU memory to save GPU space
946
+ storage_device = inference_state["storage_device"]
947
+ maskmem_features = current_out["maskmem_features"]
948
+ if maskmem_features is not None:
949
+ maskmem_features = maskmem_features.to(torch.bfloat16)
950
+ maskmem_features = maskmem_features.to(storage_device, non_blocking=True)
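+ # bfloat16 keeps float32's exponent range at half the storage cost, so the offloaded
+ # memory features take roughly half the space they would in float32.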
951
+ pred_masks_gpu = current_out["pred_masks"]
952
+ # potentially fill holes in the predicted masks
953
+ if self.fill_hole_area > 0:
954
+ pred_masks_gpu = fill_holes_in_mask_scores(
955
+ pred_masks_gpu, self.fill_hole_area
956
+ )
957
+ pred_masks = pred_masks_gpu.to(storage_device, non_blocking=True)
958
+ # "maskmem_pos_enc" is the same across frames, so we only need to store one copy of it
959
+ maskmem_pos_enc = self._get_maskmem_pos_enc(inference_state, current_out)
960
+ # object pointer is a small tensor, so we always keep it on GPU memory for fast access
961
+ obj_ptr = current_out["obj_ptr"]
962
+ # make a compact version of this frame's output to reduce the state size
963
+ compact_current_out = {
964
+ "maskmem_features": maskmem_features,
965
+ "maskmem_pos_enc": maskmem_pos_enc,
966
+ "pred_masks": pred_masks,
967
+ "obj_ptr": obj_ptr,
968
+ }
969
+ return compact_current_out, pred_masks_gpu
970
+
971
+ def _run_memory_encoder(
972
+ self, inference_state, frame_idx, batch_size, high_res_masks, is_mask_from_pts
973
+ ):
974
+ """
975
+ Run the memory encoder on `high_res_masks`. This is usually done after applying
976
+ non-overlapping constraints to object scores. Since their scores changed, their
977
+ memory also needs to be computed again with the memory encoder.
978
+ """
979
+ # Retrieve correct image features
980
+ _, _, current_vision_feats, _, feat_sizes = self._get_image_feature(
981
+ inference_state, frame_idx, batch_size
982
+ )
983
+ maskmem_features, maskmem_pos_enc = self._encode_new_memory(
984
+ current_vision_feats=current_vision_feats,
985
+ feat_sizes=feat_sizes,
986
+ pred_masks_high_res=high_res_masks,
987
+ is_mask_from_pts=is_mask_from_pts,
988
+ )
989
+
990
+ # optionally offload the output to CPU memory to save GPU space
991
+ storage_device = inference_state["storage_device"]
992
+ maskmem_features = maskmem_features.to(torch.bfloat16)
993
+ maskmem_features = maskmem_features.to(storage_device, non_blocking=True)
994
+ # "maskmem_pos_enc" is the same across frames, so we only need to store one copy of it,所有帧的memory embedding对应的位置编码都是一样的,所以只需要拷贝第一份即可
995
+ maskmem_pos_enc = self._get_maskmem_pos_enc(
996
+ inference_state, {"maskmem_pos_enc": maskmem_pos_enc}
997
+ )
998
+ return maskmem_features, maskmem_pos_enc
999
+
1000
+ def _get_maskmem_pos_enc(self, inference_state, current_out):
1001
+ """
1002
+ `maskmem_pos_enc` is the same across frames and objects, so we cache it as
1003
+ a constant in the inference session to reduce session storage size.
1004
+ """
1005
+ model_constants = inference_state["constants"]
1006
+ # "out_maskmem_pos_enc" should be either a list of tensors or None
1007
+ out_maskmem_pos_enc = current_out["maskmem_pos_enc"]
1008
+ if out_maskmem_pos_enc is not None:
1009
+ if "maskmem_pos_enc" not in model_constants:
1010
+ assert isinstance(out_maskmem_pos_enc, list)
1011
+ # only take the slice for one object, since it's the same across objects
1012
+ maskmem_pos_enc = [x[0:1].clone() for x in out_maskmem_pos_enc]
1013
+ model_constants["maskmem_pos_enc"] = maskmem_pos_enc
1014
+ else:
1015
+ maskmem_pos_enc = model_constants["maskmem_pos_enc"]
1016
+ # expand the cached maskmem_pos_enc to the actual batch size
1017
+ batch_size = out_maskmem_pos_enc[0].size(0)
1018
+ expanded_maskmem_pos_enc = [
1019
+ x.expand(batch_size, -1, -1, -1) for x in maskmem_pos_enc
1020
+ ]
1021
+ else:
1022
+ expanded_maskmem_pos_enc = None
1023
+ return expanded_maskmem_pos_enc
1024
+
1025
+ def _clear_non_cond_mem_around_input(self, inference_state, frame_idx):
1026
+ """
1027
+ Remove the non-conditioning memory around the input frame. When users provide
1028
+ correction clicks, the surrounding frames' non-conditioning memories can still
1029
+ contain outdated object appearance information and could confuse the model.
1030
+
1031
+ This method clears those non-conditioning memories surrounding the interacted
1032
+ frame to avoid giving the model both old and new information about the object.
1033
+ """
1034
+ r = self.memory_temporal_stride_for_eval
1035
+ frame_idx_begin = frame_idx - r * self.num_maskmem
1036
+ frame_idx_end = frame_idx + r * self.num_maskmem
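+ # For example, with num_maskmem = 7 and a memory stride r = 1 (illustrative values),
+ # a correction on frame 20 clears non-conditioning memories for frames 13 through 27.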
1037
+ output_dict = inference_state["output_dict"]
1038
+ non_cond_frame_outputs = output_dict["non_cond_frame_outputs"]
1039
+ for t in range(frame_idx_begin, frame_idx_end + 1):
1040
+ non_cond_frame_outputs.pop(t, None)
1041
+ for obj_output_dict in inference_state["output_dict_per_obj"].values():
1042
+ obj_output_dict["non_cond_frame_outputs"].pop(t, None)
SAM2/sam2/utils/__init__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
SAM2/sam2/utils/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (170 Bytes). View file
 
SAM2/sam2/utils/__pycache__/amg.cpython-310.pyc ADDED
Binary file (12.1 kB). View file
 
SAM2/sam2/utils/__pycache__/misc.cpython-310.pyc ADDED
Binary file (7.38 kB). View file
 
SAM2/sam2/utils/__pycache__/transforms.cpython-310.pyc ADDED
Binary file (3.58 kB). View file