Refactor (#5)
* refactor loading script
* update test
* update CI
* update
* update
* update
* update
* fix for CI
* update README.md
- .github/workflows/ci.yaml +3 -3
- MSCOCO.py +439 -332
- README.md +273 -0
- tests/MSCOCO_test.py +16 -0
.github/workflows/ci.yaml
CHANGED
```diff
@@ -44,6 +44,6 @@ jobs:
           --no-site-packages \
           --cache-dir=/dev/null
 
-
-
-
+      - name: Run tests
+        run: |
+          poetry run pytest --color=yes -rf
```
MSCOCO.py
CHANGED
```diff
@@ -1,3 +1,4 @@
+import abc
 import json
 import logging
 import os
```
```diff
@@ -89,6 +90,104 @@ _URLS = {
     },
 }
 
+CATEGORIES: Final[List[str]] = [
+    "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train",
+    "truck", "boat", "traffic light", "fire hydrant", "stop sign",
+    "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
+    "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag",
+    "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite",
+    "baseball bat", "baseball glove", "skateboard", "surfboard",
+    "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon",
+    "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot",
+    "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant",
+    "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote",
+    "keyboard", "cell phone", "microwave", "oven", "toaster", "sink",
+    "refrigerator", "book", "clock", "vase", "scissors", "teddy bear",
+    "hair drier", "toothbrush",
+]
+
+SUPER_CATEGORIES: Final[List[str]] = [
+    "person", "vehicle", "outdoor", "animal", "accessory", "sports",
+    "kitchen", "food", "furniture", "electronic", "appliance", "indoor",
+]
+
 
 @dataclass
 class AnnotationInfo(object):
```
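These two constants (80 object categories, 12 supercategories) are consumed further down by `InstancesProcessor.get_features_instance_dict`, which wraps them in `ds.ClassLabel` so category names are stored as integer ids but round-trip to strings. A minimal sketch of that mapping, using a truncated stand-in list rather than the full 80 names:

```python
import datasets as ds

# Stand-in for the full CATEGORIES list above; the real feature uses all 80 names.
names = ["person", "bicycle", "car"]
label = ds.ClassLabel(num_classes=len(names), names=names)

assert label.str2int("bicycle") == 1  # encode on write
assert label.int2str(2) == "car"      # decode on read
```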
```diff
@@ -250,21 +349,21 @@ class InstancesAnnotationData(AnnotationData):
         image_data = images[image_id]
         iscrowd = bool(json_dict["iscrowd"])
 
-        if decode_rle:
-            segmentation_mask = cls.rle_segmentation_to_mask(
+        segmentation_mask = (
+            cls.rle_segmentation_to_mask(
                 segmentation=segmentation,
                 iscrowd=iscrowd,
                 height=image_data.height,
                 width=image_data.width,
             )
-        else:
-            segmentation_mask = cls.compress_rle(
+            if decode_rle
+            else cls.compress_rle(
                 segmentation=segmentation,
                 iscrowd=iscrowd,
                 height=image_data.height,
                 width=image_data.width,
             )
+        )
         return cls(
             #
             # for AnnotationData
```
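The statement form collapses into a single conditional expression; either way the input is a COCO polygon or RLE segmentation. As a rough sketch of what the two branches amount to, assuming the conventional `pycocotools` backend (the helper names here are illustrative, not the loader's actual methods):

```python
import numpy as np
from pycocotools import mask as cocomask  # standard COCO mask utilities


def segmentation_to_rle(segmentation, height: int, width: int) -> dict:
    # Polygons (lists of float coordinates) become one or more RLEs that are
    # merged into a single encoding; an uncompressed RLE dict
    # ({"counts": [...], "size": [h, w]}) comes back already compressed.
    rles = cocomask.frPyObjects(segmentation, height, width)
    return cocomask.merge(rles) if isinstance(rles, list) else rles


def rle_to_binary_mask(rle: dict) -> np.ndarray:
    # decode_rle=True corresponds to materializing this (height, width)
    # uint8 binary mask instead of keeping the compact RLE dict.
    return cocomask.decode(rle)
```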
```diff
@@ -327,22 +426,21 @@ class PersonKeypointsAnnotationData(InstancesAnnotationData):
         image_data = images[image_id]
         iscrowd = bool(json_dict["iscrowd"])
 
-        if decode_rle:
-            segmentation_mask = cls.rle_segmentation_to_mask(
+        segmentation_mask = (
+            cls.rle_segmentation_to_mask(
                 segmentation=segmentation,
                 iscrowd=iscrowd,
                 height=image_data.height,
                 width=image_data.width,
             )
-        else:
-            segmentation_mask = cls.compress_rle(
+            if decode_rle
+            else cls.compress_rle(
                 segmentation=segmentation,
                 iscrowd=iscrowd,
                 height=image_data.height,
                 width=image_data.width,
             )
-
+        )
         flatten_keypoints = json_dict["keypoints"]
         num_keypoints = json_dict["num_keypoints"]
         keypoints = cls.get_person_keypoints(flatten_keypoints, num_keypoints)
```
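For context on `flatten_keypoints`: COCO stores person keypoints as one flat list of `[x, y, v]` triplets (17 per person), which `get_person_keypoints` unpacks into the structured dicts used by the features. A hedged sketch of that unpacking (an illustrative re-implementation; the real method also attaches a human-readable `state` derived from the visibility flag):

```python
from typing import Dict, List

# v follows the COCO convention: 0 = not labeled, 1 = labeled but not
# visible, 2 = labeled and visible.
def unpack_keypoints(flat: List[int]) -> List[Dict[str, int]]:
    assert len(flat) % 3 == 0
    return [
        {"x": x, "y": y, "v": v}
        for x, y, v in zip(flat[0::3], flat[1::3], flat[2::3])
    ]

kps = unpack_keypoints([142, 309, 1, 177, 320, 2, 191, 398, 2])
assert kps[1] == {"x": 177, "y": 320, "v": 2}
```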
```diff
@@ -397,32 +495,6 @@ class CaptionExample(BaseExample):
     annotations: List[CaptionAnnotationDict]
 
 
-def generate_captions_examples(
-    image_dir: str,
-    images: Dict[ImageId, ImageData],
-    annotations: Dict[ImageId, List[CaptionsAnnotationData]],
-    licenses: Dict[LicenseId, LicenseData],
-) -> Iterator[Tuple[int, CaptionExample]]:
-    for idx, image_id in enumerate(images.keys()):
-        image_data = images[image_id]
-        image_anns = annotations[image_id]
-
-        assert len(image_anns) > 0
-
-        image = _load_image(
-            image_path=os.path.join(image_dir, image_data.file_name),
-        )
-        example = asdict(image_data)
-        example["image"] = image
-        example["license"] = asdict(licenses[image_data.license_id])
-
-        example["annotations"] = []
-        for ann in image_anns:
-            example["annotations"].append(asdict(ann))
-
-        yield idx, example  # type: ignore
-
-
 class CategoryDict(TypedDict):
     category_id: CategoryId
     name: str
```
```diff
@@ -444,38 +516,6 @@ class InstanceExample(BaseExample):
     annotations: List[InstanceAnnotationDict]
 
 
-def generate_instances_examples(
-    image_dir: str,
-    images: Dict[ImageId, ImageData],
-    annotations: Dict[ImageId, List[InstancesAnnotationData]],
-    licenses: Dict[LicenseId, LicenseData],
-    categories: Dict[CategoryId, CategoryData],
-) -> Iterator[Tuple[int, InstanceExample]]:
-    for idx, image_id in enumerate(images.keys()):
-        image_data = images[image_id]
-        image_anns = annotations[image_id]
-
-        if len(image_anns) < 1:
-            logger.warning(f"No annotation found for image id: {image_id}.")
-            continue
-
-        image = _load_image(
-            image_path=os.path.join(image_dir, image_data.file_name),
-        )
-        example = asdict(image_data)
-        example["image"] = image
-        example["license"] = asdict(licenses[image_data.license_id])
-
-        example["annotations"] = []
-        for ann in image_anns:
-            ann_dict = asdict(ann)
-            category = categories[ann.category_id]
-            ann_dict["category"] = asdict(category)
-            example["annotations"].append(ann_dict)
-
-        yield idx, example  # type: ignore
-
-
 class KeypointDict(TypedDict):
     x: int
     y: int
```
```diff
@@ -492,37 +532,300 @@ class PersonKeypointExample(BaseExample):
     annotations: List[PersonKeypointAnnotationDict]
 
 
-def generate_person_keypoints_examples(
-    image_dir: str,
-    images: Dict[ImageId, ImageData],
-    annotations: Dict[ImageId, List[PersonKeypointsAnnotationData]],
-    licenses: Dict[LicenseId, LicenseData],
-    categories: Dict[CategoryId, CategoryData],
-) -> Iterator[Tuple[int, PersonKeypointExample]]:
-    for idx, image_id in enumerate(images.keys()):
-        image_data = images[image_id]
-        image_anns = annotations[image_id]
-
-        if len(image_anns) < 1:
-            continue
-
-        image = _load_image(
-            image_path=os.path.join(image_dir, image_data.file_name),
-        )
-        example = asdict(image_data)
-        example["image"] = image
-        example["license"] = asdict(licenses[image_data.license_id])
-
-        example["annotations"] = []
-        for ann in image_anns:
-            ann_dict = asdict(ann)
-            category = categories[ann.category_id]
-            ann_dict["category"] = asdict(category)
-            example["annotations"].append(ann_dict)
-
-        yield idx, example  # type: ignore
+class MsCocoProcessor(object, metaclass=abc.ABCMeta):
+    def load_image(self, image_path: str) -> PilImage:
+        return Image.open(image_path)
+
+    def load_annotation_json(self, ann_file_path: str) -> JsonDict:
+        logger.info(f"Load annotation json from {ann_file_path}")
+        with open(ann_file_path, "r") as rf:
+            ann_json = json.load(rf)
+        return ann_json
+
+    def load_licenses_data(
+        self, license_dicts: List[JsonDict]
+    ) -> Dict[LicenseId, LicenseData]:
+        licenses = {}
+        for license_dict in license_dicts:
+            license_data = LicenseData.from_dict(license_dict)
+            licenses[license_data.license_id] = license_data
+        return licenses
+
+    def load_images_data(
+        self,
+        image_dicts: List[JsonDict],
+        tqdm_desc: str = "Load images",
+    ) -> Dict[ImageId, ImageData]:
+        images = {}
+        for image_dict in tqdm(image_dicts, desc=tqdm_desc):
+            image_data = ImageData.from_dict(image_dict)
+            images[image_data.image_id] = image_data
+        return images
+
+    def load_categories_data(
+        self,
+        category_dicts: List[JsonDict],
+        tqdm_desc: str = "Load categories",
+    ) -> Dict[CategoryId, CategoryData]:
+        categories = {}
+        for category_dict in tqdm(category_dicts, desc=tqdm_desc):
+            category_data = CategoryData.from_dict(category_dict)
+            categories[category_data.category_id] = category_data
+        return categories
+
+    def get_features_base_dict(self):
+        return {
+            "image_id": ds.Value("int64"),
+            "image": ds.Image(),
+            "file_name": ds.Value("string"),
+            "coco_url": ds.Value("string"),
+            "height": ds.Value("int32"),
+            "width": ds.Value("int32"),
+            "date_captured": ds.Value("string"),
+            "flickr_url": ds.Value("string"),
+            "license_id": ds.Value("int32"),
+            "license": {
+                "url": ds.Value("string"),
+                "license_id": ds.Value("int8"),
+                "name": ds.Value("string"),
+            },
+        }
+
+    @abc.abstractmethod
+    def get_features(self, *args, **kwargs) -> ds.Features:
+        raise NotImplementedError
+
+    @abc.abstractmethod
+    def load_data(
+        self, ann_dicts: List[JsonDict], tqdm_desc: Optional[str] = None, **kwargs
+    ):
+        raise NotImplementedError
+
+    @abc.abstractmethod
+    def generate_examples(
+        self,
+        image_dir: str,
+        images: Dict[ImageId, ImageData],
+        annotations: Dict[ImageId, List[CaptionsAnnotationData]],
+        licenses: Dict[LicenseId, LicenseData],
+        **kwargs,
+    ):
+        raise NotImplementedError
+
+
+class CaptionsProcessor(MsCocoProcessor):
+    def get_features(self, *args, **kwargs) -> ds.Features:
+        features_dict = self.get_features_base_dict()
+        annotations = ds.Sequence(
+            {
+                "annotation_id": ds.Value("int64"),
+                "image_id": ds.Value("int64"),
+                "caption": ds.Value("string"),
+            }
+        )
+        features_dict.update({"annotations": annotations})
+        return ds.Features(features_dict)
+
+    def load_data(
+        self,
+        ann_dicts: List[JsonDict],
+        tqdm_desc: str = "Load captions data",
+        **kwargs,
+    ) -> Dict[ImageId, List[CaptionsAnnotationData]]:
+        annotations = defaultdict(list)
+        for ann_dict in tqdm(ann_dicts, desc=tqdm_desc):
+            ann_data = CaptionsAnnotationData.from_dict(ann_dict)
+            annotations[ann_data.image_id].append(ann_data)
+        return annotations
+
+    def generate_examples(
+        self,
+        image_dir: str,
+        images: Dict[ImageId, ImageData],
+        annotations: Dict[ImageId, List[CaptionsAnnotationData]],
+        licenses: Dict[LicenseId, LicenseData],
+        **kwargs,
+    ) -> Iterator[Tuple[int, CaptionExample]]:
+        for idx, image_id in enumerate(images.keys()):
+            image_data = images[image_id]
+            image_anns = annotations[image_id]
+
+            assert len(image_anns) > 0
+
+            image = self.load_image(
+                image_path=os.path.join(image_dir, image_data.file_name),
+            )
+            example = asdict(image_data)
+            example["image"] = image
+            example["license"] = asdict(licenses[image_data.license_id])
+
+            example["annotations"] = []
+            for ann in image_anns:
+                example["annotations"].append(asdict(ann))
+
+            yield idx, example  # type: ignore
+
+
+class InstancesProcessor(MsCocoProcessor):
+    def get_features_instance_dict(self, decode_rle: bool):
+        segmentation_feature = (
+            ds.Image()
+            if decode_rle
+            else {
+                "counts": ds.Sequence(ds.Value("int64")),
+                "size": ds.Sequence(ds.Value("int32")),
+            }
+        )
+        return {
+            "annotation_id": ds.Value("int64"),
+            "image_id": ds.Value("int64"),
+            "segmentation": segmentation_feature,
+            "area": ds.Value("float32"),
+            "iscrowd": ds.Value("bool"),
+            "bbox": ds.Sequence(ds.Value("float32"), length=4),
+            "category_id": ds.Value("int32"),
+            "category": {
+                "category_id": ds.Value("int32"),
+                "name": ds.ClassLabel(
+                    num_classes=len(CATEGORIES),
+                    names=CATEGORIES,
+                ),
+                "supercategory": ds.ClassLabel(
+                    num_classes=len(SUPER_CATEGORIES),
+                    names=SUPER_CATEGORIES,
+                ),
+            },
+        }
+
+    def get_features(self, decode_rle: bool) -> ds.Features:
+        features_dict = self.get_features_base_dict()
+        annotations = ds.Sequence(
+            self.get_features_instance_dict(decode_rle=decode_rle)
+        )
+        features_dict.update({"annotations": annotations})
+        return ds.Features(features_dict)
+
+    def load_data(  # type: ignore[override]
+        self,
+        ann_dicts: List[JsonDict],
+        images: Dict[ImageId, ImageData],
+        decode_rle: bool,
+        tqdm_desc: str = "Load instances data",
+    ) -> Dict[ImageId, List[InstancesAnnotationData]]:
+        annotations = defaultdict(list)
+        ann_dicts = sorted(ann_dicts, key=lambda d: d["image_id"])
+
+        for ann_dict in tqdm(ann_dicts, desc=tqdm_desc):
+            ann_data = InstancesAnnotationData.from_dict(
+                ann_dict, images=images, decode_rle=decode_rle
+            )
+            annotations[ann_data.image_id].append(ann_data)
+
+        return annotations
+
+    def generate_examples(  # type: ignore[override]
+        self,
+        image_dir: str,
+        images: Dict[ImageId, ImageData],
+        annotations: Dict[ImageId, List[InstancesAnnotationData]],
+        licenses: Dict[LicenseId, LicenseData],
+        categories: Dict[CategoryId, CategoryData],
+    ) -> Iterator[Tuple[int, InstanceExample]]:
+        for idx, image_id in enumerate(images.keys()):
+            image_data = images[image_id]
+            image_anns = annotations[image_id]
+
+            if len(image_anns) < 1:
+                logger.warning(f"No annotation found for image id: {image_id}.")
+                continue
+
+            image = self.load_image(
+                image_path=os.path.join(image_dir, image_data.file_name),
+            )
+            example = asdict(image_data)
+            example["image"] = image
+            example["license"] = asdict(licenses[image_data.license_id])
+
+            example["annotations"] = []
+            for ann in image_anns:
+                ann_dict = asdict(ann)
+                category = categories[ann.category_id]
+                ann_dict["category"] = asdict(category)
+                example["annotations"].append(ann_dict)
+
+            yield idx, example  # type: ignore
+
+
+class PersonKeypointsProcessor(InstancesProcessor):
+    def get_features(self, decode_rle: bool) -> ds.Features:
+        features_dict = self.get_features_base_dict()
+        features_instance_dict = self.get_features_instance_dict(decode_rle=decode_rle)
+        features_instance_dict.update(
+            {
+                "keypoints": ds.Sequence(
+                    {
+                        "state": ds.Value("string"),
+                        "x": ds.Value("int32"),
+                        "y": ds.Value("int32"),
+                        "v": ds.Value("int32"),
+                    }
+                ),
+                "num_keypoints": ds.Value("int32"),
+            }
+        )
+        annotations = ds.Sequence(features_instance_dict)
+        features_dict.update({"annotations": annotations})
+        return ds.Features(features_dict)
+
+    def load_data(  # type: ignore[override]
+        self,
+        ann_dicts: List[JsonDict],
+        images: Dict[ImageId, ImageData],
+        decode_rle: bool,
+        tqdm_desc: str = "Load person keypoints data",
+    ) -> Dict[ImageId, List[PersonKeypointsAnnotationData]]:
+        annotations = defaultdict(list)
+        ann_dicts = sorted(ann_dicts, key=lambda d: d["image_id"])
+
+        for ann_dict in tqdm(ann_dicts, desc=tqdm_desc):
+            ann_data = PersonKeypointsAnnotationData.from_dict(
+                ann_dict, images=images, decode_rle=decode_rle
+            )
+            annotations[ann_data.image_id].append(ann_data)
+        return annotations
+
+    def generate_examples(  # type: ignore[override]
+        self,
+        image_dir: str,
+        images: Dict[ImageId, ImageData],
+        annotations: Dict[ImageId, List[PersonKeypointsAnnotationData]],
+        licenses: Dict[LicenseId, LicenseData],
+        categories: Dict[CategoryId, CategoryData],
+    ) -> Iterator[Tuple[int, PersonKeypointExample]]:
+        for idx, image_id in enumerate(images.keys()):
+            image_data = images[image_id]
+            image_anns = annotations[image_id]
+
+            if len(image_anns) < 1:
+                # If there are no persons in the image,
+                # no keypoint annotations will be assigned.
+                continue
+
+            image = self.load_image(
+                image_path=os.path.join(image_dir, image_data.file_name),
+            )
+            example = asdict(image_data)
+            example["image"] = image
+            example["license"] = asdict(licenses[image_data.license_id])
+
+            example["annotations"] = []
+            for ann in image_anns:
+                ann_dict = asdict(ann)
+                category = categories[ann.category_id]
+                ann_dict["category"] = asdict(category)
+                example["annotations"].append(ann_dict)
+
+            yield idx, example  # type: ignore
 
 
 class MsCocoConfig(ds.BuilderConfig):
```
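The hunk above is the core of the refactor: the per-task module functions are folded into a small class hierarchy, so the builder can hold one `MsCocoProcessor` and drive every task through the same `get_features` / `load_data` / `generate_examples` triple. A stripped-down sketch of that contract (a reading aid, not the loader's code; bodies elided):

```python
import abc


class Processor(abc.ABC):
    """Shared I/O helpers live on the base class; each COCO task
    (captions / instances / person_keypoints) overrides three hooks."""

    @abc.abstractmethod
    def get_features(self, *args, **kwargs):
        """Return the task-specific `datasets.Features` schema."""

    @abc.abstractmethod
    def load_data(self, ann_dicts, **kwargs):
        """Group raw annotation dicts by image id."""

    @abc.abstractmethod
    def generate_examples(self, image_dir, images, annotations, licenses, **kwargs):
        """Yield (idx, example) pairs joined against images and licenses."""
```

With this shape, `_info` and `_generate_examples` below lose their task-specific branches entirely.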
```diff
@@ -558,6 +861,7 @@ class MsCocoConfig(ds.BuilderConfig):
 
         self._year = year
         self._task = coco_task
+        self.processor = self.get_processor()
         self.decode_rle = decode_rle
 
     def _check_year(self, year: int) -> None:
```
```diff
@@ -568,7 +872,7 @@ class MsCocoConfig(ds.BuilderConfig):
             assert task in self.TASKS, task
         elif isinstance(task, list) or isinstance(task, tuple):
             for t in task:
-                assert t in self.TASKS, task
+                assert t, task
         else:
             raise ValueError(f"Invalid task: {task}")
 
```
```diff
@@ -585,6 +889,16 @@ class MsCocoConfig(ds.BuilderConfig):
         else:
             raise ValueError(f"Invalid task: {self._task}")
 
+    def get_processor(self) -> MsCocoProcessor:
+        if self.task == "captions":
+            return CaptionsProcessor()
+        elif self.task == "instances":
+            return InstancesProcessor()
+        elif self.task == "person_keypoints":
+            return PersonKeypointsProcessor()
+        else:
+            raise ValueError(f"Invalid task: {self.task}")
+
     @classmethod
     def config_name(cls, year: int, task: Union[str, Sequence[str]]) -> str:
         if isinstance(task, str):
```
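`get_processor` keys the dispatch off `self.task` with an if/elif chain. A hypothetical table-driven equivalent (not in the PR) keeps the task-to-processor mapping declarative; the class names mirror the diff, stubbed here so the sketch runs standalone:

```python
# Stubs standing in for the real processor classes from the diff above.
class CaptionsProcessor: ...
class InstancesProcessor: ...
class PersonKeypointsProcessor: ...

PROCESSORS = {
    "captions": CaptionsProcessor,
    "instances": InstancesProcessor,
    "person_keypoints": PersonKeypointsProcessor,
}

def get_processor(task: str):
    try:
        return PROCESSORS[task]()
    except KeyError:
        raise ValueError(f"Invalid task: {task}") from None

assert isinstance(get_processor("captions"), CaptionsProcessor)
```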
```diff
@@ -596,178 +910,6 @@ class MsCocoConfig(ds.BuilderConfig):
             raise ValueError(f"Invalid task: {task}")
 
 
-def _load_image(image_path: str) -> PilImage:
-    return Image.open(image_path)
-
-
-def _load_annotation_json(ann_file_path: str) -> JsonDict:
-    logger.info(f"Load annotation json from {ann_file_path}")
-    with open(ann_file_path, "r") as rf:
-        ann_json = json.load(rf)
-    return ann_json
-
-
-def _load_licenses_data(license_dicts: List[JsonDict]) -> Dict[LicenseId, LicenseData]:
-    licenses = {}
-    for license_dict in license_dicts:
-        license_data = LicenseData.from_dict(license_dict)
-        licenses[license_data.license_id] = license_data
-    return licenses
-
-
-def _load_images_data(
-    image_dicts: List[JsonDict],
-    tqdm_desc: str = "Load images",
-) -> Dict[ImageId, ImageData]:
-    images = {}
-    for image_dict in tqdm(image_dicts, desc=tqdm_desc):
-        image_data = ImageData.from_dict(image_dict)
-        images[image_data.image_id] = image_data
-    return images
-
-
-def _load_categories_data(
-    category_dicts: List[JsonDict],
-    tqdm_desc: str = "Load categories",
-) -> Dict[CategoryId, CategoryData]:
-    categories = {}
-    for category_dict in tqdm(category_dicts, desc=tqdm_desc):
-        category_data = CategoryData.from_dict(category_dict)
-        categories[category_data.category_id] = category_data
-    return categories
-
-
-def _load_captions_data(
-    ann_dicts: List[JsonDict],
-    tqdm_desc: str = "Load captions data",
-) -> Dict[ImageId, List[CaptionsAnnotationData]]:
-    annotations = defaultdict(list)
-    for ann_dict in tqdm(ann_dicts, desc=tqdm_desc):
-        ann_data = CaptionsAnnotationData.from_dict(ann_dict)
-        annotations[ann_data.image_id].append(ann_data)
-    return annotations
-
-
-def _load_instances_data(
-    ann_dicts: List[JsonDict],
-    images: Dict[ImageId, ImageData],
-    decode_rle: bool,
-    tqdm_desc: str = "Load instances data",
-) -> Dict[ImageId, List[InstancesAnnotationData]]:
-    annotations = defaultdict(list)
-    ann_dicts = sorted(ann_dicts, key=lambda d: d["image_id"])
-
-    for ann_dict in tqdm(ann_dicts, desc=tqdm_desc):
-        ann_data = InstancesAnnotationData.from_dict(
-            ann_dict, images=images, decode_rle=decode_rle
-        )
-        annotations[ann_data.image_id].append(ann_data)
-
-    return annotations
-
-
-def _load_person_keypoints_data(
-    ann_dicts: List[JsonDict],
-    images: Dict[ImageId, ImageData],
-    decode_rle: bool,
-    tqdm_desc: str = "Load person keypoints data",
-) -> Dict[ImageId, List[PersonKeypointsAnnotationData]]:
-    annotations = defaultdict(list)
-    ann_dicts = sorted(ann_dicts, key=lambda d: d["image_id"])
-
-    for ann_dict in tqdm(ann_dicts, desc=tqdm_desc):
-        ann_data = PersonKeypointsAnnotationData.from_dict(
-            ann_dict, images=images, decode_rle=decode_rle
-        )
-        annotations[ann_data.image_id].append(ann_data)
-    return annotations
-
-
-def get_features_base_dict():
-    return {
-        "image_id": ds.Value("int64"),
-        "image": ds.Image(),
-        "file_name": ds.Value("string"),
-        "coco_url": ds.Value("string"),
-        "height": ds.Value("int32"),
-        "width": ds.Value("int32"),
-        "date_captured": ds.Value("string"),
-        "flickr_url": ds.Value("string"),
-        "license_id": ds.Value("int32"),
-        "license": {
-            "url": ds.Value("string"),
-            "license_id": ds.Value("int8"),
-            "name": ds.Value("string"),
-        },
-    }
-
-
-def get_features_instance_dict(decode_rle: bool):
-    if decode_rle:
-        segmentation_feature = ds.Image()
-    else:
-        segmentation_feature = {
-            "counts": ds.Sequence(ds.Value("int64")),
-            "size": ds.Sequence(ds.Value("int32")),
-        }
-    return {
-        "annotation_id": ds.Value("int64"),
-        "image_id": ds.Value("int64"),
-        "segmentation": segmentation_feature,
-        "area": ds.Value("float32"),
-        "iscrowd": ds.Value("bool"),
-        "bbox": ds.Sequence(ds.Value("float32"), length=4),
-        "category_id": ds.Value("int32"),
-        "category": {
-            "category_id": ds.Value("int32"),
-            "name": ds.Value("string"),
-            "supercategory": ds.Value("string"),
-        },
-    }
-
-
-def get_features_captions() -> ds.Features:
-    features_dict = get_features_base_dict()
-    annotations = ds.Sequence(
-        {
-            "annotation_id": ds.Value("int64"),
-            "image_id": ds.Value("int64"),
-            "caption": ds.Value("string"),
-        }
-    )
-    features_dict.update({"annotations": annotations})
-
-    return ds.Features(features_dict)
-
-
-def get_features_instances(decode_rle: bool) -> ds.Features:
-    features_dict = get_features_base_dict()
-    annotations = ds.Sequence(get_features_instance_dict(decode_rle=decode_rle))
-    features_dict.update({"annotations": annotations})
-    return ds.Features(features_dict)
-
-
-def get_features_person_keypoints(decode_rle: bool) -> ds.Features:
-    features_dict = get_features_base_dict()
-    features_instance_dict = get_features_instance_dict(decode_rle=decode_rle)
-    features_instance_dict.update(
-        {
-            "keypoints": ds.Sequence(
-                {
-                    "state": ds.Value("string"),
-                    "x": ds.Value("int32"),
-                    "y": ds.Value("int32"),
-                    "v": ds.Value("int32"),
-                }
-            ),
-            "num_keypoints": ds.Value("int32"),
-        }
-    )
-    annotations = ds.Sequence(features_instance_dict)
-    features_dict.update({"annotations": annotations})
-    return ds.Features(features_dict)
-
-
 def dataset_configs(year: int, version: ds.Version) -> List[MsCocoConfig]:
     return [
         MsCocoConfig(
```
```diff
@@ -785,16 +927,16 @@ def dataset_configs(year: int, version: ds.Version) -> List[MsCocoConfig]:
             coco_task="person_keypoints",
             version=version,
         ),
-        MsCocoConfig(
-            year=year,
-            coco_task=("captions", "instances"),
-            version=version,
-        ),
-        MsCocoConfig(
-            year=year,
-            coco_task=("captions", "person_keypoints"),
-            version=version,
-        ),
+        # MsCocoConfig(
+        #     year=year,
+        #     coco_task=("captions", "instances"),
+        #     version=version,
+        # ),
+        # MsCocoConfig(
+        #     year=year,
+        #     coco_task=("captions", "person_keypoints"),
+        #     version=version,
+        # ),
     ]
 
 
```
```diff
@@ -822,19 +964,8 @@ class MsCocoDataset(ds.GeneratorBasedBuilder):
         return config.task
 
     def _info(self) -> ds.DatasetInfo:
-        if self.task == "captions":
-            features = get_features_captions()
-        elif self.task == "instances":
-            features = get_features_instances(
-                decode_rle=self.config.decode_rle,  # type: ignore
-            )
-        elif self.task == "person_keypoints":
-            features = get_features_person_keypoints(
-                decode_rle=self.config.decode_rle,  # type: ignore
-            )
-        else:
-            raise ValueError(f"Invalid task: {self.task}")
-
+        processor: MsCocoProcessor = self.config.processor
+        features = processor.get_features(decode_rle=self.config.decode_rle)
         return ds.DatasetInfo(
             description=_DESCRIPTION,
             citation=_CITATION,
```
```diff
@@ -884,57 +1015,33 @@ class MsCocoDataset(ds.GeneratorBasedBuilder):
         ann_dir = os.path.join(base_annotation_dir, "annotations")
         ann_file_path = os.path.join(ann_dir, f"{self.task}_{split}{self.year}.json")
 
-        ann_json = _load_annotation_json(ann_file_path=ann_file_path)
+        processor: MsCocoProcessor = self.config.processor
+
+        ann_json = processor.load_annotation_json(ann_file_path=ann_file_path)
 
         # info = AnnotationInfo.from_dict(ann_json["info"])
-        licenses = _load_licenses_data(license_dicts=ann_json["licenses"])
-        images = _load_images_data(image_dicts=ann_json["images"])
+        licenses = processor.load_licenses_data(license_dicts=ann_json["licenses"])
+        images = processor.load_images_data(image_dicts=ann_json["images"])
 
         category_dicts = ann_json.get("categories")
         categories = (
-            _load_categories_data(category_dicts=category_dicts)
+            processor.load_categories_data(category_dicts=category_dicts)
             if category_dicts is not None
             else None
         )
 
         config: MsCocoConfig = self.config
-        if config.task == "captions":
-            yield from generate_captions_examples(
-                annotations=_load_captions_data(
-                    ann_dicts=ann_json["annotations"],
-                ),
-                image_dir=image_dir,
-                images=images,
-                licenses=licenses,
-            )
-        elif config.task == "instances":
-            assert categories is not None
-            yield from generate_instances_examples(
-                annotations=_load_instances_data(
-                    images=images,
-                    ann_dicts=ann_json["annotations"],
-                    decode_rle=self.config.decode_rle,  # type: ignore
-                ),
-                categories=categories,
-                image_dir=image_dir,
-                images=images,
-                licenses=licenses,
-            )
-        elif config.task == "person_keypoints":
-            assert categories is not None
-            yield from generate_person_keypoints_examples(
-                annotations=_load_person_keypoints_data(
-                    images=images,
-                    ann_dicts=ann_json["annotations"],
-                    decode_rle=self.config.decode_rle,  # type: ignore
-                ),
-                categories=categories,
-                image_dir=image_dir,
-                images=images,
-                licenses=licenses,
-            )
+        yield from processor.generate_examples(
+            annotations=processor.load_data(
+                ann_dicts=ann_json["annotations"],
+                images=images,
+                decode_rle=config.decode_rle,
+            ),
+            categories=categories,
+            image_dir=image_dir,
+            images=images,
+            licenses=licenses,
+        )
 
     def _generate_test_examples(self, test_image_info_path: str):
         raise NotImplementedError
```
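After this change `_generate_examples` is a single code path for all tasks. The yielded examples keep the schema declared by `get_features`; roughly what one record looks like for the `instances` task, with hypothetical, trimmed values for illustration (field names follow the features above, values are made up):

```python
# Illustrative only: one (idx, example) payload for the "instances" task.
example = {
    "image_id": 397133,
    "image": "<PIL.Image.Image>",  # loaded via processor.load_image
    "file_name": "000000397133.jpg",
    "height": 427,
    "width": 640,
    "license": {"license_id": 4, "name": "...", "url": "..."},
    "annotations": [
        {
            "annotation_id": 82445,
            "bbox": [217.62, 240.54, 38.99, 57.75],
            "iscrowd": False,
            "category": {"category_id": 44, "name": "bottle", "supercategory": "kitchen"},
            # RLE dict when decode_rle=False; a decoded mask image otherwise.
            "segmentation": {"counts": [...], "size": [427, 640]},
        },
    ],
}
```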
README.md
CHANGED
````diff
@@ -1,3 +1,276 @@
+---
+annotations_creators:
+- crowdsourced
+language:
+- en
+language_creators:
+- found
+license:
+- cc-by-4.0
+multilinguality:
+- monolingual
+pretty_name: MSCOCO
+size_categories: []
+source_datasets:
+- original
+tags:
+- image-captioning
+- object-detection
+- keypoint-detection
+- stuff-segmentation
+- panoptic-segmentation
+task_categories:
+- image-segmentation
+- object-detection
+- other
+task_ids:
+- instance-segmentation
+- semantic-segmentation
+- panoptic-segmentation
+---
+
 # Dataset Card for MSCOCO
 
 [![CI](https://github.com/shunk031/huggingface-datasets_MSCOCO/actions/workflows/ci.yaml/badge.svg)](https://github.com/shunk031/huggingface-datasets_MSCOCO/actions/workflows/ci.yaml)
+
+## Table of Contents
+- [Dataset Card Creation Guide](#dataset-card-creation-guide)
+  - [Table of Contents](#table-of-contents)
+  - [Dataset Description](#dataset-description)
+    - [Dataset Summary](#dataset-summary)
+    - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
+    - [Languages](#languages)
+  - [Dataset Structure](#dataset-structure)
+    - [Data Instances](#data-instances)
+    - [Data Fields](#data-fields)
+    - [Data Splits](#data-splits)
+  - [Dataset Creation](#dataset-creation)
+    - [Curation Rationale](#curation-rationale)
+    - [Source Data](#source-data)
+      - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
+      - [Who are the source language producers?](#who-are-the-source-language-producers)
+    - [Annotations](#annotations)
+      - [Annotation process](#annotation-process)
+      - [Who are the annotators?](#who-are-the-annotators)
+    - [Personal and Sensitive Information](#personal-and-sensitive-information)
+  - [Considerations for Using the Data](#considerations-for-using-the-data)
+    - [Social Impact of Dataset](#social-impact-of-dataset)
+    - [Discussion of Biases](#discussion-of-biases)
+    - [Other Known Limitations](#other-known-limitations)
+  - [Additional Information](#additional-information)
+    - [Dataset Curators](#dataset-curators)
+    - [Licensing Information](#licensing-information)
+    - [Citation Information](#citation-information)
+    - [Contributions](#contributions)
+
+## Dataset Description
+
+- **Homepage:** https://cocodataset.org/#home
+- **Repository:** https://github.com/shunk031/huggingface-datasets_MSCOCO
+- **Paper (Preprint):** https://arxiv.org/abs/1405.0312
+- **Paper (ECCV2014):** https://link.springer.com/chapter/10.1007/978-3-319-10602-1_48
+- **Leaderboard (Detection):** https://cocodataset.org/#detection-leaderboard
+- **Leaderboard (Keypoint):** https://cocodataset.org/#keypoints-leaderboard
+- **Leaderboard (Stuff):** https://cocodataset.org/#stuff-leaderboard
+- **Leaderboard (Panoptic):** https://cocodataset.org/#panoptic-leaderboard
+- **Leaderboard (Captioning):** https://cocodataset.org/#captions-leaderboard
+- **Point of Contact:** [email protected]
+
+### Dataset Summary
+
+> COCO is a large-scale object detection, segmentation, and captioning dataset. COCO has several features:
+> - Object segmentation
+> - Recognition in context
+> - Superpixel stuff segmentation
+> - 330K images (>200K labeled)
+> - 1.5 million object instances
+> - 80 object categories
+> - 91 stuff categories
+> - 5 captions per image
+> - 250,000 people with keypoints
+
+### Supported Tasks and Leaderboards
+
+[More Information Needed]
+
+### Languages
+
+[More Information Needed]
+
+## Dataset Structure
+
+### Data Instances
+
+#### 2014
+
+- captioning dataset
+
+```python
+import datasets as ds
+
+dataset = ds.load_dataset(
+    "shunk031/MSCOCO",
+    year=2014,
+    coco_task="captions",
+)
+```
+
+- instances dataset
+
+```python
+import datasets as ds
+
+dataset = ds.load_dataset(
+    "shunk031/MSCOCO",
+    year=2014,
+    coco_task="instances",
+    decode_rle=True,  # True if Run-length Encoding (RLE) is to be decoded and converted to binary mask.
+)
+```
+
+- person keypoints dataset
+
+```python
+import datasets as ds
+
+dataset = ds.load_dataset(
+    "shunk031/MSCOCO",
+    year=2014,
+    coco_task="person_keypoints",
+    decode_rle=True,  # True if Run-length Encoding (RLE) is to be decoded and converted to binary mask.
+)
+```
+
+#### 2017
+
+- captioning dataset
+
+```python
+import datasets as ds
+
+dataset = ds.load_dataset(
+    "shunk031/MSCOCO",
+    year=2017,
+    coco_task="captions",
+)
+```
+
+- instances dataset
+
+```python
+import datasets as ds
+
+dataset = ds.load_dataset(
+    "shunk031/MSCOCO",
+    year=2017,
+    coco_task="instances",
+    decode_rle=True,  # True if Run-length Encoding (RLE) is to be decoded and converted to binary mask.
+)
+```
+
+- person keypoints dataset
+
+```python
+import datasets as ds
+
+dataset = ds.load_dataset(
+    "shunk031/MSCOCO",
+    year=2017,
+    coco_task="person_keypoints",
+    decode_rle=True,  # True if Run-length Encoding (RLE) is to be decoded and converted to binary mask.
+)
+```
+
+### Data Fields
+
+[More Information Needed]
+
+### Data Splits
+
+[More Information Needed]
+
+## Dataset Creation
+
+### Curation Rationale
+
+[More Information Needed]
+
+### Source Data
+
+[More Information Needed]
+
+#### Initial Data Collection and Normalization
+
+[More Information Needed]
+
+#### Who are the source language producers?
+
+[More Information Needed]
+
+### Annotations
+
+[More Information Needed]
+
+#### Annotation process
+
+[More Information Needed]
+
+#### Who are the annotators?
+
+[More Information Needed]
+
+### Personal and Sensitive Information
+
+[More Information Needed]
+
+## Considerations for Using the Data
+
+### Social Impact of Dataset
+
+[More Information Needed]
+
+### Discussion of Biases
+
+[More Information Needed]
+
+### Other Known Limitations
+
+[More Information Needed]
+
+## Additional Information
+
+### Dataset Curators
+
+[More Information Needed]
+
+### Licensing Information
+
+> The annotations in this dataset along with this website belong to the COCO Consortium and are licensed under a [Creative Commons Attribution 4.0 License](https://creativecommons.org/licenses/by/4.0/legalcode).
+>
+> ## Images
+> The COCO Consortium does not own the copyright of the images. Use of the images must abide by the Flickr Terms of Use. The users of the images accept full responsibility for the use of the dataset, including but not limited to the use of any copies of copyrighted images that they may create from the dataset.
+>
+> ## Software
+> Copyright (c) 2015, COCO Consortium. All rights reserved. Redistribution and use software in source and binary form, with or without modification, are permitted provided that the following conditions are met:
+> - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+> - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+> - Neither the name of the COCO Consortium nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+>
+> THIS SOFTWARE AND ANNOTATIONS ARE PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+### Citation Information
+
+```bibtex
+@inproceedings{lin2014microsoft,
+  title={Microsoft coco: Common objects in context},
+  author={Lin, Tsung-Yi and Maire, Michael and Belongie, Serge and Hays, James and Perona, Pietro and Ramanan, Deva and Doll{\'a}r, Piotr and Zitnick, C Lawrence},
+  booktitle={Computer Vision--ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13},
+  pages={740--755},
+  year={2014},
+  organization={Springer}
+}
+```
+
+### Contributions
+
+Thanks to [COCO Consortium](https://cocodataset.org/#people) for creating this dataset.
````
tests/MSCOCO_test.py
CHANGED
```diff
@@ -1,12 +1,23 @@
+import os
+
 import datasets as ds
 import pytest
 
+from MSCOCO import CATEGORIES, SUPER_CATEGORIES
+
 
 @pytest.fixture
 def dataset_path() -> str:
     return "MSCOCO.py"
 
 
+@pytest.mark.skipif(
+    condition=bool(os.environ.get("CI", False)),
+    reason=(
+        "Because this loading script downloads a large dataset, "
+        "we will skip running it on CI."
+    ),
+)
 @pytest.mark.parametrize(
     argnames="decode_rle,",
     argvalues=(
@@ -46,3 +57,8 @@ def test_load_dataset(
     )
     assert dataset["train"].num_rows == expected_num_train
     assert dataset["validation"].num_rows == expected_num_validation
+
+
+def test_consts():
+    assert len(CATEGORIES) == 80
+    assert len(SUPER_CATEGORIES) == 12
```