xiazhi1 committed on
Commit
aea73e2
1 Parent(s): d661fa3

initial commit

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. .gitignore +2 -0
  2. app.py +115 -0
  3. base_ml/base_cli.py +120 -0
  4. base_ml/base_early_stopping.py +83 -0
  5. base_ml/base_experiment.py +445 -0
  6. base_ml/base_loss.py +1171 -0
  7. base_ml/base_optim.py +36 -0
  8. base_ml/base_trainer.py +274 -0
  9. base_ml/base_utils.py +136 -0
  10. base_ml/base_validator.py +18 -0
  11. base_ml/optim_factory.py +190 -0
  12. base_ml/unireplknet_layer_decay_optimizer_constructor.py +169 -0
  13. cell_segmentation/__init__.py +6 -0
  14. cell_segmentation/datasets/base_cell.py +85 -0
  15. cell_segmentation/datasets/cell_graph_datamodel.py +26 -0
  16. cell_segmentation/datasets/conic.py +243 -0
  17. cell_segmentation/datasets/consep.py +118 -0
  18. cell_segmentation/datasets/dataset_coordinator.py +73 -0
  19. cell_segmentation/datasets/monuseg.py +128 -0
  20. cell_segmentation/datasets/pannuke.py +537 -0
  21. cell_segmentation/datasets/prepare_monuseg.py +115 -0
  22. cell_segmentation/datasets/prepare_pannuke_origin.py +95 -0
  23. cell_segmentation/experiments/__init__.py +6 -0
  24. cell_segmentation/experiments/experiment_cellvit_conic.py +808 -0
  25. cell_segmentation/experiments/experiment_cellvit_pannuke.py +861 -0
  26. cell_segmentation/experiments/experiment_cpp_net_pannuke.py +296 -0
  27. cell_segmentation/experiments/experiment_stardist_pannuke.py +392 -0
  28. cell_segmentation/inference/__init__.py +6 -0
  29. cell_segmentation/inference/cell_detection.py +1077 -0
  30. cell_segmentation/inference/cell_detection_256.py +1111 -0
  31. cell_segmentation/inference/cell_detection_mp.py +1527 -0
  32. cell_segmentation/inference/inference_cellvit_experiment_monuseg.py +1002 -0
  33. cell_segmentation/inference/inference_cellvit_experiment_pannuke.py +1157 -0
  34. cell_segmentation/run_cellvit.py +103 -0
  35. cell_segmentation/trainer/__init__.py +6 -0
  36. cell_segmentation/trainer/trainer_cellvit.py +1092 -0
  37. cell_segmentation/utils/__init__.py +6 -0
  38. cell_segmentation/utils/metrics.py +276 -0
  39. cell_segmentation/utils/post_proc_cellvit.py +328 -0
  40. cell_segmentation/utils/template_geojson.py +52 -0
  41. cell_segmentation/utils/tools.py +400 -0
  42. config.yaml +158 -0
  43. datamodel/__init__.py +6 -0
  44. datamodel/graph_datamodel.py +29 -0
  45. datamodel/wsi_datamodel.py +193 -0
  46. docs/datasets/PanNuke/dataset_config.yaml +28 -0
  47. docs/datasets/PanNuke/fold0/cell_count.csv +2657 -0
  48. docs/datasets/PanNuke/fold0/types.csv +2657 -0
  49. docs/datasets/PanNuke/fold1/cell_count.csv +2524 -0
  50. docs/datasets/PanNuke/fold1/types.csv +2524 -0
.gitignore ADDED
@@ -0,0 +1,2 @@
+ **/__pycache__
+ /.vscode
app.py ADDED
@@ -0,0 +1,115 @@
+ import gradio as gr
+ import os, requests
+ import numpy as np
+ import torch
+ import cv2
+ from cell_segmentation.inference.inference_cellvit_experiment_pannuke import InferenceCellViTParser, InferenceCellViT
+ from cell_segmentation.inference.inference_cellvit_experiment_monuseg import InferenceCellViTMoNuSegParser, MoNuSegInference
+
+
+ ## local | remote
+ RUN_MODE = "remote"
+ if RUN_MODE != "local":
+     # download the model checkpoint and the example images
+     os.system("wget https://huggingface.co/xiazhi/LKCell-demo/resolve/main/model_best.pth")
+     ## examples
+     os.system("wget https://huggingface.co/xiazhi/LKCell-demo/resolve/main/1.png")
+     os.system("wget https://huggingface.co/xiazhi/LKCell-demo/resolve/main/2.png")
+     os.system("wget https://huggingface.co/xiazhi/LKCell-demo/resolve/main/3.png")
+     os.system("wget https://huggingface.co/xiazhi/LKCell-demo/resolve/main/4.png")
+
+ ## step 1: set up models
+
+ device = "cpu"
+
+ ## pannuke setup
+ pannuke_parser = InferenceCellViTParser()
+ pannuke_configurations = pannuke_parser.parse_arguments()
+ pannuke_inf = InferenceCellViT(
+     run_dir=pannuke_configurations["run_dir"],
+     checkpoint_name=pannuke_configurations["checkpoint_name"],
+     gpu=pannuke_configurations["gpu"],
+     magnification=pannuke_configurations["magnification"],
+ )
+
+ pannuke_checkpoint = torch.load(
+     pannuke_inf.run_dir / pannuke_inf.checkpoint_name, map_location="cpu"
+ )
+ pannuke_model = pannuke_inf.get_model(model_type=pannuke_checkpoint["arch"])
+ pannuke_model.load_state_dict(pannuke_checkpoint["model_state_dict"])
+ # put model in eval mode
+ pannuke_model.to(device)
+ pannuke_model.eval()
+
+
+ ## monuseg setup
+ monuseg_parser = InferenceCellViTMoNuSegParser()
+ monuseg_configurations = monuseg_parser.parse_arguments()
+ monuseg_inf = MoNuSegInference(
+     model_path=monuseg_configurations["model"],
+     dataset_path=monuseg_configurations["dataset"],
+     outdir=monuseg_configurations["outdir"],
+     gpu=monuseg_configurations["gpu"],
+     patching=monuseg_configurations["patching"],
+     magnification=monuseg_configurations["magnification"],
+     overlap=monuseg_configurations["overlap"],
+ )
+
+
+ def click_process(image_input, type_dataset):
+     if type_dataset == "pannuke":
+         pannuke_inf.run_single_image_inference(pannuke_model, image_input)
+     else:
+         monuseg_inf.run_single_image_inference(monuseg_inf.model, image_input)
+
+     image_output = cv2.imread("pred_img.png")
+     image_output = cv2.cvtColor(image_output, cv2.COLOR_BGR2RGB)
+     return image_output
+
+
+ demo = gr.Blocks(title="LKCell")
+ with demo:
+     gr.Markdown(value="""
+     **Gradio demo for LKCell: Efficient Cell Nuclei Instance Segmentation with Large Convolution Kernels**. Check our [Github Repo](https://github.com/ziwei-cui/LKCellv1) 😛.
+     """)
+     with gr.Row():
+         with gr.Column():
+             with gr.Row():
+                 Image_input = gr.Image(type="numpy", label="Input", interactive=True, height=480)
+             with gr.Row():
+                 Type_dataset = gr.Radio(choices=["pannuke", "monuseg"], label="Input image's dataset type", value="pannuke")
+
+         with gr.Column():
+             with gr.Row():
+                 image_output = gr.Image(type="numpy", label="Output", height=480)
+             with gr.Row():
+                 Button_run = gr.Button("🚀 Submit (发送)")
+                 clear_button = gr.ClearButton(components=[Image_input, Type_dataset, image_output], value="🧹 Clear (清除)")
+
+     Button_run.click(fn=click_process, inputs=[Image_input, Type_dataset], outputs=[image_output])
+
+     ## guideline
+     gr.Markdown(value="""
+     🔔**Guideline**
+     1. Upload your image or select one from the examples.
+     2. Set up the arguments: "Type_dataset".
+     3. Click the Submit button to get the output.
+     """)
+     # if RUN_MODE != "local":
+     gr.Examples(examples=[
+         ['1.png', "pannuke"],
+         ['2.png', "pannuke"],
+         ['3.png', "monuseg"],
+         ['4.png', "monuseg"],
+     ],
+         inputs=[Image_input, Type_dataset], outputs=[image_output], label="Examples")
+     gr.HTML(value="""
+     <p style="text-align:center; color:orange"> <a href='https://github.com/ziwei-cui/LKCellv1' target='_blank'>Github Repo</a></p>
+     """)
+     gr.Markdown(value="""
+     Template is adapted from [here](https://huggingface.co/spaces/menghanxia/disco)
+     """)
+
+ if RUN_MODE == "local":
+     demo.launch(server_name="127.0.0.1", server_port=8003)
+ else:
+     demo.launch()
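
For orientation, a minimal sketch of exercising the demo's processing function without the UI (assumptions: it runs in the same interpreter session as app.py, the example images and checkpoint above have been downloaded, and `run_single_image_inference` writes its visualization to `pred_img.png`, as `click_process` expects):

    import cv2

    # load a bundled example image (fetched above in remote mode)
    img = cv2.cvtColor(cv2.imread("1.png"), cv2.COLOR_BGR2RGB)

    # same call the Submit button triggers; returns the RGB prediction overlay
    overlay = click_process(img, "pannuke")
    cv2.imwrite("overlay_check.png", cv2.cvtColor(overlay, cv2.COLOR_RGB2BGR))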
base_ml/base_cli.py ADDED
@@ -0,0 +1,120 @@
+ # -*- coding: utf-8 -*-
+ # Base CLI to parse arguments
+ #
+ # @ Fabian Hörst, [email protected]
+ # Institute for Artificial Intelligence in Medicine,
+ # University Medicine Essen
+
+ import argparse
+ import logging
+ from abc import ABC, abstractmethod
+ from typing import Tuple, Union
+
+ import yaml
+ from pydantic import BaseModel
+
+
+ class ABCParser(ABC):
+     """Blueprint for Argument Parser"""
+
+     @abstractmethod
+     def __init__(self) -> None:
+         pass
+
+     @abstractmethod
+     def get_config(self) -> Tuple[Union[BaseModel, dict], logging.Logger]:
+         """Load configuration and create a logger
+
+         Returns:
+             Tuple[PreProcessingConfig, logging.Logger]: Configuration and Logger
+         """
+         pass
+
+     @abstractmethod
+     def store_config(self) -> None:
+         """Store the config file in the logging directory to keep track of the configuration."""
+         pass
+
+
+ class ExperimentBaseParser:
+     """Configuration Parser for Machine Learning Experiments"""
+
+     def __init__(self) -> None:
+         parser = argparse.ArgumentParser(
+             formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+             description="Start an experiment with given configuration file.",
+         )
+         requiredNamed = parser.add_argument_group("required named arguments")
+         requiredNamed.add_argument(
+             "--config", type=str, help="Path to a config file", required=True
+         )
+         parser.add_argument("--gpu", type=int, help="Cuda-GPU ID")
+         group = parser.add_mutually_exclusive_group(required=False)
+         group.add_argument(
+             "--sweep",
+             action="store_true",
+             help="Start a sweep. For this, the configuration file must be structured according to WandB sweeping. "
+             "Compare https://docs.wandb.ai/guides/sweeps and https://community.wandb.ai/t/nested-sweep-configuration/3369/3 "
+             "for further information. This parameter cannot be set in the config file!",
+         )
+         group.add_argument(
+             "--agent",
+             type=str,
+             help="Add a new agent to the sweep. "
+             "Please pass the sweep ID as argument in the form entity/project/sweep_id, e.g., user1/test_project/v4hwbijh. "
+             "The agent configuration can be found in the WandB dashboard for the running sweep in the sweep overview tab "
+             "under launch agent. Just paste the entity/project/sweep_id given there. The provided config file must be a sweep config file. "
+             "This parameter cannot be set in the config file!",
+         )
+         group.add_argument(
+             "--checkpoint",
+             type=str,
+             help="Path to a PyTorch checkpoint file. "
+             "The file is loaded and training is continued with the provided settings. "
+             "If this is passed, no sweeps are possible. "
+             "This parameter cannot be set in the config file!",
+         )
+
+         self.parser = parser
+
+     def parse_arguments(self) -> Tuple[Union[BaseModel, dict]]:
+         """Parse the arguments from the CLI and load the yaml config
+
+         Returns:
+             Tuple[Union[BaseModel, dict]]: Parsed arguments
+         """
+         # parse the arguments
+         opt = self.parser.parse_args()  # namespace holding the CLI arguments
+         with open(opt.config, "r") as config_file:
+             yaml_config = yaml.safe_load(config_file)
+             yaml_config_dict = dict(yaml_config)  # convert the yaml config to a dict
+
+         opt_dict = vars(opt)  # convert the namespace to a dict
+         # check for gpu to overwrite with cli argument
+         if "gpu" in opt_dict:
+             if opt_dict["gpu"] is not None:
+                 yaml_config_dict["gpu"] = opt_dict["gpu"]  # the CLI --gpu value overrides the config
+
+         # check if either training, sweep, checkpoint or start agent should be called
+         # first step: remove such keys from the config file
+         if "run_sweep" in yaml_config_dict:
+             yaml_config_dict.pop("run_sweep")
+         if "agent" in yaml_config_dict:
+             yaml_config_dict.pop("agent")
+         if "checkpoint" in yaml_config_dict:
+             yaml_config_dict.pop("checkpoint")
+
+         # select one of the options
+         if "sweep" in opt_dict and opt_dict["sweep"] is True:
+             yaml_config_dict["run_sweep"] = True
+         else:
+             yaml_config_dict["run_sweep"] = False
+         if "agent" in opt_dict:
+             yaml_config_dict["agent"] = opt_dict["agent"]
+         if "checkpoint" in opt_dict:
+             if opt_dict["checkpoint"] is not None:
+                 yaml_config_dict["checkpoint"] = opt_dict["checkpoint"]
+
+         self.config = yaml_config_dict
+
+         return self.config
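
A minimal usage sketch for `ExperimentBaseParser` (the `run.py` and `config.yaml` names and the yaml keys are assumptions; any YAML mapping with the experiment settings would work):

    # e.g. invoked as: python run.py --config config.yaml --gpu 0
    from base_ml.base_cli import ExperimentBaseParser

    parser = ExperimentBaseParser()
    config = parser.parse_arguments()  # dict merged from the yaml file and CLI flags
    print(config.get("gpu"), config["run_sweep"])  # --gpu overrides any yaml value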
base_ml/base_early_stopping.py ADDED
@@ -0,0 +1,83 @@
+ # -*- coding: utf-8 -*-
+ # Base Early Stopping
+ #
+ # @ Fabian Hörst, [email protected]
+ # Institute for Artificial Intelligence in Medicine,
+ # University Medicine Essen
+
+ import logging
+
+ logger = logging.getLogger("__main__")
+ logger.addHandler(logging.NullHandler())
+
+ import wandb
+
+
+ class EarlyStopping:
+     """Early Stopping Class
+
+     Args:
+         patience (int): Patience to wait before stopping
+         strategy (str, optional): Optimization strategy.
+             Please select 'minimize' or 'maximize' for strategy. Defaults to "minimize".
+     """
+
+     def __init__(self, patience: int, strategy: str = "minimize"):
+         assert strategy.lower() in [
+             "minimize",
+             "maximize",
+         ], "Please select 'minimize' or 'maximize' for strategy"
+
+         self.patience = patience
+         self.counter = 0
+         self.strategy = strategy.lower()
+         self.best_metric = None
+         self.best_epoch = None
+         self.early_stop = False
+
+         logger.info(
+             f"Using early stopping with a patience of {self.patience} and {self.strategy} strategy"
+         )
+
+     def __call__(self, metric: float, epoch: int) -> bool:
+         """Early stopping update call
+
+         Args:
+             metric (float): Metric for early stopping
+             epoch (int): Current epoch
+
+         Returns:
+             bool: True if the model is performing better than the current best model,
+                 otherwise False
+         """
+         if self.best_metric is None:
+             self.best_metric = metric
+             self.best_epoch = epoch
+             return True
+         else:
+             if self.strategy == "minimize":
+                 if self.best_metric >= metric:
+                     self.best_metric = metric
+                     self.best_epoch = epoch
+                     self.counter = 0
+                     wandb.run.summary["Best-Epoch"] = epoch
+                     wandb.run.summary["Best-Metric"] = metric
+                     return True
+                 else:
+                     self.counter += 1
+                     if self.counter >= self.patience:
+                         self.early_stop = True
+                     return False
+             elif self.strategy == "maximize":
+                 if self.best_metric <= metric:
+                     self.best_metric = metric
+                     self.best_epoch = epoch
+                     self.counter = 0
+                     wandb.run.summary["Best-Epoch"] = epoch
+                     wandb.run.summary["Best-Metric"] = metric
+                     return True
+                 else:
+                     self.counter += 1
+                     if self.counter >= self.patience:
+                         self.early_stop = True
+                     return False
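
A minimal usage sketch for the `EarlyStopping` class above (an active wandb run is required because the class writes to `wandb.run.summary`; the validation-loss values here are made-up placeholders):

    import wandb
    from base_ml.base_early_stopping import EarlyStopping

    wandb.init(project="demo", mode="offline")  # EarlyStopping updates wandb.run.summary
    stopper = EarlyStopping(patience=2, strategy="minimize")

    for epoch, val_loss in enumerate([0.9, 0.7, 0.8, 0.85]):  # placeholder metrics
        if stopper(metric=val_loss, epoch=epoch):
            print(f"epoch {epoch}: new best metric {val_loss}")
        if stopper.early_stop:
            print(f"stopping at epoch {epoch}; best epoch was {stopper.best_epoch}")
            break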
base_ml/base_experiment.py ADDED
@@ -0,0 +1,445 @@
+ # -*- coding: utf-8 -*-
+ # Base Machine Learning Experiment
+ #
+ # @ Fabian Hörst, [email protected]
+ # Institute for Artificial Intelligence in Medicine,
+ # University Medicine Essen
+
+ import copy
+ import inspect
+ import logging
+ import os
+ import random
+ import sys
+ from abc import abstractmethod
+ from pathlib import Path
+ from typing import Tuple, Union
+ import argparse
+
+ currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
+ parentdir = os.path.dirname(currentdir)
+ sys.path.insert(0, parentdir)
+
+ import numpy as np
+ import pandas as pd
+ import torch
+ import torch.nn as nn
+ import yaml
+ from pydantic import BaseModel
+ from torch.nn.modules.loss import _Loss
+ from torch.optim import Optimizer
+ from torch.optim.lr_scheduler import ConstantLR, _LRScheduler
+ from torch.utils.data import Dataset, Sampler
+
+ from base_ml.base_optim import OPTI_DICT
+ from base_ml.base_validator import sweep_schema
+ from utils.logger import Logger
+ from utils.tools import flatten_dict, remove_parameter_tag, unflatten_dict
+
+ from base_ml.optim_factory import LayerDecayValueAssigner, create_optimizer
+
+
+ class BaseExperiment:
+     """BaseExperiment Class
+
+     An experiment consists of the following key methods:
+
+     * run_experiment: Main code for running the experiment with the implemented coordination and training call
+
+     Args:
+         default_conf (dict): Default configuration
+     """
+
+     def __init__(self, default_conf: dict, checkpoint=None) -> None:
+         # setup configuration
+         self.default_conf = default_conf
+         self.run_conf = None
+         self.logger = logging.getLogger(__name__)
+
+         # resolve paths
+         self.default_conf["logging"]["log_dir"] = str(
+             Path(default_conf["logging"]["log_dir"]).resolve()
+         )
+         self.default_conf["logging"]["wandb_dir"] = str(
+             Path(default_conf["logging"]["wandb_dir"]).resolve()
+         )
+
+         if checkpoint is not None:
+             self.checkpoint = torch.load(checkpoint, map_location="cpu")
+         else:
+             self.checkpoint = None
+
+         # seeding
+         self.seed_run(seed=self.default_conf["random_seed"])
+
+     @abstractmethod
+     def run_experiment(self):
+         """Experiment Code
+
+         Main code for running the experiment. The following steps should be performed:
+         1.) Set run name
+         2.) Initialize WandB and update config (according to sweep or predefined)
+         3.) Create output directory and setup logger
+         4.) Machine learning setup
+             4.1) Loss functions
+             4.2) Model
+             4.3) Optimizer
+             4.4) Scheduler
+         5.) Load and setup dataset
+         6.) Define trainer
+         7.) trainer.fit()
+
+         Raises:
+             NotImplementedError: Needs to be implemented
+         """
+         raise NotImplementedError
+
+     @abstractmethod
+     def get_train_model(self) -> nn.Module:
+         """Retrieve torch model for training
+
+         Raises:
+             NotImplementedError: Needs to be implemented
+
+         Returns:
+             nn.Module: Torch model
+         """
+         raise NotImplementedError
+
+     @abstractmethod
+     def get_loss_fn(self) -> _Loss:
+         """Retrieve torch loss function for training
+
+         Raises:
+             NotImplementedError: Needs to be implemented
+
+         Returns:
+             _Loss: Loss function
+         """
+         raise NotImplementedError
+
+     @staticmethod
+     def get_argparser():
+         parser = argparse.ArgumentParser(
+             "ConvNeXt training and evaluation script for image classification", add_help=False
+         )
+
+         # Optimization parameters
+         parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
+                             help='Optimizer (default: "adamw")')
+         parser.add_argument('--opt_eps', default=1e-8, type=float, metavar='EPSILON',
+                             help='Optimizer epsilon (default: 1e-8)')
+         parser.add_argument('--opt_betas', default=None, type=float, nargs='+', metavar='BETA',
+                             help='Optimizer betas (default: None, use opt default)')
+         parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
+                             help='SGD momentum (default: 0.9)')
+         parser.add_argument('--weight_decay', type=float, default=0.05,
+                             help='weight decay (default: 0.05)')
+         parser.add_argument('--weight_decay_end', type=float, default=None, help="""Final value of the
+                             weight decay. We use a cosine schedule for WD and using a larger decay by
+                             the end of training improves performance for ViTs.""")
+
+         parser.add_argument('--lr', type=float, default=4e-3, metavar='LR',
+                             help='learning rate (default: 4e-3), with total batch size 4096')
+         parser.add_argument('--layer_decay', type=float, default=0.9999)
+
+         return parser
+
+     def get_optimizer(
+         self, model: nn.Module, opt: str, hp: dict, layer_decay: float,
+     ) -> Optimizer:
+         """Retrieve optimizer for training
+
+         All torch optimizers are possible
+
+         Args:
+             model (nn.Module): Training model
+             opt (str): Name of the optimizer, all current PyTorch optimizers are possible
+             hp (dict): Hyperparameters as dictionary. For further information,
+                 see documentation here: https://pytorch.org/docs/stable/optim.html#algorithms
+             layer_decay (float): Layer-wise learning-rate decay factor; 1.0 disables it
+
+         Returns:
+             Optimizer: PyTorch optimizer
+         """
+         if layer_decay != 1.0:
+             # convnext layers are divided into 12 parts, each with a different decayed lr value
+             num_layers = 12
+             assigner = LayerDecayValueAssigner(
+                 list(layer_decay ** (num_layers + 1 - i) for i in range(num_layers + 2))
+             )
+         else:
+             assigner = None
+
+         optimizer = create_optimizer(
+             model,
+             weight_decay=hp["weight_decay"],
+             lr=hp["lr"],
+             opt=opt,
+             get_num_layer=assigner.get_layer_id if assigner is not None else None,
+             get_layer_scale=assigner.get_scale if assigner is not None else None,
+         )
+
+         self.logger.info("Loaded optimizer with the following hyperparameters:")
+         self.logger.info(hp)
+
+         return optimizer
+
+     def get_scheduler(self, optimizer: Optimizer) -> _LRScheduler:
+         """Retrieve learning rate scheduler for training
+
+         Currently just a constant scheduler. Should be extended to add a configurable scheduler.
+         Maybe reimplement in a specific experiment file.
+
+         Args:
+             optimizer (Optimizer): Optimizer
+
+         Returns:
+             _LRScheduler: PyTorch scheduler
+         """
+         scheduler = ConstantLR(optimizer, factor=1, total_iters=1000)
+         self.logger.info("Scheduler: ConstantLR scheduler")
+         return scheduler
+
+     def get_sampler(self) -> Sampler:
+         """Retrieve data sampler for training
+
+         Raises:
+             NotImplementedError: Needs to be implemented
+
+         Returns:
+             Sampler: Training sampler
+         """
+         raise NotImplementedError
+
+     def get_train_dataset(self) -> Dataset:
+         """Retrieve training dataset
+
+         Raises:
+             NotImplementedError: Needs to be implemented
+
+         Returns:
+             Dataset: Training dataset
+         """
+         raise NotImplementedError
+
+     def get_val_dataset(self) -> Dataset:
+         """Retrieve validation dataset
+
+         Raises:
+             NotImplementedError: Needs to be implemented
+
+         Returns:
+             Dataset: Validation dataset
+         """
+         raise NotImplementedError
+
+     def load_file_split(
+         self, fold: int = None
+     ) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
+         """Load the file split for training, validation and test
+
+         If no fold is provided, the current file split is loaded. Otherwise the files in the fold are loaded.
+
+         The folder (filelist_path) must be built up in the following way:
+         1.) No-Multifold:
+             filelist_path:
+                 train_split.csv
+                 val_split.csv
+                 test_split.csv
+         2.) Multifold:
+             filelist_path:
+                 fold1:
+                     train_split.csv
+                     val_split.csv
+                     test_split.csv
+                 fold2:
+                     train_split.csv
+                     val_split.csv
+                     test_split.csv
+                 ...
+                 foldN:
+                     train_split.csv
+                     val_split.csv
+                     test_split.csv
+
+         Args:
+             fold (int, optional): Fold. Defaults to None.
+
+         Returns:
+             Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]: Train, val and test split as pandas DataFrames
+         """
+         filelist_path = Path(self.default_conf["split_path"]).resolve()
+         self.logger.info(f"Loading file split from folder: {filelist_path}")
+         if fold is None:
+             train_split = pd.read_csv(filelist_path / "train_split.csv")
+             val_split = pd.read_csv(filelist_path / "val_split.csv")
+             test_split = pd.read_csv(filelist_path / "test_split.csv")
+         else:
+             train_split = pd.read_csv(filelist_path / f"fold{fold}" / "train_split.csv")
+             val_split = pd.read_csv(filelist_path / f"fold{fold}" / "val_split.csv")
+             test_split = None
+
+         self.logger.info(f"Train size: {len(train_split)}")
+         self.logger.info(f"Val-Split: {len(val_split)}")
+         return train_split, val_split, test_split
+
+     # Methods regarding logging and storing
+     def instantiate_logger(self) -> Logger:
+         """Instantiate a logger
+
+         Returns:
+             Logger: Logger
+         """
+         logger = Logger(
+             level=self.default_conf["logging"]["level"].upper(),
+             log_dir=Path(self.run_conf["logging"]["log_dir"]).resolve(),
+             comment="logs",
+             use_timestamp=False,
+         )
+         self.logger = logger.create_logger()
+         return self.logger
+
+     @staticmethod
+     def create_output_dir(folder_path: Union[str, Path]) -> None:
+         """Create folder at given path
+
+         Args:
+             folder_path (Union[str, Path]): Folder that should be created
+         """
+         folder_path = Path(folder_path).resolve()
+         folder_path.mkdir(parents=True, exist_ok=True)
+
+     def store_config(self) -> None:
+         """Store the config file in the logging directory to keep track of the configuration."""
+         # store in log directory
+         with open(
+             (Path(self.run_conf["logging"]["log_dir"]) / "config.yaml").resolve(), "w"
+         ) as yaml_file:
+             tmp_config = copy.deepcopy(self.run_conf)
+             tmp_config["logging"]["log_dir"] = str(tmp_config["logging"]["log_dir"])
+
+             yaml.dump(tmp_config, yaml_file, sort_keys=False)
+
+         self.logger.debug(
+             f"Stored config under: {(Path(self.run_conf['logging']['log_dir']) / 'config.yaml').resolve()}"
+         )
+
+     @staticmethod
+     def extract_sweep_arguments(config: dict) -> Tuple[Union[BaseModel, dict]]:
+         """Extract sweep arguments from the provided dictionary
+
+         The config dictionary must contain a "sweep" entry with the sweep configuration.
+         The file structure is documented here: ./base_ml/base_validator.py
+         We follow the official sweep guidelines of WandB.
+         Example sweep files are provided in the ./configs/examples folder.
+
+         Args:
+             config (dict): Dictionary with all configurations
+
+         Raises:
+             KeyError: Missing sweep keys
+
+         Returns:
+             Tuple[Union[BaseModel, dict]]: Sweep arguments
+         """
+         # validate sweep settings
+         if "sweep" not in config:
+             raise KeyError("No sweep configuration provided")
+         sweep_schema.validate(config["sweep"])
+
+         sweep_conf = config["sweep"]
+
+         # load parameters
+         flattened_dict = flatten_dict(config, sep=".")
+         filtered_dict = {
+             k: v for k, v in flattened_dict.items() if "parameters" in k.split(".")
+         }
+         parameters = remove_parameter_tag(filtered_dict, sep=".")
+
+         sweep_conf["parameters"] = parameters
+
+         return sweep_conf
+
+     def overwrite_sweep_values(self, run_conf: dict, sweep_run_conf: dict) -> None:
+         """Overwrite the run_conf file with the sweep values
+
+         For the sweep, sweeping parameters are a flattened dict, with keys being specified with a '.' separator.
+         This dictionary with the sweep hyperparameter selection needs to be unflattened (convert '.' into a nested dict).
+         Afterwards, the keys are inserted into the run_conf dictionary.
+
+         Args:
+             run_conf (dict): Current dictionary without sweep selected parameters
+             sweep_run_conf (dict): Dictionary with the sweep config
+         """
+         flattened_run_conf = flatten_dict(run_conf, sep=".")
+         filtered_dict = {
+             k: v
+             for k, v in flattened_run_conf.items()
+             if "parameters" not in k.split(".")
+         }
+         run_parameters = {**filtered_dict, **sweep_run_conf}
+         run_parameters = unflatten_dict(run_parameters, ".")
+
+         self.run_conf = run_parameters
+
+     @staticmethod
+     def seed_run(seed: int) -> None:
+         """Seed the experiment
+
+         Args:
+             seed (int): Seed
+         """
+         # seeding
+         torch.manual_seed(seed)
+         torch.cuda.manual_seed_all(seed)
+         torch.backends.cudnn.deterministic = True
+         torch.backends.cudnn.benchmark = False
+         os.environ["PYTHONHASHSEED"] = str(seed)
+         np.random.seed(seed)
+         random.seed(seed)
+         from packaging.version import parse, Version
+
+         try:
+             import tensorflow as tf
+         except ImportError:
+             pass
+         else:
+             if parse(tf.__version__) >= Version("2.0.0"):
+                 tf.random.set_seed(seed)
+             elif parse(tf.__version__) <= Version("1.13.2"):
+                 tf.set_random_seed(seed)
+             else:
+                 tf.compat.v1.set_random_seed(seed)
+
+     @staticmethod
+     def seed_worker(worker_id) -> None:
+         """Seed a worker
+
+         Args:
+             worker_id (_type_): Worker ID
+         """
+         worker_seed = torch.initial_seed() % 2**32
+         torch.manual_seed(worker_seed)
+         torch.cuda.manual_seed_all(worker_seed)
+         np.random.seed(worker_seed)
+         random.seed(worker_seed)
+
+     def close_remaining_logger(self) -> None:
+         """Close all remaining loggers"""
+         logger = logging.getLogger("__main__")
+         for handler in logger.handlers:
+             logger.removeHandler(handler)
+             handler.close()
+         logger.handlers.clear()
+         logging.shutdown()
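
A hedged sketch of a concrete subclass wiring the hooks together (the `ToyExperiment` class, the placeholder model, and the config keys beyond those read in `__init__` are hypothetical; a real experiment would also implement the dataset and sampler hooks and a full `run_experiment`):

    import torch.nn as nn
    from base_ml.base_experiment import BaseExperiment

    class ToyExperiment(BaseExperiment):
        def get_train_model(self) -> nn.Module:
            return nn.Conv2d(3, 2, kernel_size=1)  # hypothetical placeholder model

        def get_loss_fn(self):
            return nn.CrossEntropyLoss()

        def run_experiment(self):
            model = self.get_train_model()
            # layer_decay=1.0 disables the layer-wise lr decay (no assigner)
            optimizer = self.get_optimizer(
                model, opt="adamw", hp={"lr": 4e-3, "weight_decay": 0.05}, layer_decay=1.0
            )
            scheduler = self.get_scheduler(optimizer)
            # dataset setup and trainer.fit() would follow here

    exp = ToyExperiment(
        {"logging": {"log_dir": "./logs", "wandb_dir": "./wandb"}, "random_seed": 42}
    )
    exp.run_experiment()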
base_ml/base_loss.py ADDED
@@ -0,0 +1,1171 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Loss functions (PyTorch and own defined)
3
+ #
4
+ # Own defined loss functions:
5
+ # xentropy_loss, dice_loss, mse_loss and msge_loss (https://github.com/vqdang/hover_net)
6
+ # WeightedBaseLoss, MAEWeighted, MSEWeighted, BCEWeighted, CEWeighted (https://github.com/okunator/cellseg_models.pytorch)
7
+ # @ Fabian Hörst, [email protected]
8
+ # Institute for Artifical Intelligence in Medicine,
9
+ # University Medicine Essen
10
+
11
+
12
+ import torch
13
+ import torch.nn.functional as F
14
+ from typing import List, Tuple
15
+ from torch import nn
16
+ from torch.nn.modules.loss import _Loss
17
+ from base_ml.base_utils import filter2D, gaussian_kernel2d
18
+
19
+
20
+ class XentropyLoss(_Loss):
21
+ """Cross entropy loss"""
22
+
23
+ def __init__(self, reduction: str = "mean") -> None:
24
+ super().__init__(size_average=None, reduce=None, reduction=reduction)
25
+
26
+ def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
27
+ """Assumes NCHW shape of array, must be torch.float32 dtype
28
+
29
+ Args:
30
+ input (torch.Tensor): Ground truth array with shape (N, C, H, W) with N being the batch-size, H the height, W the width and C the number of classes
31
+ target (torch.Tensor): Prediction array with shape (N, C, H, W) with N being the batch-size, H the height, W the width and C the number of classes
32
+
33
+ Returns:
34
+ torch.Tensor: Cross entropy loss, with shape () [scalar], grad_fn = MeanBackward0
35
+ """
36
+ # reshape
37
+ input = input.permute(0, 2, 3, 1)
38
+ target = target.permute(0, 2, 3, 1)
39
+
40
+ epsilon = 10e-8
41
+ # scale preds so that the class probs of each sample sum to 1
42
+ pred = input / torch.sum(input, -1, keepdim=True)
43
+ # manual computation of crossentropy
44
+ pred = torch.clamp(pred, epsilon, 1.0 - epsilon)
45
+ loss = -torch.sum((target * torch.log(pred)), -1, keepdim=True)
46
+ loss = loss.mean() if self.reduction == "mean" else loss.sum()
47
+
48
+ return loss
49
+
50
+
51
+ class DiceLoss(_Loss):
52
+ """Dice loss
53
+
54
+ Args:
55
+ smooth (float, optional): Smoothing value. Defaults to 1e-3.
56
+ """
57
+
58
+ def __init__(self, smooth: float = 1e-3) -> None:
59
+ super().__init__(size_average=None, reduce=None, reduction="mean")
60
+ self.smooth = smooth
61
+
62
+ def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
63
+ """Assumes NCHW shape of array, must be torch.float32 dtype
64
+
65
+ `pred` and `true` must be of torch.float32. Assuming of shape NxHxWxC.
66
+
67
+ Args:
68
+ input (torch.Tensor): Prediction array with shape (N, C, H, W) with N being the batch-size, H the height, W the width and C the number of classes
69
+ target (torch.Tensor): Ground truth array with shape (N, C, H, W) with N being the batch-size, H the height, W the width and C the number of classes
70
+
71
+ Returns:
72
+ torch.Tensor: Dice loss, with shape () [scalar], grad_fn=SumBackward0
73
+ """
74
+ input = input.permute(0, 2, 3, 1)
75
+ target = target.permute(0, 2, 3, 1)
76
+ inse = torch.sum(input * target, (0, 1, 2))
77
+ l = torch.sum(input, (0, 1, 2))
78
+ r = torch.sum(target, (0, 1, 2))
79
+ loss = 1.0 - (2.0 * inse + self.smooth) / (l + r + self.smooth)
80
+ loss = torch.sum(loss)
81
+
82
+ return loss
83
+
84
+
85
+ class MSELossMaps(_Loss):
86
+ """Calculate mean squared error loss for combined horizontal and vertical maps of segmentation tasks."""
87
+
88
+ def __init__(self) -> None:
89
+ super().__init__(size_average=None, reduce=None, reduction="mean")
90
+
91
+ def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
92
+ """Loss calculation
93
+
94
+ Args:
95
+ input (torch.Tensor): Prediction of combined horizontal and vertical maps
96
+ with shape (N, 2, H, W), channel 0 is vertical and channel 1 is horizontal
97
+ target (torch.Tensor): Ground truth of combined horizontal and vertical maps
98
+ with shape (N, 2, H, W), channel 0 is vertical and channel 1 is horizontal
99
+
100
+ Returns:
101
+ torch.Tensor: Mean squared error per pixel with shape (N, 2, H, W), grad_fn=SubBackward0
102
+
103
+ """
104
+ # reshape
105
+ loss = input - target
106
+ loss = (loss * loss).mean()
107
+ return loss
108
+
109
+
110
+ class MSGELossMaps(_Loss):
111
+ def __init__(self) -> None:
112
+ super().__init__(size_average=None, reduce=None, reduction="mean")
113
+
114
+ def get_sobel_kernel(
115
+ self, size: int, device: str
116
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
117
+ """Get sobel kernel with a given size.
118
+
119
+ Args:
120
+ size (int): Kernel site
121
+ device (str): Cuda device
122
+
123
+ Returns:
124
+ Tuple[torch.Tensor, torch.Tensor]: Horizontal and vertical sobel kernel, each with shape (size, size)
125
+ """
126
+ assert size % 2 == 1, "Must be odd, get size=%d" % size
127
+
128
+ h_range = torch.arange(
129
+ -size // 2 + 1,
130
+ size // 2 + 1,
131
+ dtype=torch.float32,
132
+ device=device,
133
+ requires_grad=False,
134
+ )
135
+ v_range = torch.arange(
136
+ -size // 2 + 1,
137
+ size // 2 + 1,
138
+ dtype=torch.float32,
139
+ device=device,
140
+ requires_grad=False,
141
+ )
142
+ h, v = torch.meshgrid(h_range, v_range, indexing="ij")
143
+ kernel_h = h / (h * h + v * v + 1.0e-15)
144
+ kernel_v = v / (h * h + v * v + 1.0e-15)
145
+ return kernel_h, kernel_v
146
+
147
+ def get_gradient_hv(self, hv: torch.Tensor, device: str) -> torch.Tensor:
148
+ """For calculating gradient of horizontal and vertical prediction map
149
+
150
+
151
+ Args:
152
+ hv (torch.Tensor): horizontal and vertical map
153
+ device (str): CUDA device
154
+
155
+ Returns:
156
+ torch.Tensor: Gradient with same shape as input
157
+ """
158
+ kernel_h, kernel_v = self.get_sobel_kernel(5, device=device)
159
+ kernel_h = kernel_h.view(1, 1, 5, 5) # constant
160
+ kernel_v = kernel_v.view(1, 1, 5, 5) # constant
161
+
162
+ h_ch = hv[..., 0].unsqueeze(1) # Nx1xHxW
163
+ v_ch = hv[..., 1].unsqueeze(1) # Nx1xHxW
164
+
165
+ # can only apply in NCHW mode
166
+ h_dh_ch = F.conv2d(h_ch, kernel_h, padding=2)
167
+ v_dv_ch = F.conv2d(v_ch, kernel_v, padding=2)
168
+ dhv = torch.cat([h_dh_ch, v_dv_ch], dim=1)
169
+ dhv = dhv.permute(0, 2, 3, 1).contiguous() # to NHWC
170
+ return dhv
171
+
172
+ def forward(
173
+ self,
174
+ input: torch.Tensor,
175
+ target: torch.Tensor,
176
+ focus: torch.Tensor,
177
+ device: str,
178
+ ) -> torch.Tensor:
179
+ """MSGE (Gradient of MSE) loss
180
+
181
+ Args:
182
+ input (torch.Tensor): Input with shape (B, C, H, W)
183
+ target (torch.Tensor): Target with shape (B, C, H, W)
184
+ focus (torch.Tensor): Focus, type of masking (B, C, W, W)
185
+ device (str): CUDA device to work with.
186
+
187
+ Returns:
188
+ torch.Tensor: MSGE loss
189
+ """
190
+ input = input.permute(0, 2, 3, 1)
191
+ target = target.permute(0, 2, 3, 1)
192
+ focus = focus.permute(0, 2, 3, 1)
193
+ focus = focus[..., 1]
194
+
195
+ focus = (focus[..., None]).float() # assume input NHW
196
+ focus = torch.cat([focus, focus], axis=-1).to(device)
197
+ true_grad = self.get_gradient_hv(target, device)
198
+ pred_grad = self.get_gradient_hv(input, device)
199
+ loss = pred_grad - true_grad
200
+ loss = focus * (loss * loss)
201
+ # artificial reduce_mean with focused region
202
+ loss = loss.sum() / (focus.sum() + 1.0e-8)
203
+ return loss
204
+
205
+
206
+ class FocalTverskyLoss(nn.Module):
207
+ """FocalTverskyLoss
208
+
209
+ PyTorch implementation of the Focal Tversky Loss Function for multiple classes
210
+ doi: 10.1109/ISBI.2019.8759329
211
+ Abraham, N., & Khan, N. M. (2019).
212
+ A Novel Focal Tversky Loss Function With Improved Attention U-Net for Lesion Segmentation.
213
+ In International Symposium on Biomedical Imaging. https://doi.org/10.1109/isbi.2019.8759329
214
+
215
+ @ Fabian Hörst, [email protected]
216
+ Institute for Artifical Intelligence in Medicine,
217
+ University Medicine Essen
218
+
219
+ Args:
220
+ alpha_t (float, optional): Alpha parameter for tversky loss (multiplied with false-negatives). Defaults to 0.7.
221
+ beta_t (float, optional): Beta parameter for tversky loss (multiplied with false-positives). Defaults to 0.3.
222
+ gamma_f (float, optional): Gamma Focal parameter. Defaults to 4/3.
223
+ smooth (float, optional): Smooting factor. Defaults to 0.000001.
224
+ """
225
+
226
+ def __init__(
227
+ self,
228
+ alpha_t: float = 0.7,
229
+ beta_t: float = 0.3,
230
+ gamma_f: float = 4 / 3,
231
+ smooth: float = 1e-6,
232
+ ) -> None:
233
+ super().__init__()
234
+ self.alpha_t = alpha_t
235
+ self.beta_t = beta_t
236
+ self.gamma_f = gamma_f
237
+ self.smooth = smooth
238
+ self.num_classes = 2
239
+
240
+ def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
241
+ """Loss calculation
242
+
243
+ Args:
244
+ input (torch.Tensor): Predictions, logits (without Softmax). Shape: (B, C, H, W)
245
+ target (torch.Tensor): Targets, either flattened (Shape: (C, H, W) or as one-hot encoded (Shape: (batch-size, C, H, W)).
246
+
247
+ Raises:
248
+ ValueError: Error if there is a shape missmatch
249
+
250
+ Returns:
251
+ torch.Tensor: FocalTverskyLoss (weighted)
252
+ """
253
+ input = input.permute(0, 2, 3, 1)
254
+ if input.shape[-1] != self.num_classes:
255
+ raise ValueError(
256
+ "Predictions must be a logit tensor with the last dimension shape beeing equal to the number of classes"
257
+ )
258
+ if len(target.shape) != len(input.shape):
259
+ # convert the targets to onehot
260
+ target = F.one_hot(target, num_classes=self.num_classes)
261
+
262
+ # flatten
263
+ target = target.permute(0, 2, 3, 1)
264
+ target = target.view(-1)
265
+ input = torch.softmax(input, dim=-1).view(-1)
266
+
267
+ # calculate true positives, false positives and false negatives
268
+ tp = (input * target).sum()
269
+ fp = ((1 - target) * input).sum()
270
+ fn = (target * (1 - input)).sum()
271
+
272
+ Tversky = (tp + self.smooth) / (
273
+ tp + self.alpha_t * fn + self.beta_t * fp + self.smooth
274
+ )
275
+ FocalTversky = (1 - Tversky) ** self.gamma_f
276
+
277
+ return FocalTversky
278
+
279
+
280
+ class MCFocalTverskyLoss(FocalTverskyLoss):
281
+ """Multiclass FocalTverskyLoss
282
+
283
+ PyTorch implementation of the Focal Tversky Loss Function for multiple classes
284
+ doi: 10.1109/ISBI.2019.8759329
285
+ Abraham, N., & Khan, N. M. (2019).
286
+ A Novel Focal Tversky Loss Function With Improved Attention U-Net for Lesion Segmentation.
287
+ In International Symposium on Biomedical Imaging. https://doi.org/10.1109/isbi.2019.8759329
288
+
289
+ @ Fabian Hörst, [email protected]
290
+ Institute for Artifical Intelligence in Medicine,
291
+ University Medicine Essen
292
+
293
+ Args:
294
+ alpha_t (float, optional): Alpha parameter for tversky loss (multiplied with false-negatives). Defaults to 0.7.
295
+ beta_t (float, optional): Beta parameter for tversky loss (multiplied with false-positives). Defaults to 0.3.
296
+ gamma_f (float, optional): Gamma Focal parameter. Defaults to 4/3.
297
+ smooth (float, optional): Smooting factor. Defaults to 0.000001.
298
+ num_classes (int, optional): Number of output classes. For binary segmentation, prefer FocalTverskyLoss (speed optimized). Defaults to 2.
299
+ class_weights (List[int], optional): Weights for each class. If not provided, equal weight. Length must be equal to num_classes. Defaults to None.
300
+ """
301
+
302
+ def __init__(
303
+ self,
304
+ alpha_t: float = 0.7,
305
+ beta_t: float = 0.3,
306
+ gamma_f: float = 4 / 3,
307
+ smooth: float = 0.000001,
308
+ num_classes: int = 2,
309
+ class_weights: List[int] = None,
310
+ ) -> None:
311
+ super().__init__(alpha_t, beta_t, gamma_f, smooth)
312
+ self.num_classes = num_classes
313
+ if class_weights is None:
314
+ self.class_weights = [1 for i in range(self.num_classes)]
315
+ else:
316
+ assert (
317
+ len(class_weights) == self.num_classes
318
+ ), "Please provide matching weights"
319
+ self.class_weights = class_weights
320
+ self.class_weights = torch.Tensor(self.class_weights)
321
+
322
+ def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
323
+ """Loss calculation
324
+
325
+ Args:
326
+ input (torch.Tensor): Predictions, logits (without Softmax). Shape: (B, num_classes, H, W)
327
+ target (torch.Tensor): Targets, either flattened (Shape: (B, H, W) or as one-hot encoded (Shape: (B, num_classes, H, W)).
328
+
329
+ Raises:
330
+ ValueError: Error if there is a shape missmatch
331
+
332
+ Returns:
333
+ torch.Tensor: FocalTverskyLoss (weighted)
334
+ """
335
+ input = input.permute(0, 2, 3, 1)
336
+ if input.shape[-1] != self.num_classes:
337
+ raise ValueError(
338
+ "Predictions must be a logit tensor with the last dimension shape beeing equal to the number of classes"
339
+ )
340
+ if len(target.shape) != len(input.shape):
341
+ # convert the targets to onehot
342
+ target = F.one_hot(target, num_classes=self.num_classes)
343
+
344
+ target = target.permute(0, 2, 3, 1)
345
+ # Softmax
346
+ input = torch.softmax(input, dim=-1)
347
+
348
+ # Reshape
349
+ input = torch.permute(input, (3, 1, 2, 0))
350
+ target = torch.permute(target, (3, 1, 2, 0))
351
+
352
+ input = torch.flatten(input, start_dim=1)
353
+ target = torch.flatten(target, start_dim=1)
354
+
355
+ tp = torch.sum(input * target, 1)
356
+ fp = torch.sum((1 - target) * input, 1)
357
+ fn = torch.sum(target * (1 - input), 1)
358
+
359
+ Tversky = (tp + self.smooth) / (
360
+ tp + self.alpha_t * fn + self.beta_t * fp + self.smooth
361
+ )
362
+ FocalTversky = (1 - Tversky) ** self.gamma_f
363
+
364
+ self.class_weights = self.class_weights.to(FocalTversky.device)
365
+ return torch.sum(self.class_weights * FocalTversky)
366
+
367
+
368
+ class WeightedBaseLoss(nn.Module):
369
+ """Init a base class for weighted cross entropy based losses.
370
+
371
+ Enables weighting for object instance edges and classes.
372
+
373
+ Adapted/Copied from: https://github.com/okunator/cellseg_models.pytorch (10.5281/zenodo.7064617)
374
+
375
+ Args:
376
+ apply_sd (bool, optional): If True, Spectral decoupling regularization will be applied to the
377
+ loss matrix. Defaults to False.
378
+ apply_ls (bool, optional): If True, Label smoothing will be applied to the target.. Defaults to False.
379
+ apply_svls (bool, optional): If True, spatially varying label smoothing will be applied to the target. Defaults to False.
380
+ apply_mask (bool, optional): If True, a mask will be applied to the loss matrix. Mask shape: (B, H, W). Defaults to False.
381
+ class_weights (torch.Tensor, optional): Class weights. A tensor of shape (C, ). Defaults to None.
382
+ edge_weight (float, optional): Weight for the object instance border pixels. Defaults to None.
383
+ """
384
+
385
+ def __init__(
386
+ self,
387
+ apply_sd: bool = False,
388
+ apply_ls: bool = False,
389
+ apply_svls: bool = False,
390
+ apply_mask: bool = False,
391
+ class_weights: torch.Tensor = None,
392
+ edge_weight: float = None,
393
+ **kwargs,
394
+ ) -> None:
395
+ super().__init__()
396
+ self.apply_sd = apply_sd
397
+ self.apply_ls = apply_ls
398
+ self.apply_svls = apply_svls
399
+ self.apply_mask = apply_mask
400
+ self.class_weights = class_weights
401
+ self.edge_weight = edge_weight
402
+
403
+ def apply_spectral_decouple(
404
+ self, loss_matrix: torch.Tensor, yhat: torch.Tensor, lam: float = 0.01
405
+ ) -> torch.Tensor:
406
+ """Apply spectral decoupling L2 norm after the loss.
407
+
408
+ https://arxiv.org/abs/2011.09468
409
+
410
+ Args:
411
+ loss_matrix (torch.Tensor): Pixelwise losses. A tensor of shape (B, H, W).
412
+ yhat (torch.Tensor): The pixel predictions of the model. Shape (B, C, H, W).
413
+ lam (float, optional): Lambda constant.. Defaults to 0.01.
414
+
415
+ Returns:
416
+ torch.Tensor: SD-regularized loss matrix. Same shape as input.
417
+ """
418
+ return loss_matrix + (lam / 2) * (yhat**2).mean(axis=1)
419
+
420
+ def apply_ls_to_target(
421
+ self,
422
+ target: torch.Tensor,
423
+ num_classes: int,
424
+ label_smoothing: float = 0.1,
425
+ ) -> torch.Tensor:
426
+ """_summary_
427
+
428
+ Args:
429
+ target (torch.Tensor): Number of classes in the data.
430
+ num_classes (int): The target one hot tensor. Shape (B, C, H, W)
431
+ label_smoothing (float, optional): The smoothing coeff alpha. Defaults to 0.1.
432
+
433
+ Returns:
434
+ torch.Tensor: Label smoothed target. Same shape as input.
435
+ """
436
+ return target * (1 - label_smoothing) + label_smoothing / num_classes
437
+
438
+ def apply_svls_to_target(
439
+ self,
440
+ target: torch.Tensor,
441
+ num_classes: int,
442
+ kernel_size: int = 5,
443
+ sigma: int = 3,
444
+ **kwargs,
445
+ ) -> torch.Tensor:
446
+ """Apply spatially varying label smoothihng to target map.
447
+
448
+ https://arxiv.org/abs/2104.05788
449
+
450
+ Args:
451
+ target (torch.Tensor): The target one hot tensor. Shape (B, C, H, W).
452
+ num_classes (int): Number of classes in the data.
453
+ kernel_size (int, optional): Size of a square kernel.. Defaults to 5.
454
+ sigma (int, optional): The std of the gaussian. Defaults to 3.
455
+
456
+ Returns:
457
+ torch.Tensor: Label smoothed target. Same shape as input.
458
+ """
459
+ my, mx = kernel_size // 2, kernel_size // 2
460
+ gaussian_kernel = gaussian_kernel2d(
461
+ kernel_size, sigma, num_classes, device=target.device
462
+ )
463
+ neighborsum = (1 - gaussian_kernel[..., my, mx]) + 1e-16
464
+ gaussian_kernel = gaussian_kernel.clone()
465
+ gaussian_kernel[..., my, mx] = neighborsum
466
+ svls_kernel = gaussian_kernel / neighborsum[0]
467
+
468
+ return filter2D(target.float(), svls_kernel) / svls_kernel[0].sum()
469
+
470
+ def apply_class_weights(
471
+ self, loss_matrix: torch.Tensor, target: torch.Tensor
472
+ ) -> torch.Tensor:
473
+ """Multiply pixelwise loss matrix by the class weights.
474
+
475
+ NOTE: No normalization
476
+
477
+ Args:
478
+ loss_matrix (torch.Tensor): Pixelwise losses. A tensor of shape (B, H, W).
479
+ target (torch.Tensor): The target mask. Shape (B, H, W).
480
+
481
+ Returns:
482
+ torch.Tensor: The loss matrix scaled with the weight matrix. Shape (B, H, W).
483
+ """
484
+ weight_mat = self.class_weights[target.long()].to(target.device) # to (B, H, W)
485
+ loss = loss_matrix * weight_mat
486
+
487
+ return loss
488
+
489
+ def apply_edge_weights(
490
+ self, loss_matrix: torch.Tensor, weight_map: torch.Tensor
491
+ ) -> torch.Tensor:
492
+ """Apply weights to the object boundaries.
493
+
494
+ Basically just computes `edge_weight`**`weight_map`.
495
+
496
+ Args:
497
+ loss_matrix (torch.Tensor): Pixelwise losses. A tensor of shape (B, H, W).
498
+ weight_map (torch.Tensor): Map that points to the pixels that will be weighted. Shape (B, H, W).
499
+
500
+ Returns:
501
+ torch.Tensor: The loss matrix scaled with the nuclear boundary weights. Shape (B, H, W).
502
+ """
503
+ return loss_matrix * self.edge_weight**weight_map
504
+
505
+ def apply_mask_weight(
506
+ self, loss_matrix: torch.Tensor, mask: torch.Tensor, norm: bool = True
507
+ ) -> torch.Tensor:
508
+ """Apply a mask to the loss matrix.
509
+
510
+ Args:
511
+ loss_matrix (torch.Tensor): Pixelwise losses. A tensor of shape (B, H, W).
512
+ mask (torch.Tensor): The mask. Shape (B, H, W).
513
+ norm (bool, optional): If True, the loss matrix will be normalized by the mean of the mask. Defaults to True.
514
+
515
+ Returns:
516
+ torch.Tensor: The loss matrix scaled with the mask. Shape (B, H, W).
517
+ """
518
+ loss_matrix *= mask
519
+ if norm:
520
+ norm_mask = torch.mean(mask.float()) + 1e-7
521
+ loss_matrix /= norm_mask
522
+
523
+ return loss_matrix
524
+
525
+ def extra_repr(self) -> str:
526
+ """Add info to print."""
527
+ s = "apply_sd={apply_sd}, apply_ls={apply_ls}, apply_svls={apply_svls}, apply_mask={apply_mask}, class_weights={class_weights}, edge_weight={edge_weight}" # noqa
528
+ return s.format(**self.__dict__)
529
+
530
+
531
+ class MAEWeighted(WeightedBaseLoss):
532
+ """Compute the MAE loss. Used in the stardist method.
533
+
534
+ Stardist:
535
+ https://arxiv.org/pdf/1806.03535.pdf
536
+ Adapted/Copied from: https://github.com/okunator/cellseg_models.pytorch (10.5281/zenodo.7064617)
537
+
538
+ NOTE: We have added the option to apply spectral decoupling and edge weights
539
+ to the loss matrix.
540
+
541
+ Args:
542
+ alpha (float, optional): Weight regulizer b/w [0,1]. In stardist repo, this is the
543
+ 'train_background_reg' parameter. Defaults to 1e-4.
544
+ apply_sd (bool, optional): If True, Spectral decoupling regularization will be applied to the
545
+ loss matrix. Defaults to False.
546
+ apply_mask (bool, optional): f True, a mask will be applied to the loss matrix. Mask shape: (B, H, W). Defaults to False.
547
+ edge_weight (float, optional): Weight that is added to object borders. Defaults to None.
548
+ """
549
+
550
+ def __init__(
551
+ self,
552
+ alpha: float = 1e-4,
553
+ apply_sd: bool = False,
554
+ apply_mask: bool = False,
555
+ edge_weight: float = None,
556
+ **kwargs,
557
+ ) -> None:
558
+ super().__init__(apply_sd, False, False, apply_mask, False, edge_weight)
559
+ self.alpha = alpha
560
+ self.eps = 1e-7
561
+
562
+ def forward(
563
+ self,
564
+ input: torch.Tensor,
565
+ target: torch.Tensor,
566
+ target_weight: torch.Tensor = None,
567
+ mask: torch.Tensor = None,
568
+ **kwargs,
569
+ ) -> torch.Tensor:
570
+ """Compute the masked MAE loss.
571
+
572
+ Args:
573
+ input (torch.Tensor): The prediction map. Shape (B, C, H, W).
574
+ target (torch.Tensor): The ground truth annotations. Shape (B, H, W).
575
+ target_weight (torch.Tensor, optional): The edge weight map. Shape (B, H, W). Defaults to None.
576
+ mask (torch.Tensor, optional): The mask map. Shape (B, H, W). Defaults to None.
577
+
578
+ Raises:
579
+ ValueError: Pred and target shapes must match.
580
+
581
+ Returns:
582
+ torch.Tensor: Computed MAE loss (scalar).
583
+ """
584
+ yhat = input
585
+ n_classes = yhat.shape[1]
586
+ if target.size() != yhat.size():
587
+ target = target.unsqueeze(1).repeat_interleave(n_classes, dim=1)
588
+
589
+ if not yhat.shape == target.shape:
590
+ raise ValueError(
591
+ f"Pred and target shapes must match. Got: {yhat.shape}, {target.shape}"
592
+ )
593
+
594
+ # compute the MAE loss with alpha as weight
595
+ mae_loss = torch.mean(torch.abs(target - yhat), axis=1) # (B, H, W)
596
+
597
+ if self.apply_mask and mask is not None:
598
+ mae_loss = self.apply_mask_weight(mae_loss, mask, norm=True) # (B, H, W)
599
+
600
+ # add the background regularization
601
+ if self.alpha > 0:
602
+ reg = torch.mean(((1 - mask).unsqueeze(1)) * torch.abs(yhat), axis=1)
603
+ mae_loss += self.alpha * reg
604
+
605
+ if self.apply_sd:
606
+ mae_loss = self.apply_spectral_decouple(mae_loss, yhat)
607
+
608
+ if self.edge_weight is not None:
609
+ mae_loss = self.apply_edge_weights(mae_loss, target_weight)
610
+
611
+ return mae_loss.mean()
612
+
613
+
614
+ class MSEWeighted(WeightedBaseLoss):
615
+ """MSE-loss.
616
+
617
+ Args:
618
+ apply_sd (bool, optional): If True, Spectral decoupling regularization will be applied to the
619
+ loss matrix. Defaults to False.
620
+ apply_ls (bool, optional): If True, Label smoothing will be applied to the target. Defaults to False.
621
+ apply_svls (bool, optional): If True, spatially varying label smoothing will be applied to the target. Defaults to False.
622
+ apply_mask (bool, optional): If True, a mask will be applied to the loss matrix. Mask shape: (B, H, W). Defaults to False.
623
+ edge_weight (float, optional): Weight that is added to object borders. Defaults to None.
624
+ class_weights (torch.Tensor, optional): Class weights. A tensor of shape (n_classes,). Defaults to None.
625
+ """
626
+
627
+ def __init__(
628
+ self,
629
+ apply_sd: bool = False,
630
+ apply_ls: bool = False,
631
+ apply_svls: bool = False,
632
+ apply_mask: bool = False,
633
+ edge_weight: float = None,
634
+ class_weights: torch.Tensor = None,
635
+ **kwargs,
636
+ ) -> None:
637
+ super().__init__(
638
+ apply_sd, apply_ls, apply_svls, apply_mask, class_weights, edge_weight
639
+ )
640
+
641
+ @staticmethod
642
+ def tensor_one_hot(type_map: torch.Tensor, n_classes: int) -> torch.Tensor:
643
+ """Convert a segmentation mask into one-hot-format.
644
+
645
+ I.e. Takes in a segmentation mask of shape (B, H, W) and reshapes it
646
+ into a tensor of shape (B, C, H, W).
647
+
648
+ Args:
649
+ type_map (torch.Tensor): Multi-label Segmentation mask. Shape (B, H, W).
650
+ n_classes (int): Number of classes. (Zero-class included.)
651
+
652
+ Raises:
653
+ TypeError: Input `type_map` should have dtype: torch.int64.
654
+
655
+ Returns:
656
+ torch.Tensor: A one hot tensor. Shape: (B, C, H, W). Dtype: torch.FloatTensor.
657
+ """
658
+ if not type_map.dtype == torch.int64:
659
+ raise TypeError(
660
+ f"""
661
+ Input `type_map` should have dtype: torch.int64. Got: {type_map.dtype}."""
662
+ )
663
+
664
+ one_hot = torch.zeros(
665
+ type_map.shape[0],
666
+ n_classes,
667
+ *type_map.shape[1:],
668
+ device=type_map.device,
669
+ dtype=type_map.dtype,
670
+ )
671
+
672
+ return one_hot.scatter_(dim=1, index=type_map.unsqueeze(1), value=1.0) + 1e-7
673
+
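A quick sanity check for `tensor_one_hot` (shapes are illustrative; the result is float-valued because of the `1e-7` offset added after scattering):

    import torch

    type_map = torch.tensor([[[0, 1], [2, 1]]], dtype=torch.int64)  # (B=1, H=2, W=2)
    one_hot = MSEWeighted.tensor_one_hot(type_map, n_classes=3)
    assert one_hot.shape == (1, 3, 2, 2)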
674
+ def forward(
675
+ self,
676
+ input: torch.Tensor,
677
+ target: torch.Tensor,
678
+ target_weight: torch.Tensor = None,
679
+ mask: torch.Tensor = None,
680
+ **kwargs,
681
+ ) -> torch.Tensor:
682
+ """Compute the MSE-loss.
683
+
684
+ Args:
685
+ input (torch.Tensor): The prediction map. Shape (B, C, H, W).
686
+ target (torch.Tensor): The ground truth annotations. Shape (B, H, W).
687
+ target_weight (torch.Tensor, optional): The edge weight map. Shape (B, H, W). Defaults to None.
688
+ mask (torch.Tensor, optional): The mask map. Shape (B, H, W). Defaults to None.
689
+
690
+ Returns:
691
+ torch.Tensor: Computed MSE loss (scalar).
692
+ """
693
+ yhat = input
694
+ target_one_hot = target
695
+ num_classes = yhat.shape[1]
696
+
697
+ if target.size() != yhat.size():
698
+ if target.dtype == torch.float32:
699
+ target_one_hot = target.unsqueeze(1)
700
+ else:
701
+ target_one_hot = MSEWeighted.tensor_one_hot(target, num_classes)
702
+
703
+ if self.apply_svls:
704
+ target_one_hot = self.apply_svls_to_target(
705
+ target_one_hot, num_classes, **kwargs
706
+ )
707
+
708
+ if self.apply_ls:
709
+ target_one_hot = self.apply_ls_to_target(
710
+ target_one_hot, num_classes, **kwargs
711
+ )
712
+
713
+ mse = F.mse_loss(yhat, target_one_hot, reduction="none") # (B, C, H, W)
714
+ mse = torch.mean(mse, dim=1) # to (B, H, W)
715
+
716
+ if self.apply_mask and mask is not None:
717
+ mse = self.apply_mask_weight(mse, mask, norm=False) # (B, H, W)
718
+
719
+ if self.apply_sd:
720
+ mse = self.apply_spectral_decouple(mse, yhat)
721
+
722
+ if self.class_weights is not None:
723
+ mse = self.apply_class_weights(mse, target)
724
+
725
+ if self.edge_weight is not None:
726
+ mse = self.apply_edge_weights(mse, target_weight)
727
+
728
+ return torch.mean(mse)
729
+
730
+
731
+ class BCEWeighted(WeightedBaseLoss):
732
+ def __init__(
733
+ self,
734
+ apply_sd: bool = False,
735
+ apply_ls: bool = False,
736
+ apply_svls: bool = False,
737
+ apply_mask: bool = False,
738
+ edge_weight: float = None,
739
+ class_weights: torch.Tensor = None,
740
+ **kwargs,
741
+ ) -> None:
742
+ """Binary cross entropy loss with weighting and other tricks.
743
+
744
+ Parameters
745
+ ----------
746
+ apply_sd : bool, default=False
747
+ If True, Spectral decoupling regularization will be applied to the
748
+ loss matrix.
749
+ apply_ls : bool, default=False
750
+ If True, Label smoothing will be applied to the target.
751
+ apply_svls : bool, default=False
752
+ If True, spatially varying label smoothing will be applied to the target
753
+ apply_mask : bool, default=False
754
+ If True, a mask will be applied to the loss matrix. Mask shape: (B, H, W)
755
+ edge_weight : float, default=None
756
+ Weight that is added to object borders.
757
+ class_weights : torch.Tensor, default=None
758
+ Class weights. A tensor of shape (n_classes,).
759
+ """
760
+ super().__init__(
761
+ apply_sd, apply_ls, apply_svls, apply_mask, class_weights, edge_weight
762
+ )
763
+ self.eps = 1e-8
764
+
765
+ def forward(
766
+ self,
767
+ input: torch.Tensor,
768
+ target: torch.Tensor,
769
+ target_weight: torch.Tensor = None,
770
+ mask: torch.Tensor = None,
771
+ **kwargs,
772
+ ) -> torch.Tensor:
773
+ """Compute binary cross entropy loss.
774
+
775
+ Parameters
776
+ ----------
777
+ input : torch.Tensor
778
+ The prediction map. Shape (B, C, H, W).
779
+ target : torch.Tensor
780
+ the ground truth annotations. Shape (B, H, W).
781
+ target_weight : torch.Tensor, default=None
782
+ The edge weight map. Shape (B, H, W).
783
+ mask : torch.Tensor, default=None
784
+ The mask map. Shape (B, H, W).
785
+
786
+ Returns
787
+ -------
788
+ torch.Tensor:
789
+ Computed BCE loss (scalar).
790
+ """
791
+ # Logits input
792
+ yhat = input
793
+ num_classes = yhat.shape[1]
794
+ yhat = torch.clip(yhat, self.eps, 1.0 - self.eps)  # note: this clamps the raw logits to [eps, 1 - eps] before BCE-with-logits
795
+
796
+ if target.size() != yhat.size():
797
+ target = target.unsqueeze(1).repeat_interleave(num_classes, dim=1)
798
+
799
+ if self.apply_svls:
800
+ target = self.apply_svls_to_target(target, num_classes, **kwargs)
801
+
802
+ if self.apply_ls:
803
+ target = self.apply_ls_to_target(target, num_classes, **kwargs)
804
+
805
+ bce = F.binary_cross_entropy_with_logits(
806
+ yhat.float(), target.float(), reduction="none"
807
+ ) # (B, C, H, W)
808
+ bce = torch.mean(bce, dim=1) # (B, H, W)
809
+
810
+ if self.apply_mask and mask is not None:
811
+ bce = self.apply_mask_weight(bce, mask, norm=False) # (B, H, W)
812
+
813
+ if self.apply_sd:
814
+ bce = self.apply_spectral_decouple(bce, yhat)
815
+
816
+ if self.class_weights is not None:
817
+ bce = self.apply_class_weights(bce, target)
818
+
819
+ if self.edge_weight is not None:
820
+ bce = self.apply_edge_weights(bce, target_weight)
821
+
822
+ return torch.mean(bce)
823
+
824
+
825
+ # class BCEWeighted(WeightedBaseLoss):
826
+ # """Binary cross entropy loss with weighting and other tricks.
827
+ # Adapted/Copied from: https://github.com/okunator/cellseg_models.pytorch (10.5281/zenodo.7064617)
828
+
829
+ # Args:
830
+ # apply_sd (bool, optional): If True, Spectral decoupling regularization will be applied to the
831
+ # loss matrix. Defaults to False.
832
+ # apply_ls (bool, optional): If True, Label smoothing will be applied to the target. Defaults to False.
833
+ # apply_svls (bool, optional): If True, spatially varying label smoothing will be applied to the target. Defaults to False.
834
+ # apply_mask (bool, optional): If True, a mask will be applied to the loss matrix. Mask shape: (B, H, W). Defaults to False.
835
+ # edge_weight (float, optional): Weight that is added to object borders. Defaults to None.
836
+ # class_weights (torch.Tensor, optional): Class weights. A tensor of shape (n_classes,). Defaults to None.
837
+ # """
838
+
839
+ # def __init__(
840
+ # self,
841
+ # apply_sd: bool = False,
842
+ # apply_ls: bool = False,
843
+ # apply_svls: bool = False,
844
+ # apply_mask: bool = False,
845
+ # edge_weight: float = None,
846
+ # class_weights: torch.Tensor = None,
847
+ # **kwargs,
848
+ # ) -> None:
849
+ # super().__init__(
850
+ # apply_sd, apply_ls, apply_svls, apply_mask, class_weights, edge_weight
851
+ # )
852
+ # self.eps = 1e-8
853
+
854
+ # def forward(
855
+ # self,
856
+ # input: torch.Tensor,
857
+ # target: torch.Tensor,
858
+ # target_weight: torch.Tensor = None,
859
+ # mask: torch.Tensor = None,
860
+ # **kwargs,
861
+ # ) -> torch.Tensor:
862
+ # """Compute binary cross entropy loss.
863
+
864
+ # Args:
865
+ # input (torch.Tensor): The prediction map. We internally convert back via logit function. Shape (B, C, H, W).
866
+ # target (torch.Tensor): the ground truth annotations. Shape (B, H, W).
867
+ # target_weight (torch.Tensor, optional): The edge weight map. Shape (B, H, W). Defaults to None.
868
+ # mask (torch.Tensor, optional): The mask map. Shape (B, H, W). Defaults to None.
869
+
870
+ # Returns:
871
+ # torch.Tensor: Computed BCE loss (scalar).
872
+ # """
873
+ # yhat = input
874
+ # yhat = torch.special.logit(yhat)
875
+ # num_classes = yhat.shape[1]
876
+ # yhat = torch.clip(yhat, self.eps, 1.0 - self.eps)
877
+
878
+ # if target.size() != yhat.size():
879
+ # target = target.unsqueeze(1).repeat_interleave(num_classes, dim=1)
880
+
881
+ # if self.apply_svls:
882
+ # target = self.apply_svls_to_target(target, num_classes, **kwargs)
883
+
884
+ # if self.apply_ls:
885
+ # target = self.apply_ls_to_target(target, num_classes, **kwargs)
886
+
887
+ # bce = F.binary_cross_entropy_with_logits(
888
+ # yhat.float(), target.float(), reduction="none"
889
+ # ) # (B, C, H, W)
890
+ # bce = torch.mean(bce, dim=1) # (B, H, W)
891
+
892
+ # if self.apply_mask and mask is not None:
893
+ # bce = self.apply_mask_weight(bce, mask, norm=False) # (B, H, W)
894
+
895
+ # if self.apply_sd:
896
+ # bce = self.apply_spectral_decouple(bce, yhat)
897
+
898
+ # if self.class_weights is not None:
899
+ # bce = self.apply_class_weights(bce, target)
900
+
901
+ # if self.edge_weight is not None:
902
+ # bce = self.apply_edge_weights(bce, target_weight)
903
+
904
+ # return torch.mean(bce)
905
+
906
+
907
+ class CEWeighted(WeightedBaseLoss):
908
+ def __init__(
909
+ self,
910
+ apply_sd: bool = False,
911
+ apply_ls: bool = False,
912
+ apply_svls: bool = False,
913
+ apply_mask: bool = False,
914
+ edge_weight: float = None,
915
+ class_weights: torch.Tensor = None,
916
+ **kwargs,
917
+ ) -> None:
918
+ """Cross-Entropy loss with weighting.
919
+
920
+ Parameters
921
+ ----------
922
+ apply_sd : bool, default=False
923
+ If True, Spectral decoupling regularization will be applied to the
924
+ loss matrix.
925
+ apply_ls : bool, default=False
926
+ If True, Label smoothing will be applied to the target.
927
+ apply_svls : bool, default=False
928
+ If True, spatially varying label smoothing will be applied to the target
929
+ apply_mask : bool, default=False
930
+ If True, a mask will be applied to the loss matrix. Mask shape: (B, H, W)
931
+ edge_weight : float, default=None
932
+ Weight that is added to object borders.
933
+ class_weights : torch.Tensor, default=None
934
+ Class weights. A tensor of shape (n_classes,).
935
+ """
936
+ super().__init__(
937
+ apply_sd, apply_ls, apply_svls, apply_mask, class_weights, edge_weight
938
+ )
939
+ self.eps = 1e-8
940
+
941
+ def forward(
942
+ self,
943
+ input: torch.Tensor,
944
+ target: torch.Tensor,
945
+ target_weight: torch.Tensor = None,
946
+ mask: torch.Tensor = None,
947
+ **kwargs,
948
+ ) -> torch.Tensor:
949
+ """Compute the cross entropy loss.
950
+
951
+ Parameters
952
+ ----------
953
+ input : torch.Tensor
954
+ The prediction map. Shape (B, C, H, W).
955
+ target : torch.Tensor
956
+ the ground truth annotations. Shape (B, H, W).
957
+ target_weight : torch.Tensor, default=None
958
+ The edge weight map. Shape (B, H, W).
959
+ mask : torch.Tensor, default=None
960
+ The mask map. Shape (B, H, W).
961
+
962
+ Returns
963
+ -------
964
+ torch.Tensor:
965
+ Computed CE loss (scalar).
966
+ """
967
+ yhat = input # TODO: remove doubled Softmax -> this function needs logits instead of softmax output
968
+ input_soft = F.softmax(yhat, dim=1) + self.eps # (B, C, H, W)
969
+ num_classes = yhat.shape[1]
970
+ if len(target.shape) != len(yhat.shape) and target.shape[1] != num_classes:
971
+ target_one_hot = MSEWeighted.tensor_one_hot(
972
+ target, num_classes
973
+ ) # (B, C, H, W)
974
+ else:
975
+ target_one_hot = target
976
+ target = torch.argmax(target, dim=1)
977
+ assert target_one_hot.shape == yhat.shape
978
+
979
+ if self.apply_svls:
980
+ target_one_hot = self.apply_svls_to_target(
981
+ target_one_hot, num_classes, **kwargs
982
+ )
983
+
984
+ if self.apply_ls:
985
+ target_one_hot = self.apply_ls_to_target(
986
+ target_one_hot, num_classes, **kwargs
987
+ )
988
+
989
+ loss = -torch.sum(target_one_hot * torch.log(input_soft), dim=1) # (B, H, W)
990
+
991
+ if self.apply_mask and mask is not None:
992
+ loss = self.apply_mask_weight(loss, mask, norm=False) # (B, H, W)
993
+
994
+ if self.apply_sd:
995
+ loss = self.apply_spectral_decouple(loss, yhat)
996
+
997
+ if self.class_weights is not None:
998
+ loss = self.apply_class_weights(loss, target)
999
+
1000
+ if self.edge_weight is not None:
1001
+ loss = self.apply_edge_weights(loss, target_weight)
1002
+
1003
+ return loss.mean()
1004
+
1005
+
1006
+ # class CEWeighted(WeightedBaseLoss):
1007
+ # """Cross-Entropy loss with weighting.
1008
+ # Adapted/Copied from: https://github.com/okunator/cellseg_models.pytorch (10.5281/zenodo.7064617)
1009
+
1010
+ # Args:
1011
+ # apply_sd (bool, optional): If True, Spectral decoupling regularization will be applied to the loss matrix. Defaults to False.
1012
+ # apply_ls (bool, optional): If True, Label smoothing will be applied to the target. Defaults to False.
1013
+ # apply_svls (bool, optional): If True, spatially varying label smoothing will be applied to the target. Defaults to False.
1014
+ # apply_mask (bool, optional): If True, a mask will be applied to the loss matrix. Mask shape: (B, H, W). Defaults to False.
1015
+ # edge_weight (float, optional): Weight that is added to object borders. Defaults to None.
1016
+ # class_weights (torch.Tensor, optional): Class weights. A tensor of shape (n_classes,). Defaults to None.
1017
+ # logits (bool, optional): If work on logit values. Defaults to False. Defaults to False.
1018
+ # """
1019
+
1020
+ # def __init__(
1021
+ # self,
1022
+ # apply_sd: bool = False,
1023
+ # apply_ls: bool = False,
1024
+ # apply_svls: bool = False,
1025
+ # apply_mask: bool = False,
1026
+ # edge_weight: float = None,
1027
+ # class_weights: torch.Tensor = None,
1028
+ # logits: bool = False,
1029
+ # **kwargs,
1030
+ # ) -> None:
1031
+ # super().__init__(
1032
+ # apply_sd, apply_ls, apply_svls, apply_mask, class_weights, edge_weight
1033
+ # )
1034
+ # self.eps = 1e-8
1035
+ # self.logits = logits
1036
+
1037
+ # def forward(
1038
+ # self,
1039
+ # input: torch.Tensor,
1040
+ # target: torch.Tensor,
1041
+ # target_weight: torch.Tensor = None,
1042
+ # mask: torch.Tensor = None,
1043
+ # **kwargs,
1044
+ # ) -> torch.Tensor:
1045
+ # """Compute the cross entropy loss.
1046
+
1047
+ # Args:
1048
+ # input (torch.Tensor): The prediction map. Shape (B, C, H, W).
1049
+ # target (torch.Tensor): The ground truth annotations. Shape (B, H, W).
1050
+ # target_weight (torch.Tensor, optional): The edge weight map. Shape (B, H, W). Defaults to None.
1051
+ # mask (torch.Tensor, optional): The mask map. Shape (B, H, W). Defaults to None.
1052
+
1053
+ # Returns:
1054
+ # torch.Tensor: Computed CE loss (scalar).
1055
+ # """
1056
+ # yhat = input
1057
+ # if self.logits:
1058
+ # input_soft = (
1059
+ # F.softmax(yhat, dim=1) + self.eps
1060
+ # ) # (B, C, H, W) # check if doubled softmax
1061
+ # else:
1062
+ # input_soft = input
1063
+
1064
+ # num_classes = yhat.shape[1]
1065
+ # if len(target.shape) != len(yhat.shape) and target.shape[1] != num_classes:
1066
+ # target_one_hot = MSEWeighted.tensor_one_hot(
1067
+ # target, num_classes
1068
+ # ) # (B, C, H, W)
1069
+ # else:
1070
+ # target_one_hot = target
1071
+ # target = torch.argmax(target, dim=1)
1072
+ # assert target_one_hot.shape == yhat.shape
1073
+
1074
+ # if self.apply_svls:
1075
+ # target_one_hot = self.apply_svls_to_target(
1076
+ # target_one_hot, num_classes, **kwargs
1077
+ # )
1078
+
1079
+ # if self.apply_ls:
1080
+ # target_one_hot = self.apply_ls_to_target(
1081
+ # target_one_hot, num_classes, **kwargs
1082
+ # )
1083
+
1084
+ # loss = -torch.sum(target_one_hot * torch.log(input_soft), dim=1) # (B, H, W)
1085
+
1086
+ # if self.apply_mask and mask is not None:
1087
+ # loss = self.apply_mask_weight(loss, mask, norm=False) # (B, H, W)
1088
+
1089
+ # if self.apply_sd:
1090
+ # loss = self.apply_spectral_decouple(loss, yhat)
1091
+
1092
+ # if self.class_weights is not None:
1093
+ # loss = self.apply_class_weights(loss, target)
1094
+
1095
+ # if self.edge_weight is not None:
1096
+ # loss = self.apply_edge_weights(loss, target_weight)
1097
+
1098
+ # return loss.mean()
1099
+
1100
+
1101
+ ### Stardist loss functions
1102
+ class L1LossWeighted(nn.Module):
1103
+ def __init__(self) -> None:
1104
+ super().__init__()
1105
+
1106
+ def forward(
1107
+ self,
1108
+ input: torch.Tensor,
1109
+ target: torch.Tensor,
1110
+ target_weight: torch.Tensor = None,
1111
+ ) -> torch.Tensor:
1112
+ l1loss = F.l1_loss(input, target, reduction="none")  # per-element loss (the deprecated size_average/reduce flags map to reduction="none")
1113
+ l1loss = torch.mean(l1loss, dim=1)
1114
+ if target_weight is not None:
1115
+ l1loss = torch.mean(target_weight * l1loss)
1116
+ else:
1117
+ l1loss = torch.mean(l1loss)
1118
+ return l1loss
1119
+
1120
+
1121
+ def retrieve_loss_fn(loss_name: str, **kwargs) -> _Loss:
1122
+ """Return the loss function with given name defined in the LOSS_DICT and initialize with kwargs
1123
+
1124
+ kwargs must match with the parameters defined in the initialization method of the selected loss object
1125
+
1126
+ Args:
1127
+ loss_name (str): Name of the loss function (key in LOSS_DICT)
1128
+
1129
+ Returns:
1130
+ _Loss: Instantiated loss function
1131
+ """
1132
+ loss_fn = LOSS_DICT[loss_name]
1133
+ loss_fn = loss_fn(**kwargs)
1134
+
1135
+ return loss_fn
1136
+
1137
+
1138
+ LOSS_DICT = {
1139
+ "xentropy_loss": XentropyLoss,
1140
+ "dice_loss": DiceLoss,
1141
+ "mse_loss_maps": MSELossMaps,
1142
+ "msge_loss_maps": MSGELossMaps,
1143
+ "FocalTverskyLoss": FocalTverskyLoss,
1144
+ "MCFocalTverskyLoss": MCFocalTverskyLoss,
1145
+ "CrossEntropyLoss": nn.CrossEntropyLoss, # input logits, targets
1146
+ "L1Loss": nn.L1Loss,
1147
+ "MSELoss": nn.MSELoss,
1148
+ "CTCLoss": nn.CTCLoss, # probability
1149
+ "NLLLoss": nn.NLLLoss, # log-probabilities of each class
1150
+ "PoissonNLLLoss": nn.PoissonNLLLoss,
1151
+ "GaussianNLLLoss": nn.GaussianNLLLoss,
1152
+ "KLDivLoss": nn.KLDivLoss, # argument input in log-space
1153
+ "BCELoss": nn.BCELoss, # probabilities
1154
+ "BCEWithLogitsLoss": nn.BCEWithLogitsLoss, # logits
1155
+ "MarginRankingLoss": nn.MarginRankingLoss,
1156
+ "HingeEmbeddingLoss": nn.HingeEmbeddingLoss,
1157
+ "MultiLabelMarginLoss": nn.MultiLabelMarginLoss,
1158
+ "HuberLoss": nn.HuberLoss,
1159
+ "SmoothL1Loss": nn.SmoothL1Loss,
1160
+ "SoftMarginLoss": nn.SoftMarginLoss, # logits
1161
+ "MultiLabelSoftMarginLoss": nn.MultiLabelSoftMarginLoss,
1162
+ "CosineEmbeddingLoss": nn.CosineEmbeddingLoss,
1163
+ "MultiMarginLoss": nn.MultiMarginLoss,
1164
+ "TripletMarginLoss": nn.TripletMarginLoss,
1165
+ "TripletMarginWithDistanceLoss": nn.TripletMarginWithDistanceLoss,
1166
+ "MAEWeighted": MAEWeighted,
1167
+ "MSEWeighted": MSEWeighted,
1168
+ "BCEWeighted": BCEWeighted, # logits
1169
+ "CEWeighted": CEWeighted, # logits
1170
+ "L1LossWeighted": L1LossWeighted,
1171
+ }
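With the registry above, losses are built by name; a short sketch, assuming the selected constructors accept the forwarded keyword arguments:

    dice = retrieve_loss_fn("dice_loss")
    bce = retrieve_loss_fn("BCEWeighted", apply_mask=True, edge_weight=1.1)
    ce = retrieve_loss_fn("CrossEntropyLoss")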
base_ml/base_optim.py ADDED
@@ -0,0 +1,36 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Wrapping all available PyTorch optimizers
3
+ #
4
+ # @ Fabian Hörst, [email protected]
5
+ # Institute for Artificial Intelligence in Medicine,
6
+ # University Medicine Essen
7
+
8
+ from torch.optim import (
9
+ ASGD,
10
+ LBFGS,
11
+ SGD,
12
+ Adadelta,
13
+ Adagrad,
14
+ Adam,
15
+ Adamax,
16
+ AdamW,
17
+ RAdam,
18
+ RMSprop,
19
+ Rprop,
20
+ SparseAdam,
21
+ )
22
+
23
+ OPTI_DICT = {
24
+ "Adadelta": Adadelta,
25
+ "Adagrad": Adagrad,
26
+ "Adam": Adam,
27
+ "AdamW": AdamW,
28
+ "SparseAdam": SparseAdam,
29
+ "Adamax": Adamax,
30
+ "ASGD": ASGD,
31
+ "LBFGS": LBFGS,
32
+ "RAdam": RAdam,
33
+ "RMSprop": RMSprop,
34
+ "Rprop": Rprop,
35
+ "SGD": SGD,
36
+ }
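A hedged sketch of how the registry is typically consumed (the model and hyperparameters are placeholders):

    import torch.nn as nn

    model = nn.Linear(8, 2)  # placeholder model
    optimizer_cls = OPTI_DICT["AdamW"]
    optimizer = optimizer_cls(model.parameters(), lr=3e-4, weight_decay=1e-2)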
base_ml/base_trainer.py ADDED
@@ -0,0 +1,274 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Base Trainer Class
3
+ #
4
+ # @ Fabian Hörst, [email protected]
5
+ # Institute for Artifical Intelligence in Medicine,
6
+ # University Medicine Essen
7
+
8
+ import logging
9
+ from abc import abstractmethod
10
+ from typing import Tuple, Union
11
+
12
+ import torch
13
+ import torch.nn as nn
14
+ import wandb
15
+ from base_ml.base_early_stopping import EarlyStopping
16
+ from pathlib import Path
17
+ from torch.nn.modules.loss import _Loss
18
+ from torch.optim import Optimizer
19
+ from torch.optim.lr_scheduler import _LRScheduler
20
+ from torch.utils.data import DataLoader
21
+ from utils.tools import flatten_dict
22
+
23
+
24
+ class BaseTrainer:
25
+ """
26
+ Base class for all trainers with important ML components
27
+
28
+ Args:
29
+ model (nn.Module): Model that should be trained
30
+ loss_fn (_Loss): Loss function
31
+ optimizer (Optimizer): Optimizer
32
+ scheduler (_LRScheduler): Learning rate scheduler
33
+ device (str): Cuda device to use, e.g., cuda:0.
34
+ logger (logging.Logger): Logger module
35
+ logdir (Union[Path, str]): Logging directory
36
+ experiment_config (dict): Configuration of this experiment
37
+ early_stopping (EarlyStopping, optional): Early Stopping Class. Defaults to None.
38
+ accum_iter (int, optional): Accumulation steps for gradient accumulation.
39
+ Provide a number greater than 1 for activating gradient accumulation. Defaults to 1.
40
+ mixed_precision (bool, optional): If mixed-precision should be used. Defaults to False.
41
+ log_images (bool, optional): If images should be logged to WandB. Defaults to False.
42
+ """
43
+
44
+ def __init__(
45
+ self,
46
+ model: nn.Module,
47
+ loss_fn: _Loss,
48
+ optimizer: Optimizer,
49
+ scheduler: _LRScheduler,
50
+ device: str,
51
+ logger: logging.Logger,
52
+ logdir: Union[Path, str],
53
+ experiment_config: dict,
54
+ early_stopping: EarlyStopping = None,
55
+ accum_iter: int = 1,
56
+ mixed_precision: bool = False,
57
+ log_images: bool = False,
58
+ #model_ema: bool = True,
59
+ ) -> None:
60
+ self.model = model
61
+
62
+ self.loss_fn = loss_fn
63
+ self.optimizer = optimizer
64
+ self.scheduler = scheduler
65
+ self.device = device
66
+ self.logger = logger
67
+ self.logdir = Path(logdir)
68
+ self.early_stopping = early_stopping
69
+ self.accum_iter = accum_iter
70
+ self.start_epoch = 0
71
+ self.experiment_config = experiment_config
72
+ self.log_images = log_images
73
+ self.mixed_precision = mixed_precision
74
+ if self.mixed_precision:
75
+ self.scaler = torch.cuda.amp.GradScaler(enabled=True)
76
+ else:
77
+ self.scaler = None
78
+
79
+ @abstractmethod
80
+ def train_epoch(
81
+ self, epoch: int, train_loader: DataLoader, **kwargs
82
+ ) -> Tuple[dict, dict]:
83
+ """Training logic for a training epoch
84
+
85
+ Args:
86
+ epoch (int): Current epoch number
87
+ train_loader (DataLoader): Train dataloader
88
+
89
+ Raises:
90
+ NotImplementedError: Needs to be implemented
91
+
92
+ Returns:
93
+ Tuple[dict, dict]: wandb logging dictionaries
94
+ * Scalar metrics
95
+ * Image metrics
96
+ """
97
+ raise NotImplementedError
98
+
99
+ @abstractmethod
100
+ def validation_epoch(
101
+ self, epoch: int, val_dataloader: DataLoader
102
+ ) -> Tuple[dict, dict, float]:
103
+ """Training logic for an validation epoch
104
+
105
+ Args:
106
+ epoch (int): Current epoch number
107
+ val_dataloader (DataLoader): Validation dataloader
108
+
109
+ Raises:
110
+ NotImplementedError: Needs to be implemented
111
+
112
+ Returns:
113
+ Tuple[dict, dict, float]: wandb logging dictionaries and early_stopping_metric
114
+ * Scalar metrics
115
+ * Image metrics
116
+ * Early Stopping metric as float
117
+ """
118
+ raise NotImplementedError
119
+
120
+ @abstractmethod
121
+ def train_step(self, batch: object, batch_idx: int, num_batches: int):
122
+ """Training logic for one training batch
123
+
124
+ Args:
125
+ batch (object): A training batch
126
+ batch_idx (int): Current batch index
127
+ num_batches (int): Maximum number of batches
128
+
129
+ Raises:
130
+ NotImplementedError: Needs to be implemented
131
+ """
132
+
133
+ raise NotImplementedError
134
+
135
+ @abstractmethod
136
+ def validation_step(self, batch, batch_idx: int):
137
+ """Training logic for one validation batch
138
+
139
+ Args:
140
+ batch (object): A training batch
141
+ batch_idx (int): Current batch index
142
+
143
+ Raises:
144
+ NotImplementedError: Needs to be implemented
145
+ """
146
+
147
+ def fit(
148
+ self,
149
+ epochs: int,
150
+ train_dataloader: DataLoader,
151
+ val_dataloader: DataLoader,
152
+ metric_init: dict = None,
153
+ eval_every: int = 1,
154
+ **kwargs,
155
+ ):
156
+ """Fitting function to start training and validation of the trainer
157
+
158
+ Args:
159
+ epochs (int): Number of epochs the network should be training
160
+ train_dataloader (DataLoader): Dataloader with training data
161
+ val_dataloader (DataLoader): Dataloader with validation data
162
+ metric_init (dict, optional): Initialization dictionary with scalar metrics that should be initialized for startup.
163
+ This is mainly important for logging with wandb if you want to have the plots properly scaled.
164
+ The data in the metric dictionary is used as values for epoch 0 (before training has started).
165
+ If not provided, step 0 (epoch 0) is not logged. Should have the same scalar keys as training and validation epochs report.
166
+ For more information, you should have a look into the train_epoch and val_epoch methods where the wandb logging dicts are assembled.
167
+ Defaults to None.
168
+ eval_every (int, optional): How often the network should be evaluated (after how many epochs). Defaults to 1.
169
+ **kwargs
170
+ """
171
+
172
+ self.logger.info(f"Starting training, total number of epochs: {epochs}")
173
+ if metric_init is not None and self.start_epoch == 0:
174
+ wandb.log(metric_init, step=0)
175
+ for epoch in range(self.start_epoch, epochs):
176
+ # training epoch
177
+ #train_sampler.set_epoch(epoch) # for distributed training
178
+ self.logger.info(f"Epoch: {epoch+1}/{epochs}")
179
+ train_scalar_metrics, train_image_metrics = self.train_epoch(
180
+ epoch, train_dataloader, **kwargs
181
+ )
182
+ wandb.log(train_scalar_metrics, step=epoch + 1)
183
+ if self.log_images:
184
+ wandb.log(train_image_metrics, step=epoch + 1)
185
+ if epoch >= 95 and (epoch + 1) % eval_every == 0:  # validation is skipped for the first 95 epochs (hard-coded)
186
+ # validation epoch
187
+ (
188
+ val_scalar_metrics,
189
+ val_image_metrics,
190
+ early_stopping_metric,
191
+ ) = self.validation_epoch(epoch, val_dataloader)
192
+ wandb.log(val_scalar_metrics, step=epoch + 1)
193
+ if self.log_images:
194
+ wandb.log(val_image_metrics, step=epoch + 1)
195
+
196
+ #self.save_checkpoint(epoch, f"checkpoint_{epoch}.pth")
197
+
198
+ # log learning rate
199
+ curr_lr = self.optimizer.param_groups[0]["lr"]
200
+ wandb.log(
201
+ {
202
+ "Learning-Rate/Learning-Rate": curr_lr,
203
+ },
204
+ step=epoch + 1,
205
+ )
206
+ if epoch >= 95 and (epoch + 1) % eval_every == 0:  # early stopping only runs in epochs where validation ran
207
+ # early stopping
208
+ if self.early_stopping is not None:
209
+ best_model = self.early_stopping(early_stopping_metric, epoch)
210
+ if best_model:
211
+ self.logger.info("New best model - save checkpoint")
212
+ self.save_checkpoint(epoch, "model_best.pth")
213
+ elif self.early_stopping.early_stop:
214
+ self.logger.info("Performing early stopping!")
215
+ break
216
+ self.save_checkpoint(epoch, "latest_checkpoint.pth")
217
+
218
+ # scheduling
219
+ if isinstance(self.scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
220
+ self.scheduler.step(float(val_scalar_metrics["Loss/Validation"]))
221
+ else:
222
+ self.scheduler.step()
223
+ new_lr = self.optimizer.param_groups[0]["lr"]
224
+ self.logger.debug(f"Old lr: {curr_lr:.6f} - New lr: {new_lr:.6f}")
225
+
226
+ def save_checkpoint(self, epoch: int, checkpoint_name: str):
227
+ if self.early_stopping is None:
228
+ best_metric = None
229
+ best_epoch = None
230
+ else:
231
+ best_metric = self.early_stopping.best_metric
232
+ best_epoch = self.early_stopping.best_epoch
233
+
234
+ arch = type(self.model).__name__
235
+ state = {
236
+ "arch": arch,
237
+ "epoch": epoch,
238
+ "model_state_dict": self.model.state_dict(),
239
+ "optimizer_state_dict": self.optimizer.state_dict(),
240
+ "scheduler_state_dict": self.scheduler.state_dict(),
241
+ "best_metric": best_metric,
242
+ "best_epoch": best_epoch,
243
+ "config": flatten_dict(wandb.config),
244
+ "wandb_id": wandb.run.id,
245
+ "logdir": str(self.logdir.resolve()),
246
+ "run_name": str(Path(self.logdir).name),
247
+ "scaler_state_dict": self.scaler.state_dict()
248
+ if self.scaler is not None
249
+ else None,
250
+ }
251
+
252
+ checkpoint_dir = self.logdir / "checkpoints"
253
+ checkpoint_dir.mkdir(exist_ok=True, parents=True)
254
+
255
+ filename = str(checkpoint_dir / checkpoint_name)
256
+ torch.save(state, filename)
257
+
258
+ def resume_checkpoint(self, checkpoint):
259
+ self.logger.info("Loading checkpoint")
260
+ self.logger.info("Loading Model")
261
+ self.model.load_state_dict(checkpoint["model_state_dict"])
262
+ self.logger.info("Loading Optimizer state dict")
263
+ self.optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
264
+ self.scheduler.load_state_dict(checkpoint["scheduler_state_dict"])
265
+
266
+ if self.early_stopping is not None:
267
+ self.early_stopping.best_metric = checkpoint["best_metric"]
268
+ self.early_stopping.best_epoch = checkpoint["best_epoch"]
269
+ if self.scaler is not None:
270
+ self.scaler.load_state_dict(checkpoint["scaler_state_dict"])
271
+
272
+ self.logger.info(f"Checkpoint epoch: {int(checkpoint['epoch'])}")
273
+ self.start_epoch = int(checkpoint["epoch"])
274
+ self.logger.info(f"Next epoch is: {self.start_epoch + 1}")
base_ml/base_utils.py ADDED
@@ -0,0 +1,136 @@
1
+ # -*- coding: utf-8 -*-
2
+ import torch
3
+ import torch.nn.functional as F
4
+
5
+ __all__ = ["filter2D", "gaussian", "gaussian_kernel2d", "sobel_hv"]
6
+
7
+
8
+ def filter2D(input_tensor: torch.Tensor, kernel: torch.Tensor) -> torch.Tensor:
9
+ """Convolves a given kernel on input tensor without losing dimensional shape.
10
+
11
+ Parameters
12
+ ----------
13
+ input_tensor : torch.Tensor
14
+ Input image/tensor.
15
+ kernel : torch.Tensor
16
+ Convolution kernel/window.
17
+
18
+ Returns
19
+ -------
20
+ torch.Tensor:
21
+ The convolved tensor of same shape as the input.
22
+ """
23
+ (_, channel, _, _) = input_tensor.size()
24
+
25
+ # "SAME" padding to avoid losing height and width
26
+ pad = [
27
+ kernel.size(2) // 2,
28
+ kernel.size(2) // 2,
29
+ kernel.size(3) // 2,
30
+ kernel.size(3) // 2,
31
+ ]
32
+ pad_tensor = F.pad(input_tensor, pad, "replicate")
33
+
34
+ out = F.conv2d(pad_tensor, kernel, groups=channel)
35
+ return out
36
+
37
+
38
+ def gaussian(
39
+ window_size: int, sigma: float, device: torch.device = None
40
+ ) -> torch.Tensor:
41
+ """Create a gaussian 1D tensor.
42
+
43
+ Parameters
44
+ ----------
45
+ window_size : int
46
+ Number of elements for the output tensor.
47
+ sigma : float
48
+ Std of the gaussian distribution.
49
+ device : torch.device
50
+ Device for the tensor.
51
+
52
+ Returns
53
+ -------
54
+ torch.Tensor:
55
+ A gaussian 1D tensor. Shape: (window_size, ).
56
+ """
57
+ x = torch.arange(window_size, device=device).float() - window_size // 2
58
+ if window_size % 2 == 0:
59
+ x = x + 0.5
60
+
61
+ gauss = torch.exp((-x.pow(2.0) / float(2 * sigma**2)))
62
+
63
+ return gauss / gauss.sum()
64
+
65
+
66
+ def gaussian_kernel2d(
67
+ window_size: int, sigma: float, n_channels: int = 1, device: torch.device = None
68
+ ) -> torch.Tensor:
69
+ """Create 2D window_size**2 sized kernel a gaussial kernel.
70
+
71
+ Parameters
72
+ ----------
73
+ window_size : int
74
+ Number of rows and columns for the output tensor.
75
+ sigma : float
76
+ Std of the gaussian distribution.
77
+ n_channels : int
78
+ Number of channels in the image that will be convolved with
79
+ this kernel.
80
+ device : torch.device
81
+ Device for the kernel.
82
+
83
+ Returns:
84
+ -----------
85
+ torch.Tensor:
86
+ A tensor of shape (n_channels, 1, window_size, window_size)
87
+ """
88
+ kernel_x = gaussian(window_size, sigma, device=device)
89
+ kernel_y = gaussian(window_size, sigma, device=device)
90
+
91
+ kernel_2d = torch.matmul(kernel_x.unsqueeze(-1), kernel_y.unsqueeze(-1).t())
92
+ kernel_2d = kernel_2d.expand(n_channels, 1, window_size, window_size)
93
+
94
+ return kernel_2d
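Together, `gaussian_kernel2d` and `filter2D` give shape-preserving Gaussian smoothing; a small self-contained check:

    import torch

    img = torch.rand(1, 3, 64, 64)  # (B, C, H, W)
    kernel = gaussian_kernel2d(window_size=5, sigma=1.0, n_channels=3)
    smoothed = filter2D(img, kernel)
    assert smoothed.shape == img.shape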
95
+
96
+
97
+ def sobel_hv(window_size: int = 5, device: torch.device = None):
98
+ """Create a kernel that is used to compute 1st order derivatives.
99
+
100
+ Parameters
101
+ ----------
102
+ window_size : int
103
+ Size of the convolution kernel.
104
+ device : torch.device:
105
+ Device for the kernel.
106
+
107
+ Returns
108
+ -------
109
+ torch.Tensor:
110
+ the computed 1st order derivatives of the input tensor.
111
+ Shape (B, 2, H, W)
112
+
113
+ Raises
114
+ ------
115
+ ValueError:
116
+ If `window_size` is not an odd number.
117
+ """
118
+ if not window_size % 2 == 1:
119
+ raise ValueError(f"window_size must be odd. Got: {window_size}")
120
+
121
+ # Generate the sobel kernels
122
+ range_h = torch.arange(
123
+ -window_size // 2 + 1, window_size // 2 + 1, dtype=torch.float32, device=device
124
+ )
125
+ range_v = torch.arange(
126
+ -window_size // 2 + 1, window_size // 2 + 1, dtype=torch.float32, device=device
127
+ )
128
+ h, v = torch.meshgrid(range_h, range_v)  # note: newer PyTorch expects an explicit indexing="ij" argument here
129
+
130
+ kernel_h = h / (h * h + v * v + 1e-6)
131
+ kernel_h = kernel_h.unsqueeze(0).unsqueeze(0)
132
+
133
+ kernel_v = v / (h * h + v * v + 1e-6)
134
+ kernel_v = kernel_v.unsqueeze(0).unsqueeze(0)
135
+
136
+ return torch.cat([kernel_h, kernel_v], dim=0)
base_ml/base_validator.py ADDED
@@ -0,0 +1,18 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Validators
3
+ #
4
+ # @ Fabian Hörst, [email protected]
5
+ # Institute for Artificial Intelligence in Medicine,
6
+ # University Medicine Essen
7
+
8
+ from schema import Schema, Or
9
+
10
+ sweep_schema = Schema(
11
+ {
12
+ "method": Or("grid", "random", "bayes"),
13
+ "name": str,
14
+ "metric": {"name": str, "goal": Or("maximize", "minimize")},
15
+ "run_cap": int,
16
+ },
17
+ ignore_extra_keys=True,
18
+ )
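A sweep config that passes the schema (extra keys such as "parameters" are tolerated because of ignore_extra_keys; validate raises a SchemaError otherwise). The field values here are placeholders:

    sweep_config = {
        "method": "bayes",
        "name": "example-sweep",
        "metric": {"name": "Loss/Validation", "goal": "minimize"},
        "run_cap": 20,
        "parameters": {"lr": {"min": 1e-5, "max": 1e-3}},  # extra key, ignored by the schema
    }
    sweep_schema.validate(sweep_config)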
base_ml/optim_factory.py ADDED
@@ -0,0 +1,190 @@
1
+ # UniRepLKNet: A Universal Perception Large-Kernel ConvNet for Audio, Video, Point Cloud, Time-Series and Image Recognition
2
+ # Github source: https://github.com/AILab-CVC/UniRepLKNet
3
+ # Licensed under The Apache License 2.0 License [see LICENSE for details]
4
+ # Based on RepLKNet, ConvNeXt, timm, DINO and DeiT code bases
5
+ # https://github.com/DingXiaoH/RepLKNet-pytorch
6
+ # https://github.com/facebookresearch/ConvNeXt
7
+ # https://github.com/rwightman/pytorch-image-models/tree/master/timm
8
+ # https://github.com/facebookresearch/deit/
9
+ # https://github.com/facebookresearch/dino
10
+ # --------------------------------------------------------'
11
+ import torch
12
+ from torch import optim as optim
13
+
14
+ from timm.optim.adafactor import Adafactor
15
+ from timm.optim.adahessian import Adahessian
16
+ from timm.optim.adamp import AdamP
17
+ from timm.optim.lookahead import Lookahead
18
+ from timm.optim.nadam import Nadam
19
+ from timm.optim.radam import RAdam
20
+ from timm.optim.rmsprop_tf import RMSpropTF
21
+ from timm.optim.sgdp import SGDP
22
+
23
+ import json
24
+
25
+ try:
26
+ from apex.optimizers import FusedNovoGrad, FusedAdam, FusedLAMB, FusedSGD
27
+ has_apex = True
28
+ except ImportError:
29
+ has_apex = False
30
+
31
+
32
+ def get_num_layer_for_convnext(var_name):
33
+ """
34
+ Divide [3, 3, 27, 3] layers into 12 groups; each group is three
35
+ consecutive blocks, including possible neighboring downsample layers;
36
+ adapted from https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py
37
+ """
38
+ num_max_layer = 12
39
+ if var_name.startswith("downsample_layers"):
40
+ stage_id = int(var_name.split('.')[1])
41
+ if stage_id == 0:
42
+ layer_id = 0
43
+ elif stage_id == 1 or stage_id == 2:
44
+ layer_id = stage_id + 1
45
+ elif stage_id == 3:
46
+ layer_id = 12
47
+ return layer_id
48
+
49
+ elif var_name.startswith("stages"):
50
+ stage_id = int(var_name.split('.')[1])
51
+ block_id = int(var_name.split('.')[2])
52
+ if stage_id == 0 or stage_id == 1:
53
+ layer_id = stage_id + 1
54
+ elif stage_id == 2:
55
+ layer_id = 3 + block_id // 3
56
+ elif stage_id == 3:
57
+ layer_id = 12
58
+ return layer_id
59
+ else:
60
+ return num_max_layer + 1
61
+
62
+ class LayerDecayValueAssigner(object):
63
+ def __init__(self, values):
64
+ self.values = values
65
+
66
+ def get_scale(self, layer_id):
67
+ return self.values[layer_id]
68
+
69
+ def get_layer_id(self, var_name):
70
+ return get_num_layer_for_convnext(var_name)
71
+
72
+
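The assigner is usually fed a geometric series of per-layer scales, one entry per layer id (with `get_num_layer_for_convnext`, ids range from 0 to 13); a sketch with an assumed decay rate of 0.9:

    num_layers = 12
    decay_rate = 0.9
    values = [decay_rate ** (num_layers + 1 - i) for i in range(num_layers + 2)]
    assigner = LayerDecayValueAssigner(values)
    scale = assigner.get_scale(assigner.get_layer_id("stages.0.1.weight"))  # early layers decay most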
73
+ def get_parameter_groups(model, weight_decay=1e-5, skip_list=(), get_num_layer=None, get_layer_scale=None):
74
+ parameter_group_names = {}
75
+ parameter_group_vars = {}
76
+
77
+ for name, param in model.named_parameters():
78
+ if not param.requires_grad:
79
+ continue # frozen weights
80
+ if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list:
81
+ group_name = "no_decay"
82
+ this_weight_decay = 0.
83
+ else:
84
+ group_name = "decay"
85
+ this_weight_decay = weight_decay
86
+ if get_num_layer is not None:
87
+ layer_id = get_num_layer(name)
88
+ group_name = "layer_%d_%s" % (layer_id, group_name)
89
+ else:
90
+ layer_id = None
91
+
92
+ if group_name not in parameter_group_names:
93
+ if get_layer_scale is not None:
94
+ scale = get_layer_scale(layer_id)
95
+ else:
96
+ scale = 1.
97
+
98
+ parameter_group_names[group_name] = {
99
+ "weight_decay": this_weight_decay,
100
+ "params": [],
101
+ "lr_scale": scale
102
+ }
103
+ parameter_group_vars[group_name] = {
104
+ "weight_decay": this_weight_decay,
105
+ "params": [],
106
+ "lr_scale": scale
107
+ }
108
+
109
+ parameter_group_vars[group_name]["params"].append(param)
110
+ parameter_group_names[group_name]["params"].append(name)
111
+ print("Param groups = %s" % json.dumps(parameter_group_names, indent=2))
112
+ return list(parameter_group_vars.values())
113
+
114
+
115
+ def create_optimizer(model, weight_decay, lr, opt, get_num_layer=None, opt_eps=None, opt_betas=None, get_layer_scale=None, filter_bias_and_bn=True, skip_list=None, momentum = 0.9):
116
+ opt_lower = opt.lower()
117
+ weight_decay = weight_decay
118
+ # if weight_decay and filter_bias_and_bn:
119
+ if filter_bias_and_bn:
120
+ skip = {}
121
+ if skip_list is not None:
122
+ skip = skip_list
123
+ elif hasattr(model, 'no_weight_decay'):
124
+ skip = model.no_weight_decay()
125
+ parameters = get_parameter_groups(model, weight_decay, skip, get_num_layer, get_layer_scale)
126
+ weight_decay = 0.
127
+ else:
128
+ parameters = model.parameters()
129
+
130
+ if 'fused' in opt_lower:
131
+ assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'
132
+
133
+ opt_args = dict(lr=lr, weight_decay=weight_decay)
134
+ if opt_eps is not None:
135
+ opt_args['eps'] = opt_eps
136
+ if opt_betas is not None:
137
+ opt_args['betas'] = opt_betas
138
+
139
+ opt_split = opt_lower.split('_')
140
+ opt_lower = opt_split[-1]
141
+ if opt_lower == 'sgd' or opt_lower == 'nesterov':
142
+ opt_args.pop('eps', None)
143
+ optimizer = optim.SGD(parameters, momentum=momentum, nesterov=True, **opt_args)
144
+ elif opt_lower == 'momentum':
145
+ opt_args.pop('eps', None)
146
+ optimizer = optim.SGD(parameters, momentum=momentum, nesterov=False, **opt_args)
147
+ elif opt_lower == 'adam':
148
+ optimizer = optim.Adam(parameters, **opt_args)
149
+ elif opt_lower == 'adamw':
150
+ optimizer = optim.AdamW(parameters, **opt_args)
151
+ elif opt_lower == 'nadam':
152
+ optimizer = Nadam(parameters, **opt_args)
153
+ elif opt_lower == 'radam':
154
+ optimizer = RAdam(parameters, **opt_args)
155
+ elif opt_lower == 'adamp':
156
+ optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args)
157
+ elif opt_lower == 'sgdp':
158
+ optimizer = SGDP(parameters, momentum=momentum, nesterov=True, **opt_args)
159
+ elif opt_lower == 'adadelta':
160
+ optimizer = optim.Adadelta(parameters, **opt_args)
161
+
162
+ elif opt_lower == 'adahessian':
163
+ optimizer = Adahessian(parameters, **opt_args)
164
+ elif opt_lower == 'rmsprop':
165
+ optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=momentum, **opt_args)
166
+ elif opt_lower == 'rmsproptf':
167
+ optimizer = RMSpropTF(parameters, alpha=0.9, momentum=momentum, **opt_args)
168
+ elif opt_lower == 'fusedsgd':
169
+ opt_args.pop('eps', None)
170
+ optimizer = FusedSGD(parameters, momentum=momentum, nesterov=True, **opt_args)
171
+ elif opt_lower == 'fusedmomentum':
172
+ opt_args.pop('eps', None)
173
+ optimizer = FusedSGD(parameters, momentum=momentum, nesterov=False, **opt_args)
174
+ elif opt_lower == 'fusedadam':
175
+ optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)
176
+ elif opt_lower == 'fusedadamw':
177
+ optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)
178
+ elif opt_lower == 'fusedlamb':
179
+ optimizer = FusedLAMB(parameters, **opt_args)
180
+ elif opt_lower == 'fusednovograd':
181
+ opt_args.setdefault('betas', (0.95, 0.98))
182
+ optimizer = FusedNovoGrad(parameters, **opt_args)
183
+ else:
184
+ assert False and "Invalid optimizer"
185
+
186
+ if len(opt_split) > 1:
187
+ if opt_split[0] == 'lookahead':
188
+ optimizer = Lookahead(optimizer)
189
+
190
+ return optimizer
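A hedged end-to-end sketch combining the assigner with `create_optimizer`; the model is a placeholder (normally a ConvNeXt-style backbone):

    import torch.nn as nn

    model = nn.Linear(8, 2)  # placeholder model
    assigner = LayerDecayValueAssigner([0.9 ** (13 - i) for i in range(14)])
    optimizer = create_optimizer(
        model, weight_decay=0.05, lr=1e-3, opt="adamw",
        get_num_layer=assigner.get_layer_id,
        get_layer_scale=assigner.get_scale,
        opt_betas=(0.9, 0.999),
    )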
base_ml/unireplknet_layer_decay_optimizer_constructor.py ADDED
@@ -0,0 +1,169 @@
1
+ # --------------------------------------------------------
2
+ # UniRepLKNet
3
+ # https://github.com/AILab-CVC/UniRepLKNet
4
+ # Licensed under The Apache 2.0 License [see LICENSE for details]
5
+ # --------------------------------------------------------
6
+ import json
7
+ from mmcv.runner import OPTIMIZER_BUILDERS, DefaultOptimizerConstructor
8
+ from mmcv.runner import get_dist_info
9
+ from mmdet.utils import get_root_logger
10
+
11
+ def get_layer_id(var_name, max_layer_id,):
12
+ """Get the layer id to set the different learning rates in ``layer_wise``
13
+ decay_type.
14
+
15
+ Args:
16
+ var_name (str): The key of the model.
17
+ max_layer_id (int): Maximum layer id.
18
+
19
+ Returns:
20
+ int: The id number corresponding to different learning rate in
21
+ ``LearningRateDecayOptimizerConstructor``.
22
+ """
23
+
24
+ if var_name in ('backbone.cls_token', 'backbone.mask_token',
25
+ 'backbone.pos_embed'):
26
+ return 0
27
+
28
+ elif var_name.startswith('backbone.downsample_layers'):
29
+ stage_id = int(var_name.split('.')[2])
30
+ if stage_id == 0:
31
+ layer_id = 0
32
+ elif stage_id == 1:
33
+ layer_id = 2
34
+ elif stage_id == 2:
35
+ layer_id = 3
36
+ elif stage_id == 3:
37
+ layer_id = max_layer_id
38
+ return layer_id
39
+
40
+ elif var_name.startswith('backbone.stages'):
41
+ stage_id = int(var_name.split('.')[2])
42
+ block_id = int(var_name.split('.')[3])
43
+ if stage_id == 0:
44
+ layer_id = 1
45
+ elif stage_id == 1:
46
+ layer_id = 2
47
+ elif stage_id == 2:
48
+ layer_id = 3 + block_id // 3
49
+ elif stage_id == 3:
50
+ layer_id = max_layer_id
51
+ return layer_id
52
+
53
+ else:
54
+ return max_layer_id + 1
55
+
56
+
57
+
58
+ def get_stage_id(var_name, max_stage_id):
59
+ """Get the stage id to set the different learning rates in ``stage_wise``
60
+ decay_type.
61
+
62
+ Args:
63
+ var_name (str): The key of the model.
64
+ max_stage_id (int): Maximum stage id.
65
+
66
+ Returns:
67
+ int: The id number corresponding to different learning rate in
68
+ ``LearningRateDecayOptimizerConstructor``.
69
+ """
70
+
71
+ if var_name in ('backbone.cls_token', 'backbone.mask_token',
72
+ 'backbone.pos_embed'):
73
+ return 0
74
+ elif var_name.startswith('backbone.downsample_layers'):
75
+ return 0
76
+ elif var_name.startswith('backbone.stages'):
77
+ stage_id = int(var_name.split('.')[2])
78
+ return stage_id + 1
79
+ else:
80
+ return max_stage_id - 1
81
+
82
+
83
+ @OPTIMIZER_BUILDERS.register_module()
84
+ class UniRepLKNetLearningRateDecayOptimizerConstructor(DefaultOptimizerConstructor):
85
+ # Different learning rates are set for different layers of backbone.
86
+ # The design is inspired by and adapted from ConvNeXt.
87
+
88
+ def add_params(self, params, module, **kwargs):
89
+ """Add all parameters of module to the params list.
90
+
91
+ The parameters of the given module will be added to the list of param
92
+ groups, with specific rules defined by paramwise_cfg.
93
+
94
+ Args:
95
+ params (list[dict]): A list of param groups, it will be modified
96
+ in place.
97
+ module (nn.Module): The module to be added.
98
+ """
99
+ logger = get_root_logger()
100
+
101
+ parameter_groups = {}
102
+ logger.info(f'self.paramwise_cfg is {self.paramwise_cfg}')
103
+ num_layers = self.paramwise_cfg.get('num_layers') + 2
104
+ decay_rate = self.paramwise_cfg.get('decay_rate')
105
+ decay_type = self.paramwise_cfg.get('decay_type', 'layer_wise')
106
+ dw_scale = self.paramwise_cfg.get('dw_scale', 1)
107
+ logger.info('Build UniRepLKNetLearningRateDecayOptimizerConstructor '
108
+ f'{decay_type} {decay_rate} - {num_layers}')
109
+ weight_decay = self.base_wd
110
+ for name, param in module.named_parameters():
111
+ if not param.requires_grad:
112
+ continue # frozen weights
113
+ if len(param.shape) == 1 or name.endswith('.bias') or name in (
114
+ 'pos_embed', 'cls_token'):
115
+ group_name = 'no_decay'
116
+ this_weight_decay = 0.
117
+ else:
118
+ group_name = 'decay'
119
+ this_weight_decay = weight_decay
120
+ if 'layer_wise' in decay_type:
121
+ layer_id = get_layer_id(name, self.paramwise_cfg.get('num_layers'))
122
+ logger.info(f'set param {name} as id {layer_id}')
123
+ elif decay_type == 'stage_wise':
124
+ layer_id = get_stage_id(name, num_layers)
125
+ logger.info(f'set param {name} as id {layer_id}')
126
+
127
+ if dw_scale == 1 or 'dwconv' not in name:
128
+ group_name = f'layer_{layer_id}_{group_name}'
129
+ if group_name not in parameter_groups:
130
+ scale = decay_rate ** (num_layers - layer_id - 1)
131
+ parameter_groups[group_name] = {
132
+ 'weight_decay': this_weight_decay,
133
+ 'params': [],
134
+ 'param_names': [],
135
+ 'lr_scale': scale,
136
+ 'group_name': group_name,
137
+ 'lr': scale * self.base_lr,
138
+ }
139
+
140
+ parameter_groups[group_name]['params'].append(param)
141
+ parameter_groups[group_name]['param_names'].append(name)
142
+ else:
143
+ group_name = f'layer_{layer_id}_{group_name}_dwconv'
144
+ if group_name not in parameter_groups:
145
+ scale = decay_rate ** (num_layers - layer_id - 1) * dw_scale
146
+ parameter_groups[group_name] = {
147
+ 'weight_decay': this_weight_decay,
148
+ 'params': [],
149
+ 'param_names': [],
150
+ 'lr_scale': scale,
151
+ 'group_name': group_name,
152
+ 'lr': scale * self.base_lr,
153
+ }
154
+
155
+ parameter_groups[group_name]['params'].append(param)
156
+ parameter_groups[group_name]['param_names'].append(name)
157
+
158
+ rank, _ = get_dist_info()
159
+ if rank == 0:
160
+ to_display = {}
161
+ for key in parameter_groups:
162
+ to_display[key] = {
163
+ 'param_names': parameter_groups[key]['param_names'],
164
+ 'lr_scale': parameter_groups[key]['lr_scale'],
165
+ 'lr': parameter_groups[key]['lr'],
166
+ 'weight_decay': parameter_groups[key]['weight_decay'],
167
+ }
168
+ logger.info(f'Param groups = {json.dumps(to_display, indent=2)}')
169
+ params.extend(parameter_groups.values())
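In an mmcv/mmdet training config, the registered constructor above would be selected roughly like this (field values are placeholders):

    optimizer = dict(
        type="AdamW",
        lr=2e-4,
        weight_decay=0.05,
        constructor="UniRepLKNetLearningRateDecayOptimizerConstructor",
        paramwise_cfg=dict(num_layers=12, decay_rate=0.9, decay_type="layer_wise"),
    )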
cell_segmentation/__init__.py ADDED
@@ -0,0 +1,6 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Cell Segmentation and detection using our cellvit model
3
+ #
4
+ # @ Fabian Hörst, [email protected]
5
+ # Institute for Artificial Intelligence in Medicine,
6
+ # University Medicine Essen
cell_segmentation/datasets/base_cell.py ADDED
@@ -0,0 +1,85 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Base cell segmentation dataset, based on torch Dataset implementation
3
+ #
4
+ # @ Fabian Hörst, [email protected]
5
+ # Institute for Artificial Intelligence in Medicine,
6
+ # University Medicine Essen
7
+
8
+ import logging
9
+ from typing import Callable
10
+
11
+ import torch
12
+ from torch.utils.data import Dataset
13
+
14
+ logger = logging.getLogger()
15
+ logger.addHandler(logging.NullHandler())
16
+
17
+ from abc import abstractmethod
18
+
19
+
20
+ class CellDataset(Dataset):
21
+ def set_transforms(self, transforms: Callable) -> None:
22
+ self.transforms = transforms
23
+
24
+ @abstractmethod
25
+ def load_cell_count(self):
26
+ """Load Cell count from cell_count.csv file. File must be located inside the fold folder
27
+
28
+ Example file beginning:
29
+ Image,Neoplastic,Inflammatory,Connective,Dead,Epithelial
30
+ 0_0.png,4,2,2,0,0
31
+ 0_1.png,8,1,1,0,0
32
+ 0_10.png,17,0,1,0,0
33
+ 0_100.png,10,0,11,0,0
34
+ ...
35
+ """
36
+ pass
37
+
38
+ @abstractmethod
39
+ def get_sampling_weights_tissue(self, gamma: float = 1) -> torch.Tensor:
40
+ """Get sampling weights calculated by tissue type statistics
41
+
42
+ For this, a file named "weight_config.yaml" with the content:
43
+ tissue:
44
+ tissue_1: xxx
45
+ tissue_2: xxx (name of tissue: count)
46
+ ...
47
+ Must exists in the dataset main folder (parent path, not inside the folds)
48
+
49
+ Args:
50
+ gamma (float, optional): Gamma scaling factor, between 0 and 1.
51
+ 1 means total balancing, 0 means original weights. Defaults to 1.
52
+
53
+ Returns:
54
+ torch.Tensor: Weights for each sample
55
+ """
56
+
57
+ @abstractmethod
58
+ def get_sampling_weights_cell(self, gamma: float = 1) -> torch.Tensor:
59
+ """Get sampling weights calculated by cell type statistics
60
+
61
+ Args:
62
+ gamma (float, optional): Gamma scaling factor, between 0 and 1.
63
+ 1 means total balancing, 0 means original weights. Defaults to 1.
64
+
65
+ Returns:
66
+ torch.Tensor: Weights for each sample
67
+ """
68
+
69
+ def get_sampling_weights_cell_tissue(self, gamma: float = 1) -> torch.Tensor:
70
+ """Get combined sampling weights by calculating tissue and cell sampling weights,
71
+ normalizing them and adding them up to yield one score.
72
+
73
+ Args:
74
+ gamma (float, optional): Gamma scaling factor, between 0 and 1.
75
+ 1 means total balancing, 0 means original weights. Defaults to 1.
76
+
77
+ Returns:
78
+ torch.Tensor: Weights for each sample
79
+ """
80
+ assert 0 <= gamma <= 1, "Gamma must be between 0 and 1"
81
+ tw = self.get_sampling_weights_tissue(gamma)
82
+ cw = self.get_sampling_weights_cell(gamma)
83
+ weights = tw / torch.max(tw) + cw / torch.max(cw)
84
+
85
+ return weights
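The combined weights are intended for a WeightedRandomSampler; a sketch assuming `dataset` is a concrete CellDataset implementing both abstract weight methods:

    from torch.utils.data import DataLoader, WeightedRandomSampler

    weights = dataset.get_sampling_weights_cell_tissue(gamma=0.85)
    sampler = WeightedRandomSampler(weights, num_samples=len(dataset), replacement=True)
    loader = DataLoader(dataset, batch_size=16, sampler=sampler)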
cell_segmentation/datasets/cell_graph_datamodel.py ADDED
@@ -0,0 +1,26 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Graph Data model
3
+ #
4
+ # For more information, please check out docs/readmes/graphs.md
5
+ #
6
+ # @ Fabian Hörst, [email protected]
7
+ # Institute for Artificial Intelligence in Medicine,
8
+ # University Medicine Essen
9
+
10
+ from dataclasses import dataclass
11
+ from typing import List
12
+
13
+ import torch
14
+
15
+ from datamodel.graph_datamodel import GraphDataWSI
16
+
17
+
18
+ @dataclass
19
+ class CellGraphDataWSI(GraphDataWSI):
20
+ """Dataclass for Graph Data
21
+
22
+ Args:
23
+ contours (List[torch.Tensor]): Contour Data for each object.
24
+ """
25
+
26
+ contours: List[torch.Tensor]
cell_segmentation/datasets/conic.py ADDED
@@ -0,0 +1,243 @@
1
+ # -*- coding: utf-8 -*-
2
+ # CoNIC (Lizard) Dataset
3
+ #
4
+ # Dataset information: https://arxiv.org/abs/2108.11195
5
+ # Please Prepare Dataset as described here: docs/readmes/pannuke.md # TODO: write own documentation
6
+ #
7
+ # @ Fabian Hörst, [email protected]
8
+ # Institute for Artificial Intelligence in Medicine,
9
+ # University Medicine Essen
10
+
11
+
12
+ import logging
13
+ from pathlib import Path
14
+ from typing import Callable, Tuple, Union, List
15
+
16
+ import numpy as np
17
+ import pandas as pd
18
+ import torch
19
+ from PIL import Image
20
+
21
+ from cell_segmentation.datasets.base_cell import CellDataset
22
+ from cell_segmentation.datasets.pannuke import PanNukeDataset
23
+
24
+ logger = logging.getLogger()
25
+ logger.addHandler(logging.NullHandler())
26
+
27
+
28
+ class CoNicDataset(CellDataset):
29
+ """Lizzard dataset
30
+
31
+ This dataset is always cached
32
+
33
+ Args:
34
+ dataset_path (Union[Path, str]): Path to Lizard dataset. Structure is described under ./docs/readmes/cell_segmentation.md
35
+ folds (Union[int, list[int]]): Folds to use for this dataset
36
+ transforms (Callable, optional): PyTorch transformations. Defaults to None.
37
+ stardist (bool, optional): Return StarDist labels. Defaults to False
38
+ regression (bool, optional): Return Regression of cells in x and y direction. Defaults to False
39
+ **kwargs are ignored
40
+ """
41
+
42
+ def __init__(
43
+ self,
44
+ dataset_path: Union[Path, str],
45
+ folds: Union[int, List[int]],
46
+ transforms: Callable = None,
47
+ stardist: bool = False,
48
+ regression: bool = False,
49
+ **kwargs,
50
+ ) -> None:
51
+ if isinstance(folds, int):
52
+ folds = [folds]
53
+
54
+ self.dataset = Path(dataset_path).resolve()
55
+ self.transforms = transforms
56
+ self.images = []
57
+ self.masks = []
58
+ self.img_names = []
59
+ self.folds = folds
60
+ self.stardist = stardist
61
+ self.regression = regression
62
+ for fold in folds:
63
+ image_path = self.dataset / f"fold{fold}" / "images"
64
+ fold_images = [f for f in sorted(image_path.glob("*.png")) if f.is_file()]
65
+
66
+ # sanity_check: mask must exist for image
67
+ for fold_image in fold_images:
68
+ mask_path = (
69
+ self.dataset / f"fold{fold}" / "labels" / f"{fold_image.stem}.npy"
70
+ )
71
+ if mask_path.is_file():
72
+ self.images.append(fold_image)
73
+ self.masks.append(mask_path)
74
+ self.img_names.append(fold_image.name)
75
+
76
+ else:
77
+ logger.debug(
78
+ "Found image {fold_image}, but no corresponding annotation file!"
79
+ )
80
+
81
+ # load everything in advance to speedup, as the dataset is rather small
82
+ self.loaded_imgs = []
83
+ self.loaded_masks = []
84
+ for idx in range(len(self.images)):
85
+ img_path = self.images[idx]
86
+ img = np.array(Image.open(img_path)).astype(np.uint8)
87
+
88
+ mask_path = self.masks[idx]
89
+ mask = np.load(mask_path, allow_pickle=True)
90
+ inst_map = mask[()]["inst_map"].astype(np.int32)
91
+ type_map = mask[()]["type_map"].astype(np.int32)
92
+ mask = np.stack([inst_map, type_map], axis=-1)
93
+ self.loaded_imgs.append(img)
94
+ self.loaded_masks.append(mask)
95
+
96
+ logger.info(f"Created Pannuke Dataset by using fold(s) {self.folds}")
97
+ logger.info(f"Resulting dataset length: {self.__len__()}")
98
+
99
+ def __getitem__(self, index: int) -> Tuple[torch.Tensor, dict, str, str]:
100
+ """Get one dataset item consisting of transformed image,
101
+ masks (instance_map, nuclei_type_map, nuclei_binary_map, hv_map) and tissue type as string
102
+
103
+ Args:
104
+ index (int): Index of element to retrieve
105
+
106
+ Returns:
107
+ Tuple[torch.Tensor, dict, str, str]:
108
+ torch.Tensor: Image, with shape (3, H, W), shape is arbitrary for Lizard (H and W approx. between 500 and 2000)
109
+ dict:
110
+ "instance_map": Instance-Map, each instance is has one integer starting by 1 (zero is background), Shape (256, 256)
111
+ "nuclei_type_map": Nuclei-Type-Map, for each nucleus (instance) the class is indicated by an integer. Shape (256, 256)
112
+ "nuclei_binary_map": Binary Nuclei-Mask, Shape (256, 256)
113
+ "hv_map": Horizontal and vertical instance map.
114
+ Shape: (2, H, W). First dimension is horizontal (horizontal gradient (-1 to 1)),
115
+ last is vertical (vertical gradient (-1 to 1)). Shape (2, 256, 256)
116
+ "dist_map": Probability distance map. Shape (256, 256)
117
+ "stardist_map": Stardist vector map. Shape (n_rays, 256, 256)
118
+ [Optional if regression]
119
+ "regression_map": Regression map. Shape (2, 256, 256). First is vertical, second horizontal.
120
+ str: Tissue type
121
+ str: Image Name
122
+ """
123
+ img_path = self.images[index]
124
+ img = self.loaded_imgs[index]
125
+ mask = self.loaded_masks[index]
126
+
127
+ if self.transforms is not None:
128
+ transformed = self.transforms(image=img, mask=mask)
129
+ img = transformed["image"]
130
+ mask = transformed["mask"]
131
+
132
+ inst_map = mask[:, :, 0].copy()
133
+ type_map = mask[:, :, 1].copy()
134
+ np_map = mask[:, :, 0].copy()
135
+ np_map[np_map > 0] = 1
136
+ hv_map = PanNukeDataset.gen_instance_hv_map(inst_map)
137
+
138
+ # torch convert
139
+ img = torch.Tensor(img).type(torch.float32)
140
+ img = img.permute(2, 0, 1)
141
+ if torch.max(img) >= 5:
142
+ img = img / 255
143
+
144
+ masks = {
145
+ "instance_map": torch.Tensor(inst_map).type(torch.int64),
146
+ "nuclei_type_map": torch.Tensor(type_map).type(torch.int64),
147
+ "nuclei_binary_map": torch.Tensor(np_map).type(torch.int64),
148
+ "hv_map": torch.Tensor(hv_map).type(torch.float32),
149
+ }
150
+ if self.stardist:
151
+ dist_map = PanNukeDataset.gen_distance_prob_maps(inst_map)
152
+ stardist_map = PanNukeDataset.gen_stardist_maps(inst_map)
153
+ masks["dist_map"] = torch.Tensor(dist_map).type(torch.float32)
154
+ masks["stardist_map"] = torch.Tensor(stardist_map).type(torch.float32)
155
+ if self.regression:
156
+ masks["regression_map"] = PanNukeDataset.gen_regression_map(inst_map)
157
+
158
+ return img, masks, "Colon", Path(img_path).name
159
+
160
+ def __len__(self) -> int:
161
+ """Length of Dataset
162
+
163
+ Returns:
164
+ int: Length of Dataset
165
+ """
166
+ return len(self.images)
167
+
168
+ def set_transforms(self, transforms: Callable) -> None:
169
+ """Set the transformations, can be used tp exchange transformations
170
+
171
+ Args:
172
+ transforms (Callable): PyTorch transformations
173
+ """
174
+ self.transforms = transforms
175
+
176
+ def load_cell_count(self):
177
+ """Load Cell count from cell_count.csv file. File must be located inside the fold folder
178
+ and named "cell_count.csv"
179
+
180
+ Example file beginning:
181
+ Image,Neutrophil,Epithelial,Lymphocyte,Plasma,Eosinophil,Connective
182
+ consep_1_0000.png,0,117,0,0,0,0
183
+ consep_1_0001.png,0,95,1,0,0,8
184
+ consep_1_0002.png,0,172,3,0,0,2
185
+ ...
186
+ """
187
+ df_placeholder = []
188
+ for fold in self.folds:
189
+ csv_path = self.dataset / f"fold{fold}" / "cell_count.csv"
190
+ cell_count = pd.read_csv(csv_path, index_col=0)
191
+ df_placeholder.append(cell_count)
192
+ self.cell_count = pd.concat(df_placeholder)
193
+ self.cell_count = self.cell_count.reindex(self.img_names)
194
+
195
+ def get_sampling_weights_cell(self, gamma: float = 1) -> torch.Tensor:
196
+ """Get sampling weights calculated by cell type statistics
197
+
198
+ Args:
199
+ gamma (float, optional): Gamma scaling factor, between 0 and 1.
200
+ 1 means total balancing, 0 means original weights. Defaults to 1.
201
+
202
+ Returns:
203
+ torch.Tensor: Weights for each sample
204
+ """
205
+ assert 0 <= gamma <= 1, "Gamma must be between 0 and 1"
206
+ assert hasattr(self, "cell_count"), "Please run .load_cell_count() in advance!"
207
+ binary_weight_factors = np.array([1069, 4189, 4356, 3103, 1025, 4527])
208
+ k = np.sum(binary_weight_factors)
209
+ cell_counts_imgs = np.clip(self.cell_count.to_numpy(), 0, 1)
210
+ weight_vector = k / (gamma * binary_weight_factors + (1 - gamma) * k)
211
+ img_weight = (1 - gamma) * np.max(cell_counts_imgs, axis=-1) + gamma * np.sum(
212
+ cell_counts_imgs * weight_vector, axis=-1
213
+ )
214
+ img_weight[np.where(img_weight == 0)] = np.min(
215
+ img_weight[np.nonzero(img_weight)]
216
+ )
217
+
218
+ return torch.Tensor(img_weight)
219
+
220
+ # def get_sampling_weights_cell(self, gamma: float = 1) -> torch.Tensor:
221
+ # """Get sampling weights calculated by cell type statistics
222
+
223
+ # Args:
224
+ # gamma (float, optional): Gamma scaling factor, between 0 and 1.
225
+ # 1 means total balancing, 0 means original weights. Defaults to 1.
226
+
227
+ # Returns:
228
+ # torch.Tensor: Weights for each sample
229
+ # """
230
+ # assert 0 <= gamma <= 1, "Gamma must be between 0 and 1"
231
+ # assert hasattr(self, "cell_count"), "Please run .load_cell_count() in advance!"
232
+ # binary_weight_factors = np.array([4012, 222017, 93612, 24793, 2999, 98783])
233
+ # k = np.sum(binary_weight_factors)
234
+ # cell_counts_imgs = self.cell_count.to_numpy()
235
+ # weight_vector = k / (gamma * binary_weight_factors + (1 - gamma) * k)
236
+ # img_weight = (1 - gamma) * np.max(cell_counts_imgs, axis=-1) + gamma * np.sum(
237
+ # cell_counts_imgs * weight_vector, axis=-1
238
+ # )
239
+ # img_weight[np.where(img_weight == 0)] = np.min(
240
+ # img_weight[np.nonzero(img_weight)]
241
+ # )
242
+
243
+ # return torch.Tensor(img_weight)
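For context, a minimal usage sketch for CoNicDataset, combining load_cell_count() and get_sampling_weights_cell() with a WeightedRandomSampler; the dataset path, fold choice, crop size, and gamma value are placeholder assumptions:

import albumentations as A
from torch.utils.data import DataLoader, WeightedRandomSampler
from cell_segmentation.datasets.conic import CoNicDataset

transforms = A.Compose([A.RandomCrop(256, 256)])  # images vary in size, so crop before batching
dataset = CoNicDataset(dataset_path="/data/CoNIC", folds=[0], transforms=transforms)  # hypothetical path
dataset.load_cell_count()  # reads fold0/cell_count.csv
weights = dataset.get_sampling_weights_cell(gamma=0.85)
sampler = WeightedRandomSampler(weights, num_samples=len(dataset), replacement=True)
loader = DataLoader(dataset, batch_size=16, sampler=sampler)
img, masks, tissue, names = next(iter(loader))  # img: (16, 3, 256, 256)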
cell_segmentation/datasets/consep.py ADDED
@@ -0,0 +1,118 @@
1
+ # -*- coding: utf-8 -*-
2
+ # CoNSeP Dataset
3
+ #
4
+ # Dataset information: https://arxiv.org/abs/1812.06499 (CoNSeP, introduced with HoVer-Net)
5
+ # Please Prepare Dataset as described here: docs/readmes/monuseg.md
6
+ #
7
+ # @ Fabian Hörst, [email protected]
8
+ # Institute for Artificial Intelligence in Medicine,
9
+ # University Medicine Essen
10
+
11
+ import logging
12
+ from pathlib import Path
13
+ from typing import Callable, Union, Tuple
14
+
15
+ import numpy as np
16
+ import torch
17
+ from PIL import Image
18
+ from torch.utils.data import Dataset
19
+
20
+ from cell_segmentation.datasets.pannuke import PanNukeDataset
21
+
22
+ logger = logging.getLogger()
23
+ logger.addHandler(logging.NullHandler())
24
+
25
+
26
+ class CoNSePDataset(Dataset):
27
+ def __init__(
28
+ self,
29
+ dataset_path: Union[Path, str],
30
+ transforms: Callable = None,
31
+ ) -> None:
32
+ """MoNuSeg Dataset
33
+
34
+ Args:
35
+ dataset_path (Union[Path, str]): Path to dataset
36
+ transforms (Callable, optional): Transformations to apply on images. Defaults to None.
37
+ Raises:
38
+ FileNotFoundError: If no ground-truth annotation file was found in path
39
+ """
40
+ self.dataset = Path(dataset_path).resolve()
41
+ self.transforms = transforms
42
+ self.masks = []
43
+ self.img_names = []
44
+
45
+ image_path = self.dataset / "images"
46
+ label_path = self.dataset / "labels"
47
+ self.images = [f for f in sorted(image_path.glob("*.png")) if f.is_file()]
48
+ self.masks = [f for f in sorted(label_path.glob("*.npy")) if f.is_file()]
49
+
50
+ # sanity_check
51
+ for idx, image in enumerate(self.images):
52
+ image_name = image.stem
53
+ mask_name = self.masks[idx].stem
54
+ if image_name != mask_name:
55
+ raise FileNotFoundError(f"Annotation for file {image_name} is missing")
56
+
57
+ def __getitem__(self, index: int) -> Tuple[torch.Tensor, dict, str]:
58
+ """Get one item from dataset
59
+
60
+ Args:
61
+ index (int): Item to get
62
+
63
+ Returns:
64
+ Tuple[torch.Tensor, dict, str]: Trainings-Batch
65
+ * torch.Tensor: Image
66
+ * dict: Ground-Truth values: keys are "instance map", "nuclei_binary_map" and "hv_map"
67
+ * str: filename
68
+ """
69
+ img_path = self.images[index]
70
+ img = np.array(Image.open(img_path)).astype(np.uint8)
71
+
72
+ mask_path = self.masks[index]
73
+ mask = np.load(mask_path, allow_pickle=True)
74
+ inst_map = mask[()]["inst_map"].astype(np.int32)
75
+ type_map = mask[()]["type_map"].astype(np.int32)
76
+ mask = np.stack([inst_map, type_map], axis=-1)
77
+
78
+ if self.transforms is not None:
79
+ transformed = self.transforms(image=img, mask=mask)
80
+ img = transformed["image"]
81
+ mask = transformed["mask"]
82
+
83
+ inst_map = mask[:, :, 0].copy()
84
+ type_map = mask[:, :, 1].copy()
85
+ np_map = mask[:, :, 0].copy()
86
+ np_map[np_map > 0] = 1
87
+ hv_map = PanNukeDataset.gen_instance_hv_map(inst_map)
88
+
89
+ # torch convert
90
+ img = torch.Tensor(img).type(torch.float32)
91
+ img = img.permute(2, 0, 1)
92
+ if torch.max(img) >= 5:
93
+ img = img / 255
94
+
95
+ masks = {
96
+ "instance_map": torch.Tensor(inst_map).type(torch.int64),
97
+ "nuclei_type_map": torch.Tensor(type_map).type(torch.int64),
98
+ "nuclei_binary_map": torch.Tensor(np_map).type(torch.int64),
99
+ "hv_map": torch.Tensor(hv_map).type(torch.float32),
100
+ }
101
+
102
+ return img, masks, Path(img_path).name
103
+
104
+ def __len__(self) -> int:
105
+ """Length of Dataset
106
+
107
+ Returns:
108
+ int: Length of Dataset
109
+ """
110
+ return len(self.images)
111
+
112
+ def set_transforms(self, transforms: Callable) -> None:
113
+ """Set the transformations, can be used tp exchange transformations
114
+
115
+ Args:
116
+ transforms (Callable): PyTorch transformations
117
+ """
118
+ self.transforms = transforms
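For reference, a short usage sketch; the transforms follow the albumentations-style call (self.transforms(image=img, mask=mask)) used in __getitem__ above, and the dataset path is a placeholder:

import albumentations as A
from cell_segmentation.datasets.consep import CoNSePDataset

transforms = A.Compose([A.RandomCrop(256, 256), A.HorizontalFlip(p=0.5)])
dataset = CoNSePDataset(dataset_path="/data/CoNSeP/train", transforms=transforms)  # hypothetical path
img, masks, name = dataset[0]
print(img.shape, masks["hv_map"].shape)  # torch.Size([3, 256, 256]) torch.Size([2, 256, 256])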
cell_segmentation/datasets/dataset_coordinator.py ADDED
@@ -0,0 +1,73 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Coordinate the datasets, used to select the right dataset with corresponding setting
3
+ #
4
+ # @ Fabian Hörst, [email protected]
5
+ # Institute for Artificial Intelligence in Medicine,
6
+ # University Medicine Essen
7
+
8
+ from typing import Callable
9
+
10
+ from torch.utils.data import Dataset
11
+ from cell_segmentation.datasets.conic import CoNicDataset
12
+
13
+ from cell_segmentation.datasets.pannuke import PanNukeDataset
14
+
15
+
16
+ def select_dataset(
17
+ dataset_name: str, split: str, dataset_config: dict, transforms: Callable = None
18
+ ) -> Dataset:
19
+ """Select a cell segmentation dataset from the provided ones, currently just PanNuke is implemented here
20
+
21
+ Args:
22
+ dataset_name (str): Name of dataset to use.
23
+ Must be one of: [pannuke, conic]
24
+ split (str): Split to use.
25
+ Must be one of: ["train", "val", "validation", "test"]
26
+ dataset_config (dict): Dictionary with dataset configuration settings
27
+ transforms (Callable, optional): PyTorch Image and Mask transformations. Defaults to None.
28
+
29
+ Raises:
30
+ NotImplementedError: Unknown dataset
31
+
32
+ Returns:
33
+ Dataset: Cell segmentation dataset
34
+ """
35
+ assert split.lower() in [
36
+ "train",
37
+ "val",
38
+ "validation",
39
+ "test",
40
+ ], "Unknown split type!"
41
+
42
+ if dataset_name.lower() == "pannuke":
43
+ if split == "train":
44
+ folds = dataset_config["train_folds"]
45
+ if split == "val" or split == "validation":
46
+ folds = dataset_config["val_folds"]
47
+ if split == "test":
48
+ folds = dataset_config["test_folds"]
49
+ dataset = PanNukeDataset(
50
+ dataset_path=dataset_config["dataset_path"],
51
+ folds=folds,
52
+ transforms=transforms,
53
+ stardist=dataset_config.get("stardist", False),
54
+ regression=dataset_config.get("regression_loss", False),
55
+ )
56
+ elif dataset_name.lower() == "conic":
57
+ if split == "train":
58
+ folds = dataset_config["train_folds"]
59
+ if split == "val" or split == "validation":
60
+ folds = dataset_config["val_folds"]
61
+ if split == "test":
62
+ folds = dataset_config["test_folds"]
63
+ dataset = CoNicDataset(
64
+ dataset_path=dataset_config["dataset_path"],
65
+ folds=folds,
66
+ transforms=transforms,
67
+ stardist=dataset_config.get("stardist", False),
68
+ regression=dataset_config.get("regression_loss", False),
69
+ # TODO: Stardist and regression loss
70
+ )
71
+ else:
72
+ raise NotImplementedError(f"Unknown dataset: {dataset_name}")
73
+ return dataset
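A hedged usage sketch for select_dataset; the keys mirror exactly what the function reads above, with placeholder values:

from cell_segmentation.datasets.dataset_coordinator import select_dataset

dataset_config = {
    "dataset_path": "/data/PanNuke",  # hypothetical path
    "train_folds": [0],
    "val_folds": [1],
    "test_folds": [2],
    "stardist": False,
    "regression_loss": False,
}
train_dataset = select_dataset("pannuke", "train", dataset_config)
val_dataset = select_dataset("pannuke", "val", dataset_config)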
cell_segmentation/datasets/monuseg.py ADDED
@@ -0,0 +1,128 @@
1
+ # -*- coding: utf-8 -*-
2
+ # MoNuSeg Dataset
3
+ #
4
+ # Dataset information: https://monuseg.grand-challenge.org/Home/
5
+ # Please Prepare Dataset as described here: docs/readmes/monuseg.md
6
+ #
7
+ # @ Fabian Hörst, [email protected]
8
+ # Institute for Artificial Intelligence in Medicine,
9
+ # University Medicine Essen
10
+
11
+ import logging
12
+ from pathlib import Path
13
+ from typing import Callable, Union, Tuple
14
+
15
+ import numpy as np
16
+ import torch
17
+ from PIL import Image
18
+ from torch.utils.data import Dataset
19
+
20
+ from cell_segmentation.datasets.pannuke import PanNukeDataset
21
+ from einops import rearrange
22
+
23
+ logger = logging.getLogger()
24
+ logger.addHandler(logging.NullHandler())
25
+
26
+
27
+ class MoNuSegDataset(Dataset):
28
+ def __init__(
29
+ self,
30
+ dataset_path: Union[Path, str],
31
+ transforms: Callable = None,
32
+ patching: bool = False,
33
+ overlap: int = 0,
34
+ ) -> None:
35
+ """MoNuSeg Dataset
36
+
37
+ Args:
38
+ dataset_path (Union[Path, str]): Path to dataset
39
+ transforms (Callable, optional): Transformations to apply on images. Defaults to None.
40
+ patching (bool, optional): If patches with size 256px should be used. Otherwise, the entire MoNuSeg images are loaded. Defaults to False.
41
+ overlap (int, optional): If overlap should be used for patch sampling. Overlap in pixels.
42
+ Recommended value other than 0 is 64. Defaults to 0.
43
+ Raises:
44
+ FileNotFoundError: If no ground-truth annotation file was found in path
45
+ """
46
+ self.dataset = Path(dataset_path).resolve()
47
+ self.transforms = transforms
48
+ self.masks = []
49
+ self.img_names = []
50
+ self.patching = patching
51
+ self.overlap = overlap
52
+
53
+ image_path = self.dataset / "images"
54
+ label_path = self.dataset / "labels"
55
+ self.images = [f for f in sorted(image_path.glob("*.png")) if f.is_file()]
56
+ self.masks = [f for f in sorted(label_path.glob("*.npy")) if f.is_file()]
57
+
58
+ # sanity_check
59
+ for idx, image in enumerate(self.images):
60
+ image_name = image.stem
61
+ mask_name = self.masks[idx].stem
62
+ if image_name != mask_name:
63
+ raise FileNotFoundError(f"Annotation for file {image_name} is missing")
64
+
65
+ def __getitem__(self, index: int) -> Tuple[torch.Tensor, dict, str]:
66
+ """Get one item from dataset
67
+
68
+ Args:
69
+ index (int): Item to get
70
+
71
+ Returns:
72
+ Tuple[torch.Tensor, dict, str]: Trainings-Batch
73
+ * torch.Tensor: Image
74
+ * dict: Ground-Truth values: keys are "instance map", "nuclei_binary_map" and "hv_map"
75
+ * str: filename
76
+ """
77
+ img_path = self.images[index]
78
+ img = np.array(Image.open(img_path)).astype(np.uint8)
79
+
80
+ mask_path = self.masks[index]
81
+ mask = np.load(mask_path, allow_pickle=True)
82
+ mask = mask.astype(np.int64)
83
+
84
+ if self.transforms is not None:
85
+ transformed = self.transforms(image=img, mask=mask)
86
+ img = transformed["image"]
87
+ mask = transformed["mask"]
88
+
89
+ hv_map = PanNukeDataset.gen_instance_hv_map(mask)
90
+ np_map = mask.copy()
91
+ np_map[np_map > 0] = 1
92
+
93
+ # torch convert
94
+ img = torch.Tensor(img).type(torch.float32)
95
+ img = img.permute(2, 0, 1)
96
+ if torch.max(img) >= 5:
97
+ img = img / 255
98
+
99
+ if self.patching and self.overlap == 0:
100
+ img = rearrange(img, "c (h i) (w j) -> c h w i j", i=256, j=256)
101
+ if self.patching and self.overlap != 0:
102
+ img = img.unfold(1, 256, 256 - self.overlap).unfold(
103
+ 2, 256, 256 - self.overlap
104
+ )
105
+
106
+ masks = {
107
+ "instance_map": torch.Tensor(mask).type(torch.int64),
108
+ "nuclei_binary_map": torch.Tensor(np_map).type(torch.int64),
109
+ "hv_map": torch.Tensor(hv_map).type(torch.float32),
110
+ }
111
+
112
+ return img, masks, Path(img_path).name
113
+
114
+ def __len__(self) -> int:
115
+ """Length of Dataset
116
+
117
+ Returns:
118
+ int: Length of Dataset
119
+ """
120
+ return len(self.images)
121
+
122
+ def set_transforms(self, transforms: Callable) -> None:
123
+ """Set the transformations, can be used tp exchange transformations
124
+
125
+ Args:
126
+ transforms (Callable): PyTorch transformations
127
+ """
128
+ self.transforms = transforms
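The two patching branches above yield a grid of 256px tiles with different strides. A small sketch with a dummy tensor illustrates the resulting shapes (1024px inputs assumed, as produced by prepare_monuseg.py further below):

import torch
from einops import rearrange

img = torch.rand(3, 1024, 1024)
# overlap == 0: non-overlapping 4x4 grid of 256px tiles
tiles = rearrange(img, "c (h i) (w j) -> c h w i j", i=256, j=256)
print(tiles.shape)  # torch.Size([3, 4, 4, 256, 256])
# overlap == 64: stride 192 gives a 5x5 grid of overlapping tiles
tiles = img.unfold(1, 256, 256 - 64).unfold(2, 256, 256 - 64)
print(tiles.shape)  # torch.Size([3, 5, 5, 256, 256])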
cell_segmentation/datasets/pannuke.py ADDED
@@ -0,0 +1,537 @@
1
+ # -*- coding: utf-8 -*-
2
+ # PanNuke Dataset
3
+ #
4
+ # Dataset information: https://arxiv.org/abs/2003.10778
5
+ # Please Prepare Dataset as described here: docs/readmes/pannuke.md
6
+ #
7
+ # @ Fabian Hörst, [email protected]
8
+ # Institute for Artificial Intelligence in Medicine,
9
+ # University Medicine Essen
10
+
11
+
12
+ import logging
14
+ from pathlib import Path
15
+ from typing import Callable, Tuple, Union, List
16
+
19
+ import numpy as np
20
+ import pandas as pd
21
+ import torch
22
+ import yaml
23
+ from numba import njit
24
+ from PIL import Image
25
+ from scipy.ndimage import center_of_mass, distance_transform_edt
26
+
27
+ from cell_segmentation.datasets.base_cell import CellDataset
28
+ from cell_segmentation.utils.tools import fix_duplicates, get_bounding_box
29
+
30
+ logger = logging.getLogger()
31
+ logger.addHandler(logging.NullHandler())
32
+
33
+ from natsort import natsorted
34
+
35
+
36
+ class PanNukeDataset(CellDataset):
37
+ """PanNuke dataset
38
+
39
+ Args:
40
+ dataset_path (Union[Path, str]): Path to PanNuke dataset. Structure is described under ./docs/readmes/cell_segmentation.md
41
+ folds (Union[int, list[int]]): Folds to use for this dataset
42
+ transforms (Callable, optional): PyTorch transformations. Defaults to None.
43
+ stardist (bool, optional): Return StarDist labels. Defaults to False
44
+ regression (bool, optional): Return Regression of cells in x and y direction. Defaults to False
45
+ cache_dataset (bool, optional): If the dataset should be loaded into host memory during the first epoch.
47
+ Be careful: workers in the DataLoader need to be persistent to benefit from the cache.
48
+ Recommended to be False; only use it if you have enough RAM and your I/O is the bottleneck.
48
+ Defaults to False.
49
+ """
50
+
51
+ def __init__(
52
+ self,
53
+ dataset_path: Union[Path, str],
54
+ folds: Union[int, List[int]],
55
+ transforms: Callable = None,
56
+ stardist: bool = False,
57
+ regression: bool = False,
58
+ cache_dataset: bool = False,
59
+ ) -> None:
60
+ if isinstance(folds, int):
61
+ folds = [folds]
62
+
63
+ self.dataset = Path(dataset_path).resolve()
64
+ self.transforms = transforms
65
+ self.images = []
66
+ self.masks = []
67
+ self.types = {}
68
+ self.img_names = []
69
+ self.folds = folds
70
+ self.cache_dataset = cache_dataset
71
+ self.stardist = stardist
72
+ self.regression = regression
73
+ for fold in folds:
74
+ image_path = self.dataset / f"fold{fold}" / "images"
75
+ fold_images = [
76
+ f for f in natsorted(image_path.glob("*.png")) if f.is_file()
77
+ ]
78
+
79
+ # sanity_check: mask must exist for image
80
+ for fold_image in fold_images:
81
+ mask_path = (
82
+ self.dataset / f"fold{fold}" / "labels" / f"{fold_image.stem}.npy"
83
+ )
84
+ if mask_path.is_file():
85
+ self.images.append(fold_image)
86
+ self.masks.append(mask_path)
87
+ self.img_names.append(fold_image.name)
88
+
89
+ else:
90
+ logger.debug(
91
+ "Found image {fold_image}, but no corresponding annotation file!"
92
+ )
93
+ fold_types = pd.read_csv(self.dataset / f"fold{fold}" / "types.csv")
94
+ fold_type_dict = fold_types.set_index("img")["type"].to_dict()
95
+ self.types = {
96
+ **self.types,
97
+ **fold_type_dict,
98
+ } # careful - should all be named differently
99
+
100
+ logger.info(f"Created Pannuke Dataset by using fold(s) {self.folds}")
101
+ logger.info(f"Resulting dataset length: {self.__len__()}")
102
+
103
+ if self.cache_dataset:
104
+ self.cached_idx = [] # list of idx that should be cached
105
+ self.cached_imgs = {} # keys: idx, values: numpy array of imgs
106
+ self.cached_masks = {} # keys: idx, values: numpy array of masks
107
+ logger.info("Using cached dataset. Cache is built up during first epoch.")
108
+
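As the class docstring notes, this cache only pays off when DataLoader workers survive between epochs, since each worker process holds its own copy of the dataset and its cache. A minimal sketch of the intended configuration (path, batch size, and worker count are placeholders):

from torch.utils.data import DataLoader
from cell_segmentation.datasets.pannuke import PanNukeDataset

dataset = PanNukeDataset(dataset_path="/data/PanNuke", folds=[0], cache_dataset=True)  # hypothetical path
loader = DataLoader(
    dataset,
    batch_size=16,
    num_workers=8,
    persistent_workers=True,  # otherwise each epoch re-spawns workers with empty caches
)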
109
+ def __getitem__(self, index: int) -> Tuple[torch.Tensor, dict, str, str]:
110
+ """Get one dataset item consisting of transformed image,
111
+ masks (instance_map, nuclei_type_map, nuclei_binary_map, hv_map) and tissue type as string
112
+
113
+ Args:
114
+ index (int): Index of element to retrieve
115
+
116
+ Returns:
117
+ Tuple[torch.Tensor, dict, str, str]:
118
+ torch.Tensor: Image, with shape (3, H, W), in this case (3, 256, 256)
119
+ dict:
120
+ "instance_map": Instance-Map, each instance is has one integer starting by 1 (zero is background), Shape (256, 256)
121
+ "nuclei_type_map": Nuclei-Type-Map, for each nucleus (instance) the class is indicated by an integer. Shape (256, 256)
122
+ "nuclei_binary_map": Binary Nuclei-Mask, Shape (256, 256)
123
+ "hv_map": Horizontal and vertical instance map.
124
+ Shape: (2, H, W). First dimension is horizontal (horizontal gradient (-1 to 1)),
125
+ last is vertical (vertical gradient (-1 to 1)) Shape (2, 256, 256)
126
+ [Optional if stardist]
127
+ "dist_map": Probability distance map. Shape (256, 256)
128
+ "stardist_map": Stardist vector map. Shape (n_rays, 256, 256)
129
+ [Optional if regression]
130
+ "regression_map": Regression map. Shape (2, 256, 256). First is vertical, second horizontal.
131
+ str: Tissue type
132
+ str: Image Name
133
+ """
134
+ img_path = self.images[index]
135
+
136
+ if self.cache_dataset:
137
+ if index in self.cached_idx:
138
+ img = self.cached_imgs[index]
139
+ mask = self.cached_masks[index]
140
+ else:
141
+ # cache file
142
+ img = self.load_imgfile(index)
143
+ mask = self.load_maskfile(index)
144
+ self.cached_imgs[index] = img
145
+ self.cached_masks[index] = mask
146
+ self.cached_idx.append(index)
147
+
148
+ else:
149
+ img = self.load_imgfile(index)
150
+ mask = self.load_maskfile(index)
151
+
152
+ if self.transforms is not None:
153
+ transformed = self.transforms(image=img, mask=mask)
154
+ img = transformed["image"]
155
+ mask = transformed["mask"]
156
+
157
+ tissue_type = self.types[img_path.name]
158
+ inst_map = mask[:, :, 0].copy()
159
+ type_map = mask[:, :, 1].copy()
160
+ np_map = mask[:, :, 0].copy()
161
+ np_map[np_map > 0] = 1
162
+ hv_map = PanNukeDataset.gen_instance_hv_map(inst_map)
163
+
164
+ # torch convert
165
+ img = torch.Tensor(img).type(torch.float32)
166
+ img = img.permute(2, 0, 1)
167
+ if torch.max(img) >= 5:
168
+ img = img / 255
169
+
170
+ masks = {
171
+ "instance_map": torch.Tensor(inst_map).type(torch.int64),
172
+ "nuclei_type_map": torch.Tensor(type_map).type(torch.int64),
173
+ "nuclei_binary_map": torch.Tensor(np_map).type(torch.int64),
174
+ "hv_map": torch.Tensor(hv_map).type(torch.float32),
175
+ }
176
+
177
+ # load stardist transforms if necessary
178
+ if self.stardist:
179
+ dist_map = PanNukeDataset.gen_distance_prob_maps(inst_map)
180
+ stardist_map = PanNukeDataset.gen_stardist_maps(inst_map)
181
+ masks["dist_map"] = torch.Tensor(dist_map).type(torch.float32)
182
+ masks["stardist_map"] = torch.Tensor(stardist_map).type(torch.float32)
183
+ if self.regression:
184
+ masks["regression_map"] = PanNukeDataset.gen_regression_map(inst_map)
185
+
186
+ return img, masks, tissue_type, Path(img_path).name
187
+
188
+ def __len__(self) -> int:
189
+ """Length of Dataset
190
+
191
+ Returns:
192
+ int: Length of Dataset
193
+ """
194
+ return len(self.images)
195
+
196
+ def set_transforms(self, transforms: Callable) -> None:
197
+ """Set the transformations, can be used tp exchange transformations
198
+
199
+ Args:
200
+ transforms (Callable): PyTorch transformations
201
+ """
202
+ self.transforms = transforms
203
+
204
+ def load_imgfile(self, index: int) -> np.ndarray:
205
+ """Load image from file (disk)
206
+
207
+ Args:
208
+ index (int): Index of file
209
+
210
+ Returns:
211
+ np.ndarray: Image as array with shape (H, W, 3)
212
+ """
213
+ img_path = self.images[index]
214
+ return np.array(Image.open(img_path)).astype(np.uint8)
215
+
216
+ def load_maskfile(self, index: int) -> np.ndarray:
217
+ """Load mask from file (disk)
218
+
219
+ Args:
220
+ index (int): Index of file
221
+
222
+ Returns:
223
+ np.ndarray: Mask as array with shape (H, W, 2)
224
+ """
225
+ mask_path = self.masks[index]
226
+ mask = np.load(mask_path, allow_pickle=True)
227
+ inst_map = mask[()]["inst_map"].astype(np.int32)
228
+ type_map = mask[()]["type_map"].astype(np.int32)
229
+ mask = np.stack([inst_map, type_map], axis=-1)
230
+ return mask
231
+
232
+ def load_cell_count(self):
233
+ """Load Cell count from cell_count.csv file. File must be located inside the fold folder
234
+ and named "cell_count.csv"
235
+
236
+ Example file beginning:
237
+ Image,Neoplastic,Inflammatory,Connective,Dead,Epithelial
238
+ 0_0.png,4,2,2,0,0
239
+ 0_1.png,8,1,1,0,0
240
+ 0_10.png,17,0,1,0,0
241
+ 0_100.png,10,0,11,0,0
242
+ ...
243
+ """
244
+ df_placeholder = []
245
+ for fold in self.folds:
246
+ csv_path = self.dataset / f"fold{fold}" / "cell_count.csv"
247
+ cell_count = pd.read_csv(csv_path, index_col=0)
248
+ df_placeholder.append(cell_count)
249
+ self.cell_count = pd.concat(df_placeholder)
250
+ self.cell_count = self.cell_count.reindex(self.img_names)
251
+
252
+ def get_sampling_weights_tissue(self, gamma: float = 1) -> torch.Tensor:
253
+ """Get sampling weights calculated by tissue type statistics
254
+
255
+ For this, a file named "weight_config.yaml" with the content:
256
+ tissue:
257
+ tissue_1: xxx
258
+ tissue_2: xxx (name of tissue: count)
259
+ ...
260
+ Must exist in the dataset main folder (parent path, not inside the folds)
261
+
262
+ Args:
263
+ gamma (float, optional): Gamma scaling factor, between 0 and 1.
264
+ 1 means total balancing, 0 means original weights. Defaults to 1.
265
+
266
+ Returns:
267
+ torch.Tensor: Weights for each sample
268
+ """
269
+ assert 0 <= gamma <= 1, "Gamma must be between 0 and 1"
270
+ with open(
271
+ (self.dataset / "weight_config.yaml").resolve(), "r"
272
+ ) as run_config_file:
273
+ yaml_config = yaml.safe_load(run_config_file)
274
+ tissue_counts = dict(yaml_config)["tissue"]
275
+
276
+ # calculate weight for each tissue
277
+ weights_dict = {}
278
+ k = np.sum(list(tissue_counts.values()))
279
+ for tissue, count in tissue_counts.items():
280
+ w = k / (gamma * count + (1 - gamma) * k)
281
+ weights_dict[tissue] = w
282
+
283
+ weights = []
284
+ for idx in range(self.__len__()):
285
+ img_idx = self.img_names[idx]
286
+ type_str = self.types[img_idx]
287
+ weights.append(weights_dict[type_str])
288
+
289
+ return torch.Tensor(weights)
290
+
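A small worked example of the tissue weighting formula above; the tissue counts are illustrative, not the real PanNuke statistics (those live in weight_config.yaml):

import numpy as np

tissue_counts = {"Breast": 2351, "Colon": 1440, "Lung": 184}  # assumed counts
gamma = 1.0
k = np.sum(list(tissue_counts.values()))  # 3975
weights = {t: k / (gamma * c + (1 - gamma) * k) for t, c in tissue_counts.items()}
# gamma=1 fully balances: rare tissues are upweighted, e.g. Lung ~21.6 vs Breast ~1.7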
291
+ def get_sampling_weights_cell(self, gamma: float = 1) -> torch.Tensor:
292
+ """Get sampling weights calculated by cell type statistics
293
+
294
+ Args:
295
+ gamma (float, optional): Gamma scaling factor, between 0 and 1.
296
+ 1 means total balancing, 0 means original weights. Defaults to 1.
297
+
298
+ Returns:
299
+ torch.Tensor: Weights for each sample
300
+ """
301
+ assert 0 <= gamma <= 1, "Gamma must be between 0 and 1"
302
+ assert hasattr(self, "cell_count"), "Please run .load_cell_count() in advance!"
303
+ binary_weight_factors = np.array([4191, 4132, 6140, 232, 1528])
304
+ k = np.sum(binary_weight_factors)
305
+ cell_counts_imgs = np.clip(self.cell_count.to_numpy(), 0, 1)
306
+ weight_vector = k / (gamma * binary_weight_factors + (1 - gamma) * k)
307
+ img_weight = (1 - gamma) * np.max(cell_counts_imgs, axis=-1) + gamma * np.sum(
308
+ cell_counts_imgs * weight_vector, axis=-1
309
+ )
310
+ img_weight[np.where(img_weight == 0)] = np.min(
311
+ img_weight[np.nonzero(img_weight)]
312
+ )
313
+
314
+ return torch.Tensor(img_weight)
315
+
316
+ def get_sampling_weights_cell_tissue(self, gamma: float = 1) -> torch.Tensor:
317
+ """Get combined sampling weights by calculating tissue and cell sampling weights,
318
+ normalizing them and adding them up to yield one score.
319
+
320
+ Args:
321
+ gamma (float, optional): Gamma scaling factor, between 0 and 1.
322
+ 1 means total balancing, 0 means original weights. Defaults to 1.
323
+
324
+ Returns:
325
+ torch.Tensor: Weights for each sample
326
+ """
327
+ assert 0 <= gamma <= 1, "Gamma must be between 0 and 1"
328
+ tw = self.get_sampling_weights_tissue(gamma)
329
+ cw = self.get_sampling_weights_cell(gamma)
330
+ weights = tw / torch.max(tw) + cw / torch.max(cw)
331
+
332
+ return weights
333
+
334
+ @staticmethod
335
+ def gen_instance_hv_map(inst_map: np.ndarray) -> np.ndarray:
336
+ """Obtain the horizontal and vertical distance maps for each
337
+ nuclear instance.
338
+
339
+ Args:
340
+ inst_map (np.ndarray): Instance map with each instance labelled as a unique integer
341
+ Shape: (H, W)
342
+ Returns:
343
+ np.ndarray: Horizontal and vertical instance map.
344
+ Shape: (2, H, W). First dimension is horizontal (horizontal gradient (-1 to 1)),
345
+ last is vertical (vertical gradient (-1 to 1))
346
+ """
347
+ orig_inst_map = inst_map.copy() # instance ID map
348
+
349
+ x_map = np.zeros(orig_inst_map.shape[:2], dtype=np.float32)
350
+ y_map = np.zeros(orig_inst_map.shape[:2], dtype=np.float32)
351
+
352
+ inst_list = list(np.unique(orig_inst_map))
353
+ inst_list.remove(0) # 0 is background
354
+ for inst_id in inst_list:
355
+ inst_map = np.array(orig_inst_map == inst_id, np.uint8)
356
+ inst_box = get_bounding_box(inst_map)
357
+
358
+ # expand the box by 2px
359
+ # the bounds checks below keep the expanded
361
+ # boxes valid within the map borders
361
+ if inst_box[0] >= 2:
362
+ inst_box[0] -= 2
363
+ if inst_box[2] >= 2:
364
+ inst_box[2] -= 2
365
+ if inst_box[1] <= orig_inst_map.shape[0] - 2:
366
+ inst_box[1] += 2
367
+ if inst_box[3] <= orig_inst_map.shape[1] - 2:
368
+ inst_box[3] += 2
369
+
370
+ # improvement
371
+ inst_map = inst_map[inst_box[0] : inst_box[1], inst_box[2] : inst_box[3]]
372
+
373
+ if inst_map.shape[0] < 2 or inst_map.shape[1] < 2:
374
+ continue
375
+
376
+ # instance center of mass, rounded to nearest pixel
377
+ inst_com = list(center_of_mass(inst_map))
378
+
379
+ inst_com[0] = int(inst_com[0] + 0.5)
380
+ inst_com[1] = int(inst_com[1] + 0.5)
381
+
382
+ inst_x_range = np.arange(1, inst_map.shape[1] + 1)
383
+ inst_y_range = np.arange(1, inst_map.shape[0] + 1)
384
+ # shifting center of pixels grid to instance center of mass
385
+ inst_x_range -= inst_com[1]
386
+ inst_y_range -= inst_com[0]
387
+
388
+ inst_x, inst_y = np.meshgrid(inst_x_range, inst_y_range)
389
+
390
+ # remove coord outside of instance
391
+ inst_x[inst_map == 0] = 0
392
+ inst_y[inst_map == 0] = 0
393
+ inst_x = inst_x.astype("float32")
394
+ inst_y = inst_y.astype("float32")
395
+
396
+ # normalize min into -1 scale
397
+ if np.min(inst_x) < 0:
398
+ inst_x[inst_x < 0] /= -np.amin(inst_x[inst_x < 0])
399
+ if np.min(inst_y) < 0:
400
+ inst_y[inst_y < 0] /= -np.amin(inst_y[inst_y < 0])
401
+ # normalize max into +1 scale
402
+ if np.max(inst_x) > 0:
403
+ inst_x[inst_x > 0] /= np.amax(inst_x[inst_x > 0])
404
+ if np.max(inst_y) > 0:
405
+ inst_y[inst_y > 0] /= np.amax(inst_y[inst_y > 0])
406
+
407
+ ####
408
+ x_map_box = x_map[inst_box[0] : inst_box[1], inst_box[2] : inst_box[3]]
409
+ x_map_box[inst_map > 0] = inst_x[inst_map > 0]
410
+
411
+ y_map_box = y_map[inst_box[0] : inst_box[1], inst_box[2] : inst_box[3]]
412
+ y_map_box[inst_map > 0] = inst_y[inst_map > 0]
413
+
414
+ hv_map = np.stack([x_map, y_map])
415
+ return hv_map
416
+
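A quick sanity check for gen_instance_hv_map on a toy instance map; within each instance the gradients span -1 to +1 around its centre of mass:

import numpy as np
from cell_segmentation.datasets.pannuke import PanNukeDataset

inst_map = np.zeros((16, 16), dtype=np.int32)
inst_map[4:10, 4:12] = 1  # one rectangular "nucleus"
hv = PanNukeDataset.gen_instance_hv_map(inst_map)
print(hv.shape)                  # (2, 16, 16)
print(hv[0].min(), hv[0].max())  # -1.0 1.0 (horizontal gradient)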
417
+ @staticmethod
418
+ def gen_distance_prob_maps(inst_map: np.ndarray) -> np.ndarray:
419
+ """Generate distance probability maps
420
+
421
+ Args:
422
+ inst_map (np.ndarray): Instance-Map, each instance has one integer label starting at 1 (zero is background), Shape (H, W)
423
+
424
+ Returns:
425
+ np.ndarray: Distance probability map, shape (H, W)
426
+ """
427
+ inst_map = fix_duplicates(inst_map)
428
+ dist = np.zeros_like(inst_map, dtype=np.float64)
429
+ inst_list = list(np.unique(inst_map))
430
+ if 0 in inst_list:
431
+ inst_list.remove(0)
432
+
433
+ for inst_id in inst_list:
434
+ inst = np.array(inst_map == inst_id, np.uint8)
435
+
436
+ y1, y2, x1, x2 = get_bounding_box(inst)
437
+ y1 = y1 - 2 if y1 - 2 >= 0 else y1
438
+ x1 = x1 - 2 if x1 - 2 >= 0 else x1
439
+ x2 = x2 + 2 if x2 + 2 <= inst_map.shape[1] - 1 else x2
440
+ y2 = y2 + 2 if y2 + 2 <= inst_map.shape[0] - 1 else y2
441
+
442
+ inst = inst[y1:y2, x1:x2]
443
+
444
+ if inst.shape[0] < 2 or inst.shape[1] < 2:
445
+ continue
446
+
447
+ # euclidean distance map generation (distance_transform_edt)
448
+ # normalize distance to 0-1
449
+ inst_dist = distance_transform_edt(inst)
450
+ inst_dist = inst_dist.astype("float64")
451
+
452
+ max_value = np.amax(inst_dist)
453
+ if max_value <= 0:
454
+ continue
455
+ inst_dist = inst_dist / (np.max(inst_dist) + 1e-10)
456
+
457
+ dist_map_box = dist[y1:y2, x1:x2]
458
+ dist_map_box[inst > 0] = inst_dist[inst > 0]
459
+
460
+ return dist
461
+
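Analogously for gen_distance_prob_maps: inside each instance the map rises from ~0 at the border to ~1 at the centre:

import numpy as np
from cell_segmentation.datasets.pannuke import PanNukeDataset

inst_map = np.zeros((16, 16), dtype=np.int32)
inst_map[4:12, 4:12] = 1
dist = PanNukeDataset.gen_distance_prob_maps(inst_map)
print(dist.shape, round(float(dist.max()), 3))  # (16, 16) 1.0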
462
+ @staticmethod
463
+ @njit
464
+ def gen_stardist_maps(inst_map: np.ndarray) -> np.ndarray:
465
+ """Generate StarDist map with 32 nrays
466
+
467
+ Args:
468
+ inst_map (np.ndarray): Instance-Map, each instance has one integer label starting at 1 (zero is background), Shape (H, W)
469
+
470
+ Returns:
471
+ np.ndarray: Stardist vector map, shape (n_rays, H, W)
472
+ """
473
+ n_rays = 32
474
+ # inst_map = fix_duplicates(inst_map)
475
+ dist = np.empty(inst_map.shape + (n_rays,), np.float32)
476
+
477
+ st_rays = np.float32((2 * np.pi) / n_rays)
478
+ for i in range(inst_map.shape[0]):
479
+ for j in range(inst_map.shape[1]):
480
+ value = inst_map[i, j]
481
+ if value == 0:
482
+ dist[i, j] = 0
483
+ else:
484
+ for k in range(n_rays):
485
+ phi = np.float32(k * st_rays)
486
+ dy = np.cos(phi)
487
+ dx = np.sin(phi)
488
+ x, y = np.float32(0), np.float32(0)
489
+ while True:
490
+ x += dx
491
+ y += dy
492
+ ii = int(round(i + x))
493
+ jj = int(round(j + y))
494
+ if (
495
+ ii < 0
496
+ or ii >= inst_map.shape[0]
497
+ or jj < 0
498
+ or jj >= inst_map.shape[1]
499
+ or value != inst_map[ii, jj]
500
+ ):
501
+ # small correction as we overshoot the boundary
502
+ t_corr = 1 - 0.5 / max(np.abs(dx), np.abs(dy))
503
+ x -= t_corr * dx
504
+ y -= t_corr * dy
505
+ dst = np.sqrt(x**2 + y**2)
506
+ dist[i, j, k] = dst
507
+ break
508
+
509
+ return dist.transpose(2, 0, 1)
510
+
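The ray marching above can be verified on a toy map as well: every foreground pixel stores, for 32 evenly spaced angles, the distance to its instance boundary:

import numpy as np
from cell_segmentation.datasets.pannuke import PanNukeDataset

inst_map = np.zeros((20, 20), dtype=np.int32)
inst_map[5:15, 5:15] = 1  # one 10x10 square instance
stardist = PanNukeDataset.gen_stardist_maps(inst_map)
print(stardist.shape)             # (32, 20, 20), i.e. (n_rays, H, W)
print(stardist[:, 10, 10].min())  # shortest ray from near the centre, here ~4.5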
511
+ @staticmethod
512
+ def gen_regression_map(inst_map: np.ndarray):
513
+ n_directions = 2
514
+ dist = np.zeros(inst_map.shape + (n_directions,), np.float32).transpose(2, 0, 1)
515
+ inst_map = fix_duplicates(inst_map)
516
+ inst_list = list(np.unique(inst_map))
517
+ if 0 in inst_list:
518
+ inst_list.remove(0)
519
+ for inst_id in inst_list:
520
+ inst = np.array(inst_map == inst_id, np.uint8)
521
+ y1, y2, x1, x2 = get_bounding_box(inst)
522
+ y1 = y1 - 2 if y1 - 2 >= 0 else y1
523
+ x1 = x1 - 2 if x1 - 2 >= 0 else x1
524
+ x2 = x2 + 2 if x2 + 2 <= inst_map.shape[1] - 1 else x2
525
+ y2 = y2 + 2 if y2 + 2 <= inst_map.shape[0] - 1 else y2
526
+
527
+ inst = inst[y1:y2, x1:x2]
528
+ y_mass, x_mass = center_of_mass(inst)
529
+ x_map = np.repeat(np.arange(1, x2 - x1 + 1)[None, :], y2 - y1, axis=0)
530
+ y_map = np.repeat(np.arange(1, y2 - y1 + 1)[:, None], x2 - x1, axis=1)
531
+ # we use a transposed coordinate system to align to HV-map, correct would be -1*x_dist_map and -1*y_dist_map
532
+ x_dist_map = (x_map - x_mass) * np.clip(inst, 0, 1)
533
+ y_dist_map = (y_map - y_mass) * np.clip(inst, 0, 1)
534
+ dist[0, y1:y2, x1:x2] = x_dist_map
535
+ dist[1, y1:y2, x1:x2] = y_dist_map
536
+
537
+ return dist
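Unlike the HV map, gen_regression_map stores raw, unnormalized pixel offsets to each instance's centre of mass (in the transposed coordinate system the inline comment mentions). A quick check:

import numpy as np
from cell_segmentation.datasets.pannuke import PanNukeDataset

inst_map = np.zeros((16, 16), dtype=np.int32)
inst_map[4:12, 4:12] = 1
reg = PanNukeDataset.gen_regression_map(inst_map)
print(reg.shape)  # (2, 16, 16): per-pixel x- and y-offsets in pixels, 0 on background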
cell_segmentation/datasets/prepare_monuseg.py ADDED
@@ -0,0 +1,115 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Prepare MoNuSeg Dataset by converting and resorting files
3
+ #
4
+ # @ Fabian Hörst, [email protected]
5
+ # Institute for Artificial Intelligence in Medicine,
6
+ # University Medicine Essen
7
+
8
+ from PIL import Image
9
+ import xml.etree.ElementTree as ET
10
+ from skimage import draw
11
+ import numpy as np
12
+ from pathlib import Path
13
+ from typing import Union
14
+ import argparse
15
+
16
+
17
+ def convert_monuseg(
18
+ input_path: Union[Path, str], output_path: Union[Path, str]
19
+ ) -> None:
20
+ """Convert the MoNuSeg dataset to a new format (1000 -> 1024, tiff to png and xml to npy)
21
+
22
+ Args:
23
+ input_path (Union[Path, str]): Input dataset
24
+ output_path (Union[Path, str]): Output path
25
+ """
26
+ input_path = Path(input_path)
27
+ output_path = Path(output_path)
28
+ output_path.mkdir(exist_ok=True, parents=True)
29
+
30
+ # testing and training
31
+ parts = ["testing", "training"]
32
+ for part in parts:
33
+ print(f"Prepare: {part}")
34
+ input_path_part = input_path / part
35
+ output_path_part = output_path / part
36
+ output_path_part.mkdir(exist_ok=True, parents=True)
37
+ (output_path_part / "images").mkdir(exist_ok=True, parents=True)
38
+ (output_path_part / "labels").mkdir(exist_ok=True, parents=True)
39
+
40
+ # images
41
+ images = [f for f in sorted((input_path_part / "images").glob("*.tif"))]
42
+ for img_path in images:
43
+ loaded_image = Image.open(img_path)
44
+ resized = loaded_image.resize(
45
+ (1024, 1024), resample=Image.Resampling.LANCZOS
46
+ )
47
+ new_img_path = output_path_part / "images" / f"{img_path.stem}.png"
48
+ resized.save(new_img_path)
49
+ # masks
50
+ annotations = [f for f in sorted((input_path_part / "labels").glob("*.xml"))]
51
+ for annot_path in annotations:
52
+ binary_mask = np.zeros((1000, 1000))
53
+
54
+ # extract xml file
55
+ tree = ET.parse(annot_path)
56
+ root = tree.getroot()
57
+ child = root[0]
58
+
59
+ for x in child:
60
+ r = x.tag
61
+ if r == "Regions":
62
+ element_idx = 1
63
+ for y in x:
64
+ y_tag = y.tag
65
+
66
+ if y_tag == "Region":
67
+ regions = []
68
+ vertices = y[1]
69
+ coords = np.zeros((len(vertices), 2))
70
+ for i, vertex in enumerate(vertices):
71
+ coords[i][0] = vertex.attrib["X"]
72
+ coords[i][1] = vertex.attrib["Y"]
73
+ regions.append(coords)
74
+ vertex_row_coords = regions[0][:, 0]
75
+ vertex_col_coords = regions[0][:, 1]
76
+ fill_row_coords, fill_col_coords = draw.polygon(
77
+ vertex_col_coords, vertex_row_coords, binary_mask.shape
78
+ )
79
+ binary_mask[fill_row_coords, fill_col_coords] = element_idx
80
+
81
+ element_idx = element_idx + 1
82
+ inst_image = Image.fromarray(binary_mask)
83
+ resized_mask = np.array(
84
+ inst_image.resize((1024, 1024), resample=Image.Resampling.NEAREST)
85
+ )
86
+ new_mask_path = output_path_part / "labels" / f"{annot_path.stem}.npy"
87
+ np.save(new_mask_path, resized_mask)
88
+ print("Finished")
89
+
90
+
91
+ parser = argparse.ArgumentParser(
92
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
93
+ description="Convert the MoNuSeg dataset",
94
+ )
95
+ parser.add_argument(
96
+ "--input_path",
97
+ type=str,
98
+ help="Input path of the original MoNuSeg dataset",
99
+ required=True,
100
+ )
101
+ parser.add_argument(
102
+ "--output_path",
103
+ type=str,
104
+ help="Output path to store the processed MoNuSeg dataset",
105
+ required=True,
106
+ )
107
+
108
+ if __name__ == "__main__":
109
+ opt = parser.parse_args()
110
+ configuration = vars(opt)
111
+
112
+ input_path = Path(configuration["input_path"])
113
+ output_path = Path(configuration["output_path"])
114
+
115
+ convert_monuseg(input_path=input_path, output_path=output_path)
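Equivalent to the CLI entry point above, the conversion can also be called directly from Python (paths are placeholders):

from pathlib import Path
from cell_segmentation.datasets.prepare_monuseg import convert_monuseg

convert_monuseg(
    input_path=Path("/data/MoNuSeg_raw"),  # hypothetical: contains testing/ and training/
    output_path=Path("/data/MoNuSeg"),
)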
cell_segmentation/datasets/prepare_pannuke_origin.py ADDED
@@ -0,0 +1,95 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Prepare PanNuke Dataset by converting and resorting files
3
+ #
4
+ # @ Fabian Hörst, [email protected]
5
+ # Institute for Artificial Intelligence in Medicine,
6
+ # University Medicine Essen
7
+
8
+ import inspect
9
+ import os
10
+ import sys
11
+
12
+ currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
13
+ parentdir = os.path.dirname(currentdir)
14
+ sys.path.insert(0, parentdir)
15
+ parentdir = os.path.dirname(parentdir)
16
+ sys.path.insert(0, parentdir)
17
+
18
+ import numpy as np
19
+ from pathlib import Path
20
+ from PIL import Image
21
+ from tqdm import tqdm
22
+ import argparse
23
+ from cell_segmentation.utils.metrics import remap_label
24
+
25
+
26
+ def process_fold(fold, input_path, output_path) -> None:
27
+ fold_path = Path(input_path) / f"fold{fold}"
28
+ output_fold_path = Path(output_path) / f"fold{fold}"
29
+ output_fold_path.mkdir(exist_ok=True, parents=True)
30
+ (output_fold_path / "images").mkdir(exist_ok=True, parents=True)
31
+ (output_fold_path / "labels").mkdir(exist_ok=True, parents=True)
32
+
33
+ print(f"Fold: {fold}")
34
+ print("Loading large numpy files, this may take a while")
35
+ images = np.load(fold_path / "images.npy")
36
+ masks = np.load(fold_path / "masks.npy")
37
+
38
+ print("Process images")
39
+ for i in tqdm(range(len(images)), total=len(images)):
40
+ outname = f"{fold}_{i}.png"
41
+ out_img = images[i]
42
+ im = Image.fromarray(out_img.astype(np.uint8))
43
+ im.save(output_fold_path / "images" / outname)
44
+
45
+ print("Process masks")
46
+ for i in tqdm(range(len(images)), total=len(images)):
47
+ outname = f"{fold}_{i}.npy"
48
+
49
+ # need to create instance map and type map with shape 256x256
50
+ mask = masks[i]
51
+ inst_map = np.zeros((256, 256))
52
+ num_nuc = 0
53
+ for j in range(5):
54
+ # copy values from the class layer if they are not equal to 0
55
+ layer_res = remap_label(mask[:, :, j])
56
+ # inst_map = np.where(mask[:,:,j] != 0, mask[:,:,j], inst_map)
57
+ inst_map = np.where(layer_res != 0, layer_res + num_nuc, inst_map)
58
+ num_nuc = num_nuc + np.max(layer_res)
59
+ inst_map = remap_label(inst_map)
60
+
61
+ type_map = np.zeros((256, 256)).astype(np.int32)
62
+ for j in range(5):
63
+ layer_res = ((j + 1) * np.clip(mask[:, :, j], 0, 1)).astype(np.int32)
64
+ type_map = np.where(layer_res != 0, layer_res, type_map)
65
+
66
+ outdict = {"inst_map": inst_map, "type_map": type_map}
67
+ np.save(output_fold_path / "labels" / outname, outdict)
68
+
69
+
70
+ parser = argparse.ArgumentParser(
71
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
72
+ description="Perform CellViT inference for given run-directory with model checkpoints and logs",
73
+ )
74
+ parser.add_argument(
75
+ "--input_path",
76
+ type=str,
77
+ help="Input path of the original PanNuke dataset",
78
+ required=True,
79
+ )
80
+ parser.add_argument(
81
+ "--output_path",
82
+ type=str,
83
+ help="Output path to store the processed PanNuke dataset",
84
+ required=True,
85
+ )
86
+
87
+ if __name__ == "__main__":
88
+ opt = parser.parse_args()
89
+ configuration = vars(opt)
90
+
91
+ input_path = Path(configuration["input_path"])
92
+ output_path = Path(configuration["output_path"])
93
+
94
+ for fold in [0, 1, 2]:
95
+ process_fold(fold, input_path, output_path)
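After running this script, each label file contains a pickled dict with both maps; a small sketch of how to read one back (the path is a placeholder):

from pathlib import Path
import numpy as np

label = np.load(Path("/data/PanNuke/fold0/labels/0_0.npy"), allow_pickle=True)[()]
inst_map = label["inst_map"]  # instances relabelled 1..N across all five class layers
type_map = label["type_map"]  # class ids 1..5, 0 = background
print(inst_map.shape, int(inst_map.max()), np.unique(type_map))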
cell_segmentation/experiments/__init__.py ADDED
@@ -0,0 +1,6 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Experiment related methods for each network type
3
+ #
4
+ # @ Fabian Hörst, [email protected]
5
+ # Institute for Artificial Intelligence in Medicine,
6
+ # University Medicine Essen
cell_segmentation/experiments/experiment_cellvit_conic.py ADDED
@@ -0,0 +1,808 @@
1
+ # -*- coding: utf-8 -*-
2
+ # CellViT Experiment Class
3
+ #
4
+ # @ Fabian Hörst, [email protected]
5
+ # Institute for Artificial Intelligence in Medicine,
6
+ # University Medicine Essen
7
+
8
+ import copy
9
+ import datetime
10
+ import inspect
11
+ import os
12
+ import shutil
13
+ import sys
14
+
15
+ import yaml
16
+
17
+ currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
18
+ parentdir = os.path.dirname(currentdir)
19
+ sys.path.insert(0, parentdir)
20
+
21
+ import uuid
22
+ from pathlib import Path
23
+ from typing import Callable, Tuple, Union
24
+
25
+ import albumentations as A
26
+ import torch
27
+ import torch.nn as nn
28
+ import wandb
29
+ from torch.optim import Optimizer
30
+ from torch.optim.lr_scheduler import (
31
+ ConstantLR,
32
+ CosineAnnealingLR,
33
+ ExponentialLR,
34
+ SequentialLR,
35
+ _LRScheduler,
36
+ )
37
+ from torch.utils.data import (
38
+ DataLoader,
39
+ Dataset,
40
+ RandomSampler,
41
+ Sampler,
42
+ Subset,
43
+ WeightedRandomSampler,
44
+ )
45
+ from torchinfo import summary
46
+ from wandb.sdk.lib.runid import generate_id
47
+
48
+ from base_ml.base_early_stopping import EarlyStopping
49
+ from base_ml.base_experiment import BaseExperiment
50
+ from base_ml.base_loss import retrieve_loss_fn
51
+ from cell_segmentation.datasets.base_cell import CellDataset
52
+ from cell_segmentation.datasets.dataset_coordinator import select_dataset
53
+ from cell_segmentation.trainer.trainer_cellvit import CellViTTrainer
54
+ from models.segmentation.cell_segmentation.cellvit import CellViT
55
+ from utils.tools import close_logger
56
+
57
+
58
+ class ExperimentCellViTCoNic(BaseExperiment):
59
+ def __init__(self, default_conf: dict, checkpoint=None) -> None:
60
+ super().__init__(default_conf, checkpoint)
61
+ self.load_dataset_setup(dataset_path=self.default_conf["data"]["dataset_path"])
62
+
63
+ def run_experiment(self) -> str:
64
+ """Main Experiment Code"""
65
+ ### Setup
66
+ # close loggers
67
+ self.close_remaining_logger()
68
+
69
+ # get the config for the current run
70
+ self.run_conf = copy.deepcopy(self.default_conf)
71
+ self.run_conf["dataset_config"] = self.dataset_config
72
+ self.run_name = f"{datetime.datetime.now().strftime('%Y-%m-%dT%H%M%S')}_{self.run_conf['logging']['log_comment']}"
73
+
74
+ wandb_run_id = generate_id()
75
+ resume = None
76
+ if self.checkpoint is not None:
77
+ wandb_run_id = self.checkpoint["wandb_id"]
78
+ resume = "must"
79
+ self.run_name = self.checkpoint["run_name"]
80
+
81
+ # initialize wandb
82
+ run = wandb.init(
83
+ project=self.run_conf["logging"]["project"],
84
+ tags=self.run_conf["logging"].get("tags", []),
85
+ name=self.run_name,
86
+ notes=self.run_conf["logging"]["notes"],
87
+ dir=self.run_conf["logging"]["wandb_dir"],
88
+ mode=self.run_conf["logging"]["mode"].lower(),
89
+ group=self.run_conf["logging"].get("group", str(uuid.uuid4())),
90
+ allow_val_change=True,
91
+ id=wandb_run_id,
92
+ resume=resume,
93
+ settings=wandb.Settings(start_method="fork"),
94
+ )
95
+
96
+ # get ids
97
+ self.run_conf["logging"]["run_id"] = run.id
98
+ self.run_conf["logging"]["wandb_file"] = run.id
99
+
100
+ # overwrite configuration with sweep values or leave them as they are
101
+ if self.run_conf["run_sweep"] is True:
102
+ self.run_conf["logging"]["sweep_id"] = run.sweep_id
103
+ self.run_conf["logging"]["log_dir"] = str(
104
+ Path(self.default_conf["logging"]["log_dir"])
105
+ / f"sweep_{run.sweep_id}"
106
+ / f"{self.run_name}_{self.run_conf['logging']['run_id']}"
107
+ )
108
+ self.overwrite_sweep_values(self.run_conf, run.config)
109
+ else:
110
+ self.run_conf["logging"]["log_dir"] = str(
111
+ Path(self.default_conf["logging"]["log_dir"]) / self.run_name
112
+ )
113
+
114
+ # update wandb
115
+ wandb.config.update(
116
+ self.run_conf, allow_val_change=True
117
+ ) # this may lead to problems
118
+
119
+ # create output folder, instantiate logger and store config
120
+ self.create_output_dir(self.run_conf["logging"]["log_dir"])
121
+ self.logger = self.instantiate_logger()
122
+ self.logger.info("Instantiated Logger. WandB init and config update finished.")
123
+ self.logger.info(f"Run ist stored here: {self.run_conf['logging']['log_dir']}")
124
+ self.store_config()
125
+
126
+ self.logger.info(
127
+ f"Cuda devices: {[torch.cuda.device(i) for i in range(torch.cuda.device_count())]}"
128
+ )
129
+ ### Machine Learning
130
+ device = f"cuda:{self.run_conf['gpu']}"
131
+ self.logger.info(f"Using GPU: {device}")
132
+ self.logger.info(f"Using device: {device}")
133
+
134
+ # loss functions
135
+ loss_fn_dict = self.get_loss_fn(self.run_conf.get("loss", {}))
136
+ self.logger.info("Loss functions:")
137
+ self.logger.info(loss_fn_dict)
138
+
139
+ # model
140
+ model = self.get_train_model(
141
+ pretrained_encoder=self.run_conf["model"].get("pretrained_encoder", None),
142
+ pretrained_model=self.run_conf["model"].get("pretrained", None),
143
+ backbone_type=self.run_conf["model"].get("backbone", "default"),
144
+ shared_decoders=self.run_conf["model"].get("shared_decoders", False),
145
+ regression_loss=self.run_conf["model"].get("regression_loss", False),
146
+ )
147
+ model.to(device)
148
+
149
+ # optimizer
150
+ optimizer = self.get_optimizer(
151
+ model,
152
+ self.run_conf["training"]["optimizer"],
153
+ self.run_conf["training"]["optimizer_hyperparameter"],
154
+ )
155
+
156
+ # scheduler
157
+ scheduler = self.get_scheduler(
158
+ optimizer=optimizer,
159
+ scheduler_type=self.run_conf["training"]["scheduler"]["scheduler_type"],
160
+ )
161
+
162
+ # early stopping (no early stopping for basic setup)
163
+ early_stopping = None
164
+ if "early_stopping_patience" in self.run_conf["training"]:
165
+ if self.run_conf["training"]["early_stopping_patience"] is not None:
166
+ early_stopping = EarlyStopping(
167
+ patience=self.run_conf["training"]["early_stopping_patience"],
168
+ strategy="maximize",
169
+ )
170
+
171
+ ### Data handling
172
+ train_transforms, val_transforms = self.get_transforms(
173
+ self.run_conf["transformations"],
174
+ input_shape=self.run_conf["data"].get("input_shape", 256),
175
+ )
176
+
177
+ train_dataset, val_dataset = self.get_datasets(
178
+ train_transforms=train_transforms,
179
+ val_transforms=val_transforms,
180
+ )
181
+
182
+ # load sampler
183
+ training_sampler = self.get_sampler(
184
+ train_dataset=train_dataset,
185
+ strategy=self.run_conf["training"].get("sampling_strategy", "random"),
186
+ gamma=self.run_conf["training"].get("sampling_gamma", 1),
187
+ )
188
+
189
+ # define dataloaders
190
+ train_dataloader = DataLoader(
191
+ train_dataset,
192
+ batch_size=self.run_conf["training"]["batch_size"],
193
+ sampler=training_sampler,
194
+ num_workers=16,
195
+ pin_memory=False,
196
+ worker_init_fn=self.seed_worker,
197
+ )
198
+
199
+ val_dataloader = DataLoader(
200
+ val_dataset,
201
+ batch_size=128,
202
+ num_workers=16,
203
+ pin_memory=True,
204
+ worker_init_fn=self.seed_worker,
205
+ )
206
+
207
+ # start Training
208
+ self.logger.info("Instantiate Trainer")
209
+ trainer = CellViTTrainer(
210
+ model=model,
211
+ loss_fn_dict=loss_fn_dict,
212
+ optimizer=optimizer,
213
+ scheduler=scheduler,
214
+ device=device,
215
+ logger=self.logger,
216
+ logdir=self.run_conf["logging"]["log_dir"],
217
+ num_classes=self.run_conf["data"]["num_nuclei_classes"],
218
+ dataset_config=self.dataset_config,
219
+ early_stopping=early_stopping,
220
+ experiment_config=self.run_conf,
221
+ log_images=self.run_conf["logging"].get("log_images", False),
222
+ magnification=self.run_conf["data"].get("magnification", 40),
223
+ mixed_precision=self.run_conf["training"].get("mixed_precision", False),
224
+ )
225
+
226
+ # Load checkpoint if provided
227
+ if self.checkpoint is not None:
228
+ self.logger.info("Checkpoint was provided. Restore ...")
229
+ trainer.resume_checkpoint(self.checkpoint)
230
+
231
+ # Call fit method
232
+ self.logger.info("Calling Trainer Fit")
233
+ trainer.fit(
234
+ epochs=self.run_conf["training"]["epochs"],
235
+ train_dataloader=train_dataloader,
236
+ val_dataloader=val_dataloader,
237
+ metric_init=self.get_wandb_init_dict(),
238
+ unfreeze_epoch=self.run_conf["training"]["unfreeze_epoch"],
239
+ eval_every=self.run_conf["training"].get("eval_every", 1),
240
+ )
241
+
242
+ # Select best model if not provided by early stopping
243
+ checkpoint_dir = Path(self.run_conf["logging"]["log_dir"]) / "checkpoints"
244
+ if not (checkpoint_dir / "model_best.pth").is_file():
245
+ shutil.copy(
246
+ checkpoint_dir / "latest_checkpoint.pth",
247
+ checkpoint_dir / "model_best.pth",
248
+ )
249
+
250
+ # At the end close logger
251
+ self.logger.info(f"Finished run {run.id}")
252
+ close_logger(self.logger)
253
+
254
+ return self.run_conf["logging"]["log_dir"]
255
+
256
+ def load_dataset_setup(self, dataset_path: Union[Path, str]) -> None:
257
+ """Load the configuration of the cell segmentation dataset.
258
+
259
+ The dataset must have a dataset_config.yaml file in their dataset path with the following entries:
260
+ * nuclei_types: describing the present nuclei types with corresponding integer
261
+
262
+ Args:
263
+ dataset_path (Union[Path, str]): Path to dataset folder
264
+ """
265
+ dataset_config_path = Path(dataset_path) / "dataset_config.yaml"
266
+ with open(dataset_config_path, "r") as dataset_config_file:
267
+ yaml_config = yaml.safe_load(dataset_config_file)
268
+ self.dataset_config = dict(yaml_config)
269
+
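A minimal sketch of the dataset_config.yaml this method expects; the nuclei types below follow the cell_count.csv header shown in conic.py and are an assumption for CoNIC (see docs/datasets/PanNuke/dataset_config.yaml in this commit for a full PanNuke example):

nuclei_types:
  Background: 0
  Neutrophil: 1
  Epithelial: 2
  Lymphocyte: 3
  Plasma: 4
  Eosinophil: 5
  Connective: 6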
270
+ def get_loss_fn(self, loss_fn_settings: dict) -> dict:
271
+ """Create a dictionary with loss functions for all branches
272
+
273
+ Branches: "nuclei_binary_map", "hv_map", "nuclei_type_map"
274
+
275
+ Args:
276
+ loss_fn_settings (dict): Dictionary with the loss function settings. Structure
277
+ branch_name(str):
278
+ loss_name(str):
279
+ loss_fn(str): String matching to the loss functions defined in the LOSS_DICT (base_ml.base_loss)
280
+ weight(float): Weighting factor as float value
281
+ (optional) args: Optional parameters for initializing the loss function
282
+ arg_name: value
283
+
284
+ If a branch is not provided, the default settings (described below) are used.
285
+
286
+ For further information, please have a look at the file configs/examples/cell_segmentation/train_cellvit.yaml
287
+ under the section "loss"
288
+
289
+ Example:
290
+ nuclei_binary_map:
291
+ bce:
292
+ loss_fn: xentropy_loss
293
+ weight: 1
294
+ dice:
295
+ loss_fn: dice_loss
296
+ weight: 1
297
+
298
+ Returns:
299
+ dict: Dictionary with loss functions for each branch. Structure:
300
+ branch_name(str):
301
+ loss_name(str):
302
+ "loss_fn": Callable loss function
303
+ "weight": weight of the loss since in the end all losses of all branches are added together for backward pass
304
+ loss_name(str):
305
+ "loss_fn": Callable loss function
306
+ "weight": weight of the loss since in the end all losses of all branches are added together for backward pass
307
+ branch_name(str)
308
+ ...
309
+
310
+ Default loss dictionary:
311
+ nuclei_binary_map:
312
+ bce:
313
+ loss_fn: xentropy_loss
314
+ weight: 1
315
+ dice:
316
+ loss_fn: dice_loss
317
+ weight: 1
318
+ hv_map:
319
+ mse:
320
+ loss_fn: mse_loss_maps
321
+ weight: 1
322
+ msge:
323
+ loss_fn: msge_loss_maps
324
+ weight: 1
325
+ nuclei_type_map:
326
+ bce:
327
+ loss_fn: xentropy_loss
328
+ weight: 1
329
+ dice:
330
+ loss_fn: dice_loss
331
+ weight: 1
332
+ """
333
+ loss_fn_dict = {}
334
+ if "nuclei_binary_map" in loss_fn_settings.keys():
335
+ loss_fn_dict["nuclei_binary_map"] = {}
336
+ for loss_name, loss_sett in loss_fn_settings["nuclei_binary_map"].items():
337
+ parameters = loss_sett.get("args", {})
338
+ loss_fn_dict["nuclei_binary_map"][loss_name] = {
339
+ "loss_fn": retrieve_loss_fn(loss_sett["loss_fn"], **parameters),
340
+ "weight": loss_sett["weight"],
341
+ }
342
+ else:
343
+ loss_fn_dict["nuclei_binary_map"] = {
344
+ "bce": {"loss_fn": retrieve_loss_fn("xentropy_loss"), "weight": 1},
345
+ "dice": {"loss_fn": retrieve_loss_fn("dice_loss"), "weight": 1},
346
+ }
347
+ if "hv_map" in loss_fn_settings.keys():
348
+ loss_fn_dict["hv_map"] = {}
349
+ for loss_name, loss_sett in loss_fn_settings["hv_map"].items():
350
+ parameters = loss_sett.get("args", {})
351
+ loss_fn_dict["hv_map"][loss_name] = {
352
+ "loss_fn": retrieve_loss_fn(loss_sett["loss_fn"], **parameters),
353
+ "weight": loss_sett["weight"],
354
+ }
355
+ else:
356
+ loss_fn_dict["hv_map"] = {
357
+ "mse": {"loss_fn": retrieve_loss_fn("mse_loss_maps"), "weight": 1},
358
+ "msge": {"loss_fn": retrieve_loss_fn("msge_loss_maps"), "weight": 1},
359
+ }
360
+ if "nuclei_type_map" in loss_fn_settings.keys():
361
+ loss_fn_dict["nuclei_type_map"] = {}
362
+ for loss_name, loss_sett in loss_fn_settings["nuclei_type_map"].items():
363
+ parameters = loss_sett.get("args", {})
364
+ loss_fn_dict["nuclei_type_map"][loss_name] = {
365
+ "loss_fn": retrieve_loss_fn(loss_sett["loss_fn"], **parameters),
366
+ "weight": loss_sett["weight"],
367
+ }
368
+ else:
369
+ loss_fn_dict["nuclei_type_map"] = {
370
+ "bce": {"loss_fn": retrieve_loss_fn("xentropy_loss"), "weight": 1},
371
+ "dice": {"loss_fn": retrieve_loss_fn("dice_loss"), "weight": 1},
372
+ }
373
+ if "regression_loss" in loss_fn_settings.keys():
374
+ loss_fn_dict["regression_map"] = {}
375
+ for loss_name, loss_sett in loss_fn_settings["regression_loss"].items():
376
+ parameters = loss_sett.get("args", {})
377
+ loss_fn_dict["regression_map"][loss_name] = {
378
+ "loss_fn": retrieve_loss_fn(loss_sett["loss_fn"], **parameters),
379
+ "weight": loss_sett["weight"],
380
+ }
381
+ elif "regression_loss" in self.run_conf["model"].keys():
382
+ loss_fn_dict["regression_map"] = {
383
+ "mse": {"loss_fn": retrieve_loss_fn("mse_loss_maps"), "weight": 1},
384
+ }
385
+ return loss_fn_dict
386
+
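+ # Sketch of how the loss dictionary built above is typically consumed. The
+ # reduction below is an assumed illustration (the actual accumulation lives in
+ # the trainer); `predictions` and `gt` are hypothetical per-branch dicts:
+ #
+ # total_loss = 0.0
+ # for branch, branch_losses in loss_fn_dict.items():
+ #     for loss_name, settings in branch_losses.items():
+ #         total_loss += settings["weight"] * settings["loss_fn"](
+ #             predictions[branch], gt[branch]
+ #         )
+ # total_loss.backward()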
387
+ def get_scheduler(self, scheduler_type: str, optimizer: Optimizer) -> _LRScheduler:
388
+ """Get the learning rate scheduler for CellViT
389
+
390
+ The configuration of the scheduler is given in the "training" -> "scheduler" section.
391
+ Currently, "constant", "exponential" and "cosine" schedulers are implemented.
392
+
393
+ Required parameters for implemented schedulers:
394
+ - "constant": None
395
+ - "exponential": gamma (optional, defaults to 0.95)
396
+ - "cosine": eta_min (optional, defaults to 1-e5)
397
+
398
+ Args:
399
+ scheduler_type (str): Type of scheduler as a string. Currently implemented:
400
+ - "constant" (lowering by a factor of ten after 25 epochs, increasing after 50, decreasimg again after 75)
401
+ - "exponential" (ExponentialLR with given gamma, gamma defaults to 0.95)
402
+ - "cosine" (CosineAnnealingLR, eta_min as parameter, defaults to 1-e5)
403
+ optimizer (Optimizer): Optimizer
404
+
405
+ Returns:
406
+ _LRScheduler: PyTorch Scheduler
407
+ """
408
+ implemented_schedulers = ["constant", "exponential", "cosine"]
409
+ if scheduler_type.lower() not in implemented_schedulers:
410
+ self.logger.warning(
411
+ f"Unknown Scheduler - No scheduler from the list {implemented_schedulers} select. Using default scheduling."
412
+ )
413
+ if scheduler_type.lower() == "constant":
414
+ scheduler = SequentialLR(
415
+ optimizer=optimizer,
416
+ schedulers=[
417
+ ConstantLR(optimizer, factor=1, total_iters=25),
418
+ ConstantLR(optimizer, factor=0.1, total_iters=25),
419
+ ConstantLR(optimizer, factor=1, total_iters=25),
420
+ ConstantLR(optimizer, factor=0.1, total_iters=1000),
421
+ ],
422
+ milestones=[24, 49, 74],
423
+ )
424
+ elif scheduler_type.lower() == "exponential":
425
+ scheduler = ExponentialLR(
426
+ optimizer,
427
+ gamma=self.run_conf["training"]["scheduler"].get("gamma", 0.95),
428
+ )
429
+ elif scheduler_type.lower() == "cosine":
430
+ scheduler = CosineAnnealingLR(
431
+ optimizer,
432
+ T_max=self.run_conf["training"]["epochs"],
433
+ eta_min=self.run_conf["training"]["scheduler"].get("eta_min", 1e-5),
434
+ )
435
+ else:
436
+ scheduler = super().get_scheduler(optimizer)
437
+ return scheduler
438
+
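+ # Example "training" -> "scheduler" YAML section matching the options handled
+ # above (values are illustrative, not repo defaults):
+ #
+ # training:
+ #   scheduler:
+ #     scheduler_type: cosine
+ #     eta_min: 0.00001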
439
+ def get_datasets(
440
+ self,
441
+ train_transforms: Callable = None,
442
+ val_transforms: Callable = None,
443
+ ) -> Tuple[Dataset, Dataset]:
444
+ """Retrieve training dataset and validation dataset
445
+
446
+ Args:
447
+ train_transforms (Callable, optional): PyTorch transformations for train set. Defaults to None.
448
+ val_transforms (Callable, optional): PyTorch transformations for validation set. Defaults to None.
449
+
450
+ Returns:
451
+ Tuple[Dataset, Dataset]: Training dataset and validation dataset
452
+ """
453
+ if (
454
+ "val_split" in self.run_conf["data"]
455
+ and "val_folds" in self.run_conf["data"]
456
+ ):
457
+ raise RuntimeError(
458
+ "Provide either val_splits or val_folds in configuration file, not both."
459
+ )
460
+ if (
461
+ "val_split" not in self.run_conf["data"]
462
+ and "val_folds" not in self.run_conf["data"]
463
+ ):
464
+ raise RuntimeError(
465
+ "Provide either val_split or val_folds in configuration file, one is necessary."
466
+ )
467
474
+ if "regression_loss" in self.run_conf["model"].keys():
475
+ self.run_conf["data"]["regression_loss"] = True
476
+
477
+ full_dataset = select_dataset(
478
+ dataset_name="conic",
479
+ split="train",
480
+ dataset_config=self.run_conf["data"],
481
+ transforms=train_transforms,
482
+ )
483
+ if "val_split" in self.run_conf["data"]:
484
+ generator_split = torch.Generator().manual_seed(
485
+ self.default_conf["random_seed"]
486
+ )
487
+ val_splits = float(self.run_conf["data"]["val_split"])
488
+ train_dataset, val_dataset = torch.utils.data.random_split(
489
+ full_dataset,
490
+ lengths=[1 - val_splits, val_splits],
491
+ generator=generator_split,
492
+ )
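+ # deepcopy the wrapped dataset so that setting validation transforms below
+ # does not also change the transforms of the shared train subset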
493
+ val_dataset.dataset = copy.deepcopy(full_dataset)
494
+ val_dataset.dataset.set_transforms(val_transforms)
495
+ else:
496
+ train_dataset = full_dataset
497
+ val_dataset = select_dataset(
498
+ dataset_name="conic",
499
+ split="validation",
500
+ dataset_config=self.run_conf["data"],
501
+ transforms=val_transforms,
502
+ )
503
+
504
+ return train_dataset, val_dataset
505
+
506
+ def get_train_model(
507
+ self,
508
+ pretrained_encoder: Union[Path, str] = None,
509
+ pretrained_model: Union[Path, str] = None,
510
+ backbone_type: str = "default",
511
+ shared_decoders: bool = False,
512
+ regression_loss: bool = False,
513
+ **kwargs,
514
+ ) -> CellViT:
515
+ """Return the CellViT training model
516
+
517
+ Args:
518
+ pretrained_encoder (Union[Path, str]): Path to a pretrained encoder. Defaults to None.
519
+ pretrained_model (Union[Path, str], optional): Path to a pretrained model. Defaults to None.
520
+ backbone_type (str, optional): Backbone type. Currently supported: default, ViT256, SAM-B, SAM-L, SAM-H. Defaults to "default".
521
+ shared_decoders (bool, optional): If shared skip decoders should be used. Defaults to False.
522
+ regression_loss (bool, optional): If regression loss is used. Defaults to False
523
+
524
+ Returns:
525
+ CellViT: CellViT training model with given setup
526
+ """
527
+ # reseed needed, due to subprocess seeding compatibility
528
+ self.seed_run(self.default_conf["random_seed"])
529
+
530
+ # check for backbones
531
+ implemented_backbones = ["default", "vit256", "sam-b", "sam-l", "sam-h"]
532
+ if backbone_type.lower() not in implemented_backbones:
533
+ raise NotImplementedError(
534
+ f"Unknown Backbone Type - Currently supported are: {implemented_backbones}"
535
+ )
536
+ if backbone_type.lower() == "default":
537
+ if shared_decoders:
538
+ model_class = CellViTShared
539
+ else:
540
+ model_class = CellViT
541
+ model = model_class(
542
+ num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"],
543
+ num_tissue_classes=1,
544
+ embed_dim=self.run_conf["model"]["embed_dim"],
545
+ input_channels=self.run_conf["model"].get("input_channels", 3),
546
+ depth=self.run_conf["model"]["depth"],
547
+ num_heads=self.run_conf["model"]["num_heads"],
548
+ extract_layers=self.run_conf["model"]["extract_layers"],
549
+ drop_rate=self.run_conf["training"].get("drop_rate", 0),
550
+ attn_drop_rate=self.run_conf["training"].get("attn_drop_rate", 0),
551
+ drop_path_rate=self.run_conf["training"].get("drop_path_rate", 0),
552
+ regression_loss=regression_loss,
553
+ )
554
+
555
+ if pretrained_model is not None:
556
+ self.logger.info(
557
+ f"Loading pretrained CellViT model from path: {pretrained_model}"
558
+ )
559
+ cellvit_pretrained = torch.load(pretrained_model)
560
+ self.logger.info(model.load_state_dict(cellvit_pretrained, strict=True))
561
+ self.logger.info("Loaded CellViT model")
562
+
563
+ if backbone_type.lower() == "vit256":
564
+ if shared_decoders:
565
+ model_class = CellViT256Shared
566
+ else:
567
+ model_class = CellViT256
568
+ model = model_class(
569
+ model256_path=pretrained_encoder,
570
+ num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"],
571
+ num_tissue_classes=1,
572
+ drop_rate=self.run_conf["training"].get("drop_rate", 0),
573
+ attn_drop_rate=self.run_conf["training"].get("attn_drop_rate", 0),
574
+ drop_path_rate=self.run_conf["training"].get("drop_path_rate", 0),
575
+ regression_loss=regression_loss,
576
+ )
577
+ model.load_pretrained_encoder(model.model256_path)
578
+ if pretrained_model is not None:
579
+ self.logger.info(
580
+ f"Loading pretrained CellViT model from path: {pretrained_model}"
581
+ )
582
+ cellvit_pretrained = torch.load(pretrained_model, map_location="cpu")
583
+ self.logger.info(model.load_state_dict(cellvit_pretrained, strict=True))
584
+ model.freeze_encoder()
585
+ self.logger.info("Loaded CellVit256 model")
586
+ if backbone_type.lower() in ["sam-b", "sam-l", "sam-h"]:
587
+ if shared_decoders:
588
+ model_class = CellViTSAMShared
589
+ else:
590
+ model_class = CellViTSAM
591
+ model = model_class(
592
+ model_path=pretrained_encoder,
593
+ num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"],
594
+ num_tissue_classes=1,
595
+ vit_structure=backbone_type,
596
+ drop_rate=self.run_conf["training"].get("drop_rate", 0),
597
+ regression_loss=regression_loss,
598
+ )
599
+ model.load_pretrained_encoder(model.model_path)
600
+ if pretrained_model is not None:
601
+ self.logger.info(
602
+ f"Loading pretrained CellViT model from path: {pretrained_model}"
603
+ )
604
+ cellvit_pretrained = torch.load(pretrained_model, map_location="cpu")
605
+ self.logger.info(model.load_state_dict(cellvit_pretrained, strict=True))
606
+ model.freeze_encoder()
607
+ self.logger.info(f"Loaded CellViT-SAM model with backbone: {backbone_type}")
608
+
609
+ self.logger.info(f"\nModel: {model}")
610
+ model = model.to("cpu")
611
+ self.logger.info(
612
+ f"\n{summary(model, input_size=(1, 3, 256, 256), device='cpu')}"
613
+ )
614
+
615
+ return model
616
+
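+ # Example "model" YAML section selecting one of the backbones handled above
+ # (the encoder path is a hypothetical placeholder, not a shipped checkpoint):
+ #
+ # model:
+ #   backbone: sam-b
+ #   pretrained_encoder: ./checkpoints/sam_vit_b.pth
+ #   shared_decoders: false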
617
+ def get_wandb_init_dict(self) -> dict:
618
+ pass
619
+
620
+ def get_transforms(
621
+ self, transform_settings: dict, input_shape: int = 256
622
+ ) -> Tuple[Callable, Callable]:
623
+ """Get Transformations (Albumentation Transformations). Return both training and validation transformations.
624
+
625
+ The transformation settings are given in the following format:
626
+ key: dict with parameters
627
+ Example:
628
+ colorjitter:
629
+ p: 0.1
630
+ scale_setting: 0.5
631
+ scale_color: 0.1
632
+
633
+ Further information on how to set up the dictionary, along with default (recommended) values, is given here:
634
+ configs/examples/cell_segmentation/train_cellvit.yaml
635
+
636
+ Training Transformations:
637
+ Implemented are:
638
+ - A.RandomRotate90: Key in transform_settings: randomrotate90, parameters: p
639
+ - A.HorizontalFlip: Key in transform_settings: horizontalflip, parameters: p
640
+ - A.VerticalFlip: Key in transform_settings: verticalflip, parameters: p
641
+ - A.Downscale: Key in transform_settings: downscale, parameters: p, scale
642
+ - A.Blur: Key in transform_settings: blur, parameters: p, blur_limit
643
+ - A.GaussNoise: Key in transform_settings: gaussnoise, parameters: p, var_limit
644
+ - A.ColorJitter: Key in transform_settings: colorjitter, parameters: p, scale_setting, scale_color
645
+ - A.Superpixels: Key in transform_settings: superpixels, parameters: p
646
+ - A.ZoomBlur: Key in transform_settings: zoomblur, parameters: p
647
+ - A.RandomSizedCrop: Key in transform_settings: randomsizedcrop, parameters: p
648
+ - A.ElasticTransform: Key in transform_settings: elastictransform, parameters: p
649
+ Always implemented at the end of the pipeline:
650
+ - A.Normalize with given mean (default: (0.5, 0.5, 0.5)) and std (default: (0.5, 0.5, 0.5))
651
+
652
+ Validation Transformations:
653
+ A.Normalize with given mean (default: (0.5, 0.5, 0.5)) and std (default: (0.5, 0.5, 0.5))
654
+
655
+ Args:
656
+ transform_settings (dict): dictionary with the transformation settings.
657
+ input_shape (int, optional): Input shape of the images to be used. Defaults to 256.
658
+
659
+ Returns:
660
+ Tuple[Callable, Callable]: Train Transformations, Validation Transformations
661
+
662
+ """
663
+ transform_list = []
664
+ transform_settings = {k.lower(): v for k, v in transform_settings.items()}
665
+ if "RandomRotate90".lower() in transform_settings:
666
+ p = transform_settings["randomrotate90"]["p"]
667
+ if p > 0 and p <= 1:
668
+ transform_list.append(A.RandomRotate90(p=p))
669
+ if "HorizontalFlip".lower() in transform_settings.keys():
670
+ p = transform_settings["horizontalflip"]["p"]
671
+ if p > 0 and p <= 1:
672
+ transform_list.append(A.HorizontalFlip(p=p))
673
+ if "VerticalFlip".lower() in transform_settings:
674
+ p = transform_settings["verticalflip"]["p"]
675
+ if p > 0 and p <= 1:
676
+ transform_list.append(A.VerticalFlip(p=p))
677
+ if "Downscale".lower() in transform_settings:
678
+ p = transform_settings["downscale"]["p"]
679
+ scale = transform_settings["downscale"]["scale"]
680
+ if p > 0 and p <= 1:
681
+ transform_list.append(
682
+ A.Downscale(p=p, scale_max=scale, scale_min=scale)
683
+ )
684
+ if "Blur".lower() in transform_settings:
685
+ p = transform_settings["blur"]["p"]
686
+ blur_limit = transform_settings["blur"]["blur_limit"]
687
+ if p > 0 and p <= 1:
688
+ transform_list.append(A.Blur(p=p, blur_limit=blur_limit))
689
+ if "GaussNoise".lower() in transform_settings:
690
+ p = transform_settings["gaussnoise"]["p"]
691
+ var_limit = transform_settings["gaussnoise"]["var_limit"]
692
+ if p > 0 and p <= 1:
693
+ transform_list.append(A.GaussNoise(p=p, var_limit=var_limit))
694
+ if "ColorJitter".lower() in transform_settings:
695
+ p = transform_settings["colorjitter"]["p"]
696
+ scale_setting = transform_settings["colorjitter"]["scale_setting"]
697
+ scale_color = transform_settings["colorjitter"]["scale_color"]
698
+ if p > 0 and p <= 1:
699
+ transform_list.append(
700
+ A.ColorJitter(
701
+ p=p,
702
+ brightness=scale_setting,
703
+ contrast=scale_setting,
704
+ saturation=scale_color,
705
+ hue=scale_color / 2,
706
+ )
707
+ )
708
+ if "Superpixels".lower() in transform_settings:
709
+ p = transform_settings["superpixels"]["p"]
710
+ if p > 0 and p <= 1:
711
+ transform_list.append(
712
+ A.Superpixels(
713
+ p=p,
714
+ p_replace=0.1,
715
+ n_segments=200,
716
+ max_size=int(input_shape / 2),
717
+ )
718
+ )
719
+ if "ZoomBlur".lower() in transform_settings:
720
+ p = transform_settings["zoomblur"]["p"]
721
+ if p > 0 and p <= 1:
722
+ transform_list.append(A.ZoomBlur(p=p, max_factor=1.05))
723
+ if "RandomSizedCrop".lower() in transform_settings:
724
+ p = transform_settings["randomsizedcrop"]["p"]
725
+ if p > 0 and p <= 1:
726
+ transform_list.append(
727
+ A.RandomSizedCrop(
728
+ min_max_height=(input_shape / 2, input_shape),
729
+ height=input_shape,
730
+ width=input_shape,
731
+ p=p,
732
+ )
733
+ )
734
+ if "ElasticTransform".lower() in transform_settings:
735
+ p = transform_settings["elastictransform"]["p"]
736
+ if p > 0 and p <= 1:
737
+ transform_list.append(
738
+ A.ElasticTransform(p=p, sigma=25, alpha=0.5, alpha_affine=15)
739
+ )
740
+
741
+ if "normalize" in transform_settings:
742
+ mean = transform_settings["normalize"].get("mean", (0.5, 0.5, 0.5))
743
+ std = transform_settings["normalize"].get("std", (0.5, 0.5, 0.5))
744
+ else:
745
+ mean = (0.5, 0.5, 0.5)
746
+ std = (0.5, 0.5, 0.5)
747
+ transform_list.append(A.Normalize(mean=mean, std=std))
748
+
749
+ train_transforms = A.Compose(transform_list)
750
+ val_transforms = A.Compose([A.Normalize(mean=mean, std=std)])
751
+
752
+ return train_transforms, val_transforms
753
+
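+ # Example "transformations" YAML section for the augmentation pipeline above
+ # (probabilities and scales are illustrative, not the recommended defaults):
+ #
+ # transformations:
+ #   randomrotate90:
+ #     p: 0.5
+ #   horizontalflip:
+ #     p: 0.5
+ #   colorjitter:
+ #     p: 0.1
+ #     scale_setting: 0.5
+ #     scale_color: 0.1
+ #   normalize:
+ #     mean: [0.5, 0.5, 0.5]
+ #     std: [0.5, 0.5, 0.5]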
754
+ def get_sampler(
755
+ self, train_dataset: CellDataset, strategy: str = "random", gamma: float = 1
756
+ ) -> Sampler:
757
+ """Return the sampler (either RandomSampler or WeightedRandomSampler)
758
+
759
+ Args:
760
+ train_dataset (CellDataset): Dataset for training
761
+ strategy (str, optional): Sampling strategy. Defaults to "random" (random sampling).
762
+ Implemented are "random" and "cell"
763
+ gamma (float, optional): Gamma scaling factor, between 0 and 1.
764
+ 1 means total balancing, 0 means original weights. Defaults to 1.
765
+
766
+ Raises:
767
+ NotImplementedError: Not implemented sampler is selected
768
+
769
+ Returns:
770
+ Sampler: Sampler for training
771
+ """
772
+ if strategy.lower() == "random":
773
+ sampling_generator = torch.Generator().manual_seed(
774
+ self.default_conf["random_seed"]
775
+ )
776
+ sampler = RandomSampler(train_dataset, generator=sampling_generator)
777
+ self.logger.info("Using RandomSampler")
778
+ else:
779
+ # this solution is not accurate when a subset is used since the weights are calculated on the whole training dataset
780
+ if isinstance(train_dataset, Subset):
781
+ ds = train_dataset.dataset
782
+ else:
783
+ ds = train_dataset
784
+ ds.load_cell_count()
785
+ if strategy.lower() == "cell":
786
+ weights = ds.get_sampling_weights_cell(gamma)
787
+ else:
788
+ raise NotImplementedError(
789
+ "Unknown sampling strategy - Implemented is cell"
790
+ )
791
+
792
+ if isinstance(train_dataset, Subset):
793
+ weights = torch.Tensor([weights[i] for i in train_dataset.indices])
794
+
795
+ sampling_generator = torch.Generator().manual_seed(
796
+ self.default_conf["random_seed"]
797
+ )
798
+ sampler = WeightedRandomSampler(
799
+ weights=weights,
800
+ num_samples=len(train_dataset),
801
+ replacement=True,
802
+ generator=sampling_generator,
803
+ )
804
+
805
+ self.logger.info(f"Using Weighted Sampling with strategy: {strategy}")
806
+ self.logger.info(f"Unique-Weights: {torch.unique(weights)}")
807
+
808
+ return sampler
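+ # Sketch of the gamma interpolation described in the get_sampler docstring.
+ # This is an assumed form for illustration; the real weighting is implemented
+ # in CellDataset.get_sampling_weights_cell:
+ #
+ # import numpy as np
+ # counts = np.array([500, 100, 20])   # hypothetical per-class cell counts
+ # weights = (1.0 / counts) ** gamma   # gamma=0 -> original distribution, gamma=1 -> fully balanced
+ # weights = weights / weights.sum()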
cell_segmentation/experiments/experiment_cellvit_pannuke.py ADDED
@@ -0,0 +1,861 @@
1
+ # -*- coding: utf-8 -*-
2
+ # CellViT Experiment Class
3
+ #
4
+ # @ Fabian Hörst, [email protected]
5
+ # Institute for Artificial Intelligence in Medicine,
6
+ # University Medicine Essen
7
+ import argparse
8
+ import copy
9
+ import datetime
10
+ import inspect
11
+ import os
12
+ import shutil
13
+ import sys
14
+
15
+ import yaml
16
+ import numpy as np
17
+ import math
18
+
19
+ currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
20
+ parentdir = os.path.dirname(currentdir)
21
+ sys.path.insert(0, parentdir)
22
+
23
+ import uuid
24
+ from pathlib import Path
25
+ from typing import Callable, Tuple, Union
26
+ import torch
+ import albumentations as A
31
+ import torch.nn as nn
32
+ import wandb
33
+ from torch.optim import Optimizer
34
+ from torch.optim.lr_scheduler import (
35
+ ConstantLR,
36
+ CosineAnnealingLR,
37
+ ExponentialLR,
38
+ SequentialLR,
39
+ _LRScheduler,
40
+ CosineAnnealingWarmRestarts,
41
+ )
42
+ from torch.utils.data import (
43
+ DataLoader,
44
+ Dataset,
45
+ RandomSampler,
46
+ Sampler,
47
+ Subset,
48
+ WeightedRandomSampler,
49
+ )
50
+ from torchinfo import summary
51
+ from wandb.sdk.lib.runid import generate_id
52
+
53
+ from base_ml.base_early_stopping import EarlyStopping
54
+ from base_ml.base_experiment import BaseExperiment
55
+ from base_ml.base_loss import retrieve_loss_fn
56
+ from base_ml.base_trainer import BaseTrainer
57
+ from cell_segmentation.datasets.base_cell import CellDataset
58
+ from cell_segmentation.datasets.dataset_coordinator import select_dataset
59
+ from cell_segmentation.trainer.trainer_cellvit import CellViTTrainer
60
+ from models.segmentation.cell_segmentation.cellvit import CellViT
61
+
62
+ from utils.tools import close_logger
63
+
64
+
65
+ class WarmupCosineAnnealingLR(CosineAnnealingLR):
66
+ def __init__(self, optimizer, T_max, eta_min=0, warmup_epochs=0, warmup_factor=0):
+ # set warmup attributes before super().__init__, which already calls step() -> get_lr()
+ self.warmup_epochs = warmup_epochs
+ self.warmup_factor = warmup_factor
+ self.initial_lr = [group['lr'] for group in optimizer.param_groups] # initial learning rates
+ super().__init__(optimizer, T_max=T_max, eta_min=eta_min)
71
+
72
+ def get_lr(self):
73
+ if self.last_epoch < self.warmup_epochs:
74
+ warmup_factor = self.warmup_factor + (1.0 - self.warmup_factor) * (self.last_epoch / self.warmup_epochs)
75
+ return [base_lr * warmup_factor for base_lr in self.initial_lr]
76
+ else:
77
+ return [base_lr * self.get_lr_ratio() for base_lr in self.initial_lr]
78
+
79
+ def get_lr_ratio(self):
80
+ T_cur = min(self.last_epoch - self.warmup_epochs, self.T_max - self.warmup_epochs)
81
+ return 0.5 * (1 + math.cos(math.pi * T_cur / (self.T_max - self.warmup_epochs)))
82
+
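+ # Minimal usage sketch for WarmupCosineAnnealingLR (illustrative only; the
+ # model, optimizer, learning rate, and epoch count are assumed values, not
+ # repo configuration):
+ #
+ # model = torch.nn.Linear(8, 2)
+ # optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
+ # scheduler = WarmupCosineAnnealingLR(
+ #     optimizer, T_max=130, eta_min=1e-5, warmup_epochs=5, warmup_factor=0.1
+ # )
+ # for epoch in range(130):
+ #     ...  # train one epoch
+ #     scheduler.step()  # linear warmup for 5 epochs, then cosine decay to eta_min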
83
+
84
+
85
+ class ExperimentCellVitPanNuke(BaseExperiment):
86
+ def __init__(self, default_conf: dict, checkpoint=None) -> None:
87
+ super().__init__(default_conf, checkpoint)
88
+ self.load_dataset_setup(dataset_path=self.default_conf["data"]["dataset_path"])
89
+
90
+ def run_experiment(self) -> Tuple[Path, dict, nn.Module, dict]:
91
+ """Main Experiment Code"""
92
+ ### Setup
93
+ # close loggers
94
+ self.close_remaining_logger()
95
+
96
99
+ # get the config for the current run
100
+ self.run_conf = copy.deepcopy(self.default_conf)
101
+ self.run_conf["dataset_config"] = self.dataset_config
102
+ self.run_name = f"{datetime.datetime.now().strftime('%Y-%m-%dT%H%M%S')}_{self.run_conf['logging']['log_comment']}"
103
+
104
+ wandb_run_id = generate_id()
105
+ resume = None
106
+ if self.checkpoint is not None:
107
+ wandb_run_id = self.checkpoint["wandb_id"]
108
+ resume = "must"
109
+ self.run_name = self.checkpoint["run_name"]
110
+
111
+ # initialize wandb
112
+ run = wandb.init(
113
+ project=self.run_conf["logging"]["project"],
114
+ tags=self.run_conf["logging"].get("tags", []),
115
+ name=self.run_name,
116
+ notes=self.run_conf["logging"]["notes"],
117
+ dir=self.run_conf["logging"]["wandb_dir"],
118
+ mode=self.run_conf["logging"]["mode"].lower(),
119
+ group=self.run_conf["logging"].get("group", str(uuid.uuid4())),
120
+ allow_val_change=True,
121
+ id=wandb_run_id,
122
+ resume=resume,
123
+ settings=wandb.Settings(start_method="fork"),
124
+ )
125
+
126
+ # get ids
127
+ self.run_conf["logging"]["run_id"] = run.id
128
+ self.run_conf["logging"]["wandb_file"] = run.id
129
+
130
+ # overwrite configuration with sweep values or leave them as they are
131
+ if self.run_conf["run_sweep"] is True:
132
+ self.run_conf["logging"]["sweep_id"] = run.sweep_id
133
+ self.run_conf["logging"]["log_dir"] = str(
134
+ Path(self.default_conf["logging"]["log_dir"])
135
+ / f"sweep_{run.sweep_id}"
136
+ / f"{self.run_name}_{self.run_conf['logging']['run_id']}"
137
+ )
138
+ self.overwrite_sweep_values(self.run_conf, run.config)
139
+ else:
140
+ self.run_conf["logging"]["log_dir"] = str(
141
+ Path(self.default_conf["logging"]["log_dir"]) / self.run_name
142
+ )
143
+
144
+ # update wandb
145
+ wandb.config.update(
146
+ self.run_conf, allow_val_change=True
147
+ ) # this may lead to problems
148
+
149
+ # create output folder, instantiate logger and store config
150
+ self.create_output_dir(self.run_conf["logging"]["log_dir"])
151
+ self.logger = self.instantiate_logger()
152
+ self.logger.info("Instantiated Logger. WandB init and config update finished.")
153
+ self.logger.info(f"Run ist stored here: {self.run_conf['logging']['log_dir']}")
154
+ self.store_config()
155
+
156
+ self.logger.info(
157
+ f"Cuda devices: {[torch.cuda.device(i) for i in range(torch.cuda.device_count())]}"
158
+ )
159
+ ### Machine Learning
160
+ #device = f"cuda:{2}"
161
+ #device = torch.device("cuda:2")
162
+
163
+ device = f"cuda:{self.run_conf['gpu']}"
164
+ self.logger.info(f"Using GPU: {device}")
165
+ self.logger.info(f"Using device: {device}")
166
+
167
+ # loss functions
168
+ loss_fn_dict = self.get_loss_fn(self.run_conf.get("loss", {}))
169
+ self.logger.info("Loss functions:")
170
+ self.logger.info(loss_fn_dict)
171
+
172
+ # model
173
+ model = self.get_train_model(
174
+ pretrained_encoder=self.run_conf["model"].get("pretrained_encoder", None),
175
+ pretrained_model=self.run_conf["model"].get("pretrained", None),
176
+ backbone_type=self.run_conf["model"].get("backbone", "default"),
177
+ shared_decoders=self.run_conf["model"].get("shared_decoders", False),
178
+ regression_loss=self.run_conf["model"].get("regression_loss", False),
179
+ )
180
+ model.to(device)
181
+
182
+ # optimizer
183
+ optimizer = self.get_optimizer(
184
+ model,
185
+ self.run_conf["training"]["optimizer"].lower(),
186
+ self.run_conf["training"]["optimizer_hyperparameter"],
187
+ #self.run_conf["training"]["optimizer"],
188
+ self.run_conf["training"]["layer_decay"],
189
+
190
+ )
191
+
192
+ # scheduler
193
+ scheduler = self.get_scheduler(
194
+ optimizer=optimizer,
195
+ scheduler_type=self.run_conf["training"]["scheduler"]["scheduler_type"],
196
+ )
197
+
198
+ # early stopping (no early stopping for basic setup)
199
+ early_stopping = None
200
+ if "early_stopping_patience" in self.run_conf["training"]:
201
+ if self.run_conf["training"]["early_stopping_patience"] is not None:
202
+ early_stopping = EarlyStopping(
203
+ patience=self.run_conf["training"]["early_stopping_patience"],
204
+ strategy="maximize",
205
+ )
206
+
207
+ ### Data handling
208
+ train_transforms, val_transforms = self.get_transforms(
209
+ self.run_conf["transformations"],
210
+ input_shape=self.run_conf["data"].get("input_shape", 256),
211
+ )
212
+
213
+ train_dataset, val_dataset = self.get_datasets(
214
+ train_transforms=train_transforms,
215
+ val_transforms=val_transforms,
216
+ )
217
+
218
+ # load sampler
219
+ training_sampler = self.get_sampler(
220
+ train_dataset=train_dataset,
221
+ strategy=self.run_conf["training"].get("sampling_strategy", "random"),
222
+ gamma=self.run_conf["training"].get("sampling_gamma", 1),
223
+ )
224
+
225
+ # define dataloaders
226
+ train_dataloader = DataLoader(
227
+ train_dataset,
228
+ batch_size=self.run_conf["training"]["batch_size"],
229
+ sampler=training_sampler,
230
+ num_workers=16,
231
+ pin_memory=False,
232
+ worker_init_fn=self.seed_worker,
233
+ )
234
+
235
+ val_dataloader = DataLoader(
236
+ val_dataset,
237
+ batch_size=64,
238
+ num_workers=8,
239
+ pin_memory=True,
240
+ worker_init_fn=self.seed_worker,
241
+ )
242
+
243
+ # start Training
244
+ self.logger.info("Instantiate Trainer")
245
+ trainer_fn = self.get_trainer()
246
+ trainer = trainer_fn(
247
+ model=model,
248
+ loss_fn_dict=loss_fn_dict,
249
+ optimizer=optimizer,
250
+ scheduler=scheduler,
251
+ device=device,
252
+ logger=self.logger,
253
+ logdir=self.run_conf["logging"]["log_dir"],
254
+ num_classes=self.run_conf["data"]["num_nuclei_classes"],
255
+ dataset_config=self.dataset_config,
256
+ early_stopping=early_stopping,
257
+ experiment_config=self.run_conf,
258
+ log_images=self.run_conf["logging"].get("log_images", False),
259
+ magnification=self.run_conf["data"].get("magnification", 40),
260
+ mixed_precision=self.run_conf["training"].get("mixed_precision", False),
261
+ )
262
+
263
+ # Load checkpoint if provided
264
+ if self.checkpoint is not None:
265
+ self.logger.info("Checkpoint was provided. Restore ...")
266
+ trainer.resume_checkpoint(self.checkpoint)
267
+
268
+ # Call fit method
269
+ self.logger.info("Calling Trainer Fit")
270
+ trainer.fit(
271
+ epochs=self.run_conf["training"]["epochs"],
272
+ train_dataloader=train_dataloader,
273
+ val_dataloader=val_dataloader,
274
+ metric_init=self.get_wandb_init_dict(),
275
+ unfreeze_epoch=self.run_conf["training"]["unfreeze_epoch"],
276
+ eval_every=self.run_conf["training"].get("eval_every", 1),
277
+ )
278
+
279
+ # Select best model if not provided by early stopping
280
+ checkpoint_dir = Path(self.run_conf["logging"]["log_dir"]) / "checkpoints"
281
+ if not (checkpoint_dir / "model_best.pth").is_file():
282
+ shutil.copy(
283
+ checkpoint_dir / "latest_checkpoint.pth",
284
+ checkpoint_dir / "model_best.pth",
285
+ )
286
+
287
+ # At the end close logger
288
+ self.logger.info(f"Finished run {run.id}")
289
+ close_logger(self.logger)
290
+
291
+ return self.run_conf["logging"]["log_dir"]
292
+
293
+ def load_dataset_setup(self, dataset_path: Union[Path, str]) -> None:
294
+ """Load the configuration of the cell segmentation dataset.
295
+
296
+ The dataset must have a dataset_config.yaml file in their dataset path with the following entries:
297
+ * tissue_types: describing the present tissue types with corresponding integer
298
+ * nuclei_types: describing the present nuclei types with corresponding integer
299
+
300
+ Args:
301
+ dataset_path (Union[Path, str]): Path to dataset folder
302
+ """
303
+ dataset_config_path = Path(dataset_path) / "dataset_config.yaml"
304
+ with open(dataset_config_path, "r") as dataset_config_file:
305
+ yaml_config = yaml.safe_load(dataset_config_file)
306
+ self.dataset_config = dict(yaml_config)
307
+
308
+ def get_loss_fn(self, loss_fn_settings: dict) -> dict:
309
+ """Create a dictionary with loss functions for all branches
310
+
311
+ Branches: "nuclei_binary_map", "hv_map", "nuclei_type_map", "tissue_types"
312
+
313
+ Args:
314
+ loss_fn_settings (dict): Dictionary with the loss function settings. Structure
315
+ branch_name(str):
316
+ loss_name(str):
317
+ loss_fn(str): String matching to the loss functions defined in the LOSS_DICT (base_ml.base_loss)
318
+ weight(float): Weighting factor as float value
319
+ (optional) args: Optional parameters for initializing the loss function
320
+ arg_name: value
321
+
322
+ If a branch is not provided, the default settings (described below) are used.
323
+
324
+ For further information, please have a look at the file configs/examples/cell_segmentation/train_cellvit.yaml
325
+ under the section "loss"
326
+
327
+ Example:
328
+ nuclei_binary_map:
329
+ bce:
330
+ loss_fn: xentropy_loss
331
+ weight: 1
332
+ dice:
333
+ loss_fn: dice_loss
334
+ weight: 1
335
+
336
+ Returns:
337
+ dict: Dictionary with loss functions for each branch. Structure:
338
+ branch_name(str):
339
+ loss_name(str):
340
+ "loss_fn": Callable loss function
341
+ "weight": weight of the loss since in the end all losses of all branches are added together for backward pass
342
+ loss_name(str):
343
+ "loss_fn": Callable loss function
344
+ "weight": weight of the loss since in the end all losses of all branches are added together for backward pass
345
+ branch_name(str)
346
+ ...
347
+
348
+ Default loss dictionary:
349
+ nuclei_binary_map:
350
+ bce:
351
+ loss_fn: xentropy_loss
352
+ weight: 1
353
+ dice:
354
+ loss_fn: dice_loss
355
+ weight: 1
356
+ hv_map:
357
+ mse:
358
+ loss_fn: mse_loss_maps
359
+ weight: 1
360
+ msge:
361
+ loss_fn: msge_loss_maps
362
+ weight: 1
363
+ nuclei_type_map:
364
+ bce:
365
+ loss_fn: xentropy_loss
366
+ weight: 1
367
+ dice:
368
+ loss_fn: dice_loss
369
+ weight: 1
370
+ tissue_types:
371
+ ce:
372
+ loss_fn: nn.CrossEntropyLoss()
373
+ weight: 1
374
+ """
375
+ loss_fn_dict = {}
376
+ if "nuclei_binary_map" in loss_fn_settings.keys():
377
+ loss_fn_dict["nuclei_binary_map"] = {}
378
+ for loss_name, loss_sett in loss_fn_settings["nuclei_binary_map"].items():
379
+ parameters = loss_sett.get("args", {})
380
+ loss_fn_dict["nuclei_binary_map"][loss_name] = {
381
+ "loss_fn": retrieve_loss_fn(loss_sett["loss_fn"], **parameters),
382
+ "weight": loss_sett["weight"],
383
+ }
384
+ else:
385
+ loss_fn_dict["nuclei_binary_map"] = {
386
+ "bce": {"loss_fn": retrieve_loss_fn("xentropy_loss"), "weight": 1},
387
+ "dice": {"loss_fn": retrieve_loss_fn("dice_loss"), "weight": 1},
388
+ }
389
+ if "hv_map" in loss_fn_settings.keys():
390
+ loss_fn_dict["hv_map"] = {}
391
+ for loss_name, loss_sett in loss_fn_settings["hv_map"].items():
392
+ parameters = loss_sett.get("args", {})
393
+ loss_fn_dict["hv_map"][loss_name] = {
394
+ "loss_fn": retrieve_loss_fn(loss_sett["loss_fn"], **parameters),
395
+ "weight": loss_sett["weight"],
396
+ }
397
+ else:
398
+ loss_fn_dict["hv_map"] = {
399
+ "mse": {"loss_fn": retrieve_loss_fn("mse_loss_maps"), "weight": 1},
400
+ "msge": {"loss_fn": retrieve_loss_fn("msge_loss_maps"), "weight": 1},
401
+ }
402
+ if "nuclei_type_map" in loss_fn_settings.keys():
403
+ loss_fn_dict["nuclei_type_map"] = {}
404
+ for loss_name, loss_sett in loss_fn_settings["nuclei_type_map"].items():
405
+ parameters = loss_sett.get("args", {})
406
+ loss_fn_dict["nuclei_type_map"][loss_name] = {
407
+ "loss_fn": retrieve_loss_fn(loss_sett["loss_fn"], **parameters),
408
+ "weight": loss_sett["weight"],
409
+ }
410
+ else:
411
+ loss_fn_dict["nuclei_type_map"] = {
412
+ "bce": {"loss_fn": retrieve_loss_fn("xentropy_loss"), "weight": 1},
413
+ "dice": {"loss_fn": retrieve_loss_fn("dice_loss"), "weight": 1},
414
+ }
415
+ if "tissue_types" in loss_fn_settings.keys():
416
+ loss_fn_dict["tissue_types"] = {}
417
+ for loss_name, loss_sett in loss_fn_settings["tissue_types"].items():
418
+ parameters = loss_sett.get("args", {})
419
+ loss_fn_dict["tissue_types"][loss_name] = {
420
+ "loss_fn": retrieve_loss_fn(loss_sett["loss_fn"], **parameters),
421
+ "weight": loss_sett["weight"],
422
+ }
423
+ else:
424
+ loss_fn_dict["tissue_types"] = {
425
+ "ce": {"loss_fn": nn.CrossEntropyLoss(), "weight": 1},
426
+ }
427
+ if "regression_loss" in loss_fn_settings.keys():
428
+ loss_fn_dict["regression_map"] = {}
429
+ for loss_name, loss_sett in loss_fn_settings["regression_loss"].items():
430
+ parameters = loss_sett.get("args", {})
431
+ loss_fn_dict["regression_map"][loss_name] = {
432
+ "loss_fn": retrieve_loss_fn(loss_sett["loss_fn"], **parameters),
433
+ "weight": loss_sett["weight"],
434
+ }
435
+ elif "regression_loss" in self.run_conf["model"].keys():
436
+ loss_fn_dict["regression_map"] = {
437
+ "mse": {"loss_fn": retrieve_loss_fn("mse_loss_maps"), "weight": 1},
438
+ }
439
+ return loss_fn_dict
440
+
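+ # Example "loss" YAML override for a single branch (weights are illustrative;
+ # the loss_fn names below are taken from the defaults and must exist in
+ # LOSS_DICT from base_ml.base_loss):
+ #
+ # loss:
+ #   nuclei_type_map:
+ #     bce:
+ #       loss_fn: xentropy_loss
+ #       weight: 0.5
+ #     dice:
+ #       loss_fn: dice_loss
+ #       weight: 0.2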
441
+
442
+
443
+ def get_scheduler(self, scheduler_type: str, optimizer: Optimizer) -> _LRScheduler:
444
+ """Get the learning rate scheduler for CellViT
445
+
446
+ The configuration of the scheduler is given in the "training" -> "scheduler" section.
447
+ Currently, "constant", "exponential" and "cosine" schedulers are implemented.
448
+
449
+ Required parameters for implemented schedulers:
450
+ - "constant": None
451
+ - "exponential": gamma (optional, defaults to 0.95)
452
+ - "cosine": eta_min (optional, defaults to 1-e5)
453
+
454
+ Args:
455
+ scheduler_type (str): Type of scheduler as a string. Currently implemented:
456
+ - "constant" (lowering by a factor of ten after 25 epochs, increasing after 50, decreasimg again after 75)
457
+ - "exponential" (ExponentialLR with given gamma, gamma defaults to 0.95)
458
+ - "cosine" (CosineAnnealingLR, eta_min as parameter, defaults to 1-e5)
459
+ optimizer (Optimizer): Optimizer
460
+
461
+ Returns:
462
+ _LRScheduler: PyTorch Scheduler
463
+ """
464
+ implemented_schedulers = ["constant", "exponential", "cosine", "default"]
465
+ if scheduler_type.lower() not in implemented_schedulers:
466
+ self.logger.warning(
467
+ f"Unknown Scheduler - No scheduler from the list {implemented_schedulers} select. Using default scheduling."
468
+ )
469
+ if scheduler_type.lower() == "constant":
470
+ scheduler = SequentialLR(
471
+ optimizer=optimizer,
472
+ schedulers=[
473
+ ConstantLR(optimizer, factor=1, total_iters=25),
474
+ ConstantLR(optimizer, factor=0.1, total_iters=25),
475
+ ConstantLR(optimizer, factor=1, total_iters=25),
476
+ ConstantLR(optimizer, factor=0.1, total_iters=1000),
477
+ ],
478
+ milestones=[24, 49, 74],
479
+ )
480
+ elif scheduler_type.lower() == "exponential":
481
+ scheduler = ExponentialLR(
482
+ optimizer,
483
+ gamma=self.run_conf["training"]["scheduler"].get("gamma", 0.95),
484
+ )
485
+ elif scheduler_type.lower() == "cosine":
486
+ scheduler = CosineAnnealingLR(
487
+ optimizer,
488
+ T_max=self.run_conf["training"]["epochs"],
489
+ eta_min=self.run_conf["training"]["scheduler"].get("eta_min", 1e-5),
490
+ )
491
+ # elif scheduler_type.lower == "cosinewarmrestarts":
492
+ # scheduler = CosineAnnealingWarmRestarts(
493
+ # optimizer,
494
+ # T_0=self.run_conf["training"]["scheduler"]["T_0"],
495
+ # T_mult=self.run_conf["training"]["scheduler"]["T_mult"],
496
+ # eta_min=self.run_conf["training"]["scheduler"].get("eta_min", 1e-5)
497
+ # )
498
+ elif scheduler_type.lower() == "default":
499
+ scheduler = super().get_scheduler(optimizer)
500
+ return scheduler
501
+
502
+ def get_datasets(
503
+ self,
504
+ train_transforms: Callable = None,
505
+ val_transforms: Callable = None,
506
+ ) -> Tuple[Dataset, Dataset]:
507
+ """Retrieve training dataset and validation dataset
508
+
509
+ Args:
510
+ train_transforms (Callable, optional): PyTorch transformations for train set. Defaults to None.
511
+ val_transforms (Callable, optional): PyTorch transformations for validation set. Defaults to None.
512
+
513
+ Returns:
514
+ Tuple[Dataset, Dataset]: Training dataset and validation dataset
515
+ """
516
+ if (
517
+ "val_split" in self.run_conf["data"]
518
+ and "val_folds" in self.run_conf["data"]
519
+ ):
520
+ raise RuntimeError(
521
+ "Provide either val_splits or val_folds in configuration file, not both."
522
+ )
523
+ if (
524
+ "val_split" not in self.run_conf["data"]
525
+ and "val_folds" not in self.run_conf["data"]
526
+ ):
527
+ raise RuntimeError(
528
+ "Provide either val_split or val_folds in configuration file, one is necessary."
529
+ )
530
537
+ if "regression_loss" in self.run_conf["model"].keys():
538
+ self.run_conf["data"]["regression_loss"] = True
539
+
540
+ full_dataset = select_dataset(
541
+ dataset_name="pannuke",
542
+ split="train",
543
+ dataset_config=self.run_conf["data"],
544
+ transforms=train_transforms,
545
+ )
546
+ if "val_split" in self.run_conf["data"]:
547
+ generator_split = torch.Generator().manual_seed(
548
+ self.default_conf["random_seed"]
549
+ )
550
+ val_splits = float(self.run_conf["data"]["val_split"])
551
+ train_dataset, val_dataset = torch.utils.data.random_split(
552
+ full_dataset,
553
+ lengths=[1 - val_splits, val_splits],
554
+ generator=generator_split,
555
+ )
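+ # deepcopy the wrapped dataset so that setting validation transforms below
+ # does not also change the transforms of the shared train subset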
556
+ val_dataset.dataset = copy.deepcopy(full_dataset)
557
+ val_dataset.dataset.set_transforms(val_transforms)
558
+ else:
559
+ train_dataset = full_dataset
560
+ val_dataset = select_dataset(
561
+ dataset_name="pannuke",
562
+ split="validation",
563
+ dataset_config=self.run_conf["data"],
564
+ transforms=val_transforms,
565
+ )
566
+
567
+ return train_dataset, val_dataset
568
+
569
+ def get_train_model(
570
+ self,
571
+ pretrained_encoder: Union[Path, str] = None,
572
+ pretrained_model: Union[Path, str] = None,
573
+ backbone_type: str = "default",
574
+ shared_decoders: bool = False,
575
+ regression_loss: bool = False,
576
+ **kwargs,
577
+ ) -> CellViT:
578
+ """Return the CellViT training model
579
+
580
+ Args:
581
+ pretrained_encoder (Union[Path, str]): Path to a pretrained encoder. Defaults to None.
582
+ pretrained_model (Union[Path, str], optional): Path to a pretrained model. Defaults to None.
583
+ backbone_type (str, optional): Backbone type. Currently supported: default, UniRepLKNet, ViT256, SAM-B, SAM-L, SAM-H. Defaults to "default".
584
+ shared_decoders (bool, optional): If shared skip decoders should be used. Defaults to False.
585
+ regression_loss (bool, optional): If regression loss is used. Defaults to False
586
+
587
+ Returns:
588
+ CellViT: CellViT training model with given setup
589
+ """
590
+ # reseed needed, due to subprocess seeding compatibility
591
+ self.seed_run(self.default_conf["random_seed"])
592
+
593
+ # check for backbones
594
+ implemented_backbones = ["default", "UniRepLKNet", "vit256", "sam-b", "sam-l", "sam-h"]
595
+ if backbone_type.lower() not in implemented_backbones:
596
+ raise NotImplementedError(
597
+ f"Unknown Backbone Type - Currently supported are: {implemented_backbones}"
598
+ )
599
+ if backbone_type.lower() == "default":
600
+ model_class = CellViT
601
+ model = model_class(
+ model256_path=pretrained_encoder,
+ num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"],
+ num_tissue_classes=self.run_conf["data"]["num_tissue_classes"],
+ in_channels=self.run_conf["model"].get("input_channels", 3),
+ dropout=self.run_conf["training"].get("drop_rate", 0),
+ drop_path_rate=self.run_conf["training"].get("drop_path_rate", 0.1),
+ )
+ model.load_pretrained_encoder(model.model256_path)
620
+
621
+ if pretrained_model is not None:
622
+ self.logger.info(
623
+ f"Loading pretrained CellViT model from path: {pretrained_model}"
624
+ )
625
+ cellvit_pretrained = torch.load(pretrained_model)
626
+ self.logger.info(model.load_state_dict(cellvit_pretrained, strict=True))
627
+ self.logger.info("Loaded CellViT model")
628
+
629
+ self.logger.info(f"\nModel: {model}")
630
+ print(f"\nModel: {model}")
631
+ model = model.to("cuda")
632
+ self.logger.info(
633
+ f"\n{summary(model, input_size=(1, 3, 256, 256), device='cuda')}"
634
+ )
635
+ # from thop import profile
636
+ # input_size=torch.randn(1, 3, 256, 256)
637
+ # self.logger.info(
638
+ # f"\n{profile(model, inputs=(input_size,))}"
639
+ # )
640
+ #self.logger.info(f"\n{stat(model, (3, 256, 256))}")
641
+ # count model parameters (trainable vs. frozen)
+ total_params = 0
+ trainable_params = 0
+ non_trainable_params = 0
+ for param in model.parameters():
+ multvalue = np.prod(param.size())
+ total_params += multvalue
+ if param.requires_grad:
+ trainable_params += multvalue # trainable parameter count
+ else:
+ non_trainable_params += multvalue # non-trainable parameter count
+
+ print(f'Total params: {total_params}')
+ print(f'Trainable params: {trainable_params}')
+ print(f'Non-trainable params: {non_trainable_params}')
655
+
656
+ return model
657
+
658
+ def get_wandb_init_dict(self) -> dict:
659
+ pass
660
+
661
+ def get_transforms(
662
+ self, transform_settings: dict, input_shape: int = 256
663
+ ) -> Tuple[Callable, Callable]:
664
+ """Get Transformations (Albumentation Transformations). Return both training and validation transformations.
665
+
666
+ The transformation settings are given in the following format:
667
+ key: dict with parameters
668
+ Example:
669
+ colorjitter:
670
+ p: 0.1
671
+ scale_setting: 0.5
672
+ scale_color: 0.1
673
+
674
+ Further information on how to set up the dictionary, along with default (recommended) values, is given here:
675
+ configs/examples/cell_segmentation/train_cellvit.yaml
676
+
677
+ Training Transformations:
678
+ Implemented are:
679
+ - A.RandomRotate90: Key in transform_settings: randomrotate90, parameters: p
680
+ - A.HorizontalFlip: Key in transform_settings: horizontalflip, parameters: p
681
+ - A.VerticalFlip: Key in transform_settings: verticalflip, parameters: p
682
+ - A.Downscale: Key in transform_settings: downscale, parameters: p, scale
683
+ - A.Blur: Key in transform_settings: blur, parameters: p, blur_limit
684
+ - A.GaussNoise: Key in transform_settings: gaussnoise, parameters: p, var_limit
685
+ - A.ColorJitter: Key in transform_settings: colorjitter, parameters: p, scale_setting, scale_color
686
+ - A.Superpixels: Key in transform_settings: superpixels, parameters: p
687
+ - A.ZoomBlur: Key in transform_settings: zoomblur, parameters: p
688
+ - A.RandomSizedCrop: Key in transform_settings: randomsizedcrop, parameters: p
689
+ - A.ElasticTransform: Key in transform_settings: elastictransform, parameters: p
690
+ Always implemented at the end of the pipeline:
691
+ - A.Normalize with given mean (default: (0.5, 0.5, 0.5)) and std (default: (0.5, 0.5, 0.5))
692
+
693
+ Validation Transformations:
694
+ A.Normalize with given mean (default: (0.5, 0.5, 0.5)) and std (default: (0.5, 0.5, 0.5))
695
+
696
+ Args:
697
+ transform_settings (dict): dictionary with the transformation settings.
698
+ input_shape (int, optional): Input shape of the images to be used. Defaults to 256.
699
+
700
+ Returns:
701
+ Tuple[Callable, Callable]: Train Transformations, Validation Transformations
702
+
703
+ """
704
+ transform_list = []
705
+ transform_settings = {k.lower(): v for k, v in transform_settings.items()}
706
+ if "RandomRotate90".lower() in transform_settings:
707
+ p = transform_settings["randomrotate90"]["p"]
708
+ if p > 0 and p <= 1:
709
+ transform_list.append(A.RandomRotate90(p=p))
710
+ if "HorizontalFlip".lower() in transform_settings.keys():
711
+ p = transform_settings["horizontalflip"]["p"]
712
+ if p > 0 and p <= 1:
713
+ transform_list.append(A.HorizontalFlip(p=p))
714
+ if "VerticalFlip".lower() in transform_settings:
715
+ p = transform_settings["verticalflip"]["p"]
716
+ if p > 0 and p <= 1:
717
+ transform_list.append(A.VerticalFlip(p=p))
718
+ if "Downscale".lower() in transform_settings:
719
+ p = transform_settings["downscale"]["p"]
720
+ scale = transform_settings["downscale"]["scale"]
721
+ if p > 0 and p <= 1:
722
+ transform_list.append(
723
+ A.Downscale(p=p, scale_max=scale, scale_min=scale)
724
+ )
725
+ if "Blur".lower() in transform_settings:
726
+ p = transform_settings["blur"]["p"]
727
+ blur_limit = transform_settings["blur"]["blur_limit"]
728
+ if p > 0 and p <= 1:
729
+ transform_list.append(A.Blur(p=p, blur_limit=blur_limit))
730
+ if "GaussNoise".lower() in transform_settings:
731
+ p = transform_settings["gaussnoise"]["p"]
732
+ var_limit = transform_settings["gaussnoise"]["var_limit"]
733
+ if p > 0 and p <= 1:
734
+ transform_list.append(A.GaussNoise(p=p, var_limit=var_limit))
735
+ if "ColorJitter".lower() in transform_settings:
736
+ p = transform_settings["colorjitter"]["p"]
737
+ scale_setting = transform_settings["colorjitter"]["scale_setting"]
738
+ scale_color = transform_settings["colorjitter"]["scale_color"]
739
+ if p > 0 and p <= 1:
740
+ transform_list.append(
741
+ A.ColorJitter(
742
+ p=p,
743
+ brightness=scale_setting,
744
+ contrast=scale_setting,
745
+ saturation=scale_color,
746
+ hue=scale_color / 2,
747
+ )
748
+ )
749
+ if "Superpixels".lower() in transform_settings:
750
+ p = transform_settings["superpixels"]["p"]
751
+ if p > 0 and p <= 1:
752
+ transform_list.append(
753
+ A.Superpixels(
754
+ p=p,
755
+ p_replace=0.1,
756
+ n_segments=200,
757
+ max_size=int(input_shape / 2),
758
+ )
759
+ )
760
+ if "ZoomBlur".lower() in transform_settings:
761
+ p = transform_settings["zoomblur"]["p"]
762
+ if p > 0 and p <= 1:
763
+ transform_list.append(A.ZoomBlur(p=p, max_factor=1.05))
764
+ if "RandomSizedCrop".lower() in transform_settings:
765
+ p = transform_settings["randomsizedcrop"]["p"]
766
+ if p > 0 and p <= 1:
767
+ transform_list.append(
768
+ A.RandomSizedCrop(
769
+ min_max_height=(input_shape / 2, input_shape),
770
+ height=input_shape,
771
+ width=input_shape,
772
+ p=p,
773
+ )
774
+ )
775
+ if "ElasticTransform".lower() in transform_settings:
776
+ p = transform_settings["elastictransform"]["p"]
777
+ if p > 0 and p <= 1:
778
+ transform_list.append(
779
+ A.ElasticTransform(p=p, sigma=25, alpha=0.5, alpha_affine=15)
780
+ )
781
+
782
+ if "normalize" in transform_settings:
783
+ mean = transform_settings["normalize"].get("mean", (0.5, 0.5, 0.5))
784
+ std = transform_settings["normalize"].get("std", (0.5, 0.5, 0.5))
785
+ else:
786
+ mean = (0.5, 0.5, 0.5)
787
+ std = (0.5, 0.5, 0.5)
788
+ transform_list.append(A.Normalize(mean=mean, std=std))
789
+
790
+ train_transforms = A.Compose(transform_list)
791
+ val_transforms = A.Compose([A.Normalize(mean=mean, std=std)])
792
+
793
+ return train_transforms, val_transforms
794
+
795
+ def get_sampler(
796
+ self, train_dataset: CellDataset, strategy: str = "random", gamma: float = 1
797
+ ) -> Sampler:
798
+ """Return the sampler (either RandomSampler or WeightedRandomSampler)
799
+
800
+ Args:
801
+ train_dataset (CellDataset): Dataset for training
802
+ strategy (str, optional): Sampling strategy. Defaults to "random" (random sampling).
803
+ Implemented are "random", "cell", "tissue", "cell+tissue".
804
+ gamma (float, optional): Gamma scaling factor, between 0 and 1.
805
+ 1 means total balancing, 0 means original weights. Defaults to 1.
806
+
807
+ Raises:
808
+ NotImplementedError: Not implemented sampler is selected
809
+
810
+ Returns:
811
+ Sampler: Sampler for training
812
+ """
813
+ if strategy.lower() == "random":
814
+ sampling_generator = torch.Generator().manual_seed(
815
+ self.default_conf["random_seed"]
816
+ )
817
+ sampler = RandomSampler(train_dataset, generator=sampling_generator)
818
+ self.logger.info("Using RandomSampler")
819
+ else:
820
+ # this solution is not accurate when a subset is used since the weights are calculated on the whole training dataset
821
+ if isinstance(train_dataset, Subset):
822
+ ds = train_dataset.dataset
823
+ else:
824
+ ds = train_dataset
825
+ ds.load_cell_count()
826
+ if strategy.lower() == "cell":
827
+ weights = ds.get_sampling_weights_cell(gamma)
828
+ elif strategy.lower() == "tissue":
829
+ weights = ds.get_sampling_weights_tissue(gamma)
830
+ elif strategy.lower() == "cell+tissue":
831
+ weights = ds.get_sampling_weights_cell_tissue(gamma)
832
+ else:
833
+ raise NotImplementedError(
834
+ "Unknown sampling strategy - Implemented are cell, tissue and cell+tissue"
835
+ )
836
+
837
+ if isinstance(train_dataset, Subset):
838
+ weights = torch.Tensor([weights[i] for i in train_dataset.indices])
839
+
840
+ sampling_generator = torch.Generator().manual_seed(
841
+ self.default_conf["random_seed"]
842
+ )
843
+ sampler = WeightedRandomSampler(
844
+ weights=weights,
845
+ num_samples=len(train_dataset),
846
+ replacement=True,
847
+ generator=sampling_generator,
848
+ )
849
+
850
+ self.logger.info(f"Using Weighted Sampling with strategy: {strategy}")
851
+ self.logger.info(f"Unique-Weights: {torch.unique(weights)}")
852
+
853
+ return sampler
854
+
855
+ def get_trainer(self) -> BaseTrainer:
856
+ """Return Trainer matching to this network
857
+
858
+ Returns:
859
+ BaseTrainer: Trainer
860
+ """
861
+ return CellViTTrainer
cell_segmentation/experiments/experiment_cpp_net_pannuke.py ADDED
@@ -0,0 +1,296 @@
1
+ # -*- coding: utf-8 -*-
2
+ # CPP-Net Experiment Class
3
+ #
4
+ # @ Fabian Hörst, [email protected]
5
+ # Institute for Artifical Intelligence in Medicine,
6
+ # University Medicine Essen
7
+
8
+ import inspect
9
+ import os
10
+ import sys
11
+
12
+
13
+ from base_ml.base_trainer import BaseTrainer
14
+
15
+ currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
16
+ parentdir = os.path.dirname(currentdir)
17
+ sys.path.insert(0, parentdir)
18
+
19
+ from pathlib import Path
20
+ from typing import Union
21
+
22
+ import torch
23
+ import torch.nn as nn
24
+ from torchinfo import summary
25
+
26
+ from base_ml.base_loss import retrieve_loss_fn
27
+ from cell_segmentation.experiments.experiment_stardist_pannuke import (
28
+ ExperimentCellViTStarDist,
29
+ )
30
+ from cell_segmentation.trainer.trainer_cpp_net import CellViTCPPTrainer
31
+ from models.segmentation.cell_segmentation.cellvit_cpp_net import (
32
+ CellViT256CPP,
33
+ CellViTCPP,
34
+ CellViTSAMCPP,
35
+ )
36
+
37
+
38
+ class ExperimentCellViTCPP(ExperimentCellViTStarDist):
39
+ def get_loss_fn(self, loss_fn_settings: dict) -> dict:
40
+ """Create a dictionary with loss functions for all branches
41
+
42
+ Branches: "dist_map", "stardist_map", "stardist_map_refined", "nuclei_type_map", "tissue_types"
43
+
44
+ Args:
45
+ loss_fn_settings (dict): Dictionary with the loss function settings. Structure
46
+ branch_name(str):
47
+ loss_name(str):
48
+ loss_fn(str): String matching to the loss functions defined in the LOSS_DICT (base_ml.base_loss)
49
+ weight(float): Weighting factor as float value
50
+ (optional) args: Optional parameters for initializing the loss function
51
+ arg_name: value
52
+
53
+        If a branch is not provided, the default settings (described below) are used.
54
+
55
+ For further information, please have a look at the file configs/examples/cell_segmentation/train_cellvit.yaml
56
+ under the section "loss"
57
+
58
+ Example:
59
+ nuclei_type_map:
60
+ bce:
61
+ loss_fn: xentropy_loss
62
+ weight: 1
63
+ dice:
64
+ loss_fn: dice_loss
65
+ weight: 1
66
+
67
+ Returns:
68
+ dict: Dictionary with loss functions for each branch. Structure:
69
+ branch_name(str):
70
+ loss_name(str):
71
+ "loss_fn": Callable loss function
72
+                "weight": weight of the loss, since the losses of all branches are summed for the backward pass
73
+ loss_name(str):
74
+ "loss_fn": Callable loss function
75
+                "weight": weight of the loss, since the losses of all branches are summed for the backward pass
76
+ branch_name(str)
77
+ ...
78
+
79
+ Default loss dictionary:
80
+ dist_map:
81
+ bceweighted:
82
+ loss_fn: BCEWithLogitsLoss
83
+ weight: 1
84
+ stardist_map:
85
+ L1LossWeighted:
86
+ loss_fn: L1LossWeighted
87
+ weight: 1
88
+            nuclei_type_map:
89
+ bce:
90
+ loss_fn: xentropy_loss
91
+ weight: 1
92
+ dice:
93
+ loss_fn: dice_loss
94
+ weight: 1
95
+            tissue_types has no default loss and may be skipped
96
+ """
97
+ loss_fn_dict = {}
98
+ if "dist_map" in loss_fn_settings.keys():
99
+ loss_fn_dict["dist_map"] = {}
100
+ for loss_name, loss_sett in loss_fn_settings["dist_map"].items():
101
+ parameters = loss_sett.get("args", {})
102
+ loss_fn_dict["dist_map"][loss_name] = {
103
+ "loss_fn": retrieve_loss_fn(loss_sett["loss_fn"], **parameters),
104
+ "weight": loss_sett["weight"],
105
+ }
106
+ else:
107
+ loss_fn_dict["dist_map"] = {
108
+ "bceweighted": {
109
+ "loss_fn": retrieve_loss_fn("BCEWithLogitsLoss"),
110
+ "weight": 1,
111
+ },
112
+ }
113
+ if "stardist_map" in loss_fn_settings.keys():
114
+ loss_fn_dict["stardist_map"] = {}
115
+ for loss_name, loss_sett in loss_fn_settings["stardist_map"].items():
116
+ parameters = loss_sett.get("args", {})
117
+ loss_fn_dict["stardist_map"][loss_name] = {
118
+ "loss_fn": retrieve_loss_fn(loss_sett["loss_fn"], **parameters),
119
+ "weight": loss_sett["weight"],
120
+ }
121
+ else:
122
+ loss_fn_dict["stardist_map"] = {
123
+ "L1LossWeighted": {
124
+ "loss_fn": retrieve_loss_fn("L1LossWeighted"),
125
+ "weight": 1,
126
+ },
127
+ }
128
+ if "stardist_map_refined" in loss_fn_settings.keys():
129
+ loss_fn_dict["stardist_map_refined"] = {}
130
+ for loss_name, loss_sett in loss_fn_settings[
131
+ "stardist_map_refined"
132
+ ].items():
133
+ parameters = loss_sett.get("args", {})
134
+ loss_fn_dict["stardist_map_refined"][loss_name] = {
135
+ "loss_fn": retrieve_loss_fn(loss_sett["loss_fn"], **parameters),
136
+ "weight": loss_sett["weight"],
137
+ }
138
+ else:
139
+ loss_fn_dict["stardist_map_refined"] = {
140
+ "L1LossWeighted": {
141
+ "loss_fn": retrieve_loss_fn("L1LossWeighted"),
142
+ "weight": 1,
143
+ },
144
+ }
145
+ if "nuclei_type_map" in loss_fn_settings.keys():
146
+ loss_fn_dict["nuclei_type_map"] = {}
147
+ for loss_name, loss_sett in loss_fn_settings["nuclei_type_map"].items():
148
+ parameters = loss_sett.get("args", {})
149
+ loss_fn_dict["nuclei_type_map"][loss_name] = {
150
+ "loss_fn": retrieve_loss_fn(loss_sett["loss_fn"], **parameters),
151
+ "weight": loss_sett["weight"],
152
+ }
153
+ else:
154
+ loss_fn_dict["nuclei_type_map"] = {
155
+ "bce": {"loss_fn": retrieve_loss_fn("xentropy_loss"), "weight": 1},
156
+ "dice": {"loss_fn": retrieve_loss_fn("dice_loss"), "weight": 1},
157
+ }
158
+ if "tissue_types" in loss_fn_settings.keys():
159
+ loss_fn_dict["tissue_types"] = {}
160
+ for loss_name, loss_sett in loss_fn_settings["tissue_types"].items():
161
+ parameters = loss_sett.get("args", {})
162
+ loss_fn_dict["tissue_types"][loss_name] = {
163
+ "loss_fn": retrieve_loss_fn(loss_sett["loss_fn"], **parameters),
164
+ "weight": loss_sett["weight"],
165
+ }
166
+ # skip default tissue loss!
167
+ return loss_fn_dict
168
+
169
+ def get_train_model(
170
+ self,
171
+ pretrained_encoder: Union[Path, str] = None,
172
+ pretrained_model: Union[Path, str] = None,
173
+ backbone_type: str = "default",
174
+ shared_decoders: bool = False,
175
+ **kwargs,
176
+ ) -> nn.Module:
177
+        """Return the CellViTCPP training model
178
+
179
+ Args:
180
+ pretrained_encoder (Union[Path, str]): Path to a pretrained encoder. Defaults to None.
181
+ pretrained_model (Union[Path, str], optional): Path to a pretrained model. Defaults to None.
182
+            backbone_type (str, optional): Backbone type. Currently supported: default, ViT256, SAM-B, SAM-L, SAM-H. Defaults to "default".
183
+ shared_decoders (bool, optional): If shared skip decoders should be used. Defaults to False.
184
+
185
+ Returns:
186
+            nn.Module: CPP-Net training model with given setup
187
+ """
188
+ # reseed needed, due to subprocess seeding compatibility
189
+ self.seed_run(self.default_conf["random_seed"])
190
+
191
+ # check for backbones
192
+ implemented_backbones = [
193
+ "default",
194
+ "vit256",
195
+ "sam-b",
196
+ "sam-l",
197
+ "sam-h",
198
+ ]
199
+ if backbone_type.lower() not in implemented_backbones:
200
+ raise NotImplementedError(
201
+ f"Unknown Backbone Type - Currently supported are: {implemented_backbones}"
202
+ )
203
+ if backbone_type.lower() == "default":
204
+ if shared_decoders:
205
+ raise NotImplementedError(
206
+                    "Shared decoders are not implemented for CPP-Net"
207
+ )
208
+ else:
209
+ model_class = CellViTCPP
210
+ model = model_class(
211
+ num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"],
212
+ num_tissue_classes=self.run_conf["data"]["num_tissue_classes"],
213
+ embed_dim=self.run_conf["model"]["embed_dim"],
214
+ input_channels=self.run_conf["model"].get("input_channels", 3),
215
+ depth=self.run_conf["model"]["depth"],
216
+ num_heads=self.run_conf["model"]["num_heads"],
217
+ extract_layers=self.run_conf["model"]["extract_layers"],
218
+ drop_rate=self.run_conf["training"].get("drop_rate", 0),
219
+ attn_drop_rate=self.run_conf["training"].get("attn_drop_rate", 0),
220
+ drop_path_rate=self.run_conf["training"].get("drop_path_rate", 0),
221
+ nrays=self.run_conf["model"].get("nrays", 32),
222
+ )
223
+
224
+ if pretrained_model is not None:
225
+ self.logger.info(
226
+ f"Loading pretrained CellViT model from path: {pretrained_model}"
227
+ )
228
+ cellvit_pretrained = torch.load(pretrained_model)
229
+ self.logger.info(model.load_state_dict(cellvit_pretrained, strict=True))
230
+ self.logger.info("Loaded CellViT model")
231
+
232
+ if backbone_type.lower() == "vit256":
233
+ if shared_decoders:
234
+ raise NotImplementedError(
235
+                    "Shared decoders are not implemented for CPP-Net"
236
+ )
237
+ else:
238
+ model_class = CellViT256CPP
239
+ model = model_class(
240
+ model256_path=pretrained_encoder,
241
+ num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"],
242
+ num_tissue_classes=self.run_conf["data"]["num_tissue_classes"],
243
+ drop_rate=self.run_conf["training"].get("drop_rate", 0),
244
+ attn_drop_rate=self.run_conf["training"].get("attn_drop_rate", 0),
245
+ drop_path_rate=self.run_conf["training"].get("drop_path_rate", 0),
246
+ nrays=self.run_conf["model"].get("nrays", 32),
247
+ )
248
+ model.load_pretrained_encoder(model.model256_path)
249
+ if pretrained_model is not None:
250
+ self.logger.info(
251
+ f"Loading pretrained CellViT model from path: {pretrained_model}"
252
+ )
253
+ cellvit_pretrained = torch.load(pretrained_model, map_location="cpu")
254
+ self.logger.info(model.load_state_dict(cellvit_pretrained, strict=True))
255
+ model.freeze_encoder()
256
+ self.logger.info("Loaded CellVit256 model")
257
+ if backbone_type.lower() in ["sam-b", "sam-l", "sam-h"]:
258
+ if shared_decoders:
259
+ raise NotImplementedError(
260
+                    "Shared decoders are not implemented for CPP-Net"
261
+ )
262
+ else:
263
+ model_class = CellViTSAMCPP
264
+ model = model_class(
265
+ model_path=pretrained_encoder,
266
+ num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"],
267
+ num_tissue_classes=self.run_conf["data"]["num_tissue_classes"],
268
+ vit_structure=backbone_type,
269
+ drop_rate=self.run_conf["training"].get("drop_rate", 0),
270
+ nrays=self.run_conf["model"].get("nrays", 32),
271
+ )
272
+ model.load_pretrained_encoder(model.model_path)
273
+ if pretrained_model is not None:
274
+ self.logger.info(
275
+ f"Loading pretrained CellViT model from path: {pretrained_model}"
276
+ )
277
+ cellvit_pretrained = torch.load(pretrained_model, map_location="cpu")
278
+ self.logger.info(model.load_state_dict(cellvit_pretrained, strict=True))
279
+ model.freeze_encoder()
280
+ self.logger.info(f"Loaded CellViT-SAM model with backbone: {backbone_type}")
281
+
282
+ self.logger.info(f"\nModel: {model}")
283
+ model = model.to("cpu")
284
+ self.logger.info(
285
+ f"\n{summary(model, input_size=(1, 3, 256, 256), device='cpu')}"
286
+ )
287
+
288
+ return model
289
+
290
+ def get_trainer(self) -> BaseTrainer:
291
+        """Return the trainer matching this network
292
+
293
+ Returns:
294
+ BaseTrainer: Trainer
295
+ """
296
+ return CellViTCPPTrainer
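
For reference, a sketch of a loss_fn_settings dictionary covering all four CPP-Net branches handled by get_loss_fn above (the loss names must match entries in LOSS_DICT from base_ml.base_loss; the weights are arbitrary example values, and the optional "args" key from the docstring is omitted here):

loss_fn_settings = {
    "dist_map": {
        "bceweighted": {"loss_fn": "BCEWithLogitsLoss", "weight": 1},
    },
    "stardist_map": {
        "L1LossWeighted": {"loss_fn": "L1LossWeighted", "weight": 1},
    },
    "stardist_map_refined": {
        "L1LossWeighted": {"loss_fn": "L1LossWeighted", "weight": 1},
    },
    "nuclei_type_map": {
        "bce": {"loss_fn": "xentropy_loss", "weight": 1},
        "dice": {"loss_fn": "dice_loss", "weight": 1},
    },
    # "tissue_types" has no default and is simply skipped when absent
}
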
cell_segmentation/experiments/experiment_stardist_pannuke.py ADDED
@@ -0,0 +1,392 @@
1
+ # -*- coding: utf-8 -*-
2
+ # StarDist Experiment Class
3
+ #
4
+ # @ Fabian Hörst, [email protected]
5
+ # Institute for Artificial Intelligence in Medicine,
6
+ # University Medicine Essen
7
+
8
+ import inspect
9
+ import os
10
+ import sys
11
+
12
+ import yaml
13
+
14
+ from base_ml.base_trainer import BaseTrainer
15
+
16
+ currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
17
+ parentdir = os.path.dirname(currentdir)
18
+ sys.path.insert(0, parentdir)
19
+
20
+ from pathlib import Path
21
+ from typing import Callable, Tuple, Union
22
+
23
+ import torch
24
+ import torch.nn as nn
25
+ from torch.optim import Optimizer
26
+ from torch.optim.lr_scheduler import (
27
+ ConstantLR,
28
+ CosineAnnealingLR,
29
+ ExponentialLR,
30
+ ReduceLROnPlateau,
31
+ SequentialLR,
32
+ _LRScheduler,
33
+ )
34
+ from torch.utils.data import Dataset
35
+ from torchinfo import summary
36
+
37
+ from base_ml.base_loss import retrieve_loss_fn
38
+ from cell_unireplknet.cell_segmentation.experiments.experiment_cellvit_pannuke_origin import (
39
+ ExperimentCellVitPanNuke,
40
+ )
41
+ from cell_segmentation.trainer.trainer_stardist import CellViTStarDistTrainer
42
+ from models.segmentation.cell_segmentation.cellvit_stardist import (
43
+ CellViTStarDist,
44
+ CellViT256StarDist,
45
+ CellViTSAMStarDist,
46
+ )
47
+ from models.segmentation.cell_segmentation.cellvit_stardist_shared import (
48
+ CellViTStarDistShared,
49
+ CellViT256StarDistShared,
50
+ CellViTSAMStarDistShared,
51
+ )
52
+ from models.segmentation.cell_segmentation.cpp_net_stardist_rn50 import StarDistRN50
53
+
54
+
55
+ class ExperimentCellViTStarDist(ExperimentCellVitPanNuke):
56
+ def load_dataset_setup(self, dataset_path: Union[Path, str]) -> None:
57
+ """Load the configuration of the PanNuke cell segmentation dataset.
58
+
59
+        The dataset must provide a dataset_config.yaml file in its dataset path with the following entries:
60
+        * tissue_types: describing the present tissue types with their corresponding integer labels
61
+        * nuclei_types: describing the present nuclei types with their corresponding integer labels
62
+
63
+ Args:
64
+ dataset_path (Union[Path, str]): Path to dataset folder
65
+ """
66
+ dataset_config_path = Path(dataset_path) / "dataset_config.yaml"
67
+ with open(dataset_config_path, "r") as dataset_config_file:
68
+ yaml_config = yaml.safe_load(dataset_config_file)
69
+ self.dataset_config = dict(yaml_config)
70
+
71
+ def get_loss_fn(self, loss_fn_settings: dict) -> dict:
72
+ """Create a dictionary with loss functions for all branches
73
+
74
+ Branches: "dist_map", "stardist_map", "nuclei_type_map", "tissue_types"
75
+
76
+ Args:
77
+ loss_fn_settings (dict): Dictionary with the loss function settings. Structure
78
+ branch_name(str):
79
+ loss_name(str):
80
+ loss_fn(str): String matching to the loss functions defined in the LOSS_DICT (base_ml.base_loss)
81
+ weight(float): Weighting factor as float value
82
+ (optional) args: Optional parameters for initializing the loss function
83
+ arg_name: value
84
+
85
+        If a branch is not provided, the default settings (described below) are used.
86
+
87
+ For further information, please have a look at the file configs/examples/cell_segmentation/train_cellvit.yaml
88
+ under the section "loss"
89
+
90
+ Example:
91
+ nuclei_type_map:
92
+ bce:
93
+ loss_fn: xentropy_loss
94
+ weight: 1
95
+ dice:
96
+ loss_fn: dice_loss
97
+ weight: 1
98
+
99
+ Returns:
100
+ dict: Dictionary with loss functions for each branch. Structure:
101
+ branch_name(str):
102
+ loss_name(str):
103
+ "loss_fn": Callable loss function
104
+                "weight": weight of the loss, since the losses of all branches are summed for the backward pass
105
+ loss_name(str):
106
+ "loss_fn": Callable loss function
107
+                "weight": weight of the loss, since the losses of all branches are summed for the backward pass
108
+ branch_name(str)
109
+ ...
110
+
111
+ Default loss dictionary:
112
+ dist_map:
113
+ bceweighted:
114
+ loss_fn: BCEWithLogitsLoss
115
+ weight: 1
116
+ stardist_map:
117
+ L1LossWeighted:
118
+ loss_fn: L1LossWeighted
119
+ weight: 1
120
+            nuclei_type_map:
121
+ bce:
122
+ loss_fn: xentropy_loss
123
+ weight: 1
124
+ dice:
125
+ loss_fn: dice_loss
126
+ weight: 1
127
+            tissue_types has no default loss and may be skipped
128
+ """
129
+ loss_fn_dict = {}
130
+ if "dist_map" in loss_fn_settings.keys():
131
+ loss_fn_dict["dist_map"] = {}
132
+ for loss_name, loss_sett in loss_fn_settings["dist_map"].items():
133
+ parameters = loss_sett.get("args", {})
134
+ loss_fn_dict["dist_map"][loss_name] = {
135
+ "loss_fn": retrieve_loss_fn(loss_sett["loss_fn"], **parameters),
136
+ "weight": loss_sett["weight"],
137
+ }
138
+ else:
139
+ loss_fn_dict["dist_map"] = {
140
+ "bceweighted": {
141
+ "loss_fn": retrieve_loss_fn("BCEWithLogitsLoss"),
142
+ "weight": 1,
143
+ },
144
+ }
145
+ if "stardist_map" in loss_fn_settings.keys():
146
+ loss_fn_dict["stardist_map"] = {}
147
+ for loss_name, loss_sett in loss_fn_settings["stardist_map"].items():
148
+ parameters = loss_sett.get("args", {})
149
+ loss_fn_dict["stardist_map"][loss_name] = {
150
+ "loss_fn": retrieve_loss_fn(loss_sett["loss_fn"], **parameters),
151
+ "weight": loss_sett["weight"],
152
+ }
153
+ else:
154
+ loss_fn_dict["stardist_map"] = {
155
+ "L1LossWeighted": {
156
+ "loss_fn": retrieve_loss_fn("L1LossWeighted"),
157
+ "weight": 1,
158
+ },
159
+ }
160
+ if "nuclei_type_map" in loss_fn_settings.keys():
161
+ loss_fn_dict["nuclei_type_map"] = {}
162
+ for loss_name, loss_sett in loss_fn_settings["nuclei_type_map"].items():
163
+ parameters = loss_sett.get("args", {})
164
+ loss_fn_dict["nuclei_type_map"][loss_name] = {
165
+ "loss_fn": retrieve_loss_fn(loss_sett["loss_fn"], **parameters),
166
+ "weight": loss_sett["weight"],
167
+ }
168
+ else:
169
+ loss_fn_dict["nuclei_type_map"] = {
170
+ "bce": {"loss_fn": retrieve_loss_fn("xentropy_loss"), "weight": 1},
171
+ "dice": {"loss_fn": retrieve_loss_fn("dice_loss"), "weight": 1},
172
+ }
173
+ if "tissue_types" in loss_fn_settings.keys():
174
+ loss_fn_dict["tissue_types"] = {}
175
+ for loss_name, loss_sett in loss_fn_settings["tissue_types"].items():
176
+ parameters = loss_sett.get("args", {})
177
+ loss_fn_dict["tissue_types"][loss_name] = {
178
+ "loss_fn": retrieve_loss_fn(loss_sett["loss_fn"], **parameters),
179
+ "weight": loss_sett["weight"],
180
+ }
181
+ # skip default tissue loss!
182
+ return loss_fn_dict
183
+
184
+ def get_scheduler(self, scheduler_type: str, optimizer: Optimizer) -> _LRScheduler:
185
+ """Get the learning rate scheduler for CellViT
186
+
187
+ The configuration of the scheduler is given in the "training" -> "scheduler" section.
188
+        Currently, "constant", "exponential", "cosine" and "reducelronplateau" schedulers are implemented.
189
+
190
+ Required parameters for implemented schedulers:
191
+ - "constant": None
192
+ - "exponential": gamma (optional, defaults to 0.95)
193
+        - "cosine": eta_min (optional, defaults to 1e-5)
194
+        - "reducelronplateau": everything hardcoded right now, uses validation loss for checking
195
+ Args:
196
+ scheduler_type (str): Type of scheduler as a string. Currently implemented:
197
+            - "constant" (lowering by a factor of ten after 25 epochs, increasing after 50, decreasing again after 75)
198
+ - "exponential" (ExponentialLR with given gamma, gamma defaults to 0.95)
199
+            - "cosine" (CosineAnnealingLR, eta_min as parameter, defaults to 1e-5)
200
+ optimizer (Optimizer): Optimizer
201
+
202
+ Returns:
203
+ _LRScheduler: PyTorch Scheduler
204
+ """
205
+ implemented_schedulers = [
206
+ "constant",
207
+ "exponential",
208
+ "cosine",
209
+ "reducelronplateau",
210
+ ]
211
+ if scheduler_type.lower() not in implemented_schedulers:
212
+ self.logger.warning(
213
+                f"Unknown Scheduler - No scheduler from the list {implemented_schedulers} selected. Using default scheduling."
214
+ )
215
+ if scheduler_type.lower() == "constant":
216
+ scheduler = SequentialLR(
217
+ optimizer=optimizer,
218
+ schedulers=[
219
+ ConstantLR(optimizer, factor=1, total_iters=25),
220
+ ConstantLR(optimizer, factor=0.1, total_iters=25),
221
+ ConstantLR(optimizer, factor=1, total_iters=25),
222
+ ConstantLR(optimizer, factor=0.1, total_iters=1000),
223
+ ],
224
+ milestones=[24, 49, 74],
225
+ )
226
+ elif scheduler_type.lower() == "exponential":
227
+ scheduler = ExponentialLR(
228
+ optimizer,
229
+ gamma=self.run_conf["training"]["scheduler"].get("gamma", 0.95),
230
+ )
231
+ elif scheduler_type.lower() == "cosine":
232
+ scheduler = CosineAnnealingLR(
233
+ optimizer,
234
+ T_max=self.run_conf["training"]["epochs"],
235
+ eta_min=self.run_conf["training"]["scheduler"].get("eta_min", 1e-5),
236
+ )
237
+ elif scheduler_type.lower() == "reducelronplateau":
238
+ scheduler = ReduceLROnPlateau(
239
+ optimizer,
240
+ mode="min",
241
+ factor=0.5,
242
+ min_lr=0.0000001,
243
+ patience=10,
244
+ threshold=1e-20,
245
+ )
246
+ else:
247
+ scheduler = super().get_scheduler(optimizer)
248
+ return scheduler
249
+
250
+ def get_datasets(
251
+ self,
252
+ train_transforms: Callable = None,
253
+ val_transforms: Callable = None,
254
+ ) -> Tuple[Dataset, Dataset]:
255
+ """Retrieve training dataset and validation dataset
256
+
257
+ Args:
258
+ dataset_name (str): Name of dataset to use
260
+ val_transforms (Callable, optional): PyTorch transformations for validation set. Defaults to None.
261
+
262
+ Returns:
263
+ Tuple[Dataset, Dataset]: Training dataset and validation dataset
264
+ """
265
+ self.run_conf["data"]["stardist"] = True
266
+ train_dataset, val_dataset = super().get_datasets(
267
+ train_transforms=train_transforms,
268
+ val_transforms=val_transforms,
269
+ )
270
+ return train_dataset, val_dataset
271
+
272
+ def get_train_model(
273
+ self,
274
+ pretrained_encoder: Union[Path, str] = None,
275
+ pretrained_model: Union[Path, str] = None,
276
+ backbone_type: str = "default",
277
+ shared_decoders: bool = False,
278
+ **kwargs,
279
+ ) -> nn.Module:
280
+ """Return the CellViTStarDist training model
281
+
282
+ Args:
283
+ pretrained_encoder (Union[Path, str]): Path to a pretrained encoder. Defaults to None.
284
+ pretrained_model (Union[Path, str], optional): Path to a pretrained model. Defaults to None.
285
+            backbone_type (str, optional): Backbone type. Currently supported: default, ViT256, SAM-B, SAM-L, SAM-H, RN50. Defaults to "default".
286
+ shared_decoders (bool, optional): If shared skip decoders should be used. Defaults to False.
287
+
288
+ Returns:
289
+ nn.Module: StarDist training model with given setup
290
+ """
291
+ # reseed needed, due to subprocess seeding compatibility
292
+ self.seed_run(self.default_conf["random_seed"])
293
+
294
+ # check for backbones
295
+ implemented_backbones = ["default", "vit256", "sam-b", "sam-l", "sam-h", "rn50"]
296
+ if backbone_type.lower() not in implemented_backbones:
297
+ raise NotImplementedError(
298
+ f"Unknown Backbone Type - Currently supported are: {implemented_backbones}"
299
+ )
300
+ if backbone_type.lower() == "default":
301
+ if shared_decoders:
302
+ model_class = CellViTStarDistShared
303
+ else:
304
+ model_class = CellViTStarDist
305
+ model = model_class(
306
+ num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"],
307
+ num_tissue_classes=self.run_conf["data"]["num_tissue_classes"],
308
+ embed_dim=self.run_conf["model"]["embed_dim"],
309
+ input_channels=self.run_conf["model"].get("input_channels", 3),
310
+ depth=self.run_conf["model"]["depth"],
311
+ num_heads=self.run_conf["model"]["num_heads"],
312
+ extract_layers=self.run_conf["model"]["extract_layers"],
313
+ drop_rate=self.run_conf["training"].get("drop_rate", 0),
314
+ attn_drop_rate=self.run_conf["training"].get("attn_drop_rate", 0),
315
+ drop_path_rate=self.run_conf["training"].get("drop_path_rate", 0),
316
+ nrays=self.run_conf["model"].get("nrays", 32),
317
+ )
318
+
319
+ if pretrained_model is not None:
320
+ self.logger.info(
321
+ f"Loading pretrained CellViT model from path: {pretrained_model}"
322
+ )
323
+ cellvit_pretrained = torch.load(pretrained_model)
324
+ self.logger.info(model.load_state_dict(cellvit_pretrained, strict=True))
325
+ self.logger.info("Loaded CellViT model")
326
+
327
+ if backbone_type.lower() == "vit256":
328
+ if shared_decoders:
329
+ model_class = CellViT256StarDistShared
330
+ else:
331
+ model_class = CellViT256StarDist
332
+ model = model_class(
333
+ model256_path=pretrained_encoder,
334
+ num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"],
335
+ num_tissue_classes=self.run_conf["data"]["num_tissue_classes"],
336
+ drop_rate=self.run_conf["training"].get("drop_rate", 0),
337
+ attn_drop_rate=self.run_conf["training"].get("attn_drop_rate", 0),
338
+ drop_path_rate=self.run_conf["training"].get("drop_path_rate", 0),
339
+ nrays=self.run_conf["model"].get("nrays", 32),
340
+ )
341
+ model.load_pretrained_encoder(model.model256_path)
342
+ if pretrained_model is not None:
343
+ self.logger.info(
344
+ f"Loading pretrained CellViT model from path: {pretrained_model}"
345
+ )
346
+ cellvit_pretrained = torch.load(pretrained_model, map_location="cpu")
347
+ self.logger.info(model.load_state_dict(cellvit_pretrained, strict=True))
348
+ model.freeze_encoder()
349
+ self.logger.info("Loaded CellVit256 model")
350
+ if backbone_type.lower() in ["sam-b", "sam-l", "sam-h"]:
351
+ if shared_decoders:
352
+ model_class = CellViTSAMStarDistShared
353
+ else:
354
+ model_class = CellViTSAMStarDist
355
+ model = model_class(
356
+ model_path=pretrained_encoder,
357
+ num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"],
358
+ num_tissue_classes=self.run_conf["data"]["num_tissue_classes"],
359
+ vit_structure=backbone_type,
360
+ drop_rate=self.run_conf["training"].get("drop_rate", 0),
361
+ nrays=self.run_conf["model"].get("nrays", 32),
362
+ )
363
+ model.load_pretrained_encoder(model.model_path)
364
+ if pretrained_model is not None:
365
+ self.logger.info(
366
+ f"Loading pretrained CellViT model from path: {pretrained_model}"
367
+ )
368
+ cellvit_pretrained = torch.load(pretrained_model, map_location="cpu")
369
+ self.logger.info(model.load_state_dict(cellvit_pretrained, strict=True))
370
+ model.freeze_encoder()
371
+ self.logger.info(f"Loaded CellViT-SAM model with backbone: {backbone_type}")
372
+ if backbone_type.lower() == "rn50":
373
+ model = StarDistRN50(
374
+ n_rays=self.run_conf["model"].get("nrays", 32),
375
+ n_seg_cls=self.run_conf["data"]["num_nuclei_classes"],
376
+ )
377
+
378
+ self.logger.info(f"\nModel: {model}")
379
+ model = model.to("cpu")
380
+ self.logger.info(
381
+ f"\n{summary(model, input_size=(1, 3, 256, 256), device='cpu')}"
382
+ )
383
+
384
+ return model
385
+
386
+ def get_trainer(self) -> BaseTrainer:
387
+        """Return the trainer matching this network
388
+
389
+ Returns:
390
+ BaseTrainer: Trainer
391
+ """
392
+ return CellViTStarDistTrainer
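
A standalone sketch of the "constant" schedule built in get_scheduler above: SequentialLR chains four ConstantLR phases with milestones [24, 49, 74] (toy model and optimizer; the exact epoch at which each factor takes effect follows PyTorch's SequentialLR semantics):

import torch
from torch.optim.lr_scheduler import ConstantLR, SequentialLR

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = SequentialLR(
    optimizer,
    schedulers=[
        ConstantLR(optimizer, factor=1, total_iters=25),
        ConstantLR(optimizer, factor=0.1, total_iters=25),
        ConstantLR(optimizer, factor=1, total_iters=25),
        ConstantLR(optimizer, factor=0.1, total_iters=1000),
    ],
    milestones=[24, 49, 74],
)
for epoch in range(100):
    if epoch in (0, 24, 25, 49, 50, 74, 75):  # print around the phase boundaries
        print(epoch, scheduler.get_last_lr())
    scheduler.step()
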
cell_segmentation/inference/__init__.py ADDED
@@ -0,0 +1,6 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Inference related methods for each network type
3
+ #
4
+ # @ Fabian Hörst, [email protected]
5
+ # Institute for Artificial Intelligence in Medicine,
6
+ # University Medicine Essen
cell_segmentation/inference/cell_detection.py ADDED
@@ -0,0 +1,1077 @@
1
+ # -*- coding: utf-8 -*-
2
+ # CellViT Inference Method for Patch-Wise Inference on a patched test set / whole WSI
3
+ #
4
+ # Detect Cells with our Networks
5
+ # The patches dataset needs to meet the following requirements:
6
+ # Patch-Size must be 1024, with overlap of 64
7
+ #
8
+ # We provide preprocessing code here: ./preprocessing/patch_extraction/main_extraction.py
9
+ #
10
+ # @ Fabian Hörst, [email protected]
11
+ # Institute for Artificial Intelligence in Medicine,
12
+ # University Medicine Essen
13
+
14
+ import inspect
15
+ import os
16
+ import sys
17
+
18
+ currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
19
+ parentdir = os.path.dirname(currentdir)
20
+ sys.path.insert(0, parentdir)
21
+ parentdir = os.path.dirname(parentdir)
22
+ sys.path.insert(0, parentdir)
23
+
24
+ import argparse
25
+ import logging
26
+ import uuid
27
+ import warnings
28
+ from collections import deque
29
+ from pathlib import Path
30
+ from typing import List, Tuple, Union
31
+
32
+ import numpy as np
33
+ import pandas as pd
34
+ import torch
35
+ import torch.nn.functional as F
36
+ import tqdm
37
+ import ujson
38
+ from einops import rearrange
39
+ from pandarallel import pandarallel
40
+
41
+ # from PIL import Image
42
+ from shapely import strtree
43
+ from shapely.errors import ShapelyDeprecationWarning
44
+ from shapely.geometry import Polygon, MultiPolygon
45
+
46
+ # from skimage.color import rgba2rgb
47
+ from torch.utils.data import DataLoader
48
+ from torchvision import transforms as T
49
+
50
+ from cell_segmentation.datasets.cell_graph_datamodel import CellGraphDataWSI
51
+ from cell_segmentation.utils.template_geojson import (
52
+ get_template_point,
53
+ get_template_segmentation,
54
+ )
55
+ from datamodel.wsi_datamodel import WSI
56
+ from models.segmentation.cell_segmentation.cellvit import (
57
+ CellViT,
58
+ )
59
+
60
+ from preprocessing.encoding.datasets.patched_wsi_inference import PatchedWSIInference
61
+ from utils.file_handling import load_wsi_files_from_csv
62
+ from utils.logger import Logger
63
+ from utils.tools import unflatten_dict, get_size_of_dict
64
+
65
+ warnings.filterwarnings("ignore", category=ShapelyDeprecationWarning)
66
+ pandarallel.initialize(progress_bar=False, nb_workers=12)
67
+
68
+
69
+ # color setup
70
+ COLOR_DICT = {
71
+ 1: [255, 0, 0],
72
+ 2: [34, 221, 77],
73
+ 3: [35, 92, 236],
74
+ 4: [254, 255, 0],
75
+ 5: [255, 159, 68],
76
+ }
77
+
78
+ TYPE_NUCLEI_DICT = {
79
+ 1: "Neoplastic",
80
+ 2: "Inflammatory",
81
+ 3: "Connective",
82
+ 4: "Dead",
83
+ 5: "Epithelial",
84
+ }
85
+
86
+ class CellSegmentationInference:
87
+ def __init__(
88
+ self,
89
+ model_path: Union[Path, str],
90
+ gpu: int,
91
+ enforce_mixed_precision: bool = False,
92
+ ) -> None:
93
+ """Cell Segmentation Inference class.
94
+
95
+        After setup, a WSI can be processed by calling the process_wsi method
96
+
97
+ Args:
98
+ model_path (Union[Path, str]): Path to model checkpoint
99
+ gpu (int): CUDA GPU id to use
100
+ enforce_mixed_precision (bool, optional): Using PyTorch autocasting with dtype float16 to speed up inference. Also good for trained amp networks.
101
+ Can be used to enforce amp inference even for networks trained without amp. Otherwise, the network setting is used.
102
+ Defaults to False.
103
+ """
104
+ self.model_path = Path(model_path)
105
+ self.device = f"cuda:{gpu}"
106
+ self.__instantiate_logger()
107
+ self.__load_model()
108
+ self.__load_inference_transforms()
109
+ self.__setup_amp(enforce_mixed_precision=enforce_mixed_precision)
110
+
111
+ def __instantiate_logger(self) -> None:
112
+ """Instantiate logger
113
+
114
+ Logger is using no formatters. Logs are stored in the run directory under the filename: inference.log
115
+ """
116
+ logger = Logger(
117
+ level="INFO",
118
+ )
119
+ self.logger = logger.create_logger()
120
+
121
+ def __load_model(self) -> None:
122
+ """Load model and checkpoint and load the state_dict"""
123
+ self.logger.info(f"Loading model: {self.model_path}")
124
+
125
+ model_checkpoint = torch.load(self.model_path, map_location="cpu")
126
+
127
+ # unpack checkpoint
128
+ self.run_conf = unflatten_dict(model_checkpoint["config"], ".")
129
+ self.model = self.__get_model(model_type=model_checkpoint["arch"])
130
+ self.logger.info(
131
+ self.model.load_state_dict(model_checkpoint["model_state_dict"])
132
+ )
133
+ self.model.eval()
134
+ self.model.to(self.device)
135
+
136
+ def __get_model(
137
+ self, model_type: str
138
+    ) -> CellViT:
140
+ """Return the trained model for inference
141
+
142
+ Args:
143
+            model_type (str): Name of the model. Must be one of:
144
+                CellViT (the only model type currently implemented in this script)
145
+
146
+ Returns:
147
+            CellViT: Model
148
+ """
149
+ implemented_models = [
150
+ "CellViT",
151
+ ]
152
+ if model_type not in implemented_models:
153
+ raise NotImplementedError(
154
+ f"Unknown model type. Please select one of {implemented_models}"
155
+ )
156
+ if model_type in ["CellViT", "CellViTShared"]:
157
+ if model_type == "CellViT":
158
+ model_class = CellViT
159
+ model = model_class(
160
+ num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"],
161
+ num_tissue_classes=self.run_conf["data"]["num_tissue_classes"],
162
+ embed_dim=self.run_conf["model"]["embed_dim"],
163
+ input_channels=self.run_conf["model"].get("input_channels", 3),
164
+ depth=self.run_conf["model"]["depth"],
165
+ num_heads=self.run_conf["model"]["num_heads"],
166
+ extract_layers=self.run_conf["model"]["extract_layers"],
167
+ regression_loss=self.run_conf["model"].get("regression_loss", False),
168
+ )
169
+ return model
170
+
171
+ def __load_inference_transforms(self):
172
+ """Load the inference transformations from the run_configuration"""
173
+ self.logger.info("Loading inference transformations")
174
+
175
+ transform_settings = self.run_conf["transformations"]
176
+ if "normalize" in transform_settings:
177
+ mean = transform_settings["normalize"].get("mean", (0.5, 0.5, 0.5))
178
+ std = transform_settings["normalize"].get("std", (0.5, 0.5, 0.5))
179
+ else:
180
+ mean = (0.5, 0.5, 0.5)
181
+ std = (0.5, 0.5, 0.5)
182
+ self.inference_transforms = T.Compose(
183
+ [T.ToTensor(), T.Normalize(mean=mean, std=std)]
184
+ )
185
+
186
+ def __setup_amp(self, enforce_mixed_precision: bool = False) -> None:
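
A quick sketch of what the inference transforms above do to a raw patch (toy uint8 input; (0.5, 0.5, 0.5) are the defaults used when the run configuration has no "normalize" entry):

import numpy as np
from torchvision import transforms as T

transform = T.Compose([T.ToTensor(), T.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])
patch = np.random.randint(0, 256, (1024, 1024, 3), dtype=np.uint8)  # H x W x C uint8 patch
tensor = transform(patch)  # float32, shape (3, 1024, 1024), values roughly in [-1, 1]
print(tensor.shape, tensor.min().item(), tensor.max().item())
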
187
+ """Setup automated mixed precision (amp) for inference.
188
+
189
+ Args:
190
+ enforce_mixed_precision (bool, optional): Using PyTorch autocasting with dtype float16 to speed up inference. Also good for trained amp networks.
191
+ Can be used to enforce amp inference even for networks trained without amp. Otherwise, the network setting is used.
192
+ Defaults to False.
193
+ """
194
+ if enforce_mixed_precision:
195
+ self.mixed_precision = enforce_mixed_precision
196
+ else:
197
+ self.mixed_precision = self.run_conf["training"].get(
198
+ "mixed_precision", False
199
+ )
200
+
201
+ def process_wsi(
202
+ self,
203
+ wsi: WSI,
204
+ subdir_name: str = None,
205
+ patch_size: int = 1024,
206
+ overlap: int = 64,
207
+ batch_size: int = 8,
208
+ geojson: bool = False,
209
+ ) -> None:
210
+ """Process WSI file
211
+
212
+ Args:
213
+ wsi (WSI): WSI object
214
+ subdir_name (str, optional): If provided, a subdir with the given name is created in the cell_detection folder.
215
+ Helpful if you need to store different cell detection results next to each other. Defaults to None (no subdir).
216
+            patch_size (int, optional): Patch-Size. Defaults to 1024.
217
+ overlap (int, optional): Overlap between patches. Defaults to 64.
218
+ batch_size (int, optional): Batch-size for inference. Defaults to 8.
219
+            geojson (bool, optional): If a geojson export should be performed. Defaults to False.
220
+ """
221
+ self.logger.info(f"Processing WSI: {wsi.name}")
222
+
223
+ wsi_inference_dataset = PatchedWSIInference(
224
+ wsi, transform=self.inference_transforms
225
+ )
226
+
227
+        cpu_count = os.cpu_count()  # os.cpu_count() can return None, so guard before the arithmetic
228
+        if cpu_count is None:
229
+            cpu_count = 16
+        num_workers = int(3 / 4 * cpu_count)
230
+ num_workers = int(np.clip(num_workers, 1, 2 * batch_size))
231
+
232
+ wsi_inference_dataloader = DataLoader(
233
+ dataset=wsi_inference_dataset,
234
+ batch_size=batch_size,
235
+ num_workers=num_workers,
236
+ shuffle=False,
237
+ collate_fn=wsi_inference_dataset.collate_batch,
238
+ pin_memory=False,
239
+ )
240
+ dataset_config = self.run_conf["dataset_config"]
241
+ nuclei_types = dataset_config["nuclei_types"]
242
+
243
+ if subdir_name is not None:
244
+ outdir = Path(wsi.patched_slide_path) / "cell_detection" / subdir_name
245
+ else:
246
+ outdir = Path(wsi.patched_slide_path) / "cell_detection"
247
+ outdir.mkdir(exist_ok=True, parents=True)
248
+
249
+ cell_dict_wsi = [] # for storing all cell information
250
+ cell_dict_detection = [] # for storing only the centroids
251
+
252
+ graph_data = {
253
+ "cell_tokens": [],
254
+ "positions": [],
255
+ "contours": [],
256
+ "metadata": {"wsi_metadata": wsi.metadata, "nuclei_types": nuclei_types},
257
+ }
258
+ processed_patches = []
259
+
260
+ memory_usage = 0
261
+ cell_count = 0
262
+
263
+ with torch.no_grad():
264
+
265
+            # the progress bar tracks patches and is updated manually inside the loop below
+            pbar = tqdm.tqdm(total=len(wsi_inference_dataset))
266
+
267
+ for batch in wsi_inference_dataloader:
268
+ patches = batch[0].to(self.device)
269
+
270
+ metadata = batch[1]
271
+ if self.mixed_precision:
272
+ with torch.autocast(device_type="cuda", dtype=torch.float16):
273
+ predictions = self.model.forward(patches, retrieve_tokens=True)
274
+ else:
275
+ predictions = self.model.forward(patches, retrieve_tokens=True)
276
+ # reshape, apply softmax to segmentation maps
277
+ # predictions = self.model.reshape_model_output(predictions_, self.device)
278
+ instance_types, tokens = self.get_cell_predictions_with_tokens(
279
+ predictions, magnification=wsi.metadata["magnification"]
280
+ )
281
+                self.logger.debug(f"Token-Shape: {tokens.shape}")
282
+ # unpack each patch from batch
283
+ for idx, (patch_instance_types, patch_metadata) in enumerate(
284
+ zip(instance_types, metadata)
285
+ ):
286
+ pbar.update(1)
287
+ # add global patch metadata
288
+ patch_cell_detection = {}
289
+ patch_cell_detection["patch_metadata"] = patch_metadata
290
+ patch_cell_detection["type_map"] = dataset_config["nuclei_types"]
291
+
292
+ processed_patches.append(
293
+ f"{patch_metadata['row']}_{patch_metadata['col']}"
294
+ )
295
+
296
+ # calculate coordinate on highest magnifications
297
+ # wsi_scaling_factor = patch_metadata["wsi_metadata"]["downsampling"]
298
+ # patch_size = patch_metadata["wsi_metadata"]["patch_size"]
299
+ wsi_scaling_factor = wsi.metadata["downsampling"]
300
+ patch_size = wsi.metadata["patch_size"]
301
+ x_global = int(
302
+ patch_metadata["row"] * patch_size * wsi_scaling_factor
303
+ - (patch_metadata["row"] + 0.5) * overlap
304
+ )
305
+ y_global = int(
306
+ patch_metadata["col"] * patch_size * wsi_scaling_factor
307
+ - (patch_metadata["col"] + 0.5) * overlap
308
+ )
309
+
310
+ # extract cell information
311
+ for cell in patch_instance_types.values():
312
+ if cell["type"] == nuclei_types["Background"]:
313
+ continue
314
+ offset_global = np.array([x_global, y_global])
315
+ centroid_global = cell["centroid"] + np.flip(offset_global)
316
+ contour_global = cell["contour"] + np.flip(offset_global)
317
+ bbox_global = cell["bbox"] + offset_global
318
+ cell_dict = {
319
+ "bbox": bbox_global.tolist(),
320
+ "centroid": centroid_global.tolist(),
321
+ "contour": contour_global.tolist(),
322
+ "type_prob": cell["type_prob"],
323
+ "type": cell["type"],
324
+ "patch_coordinates": [
325
+ patch_metadata["row"],
326
+ patch_metadata["col"],
327
+ ],
328
+ "cell_status": get_cell_position_marging(
329
+ cell["bbox"], 1024, 64
330
+ ),
331
+ "offset_global": offset_global.tolist()
332
+ }
333
+ cell_detection = {
334
+ "bbox": bbox_global.tolist(),
335
+ "centroid": centroid_global.tolist(),
336
+ "type": cell["type"],
337
+ }
338
+ if np.max(cell["bbox"]) == 1024 or np.min(cell["bbox"]) == 0:
339
+ position = get_cell_position(cell["bbox"], 1024)
340
+ cell_dict["edge_position"] = True
341
+ cell_dict["edge_information"] = {}
342
+ cell_dict["edge_information"]["position"] = position
343
+ cell_dict["edge_information"][
344
+ "edge_patches"
345
+ ] = get_edge_patch(
346
+ position, patch_metadata["row"], patch_metadata["col"]
347
+ )
348
+ else:
349
+ cell_dict["edge_position"] = False
350
+
351
+ cell_dict_wsi.append(cell_dict)
352
+ cell_dict_detection.append(cell_detection)
353
+
354
+ # get the cell token
355
+ bb_index = cell["bbox"] / self.model.patch_size
356
+ bb_index[0, :] = np.floor(bb_index[0, :])
357
+ bb_index[1, :] = np.ceil(bb_index[1, :])
358
+ bb_index = bb_index.astype(np.uint8)
359
+                        self.logger.debug(f"Token-Shape-Patch: {tokens[idx].shape}")
360
+ cell_token = tokens[
361
+ idx,
362
+ :,
363
+ bb_index[0, 1] : bb_index[1, 1],
364
+ bb_index[0, 0] : bb_index[1, 0],
365
+ ]
366
+ cell_token = torch.mean(
367
+ rearrange(cell_token, "D H W -> (H W) D"), dim=0
368
+ )
369
+
370
+ graph_data["cell_tokens"].append(cell_token)
371
+ graph_data["positions"].append(torch.Tensor(centroid_global))
372
+ graph_data["contours"].append(torch.Tensor(contour_global))
373
+
374
+ cell_count = cell_count + 1
375
+ # dict sizes
376
+ memory_usage = memory_usage + get_size_of_dict(cell_dict)/(1024*1024) + get_size_of_dict(cell_detection)/(1024*1024) # + sys.getsizeof(cell_token)/(1024*1024)
377
+ # pytorch
378
+ memory_usage = memory_usage + (cell_token.nelement() * cell_token.element_size())/(1024*1024) + centroid_global.nbytes/(1024*1024) + contour_global.nbytes/(1024*1024)
379
+
380
+ pbar.set_postfix(Cells=cell_count, Memory=f"{memory_usage:.2f} MB")
381
+
382
+ # post processing
383
+ self.logger.info(f"Detected cells before cleaning: {len(cell_dict_wsi)}")
384
+ keep_idx = self.post_process_edge_cells(cell_list=cell_dict_wsi)
385
+ cell_dict_wsi = [cell_dict_wsi[idx_c] for idx_c in keep_idx]
386
+ cell_dict_detection = [cell_dict_detection[idx_c] for idx_c in keep_idx]
387
+ graph_data["cell_tokens"] = [
388
+ graph_data["cell_tokens"][idx_c] for idx_c in keep_idx
389
+ ]
390
+ graph_data["positions"] = [graph_data["positions"][idx_c] for idx_c in keep_idx]
391
+ graph_data["contours"] = [graph_data["contours"][idx_c] for idx_c in keep_idx]
392
+ self.logger.info(f"Detected cells after cleaning: {len(keep_idx)}")
393
+
394
+ self.logger.info(
395
+            f"Processed all patches. Storing final results: {str(outdir / 'cells.json')} and cell_detection.json"
396
+ )
397
+ cell_dict_wsi = {
398
+ "wsi_metadata": wsi.metadata,
399
+ "processed_patches": processed_patches,
400
+ "type_map": dataset_config["nuclei_types"],
401
+ "cells": cell_dict_wsi,
402
+ }
403
+ with open(str(outdir / "cells.json"), "w") as outfile:
404
+ ujson.dump(cell_dict_wsi, outfile, indent=2)
405
+ if geojson:
406
+ self.logger.info("Converting segmentation to geojson")
407
+ geojson_list = self.convert_geojson(cell_dict_wsi["cells"], True)
408
+            with open(str(outdir / "cells.geojson"), "w") as outfile:
409
+ ujson.dump(geojson_list, outfile, indent=2)
410
+
411
+ cell_dict_detection = {
412
+ "wsi_metadata": wsi.metadata,
413
+ "processed_patches": processed_patches,
414
+ "type_map": dataset_config["nuclei_types"],
415
+ "cells": cell_dict_detection,
416
+ }
417
+ with open(str(outdir / "cell_detection.json"), "w") as outfile:
418
+ ujson.dump(cell_dict_detection, outfile, indent=2)
419
+ if geojson:
420
+ self.logger.info("Converting detection to geojson")
421
+ geojson_list = self.convert_geojson(cell_dict_wsi["cells"], False)
422
+            with open(str(outdir / "cell_detection.geojson"), "w") as outfile:
423
+ ujson.dump(geojson_list, outfile, indent=2)
424
+
425
+ self.logger.info(
426
+ f"Create cell graph with embeddings and save it under: {str(outdir / 'cells.pt')}"
427
+ )
428
+ graph = CellGraphDataWSI(
429
+ x=torch.stack(graph_data["cell_tokens"]),
430
+ positions=torch.stack(graph_data["positions"]),
431
+ contours=graph_data["contours"],
432
+ metadata=graph_data["metadata"],
433
+ )
434
+ torch.save(graph, outdir / "cells.pt")
435
+
436
+ cell_stats_df = pd.DataFrame(cell_dict_wsi["cells"])
437
+ cell_stats = dict(cell_stats_df.value_counts("type"))
438
+ nuclei_types_inverse = {v: k for k, v in nuclei_types.items()}
439
+ verbose_stats = {nuclei_types_inverse[k]: v for k, v in cell_stats.items()}
440
+ self.logger.info(f"Finished with cell detection for WSI {wsi.name}")
441
+ self.logger.info("Stats:")
442
+ self.logger.info(f"{verbose_stats}")
443
+
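
The cell-token pooling inside process_wsi can be hard to parse from the loop above, so here is an isolated sketch: the ViT token grid (embed_dim, H_tokens, W_tokens) is cropped to the cell's bounding box converted to token units, then mean-pooled into a single embedding (all shapes and values below are toy assumptions; vit_patch_size stands in for self.model.patch_size):

import numpy as np
import torch
from einops import rearrange

tokens = torch.randn(1, 768, 64, 64)  # (batch, embed_dim, H_tokens, W_tokens)
bbox = np.array([[100.0, 120.0], [180.0, 200.0]])  # toy cell bbox in pixels
vit_patch_size = 16

bb_index = bbox / vit_patch_size
bb_index[0, :] = np.floor(bb_index[0, :])  # snap the upper-left corner outward
bb_index[1, :] = np.ceil(bb_index[1, :])   # snap the lower-right corner outward
bb_index = bb_index.astype(int)

# crop the token grid to the cell and mean-pool it into one embedding,
# mirroring the indexing used in the loop above
cell_token = tokens[0, :, bb_index[0, 1]:bb_index[1, 1], bb_index[0, 0]:bb_index[1, 0]]
cell_token = torch.mean(rearrange(cell_token, "D H W -> (H W) D"), dim=0)
print(cell_token.shape)  # torch.Size([768])
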
444
+ def get_cell_predictions_with_tokens(
445
+ self, predictions: dict, magnification: int = 40
446
+ ) -> Tuple[List[dict], torch.Tensor]:
447
+ """Take the raw predictions, apply softmax and calculate type instances
448
+
449
+ Args:
450
+            predictions (dict): Network predictions with tokens. Keys include "nuclei_binary_map", "nuclei_type_map" and "tokens"
451
+ magnification (int, optional): WSI magnification. Defaults to 40.
452
+
453
+ Returns:
454
+ Tuple[List[dict], torch.Tensor]:
455
+ * List[dict]: List with a dictionary for each batch element with cell seg results
456
+ Contains bbox, contour, 2D-position, type and type_prob for each cell
457
+                * torch.Tensor: Network tokens on cpu device with shape (batch_size, embed_dim, num_tokens_h, num_tokens_w)
458
+ """
459
+ predictions["nuclei_binary_map"] = F.softmax(
460
+ predictions["nuclei_binary_map"], dim=1
461
+ ) # shape: (batch_size, 2, H, W)
462
+ predictions["nuclei_type_map"] = F.softmax(
463
+ predictions["nuclei_type_map"], dim=1
464
+ ) # shape: (batch_size, num_nuclei_classes, H, W)
465
+ # get the instance types
466
+ (
467
+ _,
468
+ instance_types,
469
+ ) = self.model.calculate_instance_map(predictions, magnification=magnification)
470
+
471
+ tokens = predictions["tokens"].to("cpu")
472
+
473
+ return instance_types, tokens
474
+
475
+ def post_process_edge_cells(self, cell_list: List[dict]) -> List[int]:
476
+        """Use the CellPostProcessor to remove cells detected multiple times and to merge overlapping cells
477
+
478
+ Args:
479
+ cell_list (List[dict]): List with cell-dictionaries. Required keys:
480
+ * bbox
481
+ * centroid
482
+ * contour
483
+ * type_prob
484
+ * type
485
+ * patch_coordinates
486
+ * cell_status
487
+ * offset_global
488
+
489
+ Returns:
490
+ List[int]: List with integers of cells that should be kept
491
+ """
492
+ cell_processor = CellPostProcessor(cell_list, self.logger)
493
+ cleaned_cells = cell_processor.post_process_cells()
494
+
495
+ return list(cleaned_cells.index.values)
496
+
497
+ def convert_geojson(
498
+ self, cell_list: list[dict], polygons: bool = False
499
+ ) -> List[dict]:
500
+ """Convert a list of cells to a geojson object
501
+
502
+ Either a segmentation object (polygon) or detection points are converted
503
+
504
+ Args:
505
+ cell_list (list[dict]): Cell list with dict entry for each cell.
506
+ Required keys for detection:
507
+ * type
508
+ * centroid
509
+ Required keys for segmentation:
510
+ * type
511
+ * contour
512
+ polygons (bool, optional): If polygon segmentations (True) or detection points (False). Defaults to False.
513
+
514
+ Returns:
515
+ List[dict]: Geojson like list
516
+ """
517
+ if polygons:
518
+ cell_segmentation_df = pd.DataFrame(cell_list)
519
+ detected_types = sorted(cell_segmentation_df.type.unique())
520
+ geojson_placeholder = []
521
+ for cell_type in detected_types:
522
+ cells = cell_segmentation_df[cell_segmentation_df["type"] == cell_type]
523
+ contours = cells["contour"].to_list()
524
+ final_c = []
525
+ for c in contours:
526
+ c.append(c[0])
527
+ final_c.append([c])
528
+
529
+ cell_geojson_object = get_template_segmentation()
530
+ cell_geojson_object["id"] = str(uuid.uuid4())
531
+ cell_geojson_object["geometry"]["coordinates"] = final_c
532
+ cell_geojson_object["properties"]["classification"][
533
+ "name"
534
+ ] = TYPE_NUCLEI_DICT[cell_type]
535
+ cell_geojson_object["properties"]["classification"][
536
+ "color"
537
+ ] = COLOR_DICT[cell_type]
538
+ geojson_placeholder.append(cell_geojson_object)
539
+ else:
540
+ cell_detection_df = pd.DataFrame(cell_list)
541
+ detected_types = sorted(cell_detection_df.type.unique())
542
+ geojson_placeholder = []
543
+ for cell_type in detected_types:
544
+ cells = cell_detection_df[cell_detection_df["type"] == cell_type]
545
+ centroids = cells["centroid"].to_list()
546
+ cell_geojson_object = get_template_point()
547
+ cell_geojson_object["id"] = str(uuid.uuid4())
548
+ cell_geojson_object["geometry"]["coordinates"] = centroids
549
+ cell_geojson_object["properties"]["classification"][
550
+ "name"
551
+ ] = TYPE_NUCLEI_DICT[cell_type]
552
+ cell_geojson_object["properties"]["classification"][
553
+ "color"
554
+ ] = COLOR_DICT[cell_type]
555
+ geojson_placeholder.append(cell_geojson_object)
556
+ return geojson_placeholder
557
+
558
+
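
For orientation, this is roughly what one exported detection feature looks like (hedged sketch: the exact skeleton comes from get_template_point in cell_segmentation.utils.template_geojson, assumed here to be a standard GeoJSON MultiPoint feature; name and color correspond to TYPE_NUCLEI_DICT[1] and COLOR_DICT[1] defined at the top of this file):

import uuid

cell_geojson_object = {
    "type": "Feature",
    "id": str(uuid.uuid4()),
    "geometry": {
        "type": "MultiPoint",
        "coordinates": [[1205.5, 873.0], [1311.2, 902.4]],  # one entry per detected centroid
    },
    "properties": {
        "classification": {"name": "Neoplastic", "color": [255, 0, 0]},
    },
}
print(cell_geojson_object["properties"]["classification"])
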
559
+ class CellPostProcessor:
560
+ def __init__(self, cell_list: List[dict], logger: logging.Logger) -> None:
561
+        """Post-process a list of cells from one WSI
562
+
563
+ Args:
564
+ cell_list (List[dict]): List with cell-dictionaries. Required keys:
565
+ * bbox
566
+ * centroid
567
+ * contour
568
+ * type_prob
569
+ * type
570
+ * patch_coordinates
571
+ * cell_status
572
+ * offset_global
573
+ logger (logging.Logger): Logger
574
+ """
575
+ self.logger = logger
576
+ self.logger.info("Initializing Cell-Postprocessor")
577
+ self.cell_df = pd.DataFrame(cell_list)
578
+ self.cell_df = self.cell_df.parallel_apply(convert_coordinates, axis=1)
579
+
580
+ self.mid_cells = self.cell_df[
581
+ self.cell_df["cell_status"] == 0
582
+ ] # cells in the mid
583
+ self.cell_df_margin = self.cell_df[
584
+ self.cell_df["cell_status"] != 0
585
+        ] # cells either touching the border or lying in the margin
586
+
587
+ def post_process_cells(self) -> pd.DataFrame:
588
+ """Main Post-Processing coordinator, entry point
589
+
590
+ Returns:
591
+ pd.DataFrame: DataFrame with post-processed and cleaned cells
592
+ """
593
+ self.logger.info("Finding edge-cells for merging")
594
+ cleaned_edge_cells = self._clean_edge_cells()
595
+ self.logger.info("Removal of cells detected multiple times")
596
+ cleaned_edge_cells = self._remove_overlap(cleaned_edge_cells)
597
+
598
+ # merge with mid cells
599
+ postprocessed_cells = pd.concat(
600
+ [self.mid_cells, cleaned_edge_cells]
601
+ ).sort_index()
602
+ return postprocessed_cells
603
+
604
+ def _clean_edge_cells(self) -> pd.DataFrame:
605
+ """Create a DataFrame that just contains all margin cells (cells inside the margin, not touching the border)
606
+        and border/edge cells (touching border) with no overlapping equivalent (e.g., if the patch has no neighbour)
607
+
608
+ Returns:
609
+ pd.DataFrame: Cleaned DataFrame
610
+ """
611
+
612
+ margin_cells = self.cell_df_margin[
613
+ self.cell_df_margin["edge_position"] == 0
614
+ ] # cells at the margin, but not touching the border
615
+ edge_cells = self.cell_df_margin[
616
+ self.cell_df_margin["edge_position"] == 1
617
+ ] # cells touching the border
618
+ existing_patches = list(set(self.cell_df_margin["patch_coordinates"].to_list()))
619
+
620
+ edge_cells_unique = pd.DataFrame(
621
+ columns=self.cell_df_margin.columns
622
+        ) # cells touching the border without an overlapping equivalent in other patches
623
+
624
+ for idx, cell_info in edge_cells.iterrows():
625
+ edge_information = dict(cell_info["edge_information"])
626
+ edge_patch = edge_information["edge_patches"][0]
627
+ edge_patch = f"{edge_patch[0]}_{edge_patch[1]}"
628
+ if edge_patch not in existing_patches:
629
+ edge_cells_unique.loc[idx, :] = cell_info
630
+
631
+ cleaned_edge_cells = pd.concat([margin_cells, edge_cells_unique])
632
+
633
+ return cleaned_edge_cells.sort_index()
634
+
635
+ def _remove_overlap(self, cleaned_edge_cells: pd.DataFrame) -> pd.DataFrame:
636
+ """Remove overlapping cells from provided DataFrame
637
+
638
+ Args:
639
+ cleaned_edge_cells (pd.DataFrame): DataFrame that should be cleaned
640
+
641
+ Returns:
642
+ pd.DataFrame: Cleaned DataFrame
643
+ """
644
+ merged_cells = cleaned_edge_cells
645
+
646
+ for iteration in range(20):
647
+ poly_list = []
648
+ for idx, cell_info in merged_cells.iterrows():
649
+ poly = Polygon(cell_info["contour"])
650
+ if not poly.is_valid:
651
+ self.logger.debug("Found invalid polygon - Fixing with buffer 0")
652
+ multi = poly.buffer(0)
653
+ if isinstance(multi, MultiPolygon):
654
+ if len(multi) > 1:
655
+ poly_idx = np.argmax([p.area for p in multi])
656
+ poly = multi[poly_idx]
657
+ poly = Polygon(poly)
658
+ else:
659
+ poly = multi[0]
660
+ poly = Polygon(poly)
661
+ else:
662
+ poly = Polygon(multi)
663
+ poly.uid = idx
664
+ poly_list.append(poly)
665
+
666
+            # use an STRtree for fast querying
667
+ tree = strtree.STRtree(poly_list)
668
+
669
+ merged_idx = deque()
670
+ iterated_cells = set()
671
+ overlaps = 0
672
+
673
+ for query_poly in poly_list:
674
+ if query_poly.uid not in iterated_cells:
675
+ intersected_polygons = tree.query(
676
+ query_poly
677
+ ) # this also contains a self-intersection
678
+ if (
679
+ len(intersected_polygons) > 1
680
+ ): # we have more at least one intersection with another cell
681
+                    ): # we have at least one intersection with another cell
682
+ for inter_poly in intersected_polygons:
683
+ if (
684
+ inter_poly.uid != query_poly.uid
685
+ and inter_poly.uid not in iterated_cells
686
+ ):
687
+ if (
688
+ query_poly.intersection(inter_poly).area
689
+ / query_poly.area
690
+ > 0.01
691
+ or query_poly.intersection(inter_poly).area
692
+ / inter_poly.area
693
+ > 0.01
694
+ ):
695
+ overlaps = overlaps + 1
696
+ submergers.append(inter_poly)
697
+ iterated_cells.add(inter_poly.uid)
698
+ # catch block: empty list -> some cells are touching, but not overlapping strongly enough
699
+ if len(submergers) == 0:
700
+ merged_idx.append(query_poly.uid)
701
+                        else: # merging strategy: keep the biggest cell; other merging strategies would need to be implemented
702
+ selected_poly_index = np.argmax(
703
+ np.array([p.area for p in submergers])
704
+ )
705
+ selected_poly_uid = submergers[selected_poly_index].uid
706
+ merged_idx.append(selected_poly_uid)
707
+ else:
708
+ # no intersection, just add
709
+ merged_idx.append(query_poly.uid)
710
+ iterated_cells.add(query_poly.uid)
711
+
712
+ self.logger.info(
713
+ f"Iteration {iteration}: Found overlap of # cells: {overlaps}"
714
+ )
715
+ if overlaps == 0:
716
+ self.logger.info("Found all overlapping cells")
717
+ break
718
+            elif iteration == 19:  # final pass of range(20)
719
+                self.logger.info(
720
+                    f"Not all duplicated cells removed, still {overlaps} to remove. For performance reasons, we stop iterating now. Please raise an issue on git or increase the number of iterations."
721
+ )
722
+ merged_cells = cleaned_edge_cells.loc[
723
+ cleaned_edge_cells.index.isin(merged_idx)
724
+ ].sort_index()
725
+
726
+ return merged_cells.sort_index()
727
+
728
+
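
A minimal sketch of the STRtree-based duplicate check used in _remove_overlap, assuming shapely 1.8.x, where STRtree.query returns geometries and arbitrary attributes can be attached to them (in shapely 2.x, query returns indices instead):

from shapely import strtree
from shapely.geometry import Polygon

polys = [
    Polygon([(0, 0), (4, 0), (4, 4), (0, 4)]),
    Polygon([(3, 3), (7, 3), (7, 7), (3, 7)]),          # overlaps the first polygon
    Polygon([(10, 10), (12, 10), (12, 12), (10, 12)]),  # isolated
]
for i, p in enumerate(polys):
    p.uid = i  # attach an id, as the code above does

tree = strtree.STRtree(polys)
for query in polys:
    hits = tree.query(query)  # includes the query polygon itself
    overlapping = [
        h.uid for h in hits
        if h.uid != query.uid and query.intersection(h).area / query.area > 0.01
    ]
    print(query.uid, overlapping)  # 0 -> [1], 1 -> [0], 2 -> []
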
729
+ def convert_coordinates(row: pd.Series) -> pd.Series:
730
+ """Convert a row from x,y type to one string representation of the patch position for fast querying
731
+ Repr: x_y
732
+
733
+ Args:
734
+ row (pd.Series): Row to be processed
735
+
736
+ Returns:
737
+ pd.Series: Processed Row
738
+ """
739
+ x, y = row["patch_coordinates"]
740
+ row["patch_row"] = x
741
+ row["patch_col"] = y
742
+ row["patch_coordinates"] = f"{x}_{y}"
743
+ return row
744
+
745
+
746
+ def get_cell_position(bbox: np.ndarray, patch_size: int = 1024) -> List[int]:
747
+ """Get cell position as a list
748
+
749
+ Entry is 1, if cell touches the border: [top, right, down, left]
750
+
751
+ Args:
752
+ bbox (np.ndarray): Bounding-Box of cell
753
+ patch_size (int, optional): Patch-size. Defaults to 1024.
754
+
755
+ Returns:
756
+ List[int]: List with 4 integers for each position
757
+ """
758
+ # bbox = 2x2 array in h, w style
759
+ # bbox[0,0] = upper position (height)
760
+ # bbox[1,0] = lower dimension (height)
761
+    # bbox[0,1] = left position (width)
762
+ # bbox[1,1] = right position (width)
763
+ # bbox[:,0] -> x dimensions
764
+ top, left, down, right = False, False, False, False
765
+ if bbox[0, 0] == 0:
766
+ top = True
767
+ if bbox[0, 1] == 0:
768
+ left = True
769
+ if bbox[1, 0] == patch_size:
770
+ down = True
771
+ if bbox[1, 1] == patch_size:
772
+ right = True
773
+ position = [top, right, down, left]
774
+ position = [int(pos) for pos in position]
775
+
776
+ return position
777
+
778
+
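For illustration, a bounding box starting at row 0 touches only the top border (h/w convention as in the comments above):

    import numpy as np

    bbox = np.array([[0, 120], [40, 180]])
    get_cell_position(bbox, patch_size=1024)  # -> [1, 0, 0, 0]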
779
+ def get_cell_position_marging(
780
+ bbox: np.ndarray, patch_size: int = 1024, margin: int = 64
781
+ ) -> int:
782
+ """Get the status of the cell, describing the cell position
783
+
784
+ A cell is either in the mid (0) or at one of the borders (1-8)
785
+
786
+ # Numbers are assigned clockwise, starting from top left
787
+ # i.e., top left = 1, top = 2, top right = 3, right = 4, bottom right = 5, bottom = 6, bottom left = 7, left = 8
788
+ # Mid status is denoted by 0
789
+
790
+ Args:
791
+ bbox (np.ndarray): Bounding Box of cell
792
+ patch_size (int, optional): Patch-Size. Defaults to 1024.
793
+ margin (int, optional): Margin-Size. Defaults to 64.
794
+
795
+ Returns:
796
+ int: Cell Status
797
+ """
798
+ cell_status = None
799
+ if np.max(bbox) > patch_size - margin or np.min(bbox) < margin:
800
+ if bbox[0, 0] < margin:
801
+ # top left, top or top right
802
+ if bbox[0, 1] < margin:
803
+ # top left
804
+ cell_status = 1
805
+ elif bbox[1, 1] > patch_size - margin:
806
+ # top right
807
+ cell_status = 3
808
+ else:
809
+ # top
810
+ cell_status = 2
811
+ elif bbox[1, 1] > patch_size - margin:
812
+ # top right, right or bottom right
813
+ if bbox[1, 0] > patch_size - margin:
814
+ # bottom right
815
+ cell_status = 5
816
+ else:
817
+ # right
818
+ cell_status = 4
819
+ elif bbox[1, 0] > patch_size - margin:
820
+ # bottom right, bottom, bottom left
821
+ if bbox[0, 1] < margin:
822
+ # bottom left
823
+ cell_status = 7
824
+ else:
825
+ # bottom
826
+ cell_status = 6
827
+ elif bbox[0, 1] < margin:
828
+ # bottom left, left or top left - at this point only plain left remains
829
+ cell_status = 8
830
+ else:
831
+ cell_status = 0
832
+
833
+ return cell_status
834
+
835
+
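An illustrative call: a box 10px below the top edge but otherwise well inside the patch falls into the "top" margin zone (status 2):

    import numpy as np

    bbox = np.array([[10, 500], [60, 560]])
    get_cell_position_marging(bbox, patch_size=1024, margin=64)  # -> 2 (top)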
836
+ def get_edge_patch(position, row, col):
837
+ # row starting on bottom or on top?
838
+ if position == [1, 0, 0, 0]:
839
+ # top
840
+ return [[row - 1, col]]
841
+ if position == [1, 1, 0, 0]:
842
+ # top and right
843
+ return [[row - 1, col], [row - 1, col + 1], [row, col + 1]]
844
+ if position == [0, 1, 0, 0]:
845
+ # right
846
+ return [[row, col + 1]]
847
+ if position == [0, 1, 1, 0]:
848
+ # right and down
849
+ return [[row, col + 1], [row + 1, col + 1], [row + 1, col]]
850
+ if position == [0, 0, 1, 0]:
851
+ # down
852
+ return [[row + 1, col]]
853
+ if position == [0, 0, 1, 1]:
854
+ # down and left
855
+ return [[row + 1, col], [row + 1, col - 1], [row, col - 1]]
856
+ if position == [0, 0, 0, 1]:
857
+ # left
858
+ return [[row, col - 1]]
859
+ if position == [1, 0, 0, 1]:
860
+ # left and top
861
+ return [[row, col - 1], [row - 1, col - 1], [row - 1, col]]
862
+
863
+
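For example, a cell touching the top and right borders maps to the top, top-right and right neighbour patches:

    get_edge_patch([1, 1, 0, 0], row=5, col=5)
    # -> [[4, 5], [4, 6], [5, 6]]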
864
+ # CLI
865
+ class InferenceWSIParser:
866
+ """Parser"""
867
+
868
+ def __init__(self) -> None:
869
+ parser = argparse.ArgumentParser(
870
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
871
+ description="Perform CellViT inference for given run-directory with model checkpoints and logs. Just for CellViT, not for StarDist models",
872
+ )
873
+ requiredNamed = parser.add_argument_group("required named arguments")
874
+ requiredNamed.add_argument(
875
+ "--model",
876
+ type=str,
877
+ help="Model checkpoint file that is used for inference",
878
+ required=True,
879
+ )
880
+ parser.add_argument(
881
+ "--gpu", type=int, help="Cuda-GPU ID for inference. Default: 0", default=0
882
+ )
883
+ parser.add_argument(
884
+ "--magnification",
885
+ type=float,
886
+ help="Network magnification. Is used for checking patch magnification such that we use the correct resolution for network. Default: 40",
887
+ default=40,
888
+ )
889
+ parser.add_argument(
890
+ "--enforce_amp",
891
+ action="store_true",
892
+ help="Whether to use mixed precision for inference (enforced). Otherwise network default training settings are used."
893
+ " Default: False",
894
+ )
895
+ parser.add_argument(
896
+ "--batch_size",
897
+ type=int,
898
+ help="Inference batch-size. Default: 8",
899
+ default=8,
900
+ )
901
+ parser.add_argument(
902
+ "--outdir_subdir",
903
+ type=str,
904
+ help="If provided, a subdir with the given name is created in the cell_detection folder where the results are stored. Default: None",
905
+ default=None,
906
+ )
907
+ parser.add_argument(
908
+ "--geojson",
909
+ action="store_true",
910
+ help="Set this flag to export results as additional geojson files for loading them into Software like QuPath.",
911
+ )
912
+
913
+ # subparsers for either loading a WSI or a WSI folder
914
+
915
+ # WSI
916
+ subparsers = parser.add_subparsers(
917
+ dest="command",
918
+ description="Main run command for either performing inference on single WSI-file or on whole dataset",
919
+ )
920
+ subparser_wsi = subparsers.add_parser(
921
+ "process_wsi", description="Process a single WSI file"
922
+ )
923
+ subparser_wsi.add_argument(
924
+ "--wsi_path",
925
+ type=str,
926
+ help="Path to WSI file",
927
+ )
928
+ subparser_wsi.add_argument(
929
+ "--patched_slide_path",
930
+ type=str,
931
+ help="Path to patched WSI file (specific WSI file, not parent path of patched slide dataset)",
932
+ )
933
+
934
+ # Dataset
935
+ subparser_dataset = subparsers.add_parser(
936
+ "process_dataset",
937
+ description="Process a whole dataset",
938
+ )
939
+ subparser_dataset.add_argument(
940
+ "--wsi_paths", type=str, help="Path to the folder where all WSI are stored"
941
+ )
942
+ subparser_dataset.add_argument(
943
+ "--patch_dataset_path",
944
+ type=str,
945
+ help="Path to the folder where the patch dataset is stored",
946
+ )
947
+ subparser_dataset.add_argument(
948
+ "--filelist",
949
+ type=str,
950
+ help="Filelist with WSI to process. Must be a .csv file with one column containing the filenames (named 'Filename')."
951
+ " If not provided, all WSI files with the given extension in the WSI folder are processed.",
952
+ default=None,
953
+ )
954
+ subparser_dataset.add_argument(
955
+ "--wsi_extension",
956
+ type=str,
957
+ help="The extension types used for the WSI files, see configs.python.config (WSI_EXT)",
958
+ default="svs",
959
+ )
960
+
961
+ self.parser = parser
962
+
963
+ def parse_arguments(self) -> dict:
964
+ opt = self.parser.parse_args()
965
+ return vars(opt)
966
+
967
+
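A hedged usage sketch for this CLI (script path and file names are placeholders, not taken from the commit):

    # single slide
    # python cell_segmentation/inference/cell_detection.py --model checkpoint.pth --gpu 0 --geojson \
    #     process_wsi --wsi_path slide.svs --patched_slide_path /patched/slide
    # whole dataset
    # python cell_segmentation/inference/cell_detection.py --model checkpoint.pth \
    #     process_dataset --wsi_paths /wsi --patch_dataset_path /patched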
968
+ def check_wsi(wsi: WSI, magnification: float = 40.0):
969
+ """Check if the provided patched WSI has the right settings
970
+
971
+ Args:
972
+ wsi (WSI): WSI to check
973
+ magnification (float, optional): Check magnification. Defaults to 40.0.
974
+
975
+ Raises:
976
+ RuntimeError: The magnification does not match the network input magnification.
977
+ RuntimeError: The patch-size is not divisible by 256.
978
+ RuntimeError: The patch-size is not 1024.
979
+ RuntimeError: The overlap is not 64px.
980
+ """
981
+ if wsi.metadata["magnification"] is not None:
982
+ patch_magnification = float(wsi.metadata["magnification"])
983
+ else:
984
+ patch_magnification = float(
985
+ float(wsi.metadata["base_magnification"]) / wsi.metadata["downsampling"]
986
+ )
987
+ patch_size = int(wsi.metadata["patch_size"])
988
+
989
+ if patch_magnification != magnification:
990
+ raise RuntimeError(
991
+ "The magnification does not match the network input magnification."
992
+ )
993
+ if (patch_size % 256) != 0:
994
+ raise RuntimeError("The patch-size must be divisible by 256.")
995
+ if wsi.metadata["patch_size"] != 1024:
996
+ raise RuntimeError("The patch-size must be 1024.")
997
+ if wsi.metadata["patch_overlap"] != 64:
998
+ raise RuntimeError("The patch-overlap must be 64px.")
999
+
1000
+
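For reference, a metadata dict that would pass check_wsi in this 1024px pipeline could look as follows (keys as read by the function, values illustrative):

    metadata = {
        "magnification": 40.0,  # must equal the network magnification
        "patch_size": 1024,     # must be 1024 (and divisible by 256)
        "patch_overlap": 64,    # must be 64px
    }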
1001
+ if __name__ == "__main__":
1002
+ configuration_parser = InferenceWSIParser()
1003
+ configuration = configuration_parser.parse_arguments()
1004
+ command = configuration["command"]
1005
+
1006
+ cell_segmentation = CellSegmentationInference(
1007
+ model_path=configuration["model"],
1008
+ gpu=configuration["gpu"],
1009
+ enforce_mixed_precision=configuration["enforce_amp"],
1010
+ )
1011
+
1012
+ if command.lower() == "process_wsi":
1013
+ cell_segmentation.logger.info("Processing single WSI file")
1014
+ wsi_path = Path(configuration["wsi_path"])
1015
+ wsi_name = wsi_path.stem
1016
+ wsi_file = WSI(
1017
+ name=wsi_name,
1018
+ patient=wsi_name,
1019
+ slide_path=wsi_path,
1020
+ patched_slide_path=configuration["patched_slide_path"],
1021
+ )
1022
+ check_wsi(wsi=wsi_file, magnification=configuration["magnification"])
1023
+ cell_segmentation.process_wsi(
1024
+ wsi_file,
1025
+ subdir_name=configuration["outdir_subdir"],
1026
+ geojson=configuration["geojson"],
1027
+ batch_size=configuration["batch_size"],
1028
+ )
1029
+
1030
+ elif command.lower() == "process_dataset":
1031
+ cell_segmentation.logger.info("Processing whole dataset")
1032
+ if configuration["filelist"] is not None:
1033
+ if Path(configuration["filelist"]).suffix != ".csv":
1034
+ raise ValueError("Filelist must be a .csv file!")
1035
+ cell_segmentation.logger.info(
1036
+ f"Loading files from filelist {configuration['filelist']}"
1037
+ )
1038
+ wsi_filelist = load_wsi_files_from_csv(
1039
+ csv_path=configuration["filelist"],
1040
+ wsi_extension=configuration["wsi_extension"],
1041
+ )
1042
+ wsi_filelist = [
1043
+ Path(configuration["wsi_paths"]) / f
1044
+ if configuration["wsi_paths"] not in f
1045
+ else Path(f)
1046
+ for f in wsi_filelist
1047
+ ]
1048
+ else:
1049
+ cell_segmentation.logger.info(
1050
+ f"Loading all files from folder {configuration['wsi_paths']}. No filelist provided."
1051
+ )
1052
+ wsi_filelist = [
1053
+ f
1054
+ for f in sorted(
1055
+ Path(configuration["wsi_paths"]).glob(
1056
+ f"**/*.{configuration['wsi_extension']}"
1057
+ )
1058
+ )
1059
+ ]
1060
+ for i, wsi_path in enumerate(wsi_filelist):
1061
+ wsi_path = Path(wsi_path)
1062
+ wsi_name = wsi_path.stem
1063
+ patched_slide_path = Path(configuration["patch_dataset_path"]) / wsi_name
1064
+ cell_segmentation.logger.info(f"File {i+1}/{len(wsi_filelist)}: {wsi_name}")
1065
+ wsi_file = WSI(
1066
+ name=wsi_name,
1067
+ patient=wsi_name,
1068
+ slide_path=wsi_path,
1069
+ patched_slide_path=patched_slide_path,
1070
+ )
1071
+ check_wsi(wsi=wsi_file, magnification=configuration["magnification"])
1072
+ cell_segmentation.process_wsi(
1073
+ wsi_file,
1074
+ subdir_name=configuration["outdir_subdir"],
1075
+ geojson=configuration["geojson"],
1076
+ batch_size=configuration["batch_size"],
1077
+ )
cell_segmentation/inference/cell_detection_256.py ADDED
@@ -0,0 +1,1111 @@
1
+ # -*- coding: utf-8 -*-
2
+ # CellViT Inference Method for Patch-Wise Inference on a patched test set / whole WSI
3
+ #
4
+ # Detect Cells with our Networks
5
+ # The patches dataset must meet the following requirements:
6
+ # Patch-Size must be 256, with overlap of 64
7
+ #
8
+ # We provide preprocessing code here: ./preprocessing/patch_extraction/main_extraction.py
9
+ #
10
+ # @ Fabian Hörst, [email protected]
11
+ # Institute for Artificial Intelligence in Medicine,
12
+ # University Medicine Essen
13
+
14
+ import inspect
15
+ import os
16
+ import sys
17
+
18
+ currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
19
+ parentdir = os.path.dirname(currentdir)
20
+ sys.path.insert(0, parentdir)
21
+ parentdir = os.path.dirname(parentdir)
22
+ sys.path.insert(0, parentdir)
23
+
24
+ import argparse
25
+ import logging
26
+ import uuid
27
+ import warnings
28
+ from collections import deque
29
+ from pathlib import Path
30
+ from typing import List, Tuple, Union
31
+
32
+ import numpy as np
33
+ import pandas as pd
34
+ import torch
35
+ import torch.nn.functional as F
36
+ import tqdm
37
+ import ujson
38
+ from einops import rearrange
39
+ from pandarallel import pandarallel
40
+
41
+ # from PIL import Image
42
+ from shapely import strtree
43
+ from shapely.errors import ShapelyDeprecationWarning
44
+ from shapely.geometry import Polygon, MultiPolygon
45
+
46
+ # from skimage.color import rgba2rgb
47
+ from torch.utils.data import DataLoader
48
+ from torchvision import transforms as T
49
+
50
+ from cell_segmentation.datasets.cell_graph_datamodel import CellGraphDataWSI
51
+ from cell_segmentation.utils.template_geojson import (
52
+ get_template_point,
53
+ get_template_segmentation,
54
+ )
55
+ from datamodel.wsi_datamodel import WSI
56
+ from models.segmentation.cell_segmentation.cellvit import (
57
+ CellViT,
58
+ CellViT256,
59
+ CellViTSAM,
60
+ )
61
+ from models.segmentation.cell_segmentation.cellvit_shared import (
62
+ CellViT256Shared,
63
+ CellViTSAMShared,
64
+ CellViTShared,
65
+ )
66
+ from preprocessing.encoding.datasets.patched_wsi_inference import PatchedWSIInference
67
+ from utils.file_handling import load_wsi_files_from_csv
68
+ from utils.logger import Logger
69
+ from utils.tools import unflatten_dict
70
+
71
+ warnings.filterwarnings("ignore", category=ShapelyDeprecationWarning)
72
+ pandarallel.initialize(progress_bar=False, nb_workers=12)
73
+
74
+
75
+ # color setup
76
+ COLOR_DICT = {
77
+ 1: [255, 0, 0],
78
+ 2: [34, 221, 77],
79
+ 3: [35, 92, 236],
80
+ 4: [254, 255, 0],
81
+ 5: [255, 159, 68],
82
+ }
83
+
84
+ TYPE_NUCLEI_DICT = {
85
+ 1: "Neoplastic",
86
+ 2: "Inflammatory",
87
+ 3: "Connective",
88
+ 4: "Dead",
89
+ 5: "Epithelial",
90
+ }
91
+
92
+
93
+ class CellSegmentationInference:
94
+ def __init__(
95
+ self,
96
+ model_path: Union[Path, str],
97
+ gpu: int,
98
+ enforce_mixed_precision: bool = False,
99
+ ) -> None:
100
+ """Cell Segmentation Inference class.
101
+
102
+ After setup, a WSI can be processed by calling process_wsi method
103
+
104
+ Args:
105
+ model_path (Union[Path, str]): Path to model checkpoint
106
+ gpu (int): CUDA GPU id to use
107
+ enforce_mixed_precision (bool, optional): Using PyTorch autocasting with dtype float16 to speed up inference. Also good for trained amp networks.
108
+ Can be used to enforce amp inference even for networks trained without amp. Otherwise, the network setting is used.
109
+ Defaults to False.
110
+ """
111
+ self.model_path = Path(model_path)
112
+ self.device = f"cuda:{gpu}"
113
+ self.__instantiate_logger()
114
+ self.__load_model()
115
+ self.__load_inference_transforms()
116
+ self.__setup_amp(enforce_mixed_precision=enforce_mixed_precision)
117
+
118
+ def __instantiate_logger(self) -> None:
119
+ """Instantiate logger
120
+
121
+ Logger is using no formatters. Logs are stored in the run directory under the filename: inference.log
122
+ """
123
+ logger = Logger(
124
+ level="INFO",
125
+ )
126
+ self.logger = logger.create_logger()
127
+
128
+ def __load_model(self) -> None:
129
+ """Load model and checkpoint and load the state_dict"""
130
+ self.logger.info(f"Loading model: {self.model_path}")
131
+
132
+ model_checkpoint = torch.load(self.model_path, map_location="cpu")
133
+
134
+ # unpack checkpoint
135
+ self.run_conf = unflatten_dict(model_checkpoint["config"], ".")
136
+ self.model = self.__get_model(model_type=model_checkpoint["arch"])
137
+ self.logger.info(
138
+ self.model.load_state_dict(model_checkpoint["model_state_dict"])
139
+ )
140
+ self.model.eval()
141
+ self.model.to(self.device)
142
+
143
+ def __get_model(
144
+ self, model_type: str
145
+ ) -> Union[
146
+ CellViT,
147
+ CellViTShared,
148
+ CellViT256,
149
+ CellViT256Shared,
150
+ CellViTSAM,
151
+ CellViTSAMShared,
152
+ ]:
153
+ """Return the trained model for inference
154
+
155
+ Args:
156
+ model_type (str): Name of the model. Must either be one of:
157
+ CellViT, CellViTShared, CellViT256, CellViT256Shared, CellViTSAM, CellViTSAMShared
158
+
159
+ Returns:
160
+ Union[CellViT, CellViTShared, CellViT256, CellViT256Shared, CellViTSAM, CellViTSAMShared]: Model
161
+ """
162
+ implemented_models = [
163
+ "CellViT",
164
+ "CellViTShared",
165
+ "CellViT256",
166
+ "CellViT256Shared",
167
+ "CellViTSAM",
168
+ "CellViTSAMShared",
169
+ ]
170
+ if model_type not in implemented_models:
171
+ raise NotImplementedError(
172
+ f"Unknown model type. Please select one of {implemented_models}"
173
+ )
174
+ if model_type in ["CellViT", "CellViTShared"]:
175
+ if model_type == "CellViT":
176
+ model_class = CellViT
177
+ elif model_type == "CellViTShared":
178
+ model_class = CellViTShared
179
+ model = model_class(
180
+ num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"],
181
+ num_tissue_classes=self.run_conf["data"]["num_tissue_classes"],
182
+ embed_dim=self.run_conf["model"]["embed_dim"],
183
+ input_channels=self.run_conf["model"].get("input_channels", 3),
184
+ depth=self.run_conf["model"]["depth"],
185
+ num_heads=self.run_conf["model"]["num_heads"],
186
+ extract_layers=self.run_conf["model"]["extract_layers"],
187
+ regression_loss=self.run_conf["model"].get("regression_loss", False),
188
+ )
189
+
190
+ elif model_type in ["CellViT256", "CellViT256Shared"]:
191
+ if model_type == "CellViT256":
192
+ model_class = CellViT256
193
+ elif model_type == "CellViT256Shared":  # fixed typo; was "CellViTVIT256Shared", which can never match
194
+ model_class = CellViT256Shared
195
+ model = model_class(
196
+ model256_path=None,
197
+ num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"],
198
+ num_tissue_classes=self.run_conf["data"]["num_tissue_classes"],
199
+ regression_loss=self.run_conf["model"].get("regression_loss", False),
200
+ )
201
+ elif model_type in ["CellViTSAM", "CellViTSAMShared"]:
202
+ if model_type == "CellViTSAM":
203
+ model_class = CellViTSAM
204
+ elif model_type == "CellViTSAMShared":
205
+ model_class = CellViTSAMShared
206
+ model = model_class(
207
+ model_path=None,
208
+ num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"],
209
+ num_tissue_classes=self.run_conf["data"]["num_tissue_classes"],
210
+ vit_structure=self.run_conf["model"]["backbone"],
211
+ regression_loss=self.run_conf["model"].get("regression_loss", False),
212
+ )
213
+ return model
214
+
215
+ def __load_inference_transforms(self):
216
+ """Load the inference transformations from the run_configuration"""
217
+ self.logger.info("Loading inference transformations")
218
+
219
+ transform_settings = self.run_conf["transformations"]
220
+ if "normalize" in transform_settings:
221
+ mean = transform_settings["normalize"].get("mean", (0.5, 0.5, 0.5))
222
+ std = transform_settings["normalize"].get("std", (0.5, 0.5, 0.5))
223
+ else:
224
+ mean = (0.5, 0.5, 0.5)
225
+ std = (0.5, 0.5, 0.5)
226
+ self.inference_transforms = T.Compose(
227
+ [T.ToTensor(), T.Normalize(mean=mean, std=std)]
228
+ )
229
+
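A minimal sketch of the default normalization applied to one dummy patch (mean/std values assumed; the real ones come from the run configuration):

    import numpy as np
    from torchvision import transforms as T

    tf = T.Compose([T.ToTensor(), T.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])
    dummy = np.random.randint(0, 255, (256, 256, 3), dtype=np.uint8)
    x = tf(dummy)  # float tensor in [-1, 1], shape (3, 256, 256)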
230
+ def __setup_amp(self, enforce_mixed_precision: bool = False) -> None:
231
+ """Setup automated mixed precision (amp) for inference.
232
+
233
+ Args:
234
+ enforce_mixed_precision (bool, optional): Using PyTorch autocasting with dtype float16 to speed up inference. Also good for trained amp networks.
235
+ Can be used to enforce amp inference even for networks trained without amp. Otherwise, the network setting is used.
236
+ Defaults to False.
237
+ """
238
+ if enforce_mixed_precision:
239
+ self.mixed_precision = enforce_mixed_precision
240
+ else:
241
+ self.mixed_precision = self.run_conf["training"].get(
242
+ "mixed_precision", False
243
+ )
244
+
245
+ def process_wsi(
246
+ self,
247
+ wsi: WSI,
248
+ subdir_name: str = None,
249
+ patch_size: int = 256,
250
+ overlap: int = 64,
251
+ batch_size: int = 8,
252
+ geojson: bool = False,
253
+ ) -> None:
254
+ """Process WSI file
255
+
256
+ Args:
257
+ wsi (WSI): WSI object
258
+ subdir_name (str, optional): If provided, a subdir with the given name is created in the cell_detection folder.
259
+ Helpful if you need to store different cell detection results next to each other. Defaults to None (no subdir).
260
+ patch_size (int, optional): Patch-Size. Defaults to 256.
261
+ overlap (int, optional): Overlap between patches. Defaults to 64.
262
+ batch_size (int, optional): Batch-size for inference. Defaults to 8.
263
+ geojson (bool, optional): Whether a geojson export should be performed. Defaults to False.
264
+ """
265
+ self.logger.info(f"Processing WSI: {wsi.name}")
266
+
267
+ wsi_inference_dataset = PatchedWSIInference(
268
+ wsi, transform=self.inference_transforms
269
+ )
270
+
271
+ cpu_count = os.cpu_count()  # os.cpu_count() can return None
272
+ num_workers = int(3 / 4 * cpu_count) if cpu_count is not None else 16
273
274
+ num_workers = int(np.clip(num_workers, 1, 2 * batch_size))
275
+
276
+ wsi_inference_dataloader = DataLoader(
277
+ dataset=wsi_inference_dataset,
278
+ batch_size=batch_size,
279
+ num_workers=num_workers,
280
+ shuffle=False,
281
+ collate_fn=wsi_inference_dataset.collate_batch,
282
+ pin_memory=False,
283
+ )
284
+ dataset_config = self.run_conf["dataset_config"]
285
+ nuclei_types = dataset_config["nuclei_types"]
286
+
287
+ if subdir_name is not None:
288
+ outdir = Path(wsi.patched_slide_path) / "cell_detection" / subdir_name
289
+ else:
290
+ outdir = Path(wsi.patched_slide_path) / "cell_detection"
291
+ outdir.mkdir(exist_ok=True, parents=True)
292
+
293
+ cell_dict_wsi = [] # for storing all cell information
294
+ cell_dict_detection = [] # for storing only the centroids
295
+
296
+ graph_data = {
297
+ "cell_tokens": [],
298
+ "positions": [],
299
+ "contours": [],
300
+ "metadata": {"wsi_metadata": wsi.metadata, "nuclei_types": nuclei_types},
301
+ }
302
+ processed_patches = []
303
+
304
+ with torch.no_grad():
305
+ for batch in tqdm.tqdm(
306
+ wsi_inference_dataloader, total=len(wsi_inference_dataloader)
307
+ ):
308
+ patches = batch[0].to(self.device)
309
+
310
+ metadata = batch[1]
311
+ if self.mixed_precision:
312
+ with torch.autocast(device_type="cuda", dtype=torch.float16):
313
+ predictions = self.model.forward(patches, retrieve_tokens=True)
314
+ else:
315
+ predictions = self.model.forward(patches, retrieve_tokens=True)
316
+ # reshape, apply softmax to segmentation maps
317
+ # predictions = self.model.reshape_model_output(predictions_, self.device)
318
+ instance_types, tokens = self.get_cell_predictions_with_tokens(
319
+ predictions, magnification=wsi.metadata["magnification"]
320
+ )
321
+
322
+ # unpack each patch from batch
323
+ for idx, (patch_instance_types, patch_metadata) in enumerate(
324
+ zip(instance_types, metadata)
325
+ ):
326
+ # add global patch metadata
327
+ patch_cell_detection = {}
328
+ patch_cell_detection["patch_metadata"] = patch_metadata
329
+ patch_cell_detection["type_map"] = dataset_config["nuclei_types"]
330
+
331
+ processed_patches.append(
332
+ f"{patch_metadata['row']}_{patch_metadata['col']}"
333
+ )
334
+
335
+ # calculate coordinate on highest magnifications
336
+ # wsi_scaling_factor = patch_metadata["wsi_metadata"]["downsampling"]
337
+ # patch_size = patch_metadata["wsi_metadata"]["patch_size"]
338
+ wsi_scaling_factor = wsi.metadata["downsampling"]
339
+ patch_size = wsi.metadata["patch_size"]
340
+ x_global = int(
341
+ patch_metadata["row"] * patch_size * wsi_scaling_factor
342
+ - (patch_metadata["row"] + 0.5) * overlap
343
+ )
344
+ y_global = int(
345
+ patch_metadata["col"] * patch_size * wsi_scaling_factor
346
+ - (patch_metadata["col"] + 0.5) * overlap
347
+ )
348
+
349
+ # extract cell information
350
+ for cell in patch_instance_types.values():
351
+ if cell["type"] == nuclei_types["Background"]:
352
+ continue
353
+ offset_global = np.array([x_global, y_global])
354
+ centroid_global = cell["centroid"] + np.flip(offset_global)
355
+ contour_global = cell["contour"] + np.flip(offset_global)
356
+ bbox_global = cell["bbox"] + offset_global
357
+ cell_dict = {
358
+ "bbox": bbox_global.tolist(),
359
+ "centroid": centroid_global.tolist(),
360
+ "contour": contour_global.tolist(),
361
+ "type_prob": cell["type_prob"],
362
+ "type": cell["type"],
363
+ "patch_coordinates": [
364
+ patch_metadata["row"],
365
+ patch_metadata["col"],
366
+ ],
367
+ "cell_status": get_cell_position_marging(
368
+ cell["bbox"], 256, 64
369
+ ),
370
+ "offset_global": offset_global.tolist()
371
+ # optional: Local positional information
372
+ # "bbox_local": cell["bbox"].tolist(),
373
+ # "centroid_local": cell["centroid"].tolist(),
374
+ # "contour_local": cell["contour"].tolist(),
375
+ }
376
+ cell_detection = {
377
+ "bbox": bbox_global.tolist(),
378
+ "centroid": centroid_global.tolist(),
379
+ "type": cell["type"],
380
+ }
381
+ if np.max(cell["bbox"]) == 256 or np.min(cell["bbox"]) == 0:
382
+ position = get_cell_position(cell["bbox"], 256)
383
+ cell_dict["edge_position"] = True
384
+ cell_dict["edge_information"] = {}
385
+ cell_dict["edge_information"]["position"] = position
386
+ cell_dict["edge_information"][
387
+ "edge_patches"
388
+ ] = get_edge_patch(
389
+ position, patch_metadata["row"], patch_metadata["col"]
390
+ )
391
+ else:
392
+ cell_dict["edge_position"] = False
393
+
394
+ cell_dict_wsi.append(cell_dict)
395
+ cell_dict_detection.append(cell_detection)
396
+
397
+ # get the cell token
398
+ bb_index = cell["bbox"] / self.model.patch_size
399
+ bb_index[0, :] = np.floor(bb_index[0, :])
400
+ bb_index[1, :] = np.ceil(bb_index[1, :])
401
+ bb_index = bb_index.astype(np.uint8)
402
+ cell_token = tokens[
403
+ idx,
404
+ bb_index[0, 1] : bb_index[1, 1],
405
+ bb_index[0, 0] : bb_index[1, 0],
406
+ :,
407
+ ]
408
+ cell_token = torch.mean(
409
+ rearrange(cell_token, "H W D -> (H W) D"), dim=0
410
+ )
411
+
412
+ graph_data["cell_tokens"].append(cell_token)
413
+ graph_data["positions"].append(torch.Tensor(centroid_global))
414
+ graph_data["contours"].append(torch.Tensor(contour_global))
415
+
416
+ # post processing
417
+ self.logger.info(f"Detected cells before cleaning: {len(cell_dict_wsi)}")
418
+ keep_idx = self.post_process_edge_cells(cell_list=cell_dict_wsi)
419
+ cell_dict_wsi = [cell_dict_wsi[idx_c] for idx_c in keep_idx]
420
+ cell_dict_detection = [cell_dict_detection[idx_c] for idx_c in keep_idx]
421
+ graph_data["cell_tokens"] = [
422
+ graph_data["cell_tokens"][idx_c] for idx_c in keep_idx
423
+ ]
424
+ graph_data["positions"] = [graph_data["positions"][idx_c] for idx_c in keep_idx]
425
+ graph_data["contours"] = [graph_data["contours"][idx_c] for idx_c in keep_idx]
426
+ self.logger.info(f"Detected cells after cleaning: {len(keep_idx)}")
427
+
428
+ self.logger.info(
429
+ f"Processed all patches. Storing final results: {str(outdir / f'cells.json')} and cell_detection.json"
430
+ )
431
+ cell_dict_wsi = {
432
+ "wsi_metadata": wsi.metadata,
433
+ "processed_patches": processed_patches,
434
+ "type_map": dataset_config["nuclei_types"],
435
+ "cells": cell_dict_wsi,
436
+ }
437
+ with open(str(outdir / "cells.json"), "w") as outfile:
438
+ ujson.dump(cell_dict_wsi, outfile, indent=2)
439
+ if geojson:
440
+ self.logger.info("Converting segmentation to geojson")
441
+ geojson_list = self.convert_geojson(cell_dict_wsi["cells"], True)
442
+ with open(str(outdir / "cells.geojson"), "w") as outfile:
443
+ ujson.dump(geojson_list, outfile, indent=2)
444
+
445
+ cell_dict_detection = {
446
+ "wsi_metadata": wsi.metadata,
447
+ "processed_patches": processed_patches,
448
+ "type_map": dataset_config["nuclei_types"],
449
+ "cells": cell_dict_detection,
450
+ }
451
+ with open(str(outdir / "cell_detection.json"), "w") as outfile:
452
+ ujson.dump(cell_dict_detection, outfile, indent=2)
453
+ if geojson:
454
+ self.logger.info("Converting detection to geojson")
455
+ geojson_list = self.convert_geojson(cell_dict_wsi["cells"], False)
456
+ with open(str(outdir / "cell_detection.geojson"), "w") as outfile:
457
+ ujson.dump(geojson_list, outfile, indent=2)
458
+
459
+ self.logger.info(
460
+ f"Create cell graph with embeddings and save it under: {str(outdir / 'cells.pt')}"
461
+ )
462
+ graph = CellGraphDataWSI(
463
+ x=torch.stack(graph_data["cell_tokens"]),
464
+ positions=torch.stack(graph_data["positions"]),
465
+ contours=graph_data["contours"],
466
+ metadata=graph_data["metadata"],
467
+ )
468
+ torch.save(graph, outdir / "cells.pt")
469
+
470
+ cell_stats_df = pd.DataFrame(cell_dict_wsi["cells"])
471
+ cell_stats = dict(cell_stats_df.value_counts("type"))
472
+ nuclei_types_inverse = {v: k for k, v in nuclei_types.items()}
473
+ verbose_stats = {nuclei_types_inverse[k]: v for k, v in cell_stats.items()}
474
+ self.logger.info(f"Finished with cell detection for WSI {wsi.name}")
475
+ self.logger.info("Stats:")
476
+ self.logger.info(f"{verbose_stats}")
477
+
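The per-cell token computed above is the mean over the token-grid region covered by the cell's bounding box; a standalone sketch of that pooling with assumed shapes:

    import torch
    from einops import rearrange

    tokens = torch.randn(16, 16, 384)       # (H, W, D) token grid of one patch
    cell_region = tokens[2:5, 3:6]          # tokens under a cell's bbox
    cell_token = rearrange(cell_region, "H W D -> (H W) D").mean(dim=0)  # (384,)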
478
+ def get_cell_predictions_with_tokens(
479
+ self, predictions: dict, magnification: int = 40
480
+ ) -> Tuple[List[dict], torch.Tensor]:
481
+ """Take the raw predictions, apply softmax and calculate type instances
482
+
483
+ Args:
484
+ predictions (dict): Network predictions with tokens. Required keys: "nuclei_binary_map", "nuclei_type_map", "tokens".
485
+ magnification (int, optional): WSI magnification. Defaults to 40.
486
+
487
+ Returns:
488
+ Tuple[List[dict], torch.Tensor]:
489
+ * List[dict]: List with a dictionary for each batch element with cell seg results
490
+ Contains bbox, contour, 2D-position, type and type_prob for each cell
491
+ * torch.Tensor: Network tokens on cpu device with shape (batch_size, num_tokens_h, num_tokens_w, embd_dim)
492
+ """
493
+ predictions["nuclei_binary_map"] = F.softmax(
494
+ predictions["nuclei_binary_map"], dim=1
495
+ ) # shape: (batch_size, 2, H, W)
496
+ predictions["nuclei_type_map"] = F.softmax(
497
+ predictions["nuclei_type_map"], dim=1
498
+ ) # shape: (batch_size, num_nuclei_classes, H, W)
499
+ # get the instance types
500
+ (
501
+ _,
502
+ instance_types,
503
+ ) = self.model.calculate_instance_map(predictions, magnification=magnification)
504
+
505
+ tokens = predictions["tokens"].to("cpu")
506
+
507
+ return instance_types, tokens
508
+
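The softmax calls above turn raw logits into per-pixel class probabilities along the channel dimension; a self-contained sketch:

    import torch
    import torch.nn.functional as F

    logits = torch.randn(8, 2, 256, 256)   # nuclei_binary_map logits
    probs = F.softmax(logits, dim=1)       # channel probabilities sum to 1 per pixel
    assert torch.allclose(probs.sum(dim=1), torch.ones(8, 256, 256), atol=1e-5)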
509
+ def post_process_edge_cells(self, cell_list: List[dict]) -> List[int]:
510
+ """Use the CellPostProcessor to remove multiple cells and merge due to overlap
511
+
512
+ Args:
513
+ cell_list (List[dict]): List with cell-dictionaries. Required keys:
514
+ * bbox
515
+ * centroid
516
+ * contour
517
+ * type_prob
518
+ * type
519
+ * patch_coordinates
520
+ * cell_status
521
+ * offset_global
522
+
523
+ Returns:
524
+ List[int]: List with integers of cells that should be kept
525
+ """
526
+ cell_processor = CellPostProcessor(cell_list, self.logger)
527
+ cleaned_cells = cell_processor.post_process_cells()
528
+
529
+ return list(cleaned_cells.index.values)
530
+
531
+ def convert_geojson(
532
+ self, cell_list: list[dict], polygons: bool = False
533
+ ) -> List[dict]:
534
+ """Convert a list of cells to a geojson object
535
+
536
+ Either a segmentation object (polygon) or detection points are converted
537
+
538
+ Args:
539
+ cell_list (list[dict]): Cell list with dict entry for each cell.
540
+ Required keys for detection:
541
+ * type
542
+ * centroid
543
+ Required keys for segmentation:
544
+ * type
545
+ * contour
546
+ polygons (bool, optional): If polygon segmentations (True) or detection points (False). Defaults to False.
547
+
548
+ Returns:
549
+ List[dict]: Geojson like list
550
+ """
551
+ if polygons:
552
+ cell_segmentation_df = pd.DataFrame(cell_list)
553
+ detected_types = sorted(cell_segmentation_df.type.unique())
554
+ geojson_placeholder = []
555
+ for cell_type in detected_types:
556
+ cells = cell_segmentation_df[cell_segmentation_df["type"] == cell_type]
557
+ contours = cells["contour"].to_list()
558
+ final_c = []
559
+ for c in contours:
560
+ c.append(c[0])
561
+ final_c.append([c])
562
+
563
+ cell_geojson_object = get_template_segmentation()
564
+ cell_geojson_object["id"] = str(uuid.uuid4())
565
+ cell_geojson_object["geometry"]["coordinates"] = final_c
566
+ cell_geojson_object["properties"]["classification"][
567
+ "name"
568
+ ] = TYPE_NUCLEI_DICT[cell_type]
569
+ cell_geojson_object["properties"]["classification"][
570
+ "color"
571
+ ] = COLOR_DICT[cell_type]
572
+ geojson_placeholder.append(cell_geojson_object)
573
+ else:
574
+ cell_detection_df = pd.DataFrame(cell_list)
575
+ detected_types = sorted(cell_detection_df.type.unique())
576
+ geojson_placeholder = []
577
+ for cell_type in detected_types:
578
+ cells = cell_detection_df[cell_detection_df["type"] == cell_type]
579
+ centroids = cells["centroid"].to_list()
580
+ cell_geojson_object = get_template_point()
581
+ cell_geojson_object["id"] = str(uuid.uuid4())
582
+ cell_geojson_object["geometry"]["coordinates"] = centroids
583
+ cell_geojson_object["properties"]["classification"][
584
+ "name"
585
+ ] = TYPE_NUCLEI_DICT[cell_type]
586
+ cell_geojson_object["properties"]["classification"][
587
+ "color"
588
+ ] = COLOR_DICT[cell_type]
589
+ geojson_placeholder.append(cell_geojson_object)
590
+ return geojson_placeholder
591
+
592
+
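Each exported feature is a filled-in template from get_template_point/get_template_segmentation; the exact structure comes from those templates, so the keys below are an assumption for illustration only:

    feature = {
        "id": "<uuid4>",                                # hypothetical placeholder
        "geometry": {"coordinates": [[120.0, 340.5]]},  # centroids or contour rings
        "properties": {
            "classification": {"name": "Inflammatory", "color": [34, 221, 77]},
        },
    }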
593
+ class CellPostProcessor:
594
+ def __init__(self, cell_list: List[dict], logger: logging.Logger) -> None:
595
+ """Post-processing of a list of cells from one WSI
596
+
597
+ Args:
598
+ cell_list (List[dict]): List with cell-dictionaries. Required keys:
599
+ * bbox
600
+ * centroid
601
+ * contour
602
+ * type_prob
603
+ * type
604
+ * patch_coordinates
605
+ * cell_status
606
+ * offset_global
607
+ logger (logging.Logger): Logger
608
+ """
609
+ self.logger = logger
610
+ self.logger.info("Initializing Cell-Postprocessor")
611
+ self.cell_df = pd.DataFrame(cell_list)
612
+ self.cell_df = self.cell_df.parallel_apply(convert_coordinates, axis=1)
613
+
614
+ self.mid_cells = self.cell_df[
615
+ self.cell_df["cell_status"] == 0
616
+ ] # cells in the mid
617
+ self.cell_df_margin = self.cell_df[
618
+ self.cell_df["cell_status"] != 0
619
+ ] # cells either touching the border or inside the margin
620
+
621
+ def post_process_cells(self) -> pd.DataFrame:
622
+ """Main Post-Processing coordinator, entry point
623
+
624
+ Returns:
625
+ pd.DataFrame: DataFrame with post-processed and cleaned cells
626
+ """
627
+ self.logger.info("Finding edge-cells for merging")
628
+ cleaned_edge_cells = self._clean_edge_cells()
629
+ self.logger.info("Removal of cells detected multiple times")
630
+ cleaned_edge_cells = self._remove_overlap(cleaned_edge_cells)
631
+
632
+ # merge with mid cells
633
+ postprocessed_cells = pd.concat(
634
+ [self.mid_cells, cleaned_edge_cells]
635
+ ).sort_index()
636
+ return postprocessed_cells
637
+
638
+ def _clean_edge_cells(self) -> pd.DataFrame:
639
+ """Create a DataFrame that just contains all margin cells (cells inside the margin, not touching the border)
640
+ and border/edge cells (touching the border) with no overlapping equivalent (e.g., if the patch has no neighbour)
641
+
642
+ Returns:
643
+ pd.DataFrame: Cleaned DataFrame
644
+ """
645
+
646
+ margin_cells = self.cell_df_margin[
647
+ self.cell_df_margin["edge_position"] == 0
648
+ ] # cells at the margin, but not touching the border
649
+ edge_cells = self.cell_df_margin[
650
+ self.cell_df_margin["edge_position"] == 1
651
+ ] # cells touching the border
652
+ existing_patches = list(set(self.cell_df_margin["patch_coordinates"].to_list()))
653
+
654
+ edge_cells_unique = pd.DataFrame(
655
+ columns=self.cell_df_margin.columns
656
+ ) # cells touching the border without an overlapping counterpart in other patches
657
+
658
+ for idx, cell_info in edge_cells.iterrows():
659
+ edge_information = dict(cell_info["edge_information"])
660
+ edge_patch = edge_information["edge_patches"][0]
661
+ edge_patch = f"{edge_patch[0]}_{edge_patch[1]}"
662
+ if edge_patch not in existing_patches:
663
+ edge_cells_unique.loc[idx, :] = cell_info
664
+
665
+ cleaned_edge_cells = pd.concat([margin_cells, edge_cells_unique])
666
+
667
+ return cleaned_edge_cells.sort_index()
668
+
669
+ def _remove_overlap(self, cleaned_edge_cells: pd.DataFrame) -> pd.DataFrame:
670
+ """Remove overlapping cells from provided DataFrame
671
+
672
+ Args:
673
+ cleaned_edge_cells (pd.DataFrame): DataFrame that should be cleaned
674
+
675
+ Returns:
676
+ pd.DataFrame: Cleaned DataFrame
677
+ """
678
+ merged_cells = cleaned_edge_cells
679
+
680
+ for iteration in range(20):
681
+ poly_list = []
682
+ for idx, cell_info in merged_cells.iterrows():
683
+ poly = Polygon(cell_info["contour"])
684
+ if not poly.is_valid:
685
+ self.logger.debug("Found invalid polygon - Fixing with buffer 0")
686
+ multi = poly.buffer(0)
687
+ if isinstance(multi, MultiPolygon):
688
+ if len(multi) > 1:
689
+ poly_idx = np.argmax([p.area for p in multi])
690
+ poly = multi[poly_idx]
691
+ poly = Polygon(poly)
692
+ else:
693
+ poly = multi[0]
694
+ poly = Polygon(poly)
695
+ else:
696
+ poly = Polygon(multi)
697
+ poly.uid = idx
698
+ poly_list.append(poly)
699
+
700
+ # use an STRtree for fast spatial querying
701
+ tree = strtree.STRtree(poly_list)
702
+
703
+ merged_idx = deque()
704
+ iterated_cells = set()
705
+ overlaps = 0
706
+
707
+ for query_poly in poly_list:
708
+ if query_poly.uid not in iterated_cells:
709
+ intersected_polygons = tree.query(
710
+ query_poly
711
+ ) # this also contains a self-intersection
712
+ if (
713
+ len(intersected_polygons) > 1
714
+ ): # we have at least one intersection with another cell
715
+ submergers = [] # all cells that overlap with query
716
+ for inter_poly in intersected_polygons:
717
+ if (
718
+ inter_poly.uid != query_poly.uid
719
+ and inter_poly.uid not in iterated_cells
720
+ ):
721
+ if (
722
+ query_poly.intersection(inter_poly).area
723
+ / query_poly.area
724
+ > 0.01
725
+ or query_poly.intersection(inter_poly).area
726
+ / inter_poly.area
727
+ > 0.01
728
+ ):
729
+ overlaps = overlaps + 1
730
+ submergers.append(inter_poly)
731
+ iterated_cells.add(inter_poly.uid)
732
+ # catch block: empty list -> some cells are touching, but not overlapping strongly enough
733
+ if len(submergers) == 0:
734
+ merged_idx.append(query_poly.uid)
735
+ else: # merging strategy: take the biggest cell; other merging strategies still need to be implemented
736
+ selected_poly_index = np.argmax(
737
+ np.array([p.area for p in submergers])
738
+ )
739
+ selected_poly_uid = submergers[selected_poly_index].uid
740
+ merged_idx.append(selected_poly_uid)
741
+ else:
742
+ # no intersection, just add
743
+ merged_idx.append(query_poly.uid)
744
+ iterated_cells.add(query_poly.uid)
745
+
746
+ self.logger.info(
747
+ f"Iteration {iteration}: Found overlapping cells: {overlaps}"
748
+ )
749
+ if overlaps == 0:
750
+ self.logger.info("Found all overlapping cells")
751
+ break
752
+ elif iteration == 19:  # last iteration of range(20)
753
+ self.logger.info(
754
+ f"Not all duplicated cells removed, still {overlaps} to remove. For performance reasons, we stop iterating now. Please raise an issue on GitHub or increase the number of iterations."
755
+ )
756
+ merged_cells = cleaned_edge_cells.loc[
757
+ cleaned_edge_cells.index.isin(merged_idx)
758
+ ].sort_index()
759
+
760
+ return merged_cells.sort_index()
761
+
762
+
763
+ def convert_coordinates(row: pd.Series) -> pd.Series:
764
+ """Convert a row from x,y type to one string representation of the patch position for fast querying
765
+ Repr: x_y
766
+
767
+ Args:
768
+ row (pd.Series): Row to be processed
769
+
770
+ Returns:
771
+ pd.Series: Processed Row
772
+ """
773
+ x, y = row["patch_coordinates"]
774
+ row["patch_row"] = x
775
+ row["patch_col"] = y
776
+ row["patch_coordinates"] = f"{x}_{y}"
777
+ return row
778
+
779
+
780
+ def get_cell_position(bbox: np.ndarray, patch_size: int = 1024) -> List[int]:
781
+ """Get cell position as a list
782
+
783
+ Entry is 1, if cell touches the border: [top, right, down, left]
784
+
785
+ Args:
786
+ bbox (np.ndarray): Bounding-Box of cell
787
+ patch_size (int, optional): Patch-size. Defaults to 1024.
788
+
789
+ Returns:
790
+ List[int]: List with 4 integers for each position
791
+ """
792
+ # bbox = 2x2 array in h, w style
793
+ # bbox[0,0] = upper position (height)
794
+ # bbox[1,0] = lower position (height)
795
+ # bbox[0,1] = left position (width)
796
+ # bbox[1,1] = right position (width)
797
+ # bbox[:,0] -> x dimensions
798
+ top, left, down, right = False, False, False, False
799
+ if bbox[0, 0] == 0:
800
+ top = True
801
+ if bbox[0, 1] == 0:
802
+ left = True
803
+ if bbox[1, 0] == patch_size:
804
+ down = True
805
+ if bbox[1, 1] == patch_size:
806
+ right = True
807
+ position = [top, right, down, left]
808
+ position = [int(pos) for pos in position]
809
+
810
+ return position
811
+
812
+
813
+ def get_cell_position_marging(
814
+ bbox: np.ndarray, patch_size: int = 1024, margin: int = 64
815
+ ) -> int:
816
+ """Get the status of the cell, describing the cell position
817
+
818
+ A cell is either in the mid (0) or at one of the borders (1-8)
819
+
820
+ # Numbers are assigned clockwise, starting from top left
821
+ # i.e., top left = 1, top = 2, top right = 3, right = 4, bottom right = 5, bottom = 6, bottom left = 7, left = 8
822
+ # Mid status is denoted by 0
823
+
824
+ Args:
825
+ bbox (np.ndarray): Bounding Box of cell
826
+ patch_size (int, optional): Patch-Size. Defaults to 1024.
827
+ margin (int, optional): Margin-Size. Defaults to 64.
828
+
829
+ Returns:
830
+ int: Cell Status
831
+ """
832
+ cell_status = None
833
+ if np.max(bbox) > patch_size - margin or np.min(bbox) < margin:
834
+ if bbox[0, 0] < margin:
835
+ # top left, top or top right
836
+ if bbox[0, 1] < margin:
837
+ # top left
838
+ cell_status = 1
839
+ elif bbox[1, 1] > patch_size - margin:
840
+ # top right
841
+ cell_status = 3
842
+ else:
843
+ # top
844
+ cell_status = 2
845
+ elif bbox[1, 1] > patch_size - margin:
846
+ # top right, right or bottom right
847
+ if bbox[1, 0] > patch_size - margin:
848
+ # bottom right
849
+ cell_status = 5
850
+ else:
851
+ # right
852
+ cell_status = 4
853
+ elif bbox[1, 0] > patch_size - margin:
854
+ # bottom right, bottom, bottom left
855
+ if bbox[0, 1] < margin:
856
+ # bottom left
857
+ cell_status = 7
858
+ else:
859
+ # bottom
860
+ cell_status = 6
861
+ elif bbox[0, 1] < margin:
862
+ # bottom left, left or top left - at this point only plain left remains
863
+ cell_status = 8
864
+ else:
865
+ cell_status = 0
866
+
867
+ return cell_status
868
+
869
+
870
+ def get_edge_patch(position, row, col):
871
+ # row starting on bottom or on top?
872
+ if position == [1, 0, 0, 0]:
873
+ # top
874
+ return [[row - 1, col]]
875
+ if position == [1, 1, 0, 0]:
876
+ # top and right
877
+ return [[row - 1, col], [row - 1, col + 1], [row, col + 1]]
878
+ if position == [0, 1, 0, 0]:
879
+ # right
880
+ return [[row, col + 1]]
881
+ if position == [0, 1, 1, 0]:
882
+ # right and down
883
+ return [[row, col + 1], [row + 1, col + 1], [row + 1, col]]
884
+ if position == [0, 0, 1, 0]:
885
+ # down
886
+ return [[row + 1, col]]
887
+ if position == [0, 0, 1, 1]:
888
+ # down and left
889
+ return [[row + 1, col], [row + 1, col - 1], [row, col - 1]]
890
+ if position == [0, 0, 0, 1]:
891
+ # left
892
+ return [[row, col - 1]]
893
+ if position == [1, 0, 0, 1]:
894
+ # left and top
895
+ return [[row, col - 1], [row - 1, col - 1], [row - 1, col]]
896
+
897
+
898
+ # CLI
899
+ class InferenceWSIParser:
900
+ """Parser"""
901
+
902
+ def __init__(self) -> None:
903
+ parser = argparse.ArgumentParser(
904
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
905
+ description="Perform CellViT inference for given run-directory with model checkpoints and logs. Just for CellViT, not for StarDist models",
906
+ )
907
+ requiredNamed = parser.add_argument_group("required named arguments")
908
+ requiredNamed.add_argument(
909
+ "--model",
910
+ type=str,
911
+ help="Model checkpoint file that is used for inference",
912
+ required=True,
913
+ )
914
+ parser.add_argument(
915
+ "--gpu", type=int, help="Cuda-GPU ID for inference. Default: 0", default=0
916
+ )
917
+ parser.add_argument(
918
+ "--magnification",
919
+ type=float,
920
+ help="Network magnification. Is used for checking patch magnification such that we use the correct resolution for network. Default: 40",
921
+ default=40,
922
+ )
923
+ parser.add_argument(
924
+ "--enforce_amp",
925
+ action="store_true",
926
+ help="Whether to use mixed precision for inference (enforced). Otherwise network default training settings are used."
927
+ " Default: False",
928
+ )
929
+ parser.add_argument(
930
+ "--batch_size",
931
+ type=int,
932
+ help="Inference batch-size. Default: 8",
933
+ default=8,
934
+ )
935
+ parser.add_argument(
936
+ "--outdir_subdir",
937
+ type=str,
938
+ help="If provided, a subdir with the given name is created in the cell_detection folder where the results are stored. Default: None",
939
+ default=None,
940
+ )
941
+ parser.add_argument(
942
+ "--geojson",
943
+ action="store_true",
944
+ help="Set this flag to export results as additional geojson files for loading them into Software like QuPath.",
945
+ )
946
+
947
+ # subparsers for either loading a WSI or a WSI folder
948
+
949
+ # WSI
950
+ subparsers = parser.add_subparsers(
951
+ dest="command",
952
+ description="Main run command for either performing inference on single WSI-file or on whole dataset",
953
+ )
954
+ subparser_wsi = subparsers.add_parser(
955
+ "process_wsi", description="Process a single WSI file"
956
+ )
957
+ subparser_wsi.add_argument(
958
+ "--wsi_path",
959
+ type=str,
960
+ help="Path to WSI file",
961
+ )
962
+ subparser_wsi.add_argument(
963
+ "--patched_slide_path",
964
+ type=str,
965
+ help="Path to patched WSI file (specific WSI file, not parent path of patched slide dataset)",
966
+ )
967
+
968
+ # Dataset
969
+ subparser_dataset = subparsers.add_parser(
970
+ "process_dataset",
971
+ description="Process a whole dataset",
972
+ )
973
+ subparser_dataset.add_argument(
974
+ "--wsi_paths", type=str, help="Path to the folder where all WSI are stored"
975
+ )
976
+ subparser_dataset.add_argument(
977
+ "--patch_dataset_path",
978
+ type=str,
979
+ help="Path to the folder where the patch dataset is stored",
980
+ )
981
+ subparser_dataset.add_argument(
982
+ "--filelist",
983
+ type=str,
984
+ help="Filelist with WSI to process. Must be a .csv file with one column containing the filenames (named 'Filename')."
985
+ " If not provided, all WSI files with the given extension in the WSI folder are processed.",
986
+ default=None,
987
+ )
988
+ subparser_dataset.add_argument(
989
+ "--wsi_extension",
990
+ type=str,
991
+ help="The extension types used for the WSI files, see configs.python.config (WSI_EXT)",
992
+ default="svs",
993
+ )
994
+
995
+ self.parser = parser
996
+
997
+ def parse_arguments(self) -> dict:
998
+ opt = self.parser.parse_args()
999
+ return vars(opt)
1000
+
1001
+
1002
+ def check_wsi(wsi: WSI, magnification: float = 40.0):
1003
+ """Check if the provided patched WSI has the right settings
1004
+
1005
+ Args:
1006
+ wsi (WSI): WSI to check
1007
+ magnification (float, optional): Check magnification. Defaults to 40.0.
1008
+
1009
+ Raises:
1010
+ RuntimeError: The magnification does not match the network input magnification.
1011
+ RuntimeError: The patch-size is not divisible by 256.
1012
+ RuntimeError: The patch-size is not 256.
1013
+ RuntimeError: The overlap is not 64px.
1014
+ """
1015
+ if wsi.metadata["magnification"] is not None:
1016
+ patch_magnification = float(wsi.metadata["magnification"])
1017
+ else:
1018
+ patch_magnification = float(
1019
+ float(wsi.metadata["base_magnification"]) / wsi.metadata["downsampling"]
1020
+ )
1021
+ patch_size = int(wsi.metadata["patch_size"])
1022
+
1023
+ if patch_magnification != magnification:
1024
+ raise RuntimeError(
1025
+ "The magnification does not match the network input magnification."
1026
+ )
1027
+ if (patch_size % 256) != 0:
1028
+ raise RuntimeError("The patch-size must be divisible by 256.")
1029
+ if wsi.metadata["patch_size"] != 256:
1030
+ raise RuntimeError("The patch-size must be 256.")
1031
+ if wsi.metadata["patch_overlap"] != 64:
1032
+ raise RuntimeError("The patch-overlap must be 64px.")
1033
+
1034
+
1035
+ if __name__ == "__main__":
1036
+ configuration_parser = InferenceWSIParser()
1037
+ configuration = configuration_parser.parse_arguments()
1038
+ command = configuration["command"]
1039
+
1040
+ cell_segmentation = CellSegmentationInference(
1041
+ model_path=configuration["model"],
1042
+ gpu=configuration["gpu"],
1043
+ enforce_mixed_precision=configuration["enforce_amp"],
1044
+ )
1045
+
1046
+ if command.lower() == "process_wsi":
1047
+ cell_segmentation.logger.info("Processing single WSI file")
1048
+ wsi_path = Path(configuration["wsi_path"])
1049
+ wsi_name = wsi_path.stem
1050
+ wsi_file = WSI(
1051
+ name=wsi_name,
1052
+ patient=wsi_name,
1053
+ slide_path=wsi_path,
1054
+ patched_slide_path=configuration["patched_slide_path"],
1055
+ )
1056
+ check_wsi(wsi=wsi_file, magnification=configuration["magnification"])
1057
+ cell_segmentation.process_wsi(
1058
+ wsi_file,
1059
+ subdir_name=configuration["outdir_subdir"],
1060
+ geojson=configuration["geojson"],
1061
+ batch_size=configuration["batch_size"],
1062
+ )
1063
+
1064
+ elif command.lower() == "process_dataset":
1065
+ cell_segmentation.logger.info("Processing whole dataset")
1066
+ if configuration["filelist"] is not None:
1067
+ if Path(configuration["filelist"]).suffix != ".csv":
1068
+ raise ValueError("Filelist must be a .csv file!")
1069
+ cell_segmentation.logger.info(
1070
+ f"Loading files from filelist {configuration['filelist']}"
1071
+ )
1072
+ wsi_filelist = load_wsi_files_from_csv(
1073
+ csv_path=configuration["filelist"],
1074
+ wsi_extension=configuration["wsi_extension"],
1075
+ )
1076
+ wsi_filelist = [
1077
+ Path(configuration["wsi_paths"]) / f
1078
+ if configuration["wsi_paths"] not in f
1079
+ else Path(f)
1080
+ for f in wsi_filelist
1081
+ ]
1082
+ else:
1083
+ cell_segmentation.logger.info(
1084
+ f"Loading all files from folder {configuration['wsi_paths']}. No filelist provided."
1085
+ )
1086
+ wsi_filelist = [
1087
+ f
1088
+ for f in sorted(
1089
+ Path(configuration["wsi_paths"]).glob(
1090
+ f"**/*.{configuration['wsi_extension']}"
1091
+ )
1092
+ )
1093
+ ]
1094
+ for i, wsi_path in enumerate(wsi_filelist):
1095
+ wsi_path = Path(wsi_path)
1096
+ wsi_name = wsi_path.stem
1097
+ patched_slide_path = Path(configuration["patch_dataset_path"]) / wsi_name
1098
+ cell_segmentation.logger.info(f"File {i+1}/{len(wsi_filelist)}: {wsi_name}")
1099
+ wsi_file = WSI(
1100
+ name=wsi_name,
1101
+ patient=wsi_name,
1102
+ slide_path=wsi_path,
1103
+ patched_slide_path=patched_slide_path,
1104
+ )
1105
+ check_wsi(wsi=wsi_file, magnification=configuration["magnification"])
1106
+ cell_segmentation.process_wsi(
1107
+ wsi_file,
1108
+ subdir_name=configuration["outdir_subdir"],
1109
+ geojson=configuration["geojson"],
1110
+ batch_size=configuration["batch_size"],
1111
+ )
cell_segmentation/inference/cell_detection_mp.py ADDED
@@ -0,0 +1,1527 @@
1
+ # -*- coding: utf-8 -*-
2
+ # CellViT Inference Method for Patch-Wise Inference on a patched test set / whole WSI
3
+ #
4
+ # Detect Cells with our Networks
5
+ # The patches dataset must meet the following requirements:
6
+ # Patch-Size must be 1024, with overlap of 64
7
+ #
8
+ # We provide preprocessing code here: ./preprocessing/patch_extraction/main_extraction.py
9
+ #
10
+ # @ Fabian Hörst, [email protected]
11
+ # Institute for Artifical Intelligence in Medicine,
12
+ # University Medicine Essen
13
+ # @ Erik Ylipää, [email protected]
14
+ # Linköping University
15
+ # Luleå, Sweden
16
+
17
+
18
+ from dataclasses import dataclass
19
+ from functools import partial
20
+ import inspect
21
+ from io import BytesIO
22
+ import os
23
+ import queue
24
+ import sys
25
+ import multiprocessing
26
+ from multiprocessing.pool import ThreadPool
27
+ import zipfile
28
+ from time import sleep
29
+
30
+ currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
31
+ parentdir = os.path.dirname(currentdir)
32
+ sys.path.insert(0, parentdir)
33
+ parentdir = os.path.dirname(parentdir)
34
+ sys.path.insert(0, parentdir)
35
+
36
+ from cellvit.cell_segmentation.utils.post_proc import DetectionCellPostProcessor
37
+
38
+
39
+ import argparse
40
+ import logging
41
+ import uuid
42
+ import warnings
43
+ from collections import defaultdict, deque
44
+ from pathlib import Path
45
+ from typing import Dict, List, Literal, OrderedDict, Tuple, Union, Callable
46
+
47
+ import numpy as np
48
+ import pandas as pd
49
+ import torch
50
+ import torch.nn.functional as F
51
+ import tqdm
52
+ import ujson
53
+ from einops import rearrange
54
+
55
+ # from PIL import Image
56
+ from shapely import strtree
57
+ from shapely.errors import ShapelyDeprecationWarning
58
+ from shapely.geometry import Polygon, MultiPolygon
59
+
60
+
61
+ # from skimage.color import rgba2rgb
62
+ from torch.utils.data import DataLoader, Dataset
63
+ from torchvision import transforms as T
64
+ #from torch.profiler import profile, record_function, ProfilerActivity
65
+
66
+ from cellvit.cell_segmentation.datasets.cell_graph_datamodel import CellGraphDataWSI
67
+ from cellvit.cell_segmentation.utils.template_geojson import (
68
+ get_template_point,
69
+ get_template_segmentation,
70
+ )
71
+ from cellvit.datamodel.wsi_datamodel import WSI
72
+ from cellvit.models.segmentation.cell_segmentation.cellvit import (
73
+ CellViT,
74
+ CellViT256,
75
+ CellViT256Unshared,
76
+ CellViTSAM,
77
+ CellViTSAMUnshared,
78
+ CellViTUnshared,
79
+ )
80
+ from cellvit.preprocessing.encoding.datasets.patched_wsi_inference import PatchedWSIInference
81
+ from cellvit.utils.file_handling import load_wsi_files_from_csv
82
+ from cellvit.utils.logger import Logger
83
+ from cellvit.utils.tools import unflatten_dict
84
+
85
+ warnings.filterwarnings("ignore", category=ShapelyDeprecationWarning)
86
+ #pandarallel.initialize(progress_bar=False, nb_workers=12)
87
+
88
+
89
+
90
+ # color setup
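+ # Maps nucleus type id -> RGB color used for geojson/QuPath overlays; ids follow
+ # TYPE_NUCLEI_DICT below (id 0 = background is omitted).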
91
+ COLOR_DICT = {
92
+ 1: [255, 0, 0],
93
+ 2: [34, 221, 77],
94
+ 3: [35, 92, 236],
95
+ 4: [254, 255, 0],
96
+ 5: [255, 159, 68],
97
+ }
98
+
99
+ TYPE_NUCLEI_DICT = {
100
+ 1: "Neoplastic",
101
+ 2: "Inflammatory",
102
+ 3: "Connective",
103
+ 4: "Dead",
104
+ 5: "Epithelial",
105
+ }
106
+
107
+ # This file will be used to indicate that an image has been processed
108
+ FLAG_FILE_NAME = ".cell_detection_done"
109
+
110
+ def load_wsi(wsi_path, overwrite=False):
111
+ try:
112
+ wsi_name = wsi_path.stem
113
+ patched_slide_path = Path(configuration["patch_dataset_path"]) / wsi_name
114
+ flag_file_path = patched_slide_path / "cell_detection" / FLAG_FILE_NAME
115
+ if not overwrite and flag_file_path.exists():
116
+ return
117
+ wsi_file = WSI(
118
+ name=wsi_name,
119
+ patient=wsi_name,
120
+ slide_path=wsi_path,
121
+ patched_slide_path=patched_slide_path,
122
+ )
123
+ check_wsi(wsi=wsi_file, magnification=configuration["magnification"])
124
+ return wsi_file
125
+ except BaseException as e:
126
+ e.wsi_file = wsi_path
127
+ return e
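+ # Note: exceptions are returned rather than raised so that, when load_wsi runs in a
+ # worker pool, failures come back as values and the caller can log them and continue.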
128
+
129
+
130
+ class InferenceWSIDataset(Dataset):
131
+ def __init__(self, wsi_filelist, n_workers: int = 0, overwrite=False, transform: Callable = None):
132
+ self.wsi_files = []
133
+
134
+ # This index repeats each WSI object once per patch it contains, so it is as
135
+ # long as the total number of patches across all WSI files. Subscripting into
136
+ # this list with a global patch index therefore yields the correct WSI file
137
+ # object together with the pertinent patch metadata, without any further
138
+ # bookkeeping.
139
+ self.wsi_index = []
140
+ self.transform = transform
141
+
142
+ pb = tqdm.trange(len(wsi_filelist), desc='Loading WSI file list')
143
+ already_processed_files = []
144
+ if n_workers > 0:
145
+ # Since this is mostly an IO-bound task, we use a thread pool
146
+ #with multiprocessing.Pool(n_workers) as pool:
147
+ with ThreadPool(n_workers) as pool:
148
+ load_wsi_partial = partial(load_wsi, overwrite=overwrite)
149
+ for wsi_file in pool.imap(load_wsi_partial, wsi_filelist):
150
+ if isinstance(wsi_file, BaseException):
151
+ logging.warning(f"Could not load file {wsi_file.wsi_file}, caught exception {str(wsi_file)}")
152
+ elif wsi_file is None:
153
+ already_processed_files.append(wsi_file)
154
+ else:
155
+ self.wsi_files.append(wsi_file)
156
+ n_patches = wsi_file.get_number_patches()
157
+ indexing_info = [(wsi_file, i) for i in range(n_patches)]
158
+ self.wsi_index.extend(indexing_info)
159
+ pb.update()
160
+ else:
161
+ for wsi_file_path in wsi_filelist:
162
+ wsi_file = load_wsi(wsi_file_path, overwrite)
163
+ if isinstance(wsi_file, BaseException):
164
+ logging.warning(f"Could not load file {wsi_file.wsi_file}, caught exception {str(wsi_file)}")
165
+ elif wsi_file is None:
166
+ already_processed_files.append(wsi_file)
167
+ else:
168
+ self.wsi_files.append(wsi_file)
169
+ n_patches = wsi_file.get_number_patches()
170
+ indexing_info = [(wsi_file, i) for i in range(n_patches)]
171
+ self.wsi_index.extend(indexing_info)
172
+ pb.update()
173
+
174
+
175
+ def __len__(self):
176
+ return len(self.wsi_index)
177
+
178
+ def __getitem__(self, item):
179
+ wsi_file, local_idx = self.wsi_index[item]
180
+ patch, metadata = wsi_file.get_patch(local_idx, self.transform)
181
+ return patch, local_idx, wsi_file, metadata
182
+
183
+ def get_n_files(self):
184
+ return len(self.wsi_files)
185
+
186
+
187
+ def wsi_patch_collator(batch):
188
+ patches, local_idx, wsi_file, metadata = zip(*batch) # Transpose the batch
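+ # Only the image tensors are stacked; WSI handles and metadata dicts are passed
+ # through as tuples because torch's default_collate cannot batch arbitrary objects.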
189
+ patches = torch.stack(patches)
190
+ return patches, local_idx, wsi_file, metadata
191
+
192
+
193
+ def f_post_processing_worker(wsi_file, wsi_work_list, postprocess_arguments):
194
+ local_idxs, predictions_records, metadata = zip(*wsi_work_list)
195
+ # Merge the prediction records into a single dictionary again.
196
+ predictions = defaultdict(list)
197
+ for record in predictions_records:
198
+ for k,v in record.items():
199
+ predictions[k].append(v)
200
+ predictions_stacked = {k: torch.stack(v).to(torch.float32) for k,v in predictions.items()}
201
+ postprocess_predictions(predictions_stacked, metadata, wsi_file, postprocess_arguments)
202
+
203
+
204
+ @dataclass
205
+ class PostprocessArguments:
206
+ n_images: int
207
+ num_nuclei_classes: int
208
+ dataset_config: Dict
209
+ overlap: int
210
+ patch_size: int
211
+ geojson: bool
212
+ subdir_name: str
213
+ logger: Logger
214
+ n_workers: int = 0
215
+ wait_time: float = 2.
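+ # wait_time is the polling interval (in seconds) used to throttle submission to the
+ # postprocessing pool in _process_wsi_filelist_multiprocessing below.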
216
+
217
+
218
+ def postprocess_predictions(predictions, metadata, wsi, postprocessing_args: PostprocessArguments):
219
+ # logger = postprocessing_args.logger
220
+ logger = logging.getLogger()
221
+ num_nuclei_classes = postprocessing_args.num_nuclei_classes
222
+ dataset_config = postprocessing_args.dataset_config
223
+ overlap = postprocessing_args.overlap
224
+ patch_size = postprocessing_args.patch_size
225
+ geojson = postprocessing_args.geojson
226
+ subdir_name = postprocessing_args.subdir_name
227
+
228
+ if subdir_name is not None:
229
+ outdir = Path(wsi.patched_slide_path) / "cell_detection" / subdir_name
230
+ else:
231
+ outdir = Path(wsi.patched_slide_path) / "cell_detection"
232
+ outdir.mkdir(exist_ok=True, parents=True)
233
+
234
+ outfile = outdir / "cell_detection.zip"
235
+
236
+ instance_types, tokens = get_cell_predictions_with_tokens(num_nuclei_classes,
237
+ predictions, magnification=wsi.metadata["magnification"]
238
+ )
239
+
240
+ processed_patches = []
241
+ # unpack each patch from batch
242
+ cell_dict_wsi = [] # for storing all cell information
243
+ cell_dict_detection = [] # for storing only the centroids
244
+ nuclei_types = dataset_config["nuclei_types"]
245
+
246
+ graph_data = {
247
+ "cell_tokens": [],
248
+ "positions": [],
249
+ "contours": [],
250
+ "metadata": {"wsi_metadata": wsi.metadata, "nuclei_types": nuclei_types},
251
+ }
252
+
253
+ for idx, (patch_instance_types, patch_metadata) in enumerate(
254
+ zip(instance_types, metadata)
255
+ ):
256
+ # add global patch metadata
257
+ patch_cell_detection = {}
258
+ patch_cell_detection["patch_metadata"] = patch_metadata
259
+ patch_cell_detection["type_map"] = dataset_config["nuclei_types"]
260
+
261
+ processed_patches.append(
262
+ f"{patch_metadata['row']}_{patch_metadata['col']}"
263
+ )
264
+
265
+ # calculate coordinate on highest magnifications
266
+ # wsi_scaling_factor = patch_metadata["wsi_metadata"]["downsampling"]
267
+ # patch_size = patch_metadata["wsi_metadata"]["patch_size"]
268
+ wsi_scaling_factor = wsi.metadata["downsampling"]
269
+ patch_size = wsi.metadata["patch_size"]
270
+ x_global = int(
271
+ patch_metadata["row"] * patch_size * wsi_scaling_factor
272
+ - (patch_metadata["row"] + 0.5) * overlap
273
+ )
274
+ y_global = int(
275
+ patch_metadata["col"] * patch_size * wsi_scaling_factor
276
+ - (patch_metadata["col"] + 0.5) * overlap
277
+ )
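+ # Patch origin at the highest magnification: grid index scaled by patch size and
+ # downsampling, shifted to compensate for the overlap between neighbouring patches.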
278
+
279
+ # extract cell information
280
+ for cell in patch_instance_types.values():
281
+ if cell["type"] == nuclei_types["Background"]:
282
+ continue
283
+ offset_global = np.array([x_global, y_global])
284
+ centroid_global = cell["centroid"] + np.flip(offset_global)
285
+ contour_global = cell["contour"] + np.flip(offset_global)
286
+ bbox_global = cell["bbox"] + offset_global
287
+ cell_dict = {
288
+ "bbox": bbox_global.tolist(),
289
+ "centroid": centroid_global.tolist(),
290
+ "contour": contour_global.tolist(),
291
+ "type_prob": cell["type_prob"],
292
+ "type": cell["type"],
293
+ "patch_coordinates": [
294
+ patch_metadata["row"],
295
+ patch_metadata["col"],
296
+ ],
297
+ "cell_status": get_cell_position_marging(
298
+ cell["bbox"], 1024, 64
299
+ ),
300
+ "offset_global": offset_global.tolist()
301
+ # optional: Local positional information
302
+ # "bbox_local": cell["bbox"].tolist(),
303
+ # "centroid_local": cell["centroid"].tolist(),
304
+ # "contour_local": cell["contour"].tolist(),
305
+ }
306
+ cell_detection = {
307
+ "bbox": bbox_global.tolist(),
308
+ "centroid": centroid_global.tolist(),
309
+ "type": cell["type"],
310
+ }
311
+ if np.max(cell["bbox"]) == 1024 or np.min(cell["bbox"]) == 0:
312
+ position = get_cell_position(cell["bbox"], 1024)
313
+ cell_dict["edge_position"] = True
314
+ cell_dict["edge_information"] = {}
315
+ cell_dict["edge_information"]["position"] = position
316
+ cell_dict["edge_information"][
317
+ "edge_patches"
318
+ ] = get_edge_patch(
319
+ position, patch_metadata["row"], patch_metadata["col"]
320
+ )
321
+ else:
322
+ cell_dict["edge_position"] = False
323
+
324
+ cell_dict_wsi.append(cell_dict)
325
+ cell_dict_detection.append(cell_detection)
326
+
327
+ # get the cell token
328
+ bb_index = cell["bbox"] / patch_size
329
+ bb_index[0, :] = np.floor(bb_index[0, :])
330
+ bb_index[1, :] = np.ceil(bb_index[1, :])
331
+ bb_index = bb_index.astype(np.uint8)
332
+ cell_token = tokens[
333
+ idx,
334
+ bb_index[0, 1] : bb_index[1, 1],
335
+ bb_index[0, 0] : bb_index[1, 0],
336
+ :,
337
+ ]
338
+ cell_token = torch.mean(
339
+ rearrange(cell_token, "H W D -> (H W) D"), dim=0
340
+ )
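+ # The cell embedding is the mean over all ViT tokens whose grid cells intersect the
+ # cell's bounding box (bbox rescaled into token-grid coordinates above).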
341
+
342
+ graph_data["cell_tokens"].append(cell_token)
343
+ graph_data["positions"].append(torch.Tensor(centroid_global))
344
+ graph_data["contours"].append(torch.Tensor(contour_global))
345
+
346
+ # post processing
347
+ logger.info(f"Detected cells before cleaning: {len(cell_dict_wsi)}")
348
+ keep_idx = post_process_edge_cells(cell_list=cell_dict_wsi, logger=logger)
349
+ cell_dict_wsi = [cell_dict_wsi[idx_c] for idx_c in keep_idx]
350
+ cell_dict_detection = [cell_dict_detection[idx_c] for idx_c in keep_idx]
351
+ graph_data["cell_tokens"] = [
352
+ graph_data["cell_tokens"][idx_c] for idx_c in keep_idx
353
+ ]
354
+ graph_data["positions"] = [graph_data["positions"][idx_c] for idx_c in keep_idx]
355
+ graph_data["contours"] = [graph_data["contours"][idx_c] for idx_c in keep_idx]
356
+ logger.info(f"Detected cells after cleaning: {len(keep_idx)}")
357
+
358
+ logger.info(
359
+ f"Processed all patches. Storing final results: {str(outdir / f'cells.json')} and cell_detection.json"
360
+ )
361
+ cell_dict_wsi = {
362
+ "wsi_metadata": wsi.metadata,
363
+ "processed_patches": processed_patches,
364
+ "type_map": dataset_config["nuclei_types"],
365
+ "cells": cell_dict_wsi,
366
+ }
367
+
368
+ with zipfile.ZipFile(outfile, "w", compression=zipfile.ZIP_DEFLATED, compresslevel=9) as zf:
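+ # All per-slide outputs (JSON, optional geojson, graph tensor) are bundled into a
+ # single compressed archive instead of loose files.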
369
+ zf.writestr("cells.json", ujson.dumps(cell_dict_wsi, outfile, indent=2))
370
+
371
+ if geojson:
372
+ logger.info("Converting segmentation to geojson")
373
+
374
+ geojson_list = convert_geojson(cell_dict_wsi["cells"], True)
375
+ zf.writestr("cells.geojson", ujson.dumps(geojson_list, outfile, indent=2))
376
+
377
+ cell_dict_detection = {
378
+ "wsi_metadata": wsi.metadata,
379
+ "processed_patches": processed_patches,
380
+ "type_map": dataset_config["nuclei_types"],
381
+ "cells": cell_dict_detection,
382
+ }
383
+ zf.writestr("cell_detection.json", ujson.dumps(cell_dict_detection, outfile, indent=2))
384
+ if geojson:
385
+ logger.info("Converting detection to geojson")
386
+ geojson_list = convert_geojson(cell_dict_wsi["cells"], False)
387
+ zf.writestr("cell_detection.geojson", ujson.dumps(geojson_list, outfile, indent=2))
388
+
389
+ logger.info(
390
+ f"Create cell graph with embeddings and save it under: {str(outdir / 'cells.pt')}"
391
+ )
392
+ graph = CellGraphDataWSI(
393
+ x=torch.stack(graph_data["cell_tokens"]),
394
+ positions=torch.stack(graph_data["positions"]),
395
+ contours=graph_data["contours"],
396
+ metadata=graph_data["metadata"],
397
+ )
398
+ torch_bytes_io = BytesIO()
399
+ #torch.save(graph, outdir / "cells.pt")
400
+ torch.save(graph, torch_bytes_io)
401
+ zf.writestr("cells.pt", torch_bytes_io.getvalue())
402
+
403
+ flag_file = outdir / FLAG_FILE_NAME
404
+ flag_file.touch()
405
+
406
+ cell_stats_df = pd.DataFrame(cell_dict_wsi["cells"])
407
+ cell_stats = dict(cell_stats_df.value_counts("type"))
408
+ nuclei_types_inverse = {v: k for k, v in nuclei_types.items()}
409
+ verbose_stats = {nuclei_types_inverse[k]: v for k, v in cell_stats.items()}
410
+ logger.info(f"Finished with cell detection for WSI {wsi.name}")
411
+ logger.info("Stats:")
412
+ logger.info(f"{verbose_stats}")
413
+
414
+
415
+ def post_process_edge_cells(cell_list: List[dict], logger) -> List[int]:
416
+ """Use the CellPostProcessor to remove multiple cells and merge due to overlap
417
+
418
+ Args:
419
+ cell_list (List[dict]): List with cell-dictionaries. Required keys:
420
+ * bbox
421
+ * centroid
422
+ * contour
423
+ * type_prob
424
+ * type
425
+ * patch_coordinates
426
+ * cell_status
427
+ * offset_global
428
+
429
+ Returns:
430
+ List[int]: List with integers of cells that should be kept
431
+ """
432
+ cell_processor = CellPostProcessor(cell_list, logger)
433
+ cleaned_cells_idx = cell_processor.post_process_cells()
434
+
435
+ return sorted(cell_record["index"] for cell_record in cleaned_cells_idx)
436
+
437
+
438
+ def convert_geojson(cell_list: list[dict], polygons: bool = False) -> List[dict]:
439
+ """Convert a list of cells to a geojson object
440
+
441
+ Either a segmentation object (polygon) or detection points are converted
442
+
443
+ Args:
444
+ cell_list (list[dict]): Cell list with dict entry for each cell.
445
+ Required keys for detection:
446
+ * type
447
+ * centroid
448
+ Required keys for segmentation:
449
+ * type
450
+ * contour
451
+ polygons (bool, optional): If polygon segmentations (True) or detection points (False). Defaults to False.
452
+
453
+ Returns:
454
+ List[dict]: Geojson like list
455
+ """
456
+ if polygons:
457
+ cell_segmentation_df = pd.DataFrame(cell_list)
458
+ detected_types = sorted(cell_segmentation_df.type.unique())
459
+ geojson_placeholder = []
460
+ for cell_type in detected_types:
461
+ cells = cell_segmentation_df[cell_segmentation_df["type"] == cell_type]
462
+ contours = cells["contour"].to_list()
463
+ final_c = []
464
+ for c in contours:
465
+ c.append(c[0])
466
+ final_c.append([c])
467
+
468
+ cell_geojson_object = get_template_segmentation()
469
+ cell_geojson_object["id"] = str(uuid.uuid4())
470
+ cell_geojson_object["geometry"]["coordinates"] = final_c
471
+ cell_geojson_object["properties"]["classification"][
472
+ "name"
473
+ ] = TYPE_NUCLEI_DICT[cell_type]
474
+ cell_geojson_object["properties"]["classification"][
475
+ "color"
476
+ ] = COLOR_DICT[cell_type]
477
+ geojson_placeholder.append(cell_geojson_object)
478
+ else:
479
+ cell_detection_df = pd.DataFrame(cell_list)
480
+ detected_types = sorted(cell_detection_df.type.unique())
481
+ geojson_placeholder = []
482
+ for cell_type in detected_types:
483
+ cells = cell_detection_df[cell_detection_df["type"] == cell_type]
484
+ centroids = cells["centroid"].to_list()
485
+ cell_geojson_object = get_template_point()
486
+ cell_geojson_object["id"] = str(uuid.uuid4())
487
+ cell_geojson_object["geometry"]["coordinates"] = centroids
488
+ cell_geojson_object["properties"]["classification"][
489
+ "name"
490
+ ] = TYPE_NUCLEI_DICT[cell_type]
491
+ cell_geojson_object["properties"]["classification"][
492
+ "color"
493
+ ] = COLOR_DICT[cell_type]
494
+ geojson_placeholder.append(cell_geojson_object)
495
+ return geojson_placeholder
496
+
497
+
498
+ def calculate_instance_map(num_nuclei_classes: int, predictions: OrderedDict, magnification: Literal[20, 40] = 40
499
+ ) -> Tuple[torch.Tensor, List[dict]]:
500
+ """Calculate Instance Map from network predictions (after Softmax output)
501
+
502
+ Args:
503
+ predictions (dict): Dictionary with the following required keys:
504
+ * nuclei_binary_map: Binary Nucleus Predictions. Shape: (batch_size, H, W, 2)
505
+ * nuclei_type_map: Type prediction of nuclei. Shape: (batch_size, H, W, 6)
506
+ * hv_map: Horizontal-Vertical nuclei mapping. Shape: (batch_size, H, W, 2)
507
+ magnification (Literal[20, 40], optional): Which magnification the data has. Defaults to 40.
508
+
509
+ Returns:
510
+ Tuple[torch.Tensor, List[dict]]:
511
+ * torch.Tensor: Instance map. Each instance has its own integer id. Shape: (batch_size, H, W)
512
+ * List of dictionaries. Each List entry is one image. Each dict contains another dict for each detected nucleus.
513
+ For each nucleus, the following information is returned: "bbox", "centroid", "contour", "type_prob", "type"
514
+ """
515
+ cell_post_processor = DetectionCellPostProcessor(nr_types=num_nuclei_classes, magnification=magnification, gt=False)
516
+ instance_preds = []
517
+ type_preds = []
518
+ max_nuclei_type_predictions = predictions["nuclei_type_map"].argmax(dim=-1, keepdims=True).detach()
519
+ max_nuclei_type_predictions = max_nuclei_type_predictions.cpu() # This is a costly operation because this map is rather large
520
+ max_nuclei_location_predictions = predictions["nuclei_binary_map"].argmax(dim=-1, keepdims=True).detach().cpu()
521
+
522
+ for i in range(predictions["nuclei_binary_map"].shape[0]):
523
+ # Broke this out to profile better
524
+ pred_map = np.concatenate(
525
+ [
526
+ max_nuclei_type_predictions[i],
527
+ max_nuclei_location_predictions[i],
528
+ predictions["hv_map"][i].detach().cpu(),
529
+ ],
530
+ axis=-1,
531
+ )
532
+ instance_pred = cell_post_processor.post_process_cell_segmentation(pred_map)
533
+ instance_preds.append(instance_pred[0])
534
+ type_preds.append(instance_pred[1])
535
+
536
+ return torch.Tensor(np.stack(instance_preds)), type_preds
537
+
538
+
539
+ def get_cell_predictions_with_tokens(num_nuclei_classes: int,
540
+ predictions: dict, magnification: int = 40
541
+ ) -> Tuple[List[dict], torch.Tensor]:
542
+ """Take the raw predictions, apply softmax and calculate type instances
543
+
544
+ Args:
545
+ predictions (dict): Network predictions with tokens. Keys: "nuclei_binary_map", "nuclei_type_map", "hv_map", "tokens"
546
+ magnification (int, optional): WSI magnification. Defaults to 40.
547
+
548
+ Returns:
549
+ Tuple[List[dict], torch.Tensor]:
550
+ * List[dict]: List with a dictionary for each batch element with cell seg results
551
+ Contains bbox, contour, 2D-position, type and type_prob for each cell
552
+ * torch.Tensor: Network tokens on cpu device with shape (batch_size, num_tokens_h, num_tokens_w, embd_dim)
553
+ """
554
+ predictions["nuclei_binary_map"] = F.softmax(
555
+ predictions["nuclei_binary_map"], dim=-1
556
+ )
557
+ predictions["nuclei_type_map"] = F.softmax(
558
+ predictions["nuclei_type_map"], dim=-1
559
+ )
560
+
561
+ # get the instance types
562
+ (
563
+ _,
564
+ instance_types,
565
+ ) = calculate_instance_map(num_nuclei_classes, predictions, magnification=magnification)
566
+ # get the tokens
567
+ tokens = predictions["tokens"]
568
+
569
+ return instance_types, tokens
570
+
571
+
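+ # Typical programmatic use (a minimal sketch; checkpoint and slide paths are
+ # hypothetical):
+ #   inference = CellSegmentationInference(model_path="checkpoints/CellViT-SAM-H.pth", gpu=0)
+ #   inference.process_wsi_filelist([Path("slides/slide_1.svs")], batch_size=8, geojson=True)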
572
+ class CellSegmentationInference:
573
+ def __init__(
574
+ self,
575
+ model_path: Union[Path, str],
576
+ gpu: int,
577
+ enforce_mixed_precision: bool = False,
578
+ ) -> None:
579
+ """Cell Segmentation Inference class.
580
+
581
+ After setup, a WSI can be processed by calling process_wsi method
582
+
583
+ Args:
584
+ model_path (Union[Path, str]): Path to model checkpoint
585
+ gpu (int): CUDA GPU id to use
586
+ enforce_mixed_precision (bool, optional): Using PyTorch autocasting with dtype float16 to speed up inference. Also good for trained amp networks.
587
+ Can be used to enforce amp inference even for networks trained without amp. Otherwise, the network setting is used.
588
+ Defaults to False.
589
+ """
590
+ self.model_path = Path(model_path)
591
+ if gpu >= 0:
592
+ self.device = f"cuda:{gpu}"
593
+ else:
594
+ self.device = "cpu"
595
+ self.__instantiate_logger()
596
+ self.__load_model()
597
+ self.__load_inference_transforms()
598
+ self.__setup_amp(enforce_mixed_precision=enforce_mixed_precision)
599
+
600
+ def __instantiate_logger(self) -> None:
601
+ """Instantiate logger
602
+
603
+ Logger is using no formatters. Logs are stored in the run directory under the filename: inference.log
604
+ """
605
+ logger = Logger(
606
+ level="INFO",
607
+ )
608
+ self.logger = logger.create_logger()
609
+
610
+ def __load_model(self) -> None:
611
+ """Load model and checkpoint and load the state_dict"""
612
+ self.logger.info(f"Loading model: {self.model_path}")
613
+
614
+ model_checkpoint = torch.load(self.model_path, map_location="cpu")
615
+
616
+ # unpack checkpoint
617
+ self.run_conf = unflatten_dict(model_checkpoint["config"], ".")
618
+ self.model = self.__get_model(model_type=model_checkpoint["arch"])
619
+ self.logger.info(
620
+ self.model.load_state_dict(model_checkpoint["model_state_dict"])
621
+ )
622
+
623
+ self.model.eval()
624
+ self.model.to(self.device)
625
+
626
+
627
+ def __get_model(
628
+ self, model_type: str
629
+ ) -> Union[
630
+ CellViT,
631
+ CellViTUnshared,
632
+ CellViT256,
633
+ CellViT256Unshared,
634
+ CellViTSAM,
635
+ CellViTSAMUnshared,
636
+ ]:
637
+ """Return the trained model for inference
638
+
639
+ Args:
640
+ model_type (str): Name of the model. Must either be one of:
641
+ CellViT, CellViTUnshared, CellViT256, CellViT256Unshared, CellViTSAM, CellViTSAMUnshared
642
+
643
+ Returns:
644
+ Union[CellViT, CellViTUnshared, CellViT256, CellViT256Unshared, CellViTSAM, CellViTSAMUnshared]: Model
645
+ """
646
+ implemented_models = [
647
+ "CellViT",
648
+ "CellViTUnshared",
649
+ "CellViT256",
650
+ "CellViT256Unshared",
651
+ "CellViTSAM",
652
+ "CellViTSAMUnshared",
653
+ ]
654
+ if model_type not in implemented_models:
655
+ raise NotImplementedError(
656
+ f"Unknown model type. Please select one of {implemented_models}"
657
+ )
658
+ if model_type in ["CellViT", "CellViTUnshared"]:
659
+ if model_type == "CellViT":
660
+ model_class = CellViT
661
+ elif model_type == "CellViTUnshared":
662
+ model_class = CellViTUnshared
663
+ model = model_class(
664
+ num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"],
665
+ num_tissue_classes=self.run_conf["data"]["num_tissue_classes"],
666
+ embed_dim=self.run_conf["model"]["embed_dim"],
667
+ input_channels=self.run_conf["model"].get("input_channels", 3),
668
+ depth=self.run_conf["model"]["depth"],
669
+ num_heads=self.run_conf["model"]["num_heads"],
670
+ extract_layers=self.run_conf["model"]["extract_layers"],
671
+ )
672
+
673
+ elif model_type in ["CellViT256", "CellViT256Unshared"]:
674
+ if model_type == "CellViT256":
675
+ model_class = CellViT256
676
+ elif model_type == "CellViTVIT256Unshared":
677
+ model_class = CellViT256Unshared
678
+ model = model_class(
679
+ model256_path=None,
680
+ num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"],
681
+ num_tissue_classes=self.run_conf["data"]["num_tissue_classes"],
682
+ )
683
+ elif model_type in ["CellViTSAM", "CellViTSAMUnshared"]:
684
+ if model_type == "CellViTSAM":
685
+ model_class = CellViTSAM
686
+ elif model_type == "CellViTSAMUnshared":
687
+ model_class = CellViTSAMUnshared
688
+ model = model_class(
689
+ model_path=None,
690
+ num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"],
691
+ num_tissue_classes=self.run_conf["data"]["num_tissue_classes"],
692
+ vit_structure=self.run_conf["model"]["backbone"],
693
+ )
694
+ return model
695
+
696
+ def __load_inference_transforms(self):
697
+ """Load the inference transformations from the run_configuration"""
698
+ self.logger.info("Loading inference transformations")
699
+
700
+ transform_settings = self.run_conf["transformations"]
701
+ if "normalize" in transform_settings:
702
+ mean = transform_settings["normalize"].get("mean", (0.5, 0.5, 0.5))
703
+ std = transform_settings["normalize"].get("std", (0.5, 0.5, 0.5))
704
+ else:
705
+ mean = (0.5, 0.5, 0.5)
706
+ std = (0.5, 0.5, 0.5)
707
+ self.inference_transforms = T.Compose(
708
+ [T.ToTensor(), T.Normalize(mean=mean, std=std)]
709
+ )
710
+
711
+ def __setup_amp(self, enforce_mixed_precision: bool = False) -> None:
712
+ """Setup automated mixed precision (amp) for inference.
713
+
714
+ Args:
715
+ enforce_mixed_precision (bool, optional): Using PyTorch autocasting with dtype float16 to speed up inference. Also good for trained amp networks.
716
+ Can be used to enforce amp inference even for networks trained without amp. Otherwise, the network setting is used.
717
+ Defaults to False.
718
+ """
719
+ if enforce_mixed_precision:
720
+ self.mixed_precision = enforce_mixed_precision
721
+ else:
722
+ self.mixed_precision = self.run_conf["training"].get(
723
+ "mixed_precision", False
724
+ )
725
+
726
+ def process_wsi(
727
+ self,
728
+ wsi: WSI,
729
+ subdir_name: str = None,
730
+ patch_size: int = 1024,
731
+ overlap: int = 64,
732
+ batch_size: int = 8,
733
+ geojson: bool = False,
734
+ ) -> None:
735
+ """Process WSI file
736
+
737
+ Args:
738
+ wsi (WSI): WSI object
739
+ subdir_name (str, optional): If provided, a subdir with the given name is created in the cell_detection folder.
740
+ Helpful if you need to store different cell detection results next to each other. Defaults to None (no subdir).
741
+ patch_size (int, optional): Patch-Size. Default to 1024.
742
+ overlap (int, optional): Overlap between patches. Defaults to 64.
743
+ batch_size (int, optional): Batch-size for inference. Defaults to 8.
744
+ geojson (bool, optional): Whether a geojson export should be performed. Defaults to False.
745
+ """
746
+ self.logger.info(f"Processing WSI: {wsi.name}")
747
+
748
+ wsi_inference_dataset = PatchedWSIInference(
749
+ wsi, transform=self.inference_transforms
750
+ )
751
+
752
+ num_workers = os.cpu_count()
753
+ if num_workers is None:
754
+ num_workers = 16
755
+ num_workers = int(np.clip(3 * num_workers / 4, 1, 2 * batch_size))
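+ # Heuristic: aim for ~3/4 of the available cores, but keep at least one worker and
+ # never more than twice the batch size to avoid oversubscription.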
756
+
757
+ wsi_inference_dataloader = DataLoader(
758
+ dataset=wsi_inference_dataset,
759
+ batch_size=batch_size,
760
+ num_workers=num_workers,
761
+ shuffle=False,
762
+ collate_fn=wsi_inference_dataset.collate_batch,
763
+ pin_memory=False,
764
+ )
765
+ dataset_config = self.run_conf["dataset_config"]
766
+ nuclei_types = dataset_config["nuclei_types"]
767
+
768
+ if subdir_name is not None:
769
+ outdir = Path(wsi.patched_slide_path) / "cell_detection" / subdir_name
770
+ else:
771
+ outdir = Path(wsi.patched_slide_path) / "cell_detection"
772
+ outdir.mkdir(exist_ok=True, parents=True)
773
+
774
+ predicted_batches = []
775
+ with torch.no_grad():
776
+ for batch in tqdm.tqdm(
777
+ wsi_inference_dataloader, total=len(wsi_inference_dataloader)
778
+ ):
779
+ patches = batch[0].to(self.device)
780
+
781
+ metadata = batch[1]
782
+ if self.mixed_precision:
783
+ with torch.autocast(device_type="cuda", dtype=torch.float16):
784
+ predictions_ = self.model(patches, retrieve_tokens=True)
785
+ else:
786
+ predictions_ = self.model(patches, retrieve_tokens=True)
787
+ # reshape, apply softmax to segmentation maps
788
+ #predictions = self.model.reshape_model_output(predictions_, self.device)
789
+ predictions = self.model.reshape_model_output(predictions_, 'cpu')
790
+ predicted_batches.append((predictions, metadata))
791
+
792
+ # Merge the per-batch predictions and flatten the metadata, then hand everything to
+ # postprocess_predictions together with a PostprocessArguments bundle (the function
+ # expects stacked predictions, per-patch metadata, the WSI object and the argument
+ # dataclass, cf. its definition above).
+ merged_predictions = defaultdict(list)
+ merged_metadata = []
+ for batch_predictions, batch_metadata in predicted_batches:
+ for k, v in batch_predictions.items():
+ merged_predictions[k].append(v)
+ merged_metadata.extend(batch_metadata)
+ predictions_stacked = {k: torch.cat(v).to(torch.float32) for k, v in merged_predictions.items()}
+ post_process_arguments = PostprocessArguments(n_images=1, num_nuclei_classes=self.model.num_nuclei_classes, dataset_config=dataset_config, overlap=overlap, patch_size=patch_size, geojson=geojson, subdir_name=subdir_name, logger=self.logger)
+ postprocess_predictions(predictions_stacked, merged_metadata, wsi, post_process_arguments)
793
+
794
+ def process_wsi_filelist(self,
795
+ wsi_filelist,
796
+ subdir_name: str = None,
797
+ patch_size: int = 1024,
798
+ overlap: int = 64,
799
+ batch_size: int = 8,
800
+ torch_compile: bool = False,
801
+ geojson: bool = False,
802
+ n_postprocess_workers: int = 0,
803
+ n_dataloader_workers: int = 4,
804
+ overwrite: bool = False):
805
+ if torch_compile:
806
+ self.logger.info("Model will be compiled using torch.compile. First batch will take a lot more time to compute.")
807
+ self.model = torch.compile(self.model)
808
+
809
+ dataset = InferenceWSIDataset(wsi_filelist, transform=self.inference_transforms, overwrite=overwrite, n_workers=n_postprocess_workers)
810
+ self.logger.info(f"Loaded dataset with {dataset.get_n_files()} images")
811
+
812
+ dataloader = DataLoader(dataset, batch_size=batch_size, collate_fn=wsi_patch_collator, num_workers=n_dataloader_workers)
813
+ #with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA], record_shapes=True) as prof:
814
+ post_process_arguments = PostprocessArguments(n_images=dataset.get_n_files(),
815
+ num_nuclei_classes=self.model.num_nuclei_classes,
816
+ dataset_config=self.run_conf['dataset_config'],
817
+ overlap=overlap,
818
+ patch_size=patch_size,
819
+ geojson=geojson,
820
+ subdir_name=subdir_name,
821
+ n_workers=n_postprocess_workers,
822
+ logger=self.logger)
823
+ if n_postprocess_workers > 0:
824
+ self._process_wsi_filelist_multiprocessing(dataloader,
825
+ post_process_arguments)
826
+ else:
827
+ self._process_wsi_filelist_singleprocessing(dataloader,
828
+ post_process_arguments)
829
+
830
+
831
+ #print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=10))
832
+
833
+ def _process_wsi_filelist_singleprocessing(self,
834
+ dataloader,
835
+ post_process_arguments):
836
+ wsi_work_map = {}
837
+
838
+ with torch.no_grad():
839
+ try:
840
+ for batch in tqdm.tqdm(dataloader, desc="Processing patches"):
841
+ patches, local_idxs, wsi_files, metadatas = batch
842
+ patches = patches.to(self.device)
843
+
844
+ if self.mixed_precision:
845
+ with torch.autocast(device_type="cuda", dtype=torch.float16):
846
+ predictions_ = self.model(patches, retrieve_tokens=True)
847
+ else:
848
+ predictions_ = self.model(patches, retrieve_tokens=True)
849
+ # reshape, apply softmax to segmentation maps
850
+ #predictions = self.model.reshape_model_output(predictions_, self.device)
851
+ predictions = self.model.reshape_model_output(predictions_, 'cpu')
852
+ # We break out the predictions into records (one dict per patch instead of all patches in one dict)
853
+ prediction_records = [{k: v[i] for k,v in predictions.items()} for i in range(len(local_idxs))]
854
+
855
+ for i, wsi_file in enumerate(wsi_files):
856
+ wsi_name = wsi_file.name
857
+ if wsi_name not in wsi_work_map:
858
+ wsi_work_map[wsi_name] = []
859
+ wsi_work_list = wsi_work_map[wsi_name]
861
+ work_package = (local_idxs[i], prediction_records[i], metadatas[i])
862
+ wsi_work_list.append(work_package)
863
+ if len(wsi_work_list) == wsi_file.get_number_patches():
863
+ local_idxs, predictions_records, metadata = zip(*wsi_work_list)
864
+ # Merge the prediction records into a single dictionary again.
865
+ predictions = defaultdict(list)
866
+ for record in predictions_records:
867
+ for k,v in record.items():
868
+ predictions[k].append(v)
869
+ predictions_stacked = {k: torch.stack(v).to(torch.float32) for k,v in predictions.items()}
870
+ postprocess_predictions(predictions_stacked, metadata, wsi_file, post_process_arguments)
871
+ del wsi_work_map[wsi_name]
872
+
873
+ except KeyboardInterrupt:
874
+ pass
875
+
876
+ def _process_wsi_filelist_multiprocessing(self,
877
+ dataloader,
878
+ post_process_arguments: PostprocessArguments):
879
+
880
+ pbar_batches = tqdm.trange(len(dataloader), desc="Processing patch-batches")
881
+ pbar_postprocessing = tqdm.trange(post_process_arguments.n_images, desc="Postprocessed images")
882
+
883
+ wsi_work_map = {}
884
+
885
+ with torch.no_grad():
886
+ with multiprocessing.Pool(post_process_arguments.n_workers) as pool:
887
+ try:
888
+ results = []
889
+
890
+ for batch in dataloader:
891
+ patches, local_idxs, wsi_files, metadatas = batch
892
+ patches = patches.to(self.device)
893
+
894
+ if self.mixed_precision:
895
+ with torch.autocast(device_type="cuda", dtype=torch.float16):
896
+ predictions_ = self.model(patches, retrieve_tokens=True)
897
+ else:
898
+ predictions_ = self.model(patches, retrieve_tokens=True)
899
+ # reshape, apply softmax to segmentation maps
900
+ #predictions = self.model.reshape_model_output(predictions_, self.device)
901
+ predictions = self.model.reshape_model_output(predictions_, 'cpu')
902
+ pbar_batches.update()
903
+
904
+ # We break out the predictions into records (one dict per patch instead of all patches in one dict)
905
+ prediction_records = [{k: v[i] for k,v in predictions.items()} for i in range(len(local_idxs))]
906
+
907
+ for i, wsi_file in enumerate(wsi_files):
908
+ wsi_name = wsi_file.name
909
+ if wsi_name not in wsi_work_map:
910
+ wsi_work_map[wsi_name] = []
911
+ wsi_work_list = wsi_work_map[wsi_name]
912
+ work_package = (local_idxs[i], prediction_records[i], metadatas[i])
913
+ wsi_work_list.append(work_package)
914
+ if len(wsi_work_list) == wsi_file.get_number_patches():
915
+ while len(results) >= post_process_arguments.n_workers:
916
+ n_working = len(results)
917
+ results = [result for result in results if not result.ready()]
918
+ n_done = n_working - len(results)
919
+ pbar_postprocessing.update(n_done)
920
+ pbar_batches.set_description(f"Processing patch-batches (waiting on postprocessing workers)")
921
+ sleep(post_process_arguments.wait_time)
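+ # Backpressure: pause the GPU loop until a postprocessing worker frees up, so
+ # that finished predictions do not accumulate unboundedly in memory.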
922
+ result = pool.apply_async(f_post_processing_worker, (wsi_file, wsi_work_list, post_process_arguments))
923
+ pbar_batches.set_description(f"Processing patch-batches")
924
+ results.append(result)
925
+ del wsi_work_map[wsi_name]
926
+ self.logger.info("Model predictions done, waiting for postprocessing to finish.")
927
+ pool.close()
928
+ pool.join()
929
+ except KeyboardInterrupt:
930
+ pool.terminate()
931
+ pool.join()
932
+
933
+ def get_cell_predictions_with_tokens(
934
+ self, predictions: dict, magnification: int = 40
935
+ ) -> Tuple[List[dict], torch.Tensor]:
936
+ """Take the raw predictions, apply softmax and calculate type instances
937
+
938
+ Args:
939
+ predictions (dict): Network predictions with tokens. Keys: "nuclei_binary_map", "nuclei_type_map", "hv_map", "tokens"
940
+ magnification (int, optional): WSI magnification. Defaults to 40.
941
+
942
+ Returns:
943
+ Tuple[List[dict], torch.Tensor]:
944
+ * List[dict]: List with a dictionary for each batch element with cell seg results
945
+ Contains bbox, contour, 2D-position, type and type_prob for each cell
946
+ * torch.Tensor: Network tokens on cpu device with shape (batch_size, num_tokens_h, num_tokens_w, embd_dim)
947
+ """
948
+ predictions["nuclei_binary_map"] = F.softmax(
949
+ predictions["nuclei_binary_map"], dim=-1
950
+ )
951
+ predictions["nuclei_type_map"] = F.softmax(
952
+ predictions["nuclei_type_map"], dim=-1
953
+ )
954
+
955
+ # get the instance types
956
+ (
957
+ _,
958
+ instance_types,
959
+ ) = calculate_instance_map(self.model.num_nuclei_classes, predictions, magnification=magnification)
960
+ # get the tokens
961
+ tokens = predictions["tokens"].to("cpu")
962
+
963
+ return instance_types, tokens
964
+
965
+ def post_process_edge_cells(self, cell_list: List[dict]) -> List[int]:
966
+ """Use the CellPostProcessor to remove multiple cells and merge due to overlap
967
+
968
+ Args:
969
+ cell_list (List[dict]): List with cell-dictionaries. Required keys:
970
+ * bbox
971
+ * centroid
972
+ * contour
973
+ * type_prob
974
+ * type
975
+ * patch_coordinates
976
+ * cell_status
977
+ * offset_global
978
+
979
+ Returns:
980
+ List[int]: List with integers of cells that should be kept
981
+ """
982
+ cell_processor = CellPostProcessor(cell_list, self.logger)
983
+ cleaned_cells = cell_processor.post_process_cells()
984
+
985
+ return sorted(cell_record["index"] for cell_record in cleaned_cells)
986
+
987
+
988
+ class CellPostProcessor:
989
+ def __init__(self, cell_list: List[dict], logger: logging.Logger) -> None:
990
+ """POst-Processing a list of cells from one WSI
991
+
992
+ Args:
993
+ cell_list (List[dict]): List with cell-dictionaries. Required keys:
994
+ * bbox
995
+ * centroid
996
+ * contour
997
+ * type_prob
998
+ * type
999
+ * patch_coordinates
1000
+ * cell_status
1001
+ * offset_global
1002
+ logger (logging.Logger): Logger
1003
+ """
1004
+ self.logger = logger
1005
+ self.logger.info("Initializing Cell-Postprocessor")
1006
+
1007
+ for index, cell_dict in enumerate(cell_list):
1008
+ # TODO: Shouldn't it be the other way around? Column = x, Row = Y
1009
+ x,y = cell_dict["patch_coordinates"]
1010
+ cell_dict["patch_row"] = x
1011
+ cell_dict["patch_col"] = y
1012
+ cell_dict["patch_coordinates"] = f"{x}_{y}"
1013
+ cell_dict["index"] = index
1014
+
1015
+ #self.cell_df = pd.DataFrame(cell_list)
1016
+ self.cell_records = cell_list
1017
+
1018
+ #xs, ys = zip(*self.cell_df["patch_coordinates"])
1019
+
1020
+ #self.cell_df["patch_row"] = xs
1021
+ #self.cell_df["patch_col"] = ys
1022
+ #self.cell_df["patch_coordinates"] = [f"{x}_{y}" for x,y in zip(xs, ys)]
1023
+ # The call to DataFrame.apply below was exceedingly slow, the list comprehension above is _much_ faster
1024
+ #self.cell_df = self.cell_df.apply(convert_coordinates, axis=1)
1025
+ self.mid_cells = [cell_record for cell_record in self.cell_records if cell_record["cell_status"] == 0]
1026
+ self.margin_cells = [cell_record for cell_record in self.cell_records if cell_record["cell_status"] != 0]
1027
+
1028
+ def post_process_cells(self) -> List[Dict]:
1029
+ """Main Post-Processing coordinator, entry point
1030
+
1031
+ Returns:
1032
+ List[Dict]: List of records (dictionaries) with post-processed and cleaned cells
1033
+ """
1034
+ self.logger.info("Finding edge-cells for merging")
1035
+ cleaned_edge_cells = self._clean_edge_cells()
1036
+ self.logger.info("Removal of cells detected multiple times")
1037
+ cleaned_edge_cells = self._remove_overlap(cleaned_edge_cells)
1038
+
1039
+ # merge with mid cells
1040
+ postprocessed_cells = self.mid_cells + cleaned_edge_cells
1041
+
1042
+ return postprocessed_cells
1043
+
1044
+ def _clean_edge_cells(self) -> List[Dict]:
1045
+ """Create a record list that just contains all margin cells (cells inside the margin, not touching the border)
1046
+ and border/edge cells (touching the border) with no overlapping equivalent (e.g., if the patch has no neighbour)
1047
+
1048
+ Returns:
1049
+ List[Dict]: Cleaned record list
1050
+ """
1051
+
1052
+ # Filter within the margin cells only; the mid cells are re-added in post_process_cells
+ margin_cells = [record for record in self.margin_cells if record["edge_position"] == 0]
1053
+ edge_cells = [record for record in self.margin_cells if record["edge_position"] == 1]
1054
+
1055
+ existing_patches = list(set(record["patch_coordinates"] for record in self.margin_cells))
1056
+
1057
+ edge_cells_unique = []
1058
+
1059
+ for record in edge_cells:
1060
+ edge_information = record["edge_information"]
1061
+ edge_patch = edge_information["edge_patches"][0]
1062
+ edge_patch = f"{edge_patch[0]}_{edge_patch[1]}"
1063
+ if edge_patch not in existing_patches:
1064
+ edge_cells_unique.append(record)
1065
+
1066
+ cleaned_edge_cells = margin_cells + edge_cells_unique
1067
+
1068
+ return cleaned_edge_cells
1069
+
1070
+ def _remove_overlap(self, cleaned_edge_cells: List[Dict]) -> List[Dict]:
1071
+ """Remove overlapping cells from provided cell record list
1072
+
1073
+ Args:
1074
+ cleaned_edge_cells (List[Dict]): List[Dict] that should be cleaned
1075
+
1076
+ Returns:
1077
+ List[Dict]: Cleaned cell records
1078
+ """
1079
+ merged_cells = cleaned_edge_cells
1080
+
1081
+ for iteration in range(20):
1082
+ poly_list = []
1083
+ for i, cell_info in enumerate(merged_cells):
1084
+ poly = Polygon(cell_info["contour"])
1085
+ if not poly.is_valid:
1086
+ self.logger.debug("Found invalid polygon - Fixing with buffer 0")
1087
+ multi = poly.buffer(0)
1088
+ if isinstance(multi, MultiPolygon):
1089
+ if len(multi.geoms) > 1:
1090
+ poly_idx = np.argmax([p.area for p in multi.geoms])
1091
+ poly = multi.geoms[poly_idx]
1092
+ poly = Polygon(poly)
1093
+ else:
1094
+ poly = multi.geoms[0]
1095
+ poly = Polygon(poly)
1096
+ else:
1097
+ poly = Polygon(multi)
1098
+ poly.uid = i
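+ # Note: attaching a .uid attribute to geometries and STRtree.query returning the
+ # geometries themselves assumes Shapely < 2.0; in Shapely 2.x geometries are
+ # immutable and query() returns integer indices instead.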
1099
+ poly_list.append(poly)
1100
+
1101
+ # use an STRtree for fast spatial querying
1102
+ tree = strtree.STRtree(poly_list)
1103
+
1104
+ merged_idx = deque()
1105
+ iterated_cells = set()
1106
+ overlaps = 0
1107
+
1108
+ for query_poly in poly_list:
1109
+ if query_poly.uid not in iterated_cells:
1110
+ intersected_polygons = tree.query(
1111
+ query_poly
1112
+ ) # this also contains a self-intersection
1113
+ if (
1114
+ len(intersected_polygons) > 1
1115
+ ): # we have more at least one intersection with another cell
1116
+ submergers = [] # all cells that overlap with query
1117
+ for inter_poly in intersected_polygons:
1118
+ if (
1119
+ inter_poly.uid != query_poly.uid
1120
+ and inter_poly.uid not in iterated_cells
1121
+ ):
1122
+ if (
1123
+ query_poly.intersection(inter_poly).area
1124
+ / query_poly.area
1125
+ > 0.01
1126
+ or query_poly.intersection(inter_poly).area
1127
+ / inter_poly.area
1128
+ > 0.01
1129
+ ):
1130
+ overlaps = overlaps + 1
1131
+ submergers.append(inter_poly)
1132
+ iterated_cells.add(inter_poly.uid)
1133
+ # catch block: empty list -> some cells are touching, but not overlapping strongly enough
1134
+ if len(submergers) == 0:
1135
+ merged_idx.append(query_poly.uid)
1136
+ else: # merging strategy: take the biggest cell; other merging strategies still need to be implemented
1137
+ selected_poly_index = np.argmax(
1138
+ np.array([p.area for p in submergers])
1139
+ )
1140
+ selected_poly_uid = submergers[selected_poly_index].uid
1141
+ merged_idx.append(selected_poly_uid)
1142
+ else:
1143
+ # no intersection, just add
1144
+ merged_idx.append(query_poly.uid)
1145
+ iterated_cells.add(query_poly.uid)
1146
+
1147
+ self.logger.info(
1148
+ f"Iteration {iteration}: Found overlap of # cells: {overlaps}"
1149
+ )
1150
+ if overlaps == 0:
1151
+ self.logger.info("Found all overlapping cells")
1152
+ break
1153
+ elif iteration == 19:
1154
+ self.logger.info(
1155
+ f"Not all duplicated cells removed, still {overlaps} to remove. For performance reasons, we stop iterating now. Please raise an issue on git or increase the number of iterations."
1156
+ )
1157
+
1158
+ merged_cells = [cleaned_edge_cells[i] for i in merged_idx]
1159
+ return merged_cells
1160
+
1161
+
1162
+ def convert_coordinates(row: pd.Series) -> pd.Series:
1163
+ """Convert a row from x,y type to one string representation of the patch position for fast querying
1164
+ Repr: x_y
1165
+
1166
+ Args:
1167
+ row (pd.Series): Row to be processed
1168
+
1169
+ Returns:
1170
+ pd.Series: Processed Row
1171
+ """
1172
+ x, y = row["patch_coordinates"]
1173
+ row["patch_row"] = x
1174
+ row["patch_col"] = y
1175
+ row["patch_coordinates"] = f"{x}_{y}"
1176
+ return row
1177
+
1178
+
1179
+ def get_cell_position(bbox: np.ndarray, patch_size: int = 1024) -> List[int]:
1180
+ """Get cell position as a list
1181
+
1182
+ Entry is 1, if cell touches the border: [top, right, down, left]
1183
+
1184
+ Args:
1185
+ bbox (np.ndarray): Bounding-Box of cell
1186
+ patch_size (int, optional): Patch-size. Defaults to 1024.
1187
+
1188
+ Returns:
1189
+ List[int]: List with 4 integers for each position
1190
+ """
1191
+ # bbox = 2x2 array in h, w style
1192
+ # bbox[0,0] = upper position (height)
1193
+ # bbox[1,0] = lower position (height)
1194
+ # bbox[0,1] = left position (width)
1195
+ # bbox[1,1] = right position (width)
1196
+ # bbox[:,0] -> x dimensions
1197
+ top, left, down, right = False, False, False, False
1198
+ if bbox[0, 0] == 0:
1199
+ top = True
1200
+ if bbox[0, 1] == 0:
1201
+ left = True
1202
+ if bbox[1, 0] == patch_size:
1203
+ down = True
1204
+ if bbox[1, 1] == patch_size:
1205
+ right = True
1206
+ position = [top, right, down, left]
1207
+ position = [int(pos) for pos in position]
1208
+
1209
+ return position
1210
+
1211
+
1212
+ def get_cell_position_marging(
1213
+ bbox: np.ndarray, patch_size: int = 1024, margin: int = 64
1214
+ ) -> int:
1215
+ """Get the status of the cell, describing the cell position
1216
+
1217
+ A cell is either in the mid (0) or at one of the borders (1-8)
1218
+
1219
+ # Numbers are assigned clockwise, starting from top left
1220
+ # i.e., top left = 1, top = 2, top right = 3, right = 4, bottom right = 5, bottom = 6, bottom left = 7, left = 8
1221
+ # Mid status is denoted by 0
1222
+
1223
+ Args:
1224
+ bbox (np.ndarray): Bounding Box of cell
1225
+ patch_size (int, optional): Patch-Size. Defaults to 1024.
1226
+ margin (int, optional): Margin-Size. Defaults to 64.
1227
+
1228
+ Returns:
1229
+ int: Cell Status
1230
+ """
1231
+ cell_status = None
1232
+ if np.max(bbox) > patch_size - margin or np.min(bbox) < margin:
1233
+ if bbox[0, 0] < margin:
1234
+ # top left, top or top right
1235
+ if bbox[0, 1] < margin:
1236
+ # top left
1237
+ cell_status = 1
1238
+ elif bbox[1, 1] > patch_size - margin:
1239
+ # top right
1240
+ cell_status = 3
1241
+ else:
1242
+ # top
1243
+ cell_status = 2
1244
+ elif bbox[1, 1] > patch_size - margin:
1245
+ # top right, right or bottom right
1246
+ if bbox[1, 0] > patch_size - margin:
1247
+ # bottom right
1248
+ cell_status = 5
1249
+ else:
1250
+ # right
1251
+ cell_status = 4
1252
+ elif bbox[1, 0] > patch_size - margin:
1253
+ # bottom right, bottom, bottom left
1254
+ if bbox[0, 1] < margin:
1255
+ # bottom left
1256
+ cell_status = 7
1257
+ else:
1258
+ # bottom
1259
+ cell_status = 6
1260
+ elif bbox[0, 1] < margin:
1261
+ # bottom left, left, top left, but only left is left
1262
+ cell_status = 8
1263
+ else:
1264
+ cell_status = 0
1265
+
1266
+ return cell_status
1267
+
1268
+
1269
+ def get_edge_patch(position, row, col):
1270
+ # row starting on bottom or on top?
1271
+ if position == [1, 0, 0, 0]:
1272
+ # top
1273
+ return [[row - 1, col]]
1274
+ if position == [1, 1, 0, 0]:
1275
+ # top and right
1276
+ return [[row - 1, col], [row - 1, col + 1], [row, col + 1]]
1277
+ if position == [0, 1, 0, 0]:
1278
+ # right
1279
+ return [[row, col + 1]]
1280
+ if position == [0, 1, 1, 0]:
1281
+ # right and down
1282
+ return [[row, col + 1], [row + 1, col + 1], [row + 1, col]]
1283
+ if position == [0, 0, 1, 0]:
1284
+ # down
1285
+ return [[row + 1, col]]
1286
+ if position == [0, 0, 1, 1]:
1287
+ # down and left
1288
+ return [[row + 1, col], [row + 1, col - 1], [row, col - 1]]
1289
+ if position == [0, 0, 0, 1]:
1290
+ # left
1291
+ return [[row, col - 1]]
1292
+ if position == [1, 0, 0, 1]:
1293
+ # left and top
1294
+ return [[row, col - 1], [row - 1, col - 1], [row - 1, col]]
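+ # Corner positions map to three neighbouring patches (e.g. top+right touches the
+ # patches above, above-right and right); plain edge positions map to exactly one.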
1295
+
1296
+
1297
+ # CLI
1298
+ class InferenceWSIParser:
1299
+ """Parser"""
1300
+
1301
+ def __init__(self) -> None:
1302
+ parser = argparse.ArgumentParser(
1303
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
1304
+ description="Perform CellViT inference for given run-directory with model checkpoints and logs",
1305
+ )
1306
+ requiredNamed = parser.add_argument_group("required named arguments")
1307
+ requiredNamed.add_argument(
1308
+ "--model",
1309
+ type=str,
1310
+ help="Model checkpoint file that is used for inference",
1311
+ required=True,
1312
+ )
1313
+ parser.add_argument(
1314
+ "--gpu", type=int, help="Cuda-GPU ID for inference. Default: 0", default=0
1315
+ )
1316
+ parser.add_argument(
1317
+ "--magnification",
1318
+ type=float,
1319
+ help="Network magnification. Is used for checking patch magnification such that we use the correct resolution for network. Default: 40",
1320
+ default=40,
1321
+ )
1322
+ parser.add_argument(
1323
+ "--enforce_amp",
1324
+ action="store_true",
1325
+ help="Whether to use mixed precision for inference (enforced). Otherwise network default training settings are used."
1326
+ " Default: False",
1327
+ )
1328
+ parser.add_argument(
1329
+ "--torch_compile",
1330
+ action="store_true",
1331
+ help="Whether to use torch.compile to compile the model before inference. Has an large overhead for single predictions but leads to a significant speedup when predicting on multiple images."
1332
+ " Default: False",
1333
+ )
1334
+
1335
+ parser.add_argument(
1336
+ "--batch_size",
1337
+ type=int,
1338
+ help="Inference batch-size. Default: 8",
1339
+ default=8,
1340
+ )
1341
+
1342
+ parser.add_argument(
1343
+ "--n_postprocess_workers",
1344
+ type=int,
1345
+ help="Number of processes to dedicate to post processing. Set to 0 to disable multiprocessing for post processing. Default: 8",
1346
+ default=8,
1347
+ )
1348
+
1349
+ parser.add_argument(
1350
+ "--n_dataloader_workers",
1351
+ type=int,
1352
+ help="Number of workers to use for the pytorch patch dataloader. Default: 4",
1353
+ default=4,
1354
+ )
1355
+
1356
+ parser.add_argument(
1357
+ "--outdir_subdir",
1358
+ type=str,
1359
+ help="If provided, a subdir with the given name is created in the cell_detection folder where the results are stored. Default: None",
1360
+ default=None,
1361
+ )
1362
+ parser.add_argument(
1363
+ "--geojson",
1364
+ action="store_true",
1365
+ help="Set this flag to export results as additional geojson files for loading them into Software like QuPath.",
1366
+ )
1367
+
1368
+ parser.add_argument(
1369
+ "--overwrite",
1370
+ action="store_true",
1371
+ help=f"If set, include all found pre-processed files even if they include a \"{FLAG_FILE_NAME}\" file.",
1372
+ )
1373
+
1374
+ # subparsers for either loading a WSI or a WSI folder
1375
+
1376
+ # WSI
1377
+ subparsers = parser.add_subparsers(
1378
+ dest="command",
1379
+ description="Main run command for either performing inference on single WSI-file or on whole dataset",
1380
+ )
1381
+ subparser_wsi = subparsers.add_parser(
1382
+ "process_wsi", description="Process a single WSI file"
1383
+ )
1384
+ subparser_wsi.add_argument(
1385
+ "--wsi_path",
1386
+ type=str,
1387
+ help="Path to WSI file",
1388
+ )
1389
+ subparser_wsi.add_argument(
1390
+ "--patched_slide_path",
1391
+ type=str,
1392
+ help="Path to patched WSI file (specific WSI file, not parent path of patched slide dataset)",
1393
+ )
1394
+
1395
+ # Dataset
1396
+ subparser_dataset = subparsers.add_parser(
1397
+ "process_dataset",
1398
+ description="Process a whole dataset",
1399
+ )
1400
+ subparser_dataset.add_argument(
1401
+ "--wsi_paths", type=str, help="Path to the folder where all WSI are stored"
1402
+ )
1403
+ subparser_dataset.add_argument(
1404
+ "--patch_dataset_path",
1405
+ type=str,
1406
+ help="Path to the folder where the patch dataset is stored",
1407
+ )
1408
+ subparser_dataset.add_argument(
1409
+ "--filelist",
1410
+ type=str,
1411
+ help="Filelist with WSI to process. Must be a .csv file with one row denoting the filenames (named 'Filename')."
1412
+ "If not provided, all WSI files with given ending in the filelist are processed.",
1413
+ default=None,
1414
+ )
1415
+ subparser_dataset.add_argument(
1416
+ "--wsi_extension",
1417
+ type=str,
1418
+ help="The extension types used for the WSI files, see configs.python.config (WSI_EXT)",
1419
+ default="svs",
1420
+ )
1421
+
1422
+ self.parser = parser
1423
+
1424
+ def parse_arguments(self) -> dict:
1425
+ opt = self.parser.parse_args()
1426
+ return vars(opt)
1427
+
1428
+
1429
+ def check_wsi(wsi: WSI, magnification: float = 40.0):
1430
+ """Check if provided patched WSI is having the right settings
1431
+
1432
+ Args:
1433
+ wsi (WSI): WSI to check
1434
+ magnification (float, optional): Check magnification. Defaults to 40.0.
1435
+
1436
+ Raises:
1437
+ RuntimeError: The magnification does not match the network input magnification.
1438
+ RuntimeError: The patch-size is not divisible by 256.
1439
+ RuntimeError: The patch-size is not 1024.
1440
+ RuntimeError: The overlap is not 64 px.
1441
+ """
1442
+ if wsi.metadata["magnification"] is not None:
1443
+ patch_magnification = float(wsi.metadata["magnification"])
1444
+ else:
1445
+ patch_magnification = float(
1446
+ float(wsi.metadata["base_magnification"]) / wsi.metadata["downsampling"]
1447
+ )
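+ # If no explicit magnification is stored in the metadata, derive it from the base
+ # magnification divided by the downsampling factor used during patch extraction.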
1448
+ patch_size = int(wsi.metadata["patch_size"])
1449
+
1450
+ if patch_magnification != magnification:
1451
+ raise RuntimeError(
1452
+ "The magnification is not matching to the network input magnification."
1453
+ )
1454
+ if (patch_size % 256) != 0:
1455
+ raise RuntimeError("The patch-size must be devisible by 256.")
1456
+ if wsi.metadata["patch_size"] != 1024:
1457
+ raise RuntimeError("The patch-size must be 1024.")
1458
+ if wsi.metadata["patch_overlap"] != 64:
1459
+ raise RuntimeError("The patch-overlap must be 64")
1460
+
1461
+
1462
+ if __name__ == "__main__":
1463
+ configuration_parser = InferenceWSIParser()
1464
+ configuration = configuration_parser.parse_arguments()
1465
+ command = configuration["command"]
1466
+
1467
+ cell_segmentation = CellSegmentationInference(
1468
+ model_path=configuration["model"],
1469
+ gpu=configuration["gpu"],
1470
+ enforce_mixed_precision=configuration["enforce_amp"],
1471
+ )
1472
+
1473
+ if command.lower() == "process_wsi":
1474
+ cell_segmentation.logger.info("Processing single WSI file")
1475
+ wsi_path = Path(configuration["wsi_path"])
1476
+ wsi_name = wsi_path.stem
1477
+ wsi_file = WSI(
1478
+ name=wsi_name,
1479
+ patient=wsi_name,
1480
+ slide_path=wsi_path,
1481
+ patched_slide_path=configuration["patched_slide_path"],
1482
+ )
1483
+ check_wsi(wsi=wsi_file, magnification=configuration["magnification"])
1484
+ cell_segmentation.process_wsi(
1485
+ wsi_file,
1486
+ subdir_name=configuration["outdir_subdir"],
1487
+ geojson=configuration["geojson"],
1488
+ batch_size=configuration["batch_size"],
1489
+ )
1490
+
1491
+ elif command.lower() == "process_dataset":
1492
+ cell_segmentation.logger.info("Processing whole dataset")
1493
+ if configuration["filelist"] is not None:
1494
+ if Path(configuration["filelist"]).suffix != ".csv":
1495
+ raise ValueError("Filelist must be a .csv file!")
1496
+ cell_segmentation.logger.info(
1497
+ f"Loading files from filelist {configuration['filelist']}"
1498
+ )
1499
+ wsi_filelist = load_wsi_files_from_csv(
1500
+ csv_path=configuration["filelist"],
1501
+ wsi_extension=configuration["wsi_extension"],
1502
+ )
1503
+ else:
1504
+ cell_segmentation.logger.info(
1505
+ f"Loading all files from folder {configuration['wsi_paths']}. No filelist provided."
1506
+ )
1507
+ wsi_filelist = [
1508
+ f
1509
+ for f in sorted(
1510
+ Path(configuration["wsi_paths"]).glob(
1511
+ f"**/*.{configuration['wsi_extension']}"
1512
+ )
1513
+ )
1514
+ ]
1515
+ #if not configuration["overwrite"]:
1516
+ # wsi_filelist = filter_processed_file(wsi_filelist)
1517
+
1518
+ cell_segmentation.process_wsi_filelist(
1519
+ wsi_filelist,
1520
+ subdir_name=configuration["outdir_subdir"],
1521
+ geojson=configuration["geojson"],
1522
+ batch_size=configuration["batch_size"],
1523
+ torch_compile=configuration["torch_compile"],
1524
+ n_postprocess_workers=configuration["n_postprocess_workers"],
1525
+ n_dataloader_workers=configuration["n_dataloader_workers"],
1526
+ overwrite=configuration["overwrite"]
1527
+ )
cell_segmentation/inference/inference_cellvit_experiment_monuseg.py ADDED
@@ -0,0 +1,1002 @@
+ # -*- coding: utf-8 -*-
+ # CellViT Inference Method for Patch-Wise Inference on MoNuSeg dataset
+ #
+ # @ Fabian Hörst, [email protected]
+ # Institute for Artificial Intelligence in Medicine,
+ # University Medicine Essen
+ 
+ import argparse
+ import inspect
+ import os
+ import sys
+ 
+ currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
+ parentdir = os.path.dirname(currentdir)
+ sys.path.insert(0, parentdir)
+ parentdir = os.path.dirname(parentdir)
+ sys.path.insert(0, parentdir)
+ 
+ from base_ml.base_experiment import BaseExperiment
+ 
+ BaseExperiment.seed_run(1232)
+ 
+ from pathlib import Path
+ from typing import List, Union, Tuple
+ 
+ import albumentations as A
+ import cv2
+ import numpy as np
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import tqdm
+ from einops import rearrange
+ from matplotlib import pyplot as plt
+ from PIL import Image, ImageDraw
+ from skimage.color import rgba2rgb
+ from torch.utils.data import DataLoader
+ from torchmetrics.functional import dice
+ from torchmetrics.functional.classification import binary_jaccard_index
+ from torchvision import transforms
+ 
+ from cell_segmentation.datasets.monuseg import MoNuSegDataset
+ from cell_segmentation.inference.cell_detection import (
+     CellPostProcessor,
+     get_cell_position,
+     get_cell_position_marging,
+     get_edge_patch,
+ )
+ from cell_segmentation.utils.metrics import (
+     cell_detection_scores,
+     get_fast_pq,
+     remap_label,
+ )
+ from cell_segmentation.utils.post_proc_cellvit import calculate_instances
+ from cell_segmentation.utils.tools import pair_coordinates
+ from models.segmentation.cell_segmentation.cellvit import CellViT
+ 
+ from utils.logger import Logger
+ from utils.tools import unflatten_dict
+ 
+ 
+ class MoNuSegInference:
+     def __init__(
+         self,
+         model_path: Union[Path, str],
+         dataset_path: Union[Path, str],
+         outdir: Union[Path, str],
+         gpu: int,
+         patching: bool = False,
+         overlap: int = 0,
+         magnification: int = 40,
+     ) -> None:
+         """Cell Segmentation Inference class for the MoNuSeg dataset
+ 
+         Args:
+             model_path (Union[Path, str]): Path to model checkpoint
+             dataset_path (Union[Path, str]): Path to dataset
+             outdir (Union[Path, str]): Output directory
+             gpu (int): CUDA GPU id to use
+             patching (bool, optional): If the dataset should be patched to 256px tiles. Defaults to False.
+             overlap (int, optional): Patch overlap in px. Recommended (next to no overlap) is 64 px.
+                 If overlap is used, patching must be True. Defaults to 0.
+             magnification (int, optional): Dataset magnification. Defaults to 40.
+         """
+         self.model_path = Path(model_path)
+         self.outdir = Path(outdir)
+         self.device = "cpu"
+         self.magnification = magnification
+         self.overlap = overlap
+         self.patching = patching
+         if overlap > 0:
+             assert patching, "Patching must be activated"
+ 
+         self.__instantiate_logger()
+         self.__load_model()
+         self.__load_inference_transforms()
+         self.__setup_amp()
+         self.inference_dataset = MoNuSegDataset(
+             dataset_path=dataset_path,
+             transforms=self.inference_transforms,
+             patching=patching,
+             overlap=overlap,
+         )
+         self.inference_dataloader = DataLoader(
+             self.inference_dataset,
+             batch_size=1,
+             num_workers=8,
+             pin_memory=False,
+             shuffle=False,
+         )
+ 
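+     # Minimal usage sketch (hypothetical paths, mirroring the CLI block at the
+     # bottom of this file):
+     #   inf = MoNuSegInference(model_path="model_best.pth",
+     #                          dataset_path="/path/to/monuseg/testing/",
+     #                          outdir="./results/", gpu=0,
+     #                          patching=True, overlap=64)
+     #   inf.run_inference(generate_plots=True)
+ 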
+     def __instantiate_logger(self) -> None:
+         """Instantiate logger
+ 
+         Logger is using no formatters. Logs are stored in the output directory under the filename: inference.log
+         """
+         logger = Logger(
+             level="INFO",
+             log_dir=self.outdir,
+             comment="inference_monuseg",
+             use_timestamp=False,
+             formatter="%(message)s",
+         )
+         self.logger = logger.create_logger()
+ 
+     def __load_model(self) -> None:
+         """Load the model checkpoint and its state_dict"""
+         self.logger.info(f"Loading model: {self.model_path}")
+ 
+         model_checkpoint = torch.load(self.model_path, map_location="cpu")
+ 
+         # unpack checkpoint
+         self.run_conf = unflatten_dict(model_checkpoint["config"], ".")
+         self.model = self.__get_model(model_type=model_checkpoint["arch"])
+         self.logger.info(
+             self.model.load_state_dict(model_checkpoint["model_state_dict"])
+         )
+         self.model.eval()
+         self.model.to(self.device)
+ 
+     def __get_model(
+         self, model_type: str
+     ) -> Union[
+         CellViT,
+     ]:
+         """Return the trained model for inference
+ 
+         Args:
+             model_type (str): Name of the model. Must be one of:
+                 CellViT, CellViTShared, CellViT256, CellViT256Shared, CellViTSAM, CellViTSAMShared
+ 
+         Returns:
+             Union[CellViT, CellViTShared, CellViT256, CellViT256Shared, CellViTSAM, CellViTSAMShared]: Model
+         """
+         implemented_models = [
+             "CellViT",
+         ]
+         if model_type not in implemented_models:
+             raise NotImplementedError(
+                 f"Unknown model type. Please select one of {implemented_models}"
+             )
+ 
+         if model_type in ["CellViT", "CellViTShared"]:
+             if model_type == "CellViT":
+                 model_class = CellViT
+             model = model_class(
+                 model256_path=self.run_conf["model"].get("pretrained_encoder"),
+                 num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"],
+                 num_tissue_classes=self.run_conf["data"]["num_tissue_classes"],
+                 # embed_dim=self.run_conf["model"]["embed_dim"],
+                 in_channels=self.run_conf["model"].get("input_channels", 3),
+                 # depth=self.run_conf["model"]["depth"],
+                 # num_heads=self.run_conf["model"]["num_heads"],
+                 # extract_layers=self.run_conf["model"]["extract_layers"],
+                 # regression_loss=self.run_conf["model"].get("regression_loss", False),
+             )
+ 
+         return model
+ 
+     def __load_inference_transforms(self) -> None:
+         """Load the inference transformations from the run configuration"""
+         self.logger.info("Loading inference transformations")
+ 
+         transform_settings = self.run_conf["transformations"]
+         if "normalize" in transform_settings:
+             mean = transform_settings["normalize"].get("mean", (0.5, 0.5, 0.5))
+             std = transform_settings["normalize"].get("std", (0.5, 0.5, 0.5))
+         else:
+             mean = (0.5, 0.5, 0.5)
+             std = (0.5, 0.5, 0.5)
+         self.inference_transforms = A.Compose([A.Normalize(mean=mean, std=std)])
+ 
+     def __setup_amp(self) -> None:
+         """Set up automatic mixed precision (amp) for inference."""
+         self.mixed_precision = self.run_conf["training"].get("mixed_precision", False)
+ 
+     def run_inference(self, generate_plots: bool = False) -> None:
+         """Run inference
+ 
+         Args:
+             generate_plots (bool, optional): If plots should be generated. Defaults to False.
+         """
+         self.model.eval()
+ 
+         # setup score trackers
+         image_names = []  # image names as str
+         binary_dice_scores = []  # binary dice scores per image
+         binary_jaccard_scores = []  # binary jaccard scores per image
+         pq_scores = []  # pq-scores per image
+         dq_scores = []  # dq-scores per image
+         sq_scores = []  # sq-scores per image
+         f1_ds = []  # f1-scores per image
+         prec_ds = []  # precision per image
+         rec_ds = []  # recall per image
+ 
+         inference_loop = tqdm.tqdm(
+             enumerate(self.inference_dataloader), total=len(self.inference_dataloader)
+         )
+ 
+         with torch.no_grad():
+             for image_idx, batch in inference_loop:
+                 image_metrics = self.inference_step(
+                     model=self.model, batch=batch, generate_plots=generate_plots
+                 )
+                 image_names.append(image_metrics["image_name"])
+                 binary_dice_scores.append(image_metrics["binary_dice_score"])
+                 binary_jaccard_scores.append(image_metrics["binary_jaccard_score"])
+                 pq_scores.append(image_metrics["pq_score"])
+                 dq_scores.append(image_metrics["dq_score"])
+                 sq_scores.append(image_metrics["sq_score"])
+                 f1_ds.append(image_metrics["f1_d"])
+                 prec_ds.append(image_metrics["prec_d"])
+                 rec_ds.append(image_metrics["rec_d"])
+ 
+         # average metrics over the dataset
+         binary_dice_scores = np.array(binary_dice_scores)
+         binary_jaccard_scores = np.array(binary_jaccard_scores)
+         pq_scores = np.array(pq_scores)
+         dq_scores = np.array(dq_scores)
+         sq_scores = np.array(sq_scores)
+         f1_ds = np.array(f1_ds)
+         prec_ds = np.array(prec_ds)
+         rec_ds = np.array(rec_ds)
+ 
+         dataset_metrics = {
+             "Binary-Cell-Dice-Mean": float(np.nanmean(binary_dice_scores)),
+             "Binary-Cell-Jacard-Mean": float(np.nanmean(binary_jaccard_scores)),
+             "bPQ": float(np.nanmean(pq_scores)),
+             "bDQ": float(np.nanmean(dq_scores)),
+             "bSQ": float(np.nanmean(sq_scores)),
+             "f1_detection": float(np.nanmean(f1_ds)),
+             "precision_detection": float(np.nanmean(prec_ds)),
+             "recall_detection": float(np.nanmean(rec_ds)),
+         }
+         self.logger.info(f"{20*'*'} Binary Dataset metrics {20*'*'}")
+         [self.logger.info(f"{f'{k}:': <25} {v}") for k, v in dataset_metrics.items()]
+ 
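+     # Panoptic Quality factorizes as PQ = DQ * SQ (detection quality times
+     # segmentation quality); the "b" prefix marks the class-agnostic (binary)
+     # variant computed by get_fast_pq above.
+ 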
+     def inference_step(
+         self, model: nn.Module, batch: object, generate_plots: bool = False
+     ) -> dict:
+         """Inference step
+ 
+         Args:
+             model (nn.Module): Model to use; must return "nuclei_binary_map", "nuclei_type_map", "tissue_type" and "hv_map"
+             batch (object): Batch, consisting of images ([0]), masks ([1]) and image names ([2])
+             generate_plots (bool, optional): If plots should be generated. Defaults to False.
+ 
+         Returns:
+             dict: Image metrics for one MoNuSeg image
+         """
+         img = batch[0].to(self.device)
+         if len(img.shape) > 4:
+             img = img[0]
+             img = rearrange(img, "c i j w h -> (i j) c w h")
+         mask = batch[1]
+         image_name = list(batch[2])
+         mask["instance_types"] = calculate_instances(
+             torch.unsqueeze(mask["nuclei_binary_map"], dim=0), mask["instance_map"]
+         )
+ 
+         model.zero_grad()
+ 
+         if self.mixed_precision:
+             with torch.autocast(device_type="cuda", dtype=torch.float16):
+                 predictions_ = model.forward(img)
+         else:
+             predictions_ = model.forward(img)
+ 
+         if self.overlap == 0:
+             if self.patching:
+                 predictions_ = self.post_process_patching(predictions_)
+             predictions = self.get_cell_predictions(predictions_)
+             image_metrics = self.calculate_step_metric(
+                 predictions=predictions, gt=mask, image_name=image_name
+             )
+ 
+         elif self.patching and self.overlap != 0:
+             cell_list = self.post_process_patching_overlap(
+                 predictions_, overlap=self.overlap
+             )
+             image_metrics, predictions = self.calculate_step_metric_overlap(
+                 cell_list=cell_list, gt=mask, image_name=image_name
+             )
+ 
+         scores = [
+             float(image_metrics["binary_dice_score"].detach().cpu()),
+             float(image_metrics["binary_jaccard_score"].detach().cpu()),
+             image_metrics["pq_score"],
+         ]
+         if generate_plots:
+             if self.overlap == 0 and self.patching:
+                 batch_size = img.shape[0]
+                 num_elems = int(np.sqrt(batch_size))
+                 img = torch.permute(img, (0, 2, 3, 1))
+                 img = rearrange(
+                     img, "(i j) h w c -> (i h) (j w) c", i=num_elems, j=num_elems
+                 )
+                 img = torch.unsqueeze(img, dim=0)
+                 img = torch.permute(img, (0, 3, 1, 2))
+             elif self.overlap != 0 and self.patching:
+                 h, w = mask["nuclei_binary_map"].shape[1:]
+                 total_img = torch.zeros((3, h, w))
+                 decomposed_patch_num = int(np.sqrt(img.shape[0]))
+                 for i in range(decomposed_patch_num):
+                     for j in range(decomposed_patch_num):
+                         x_global = i * 256 - i * self.overlap
+                         y_global = j * 256 - j * self.overlap
+                         total_img[
+                             :, x_global : x_global + 256, y_global : y_global + 256
+                         ] = img[i * decomposed_patch_num + j]
+                 img = total_img
+                 img = img[None, :, :, :]
+             self.plot_results(
+                 img=img,
+                 predictions=predictions,
+             )
+ 
+         return image_metrics
+ 
+     def run_single_image_inference(
+         self, model: nn.Module, image: np.ndarray, generate_plots: bool = True
+     ) -> Image.Image:
+         """Run inference on a single, already loaded image
+ 
+         Args:
+             model (nn.Module): Model to use; must return "nuclei_binary_map", "nuclei_type_map", "tissue_type" and "hv_map"
+             image (np.ndarray): Input image as (H, W, 3) array
+             generate_plots (bool, optional): If plots should be generated. Defaults to True.
+ 
+         Returns:
+             Image.Image: Rendered prediction panel as returned by plot_results
+         """
+         # set image transforms
+         transform_settings = self.run_conf["transformations"]
+         if "normalize" in transform_settings:
+             mean = transform_settings["normalize"].get("mean", (0.5, 0.5, 0.5))
+             std = transform_settings["normalize"].get("std", (0.5, 0.5, 0.5))
+         else:
+             mean = (0.5, 0.5, 0.5)
+             std = (0.5, 0.5, 0.5)
+         transforms = A.Compose([A.Normalize(mean=mean, std=std)])
+ 
+         transformed_img = transforms(image=image)["image"]
+         image = torch.from_numpy(transformed_img).permute(2, 0, 1).unsqueeze(0).float()
+         img = image.to(self.device)
+ 
+         model.zero_grad()
+         predictions_ = model.forward(img)
+ 
+         if self.overlap == 0:
+             if self.patching:
+                 predictions_ = self.post_process_patching(predictions_)
+             predictions = self.get_cell_predictions(predictions_)
+ 
+         image_output = self.plot_results(
+             img=img,
+             predictions=predictions,
+         )
+ 
+         return image_output
+ 
+     def calculate_step_metric(
+         self, predictions: dict, gt: dict, image_name: List[str]
+     ) -> dict:
+         """Calculate step metric for one MoNuSeg image.
+ 
+         Args:
+             predictions (dict): Necessary keys:
+                 * instance_map: Pixel-wise nuclear instance segmentation.
+                     Each instance has its own integer, starting from 1. Shape: (1, H, W)
+                 * nuclei_binary_map: Softmax output for the binary nuclei branch. Shape: (1, 2, H, W)
+                 * instance_types: Instance type prediction list.
+                     Each list entry stands for one image and is a dictionary whose main key
+                     is the nuclei instance number (int), with a dict as value.
+                     For each instance, the dictionary contains the keys: bbox (bounding box), centroid (centroid coordinates),
+                     contour, type_prob (probability), type (nuclei type). In practice just one list entry, as we expect batch-size=1 (one image)
+             gt (dict): Necessary keys:
+                 * instance_map
+                 * nuclei_binary_map
+                 * instance_types
+             image_name (List[str]): Name of the image as a list with one str entry. The list is kept for backward compatibility
+ 
+         Returns:
+             dict: Image metrics for one MoNuSeg image. Keys are:
+                 * image_name
+                 * binary_dice_score
+                 * binary_jaccard_score
+                 * pq_score
+                 * dq_score
+                 * sq_score
+                 * f1_d
+                 * prec_d
+                 * rec_d
+         """
+         predictions["instance_map"] = predictions["instance_map"].detach().cpu()
+         instance_maps_gt = gt["instance_map"].detach().cpu()
+ 
+         pred_binary_map = torch.argmax(predictions["nuclei_binary_map"], dim=1)
+         target_binary_map = gt["nuclei_binary_map"].to(self.device)
+ 
+         cell_dice = (
+             dice(preds=pred_binary_map, target=target_binary_map, ignore_index=0)
+             .detach()
+             .cpu()
+         )
+         cell_jaccard = (
+             binary_jaccard_index(
+                 preds=pred_binary_map,
+                 target=target_binary_map,
+             )
+             .detach()
+             .cpu()
+         )
+         remapped_instance_pred = remap_label(predictions["instance_map"])
+         remapped_gt = remap_label(instance_maps_gt)
+         [dq, sq, pq], _ = get_fast_pq(true=remapped_gt, pred=remapped_instance_pred)
+ 
+         # detection scores
+         true_centroids = np.array(
+             [v["centroid"] for k, v in gt["instance_types"][0].items()]
+         )
+         pred_centroids = np.array(
+             [v["centroid"] for k, v in predictions["instance_types"].items()]
+         )
+         if true_centroids.shape[0] == 0:
+             true_centroids = np.array([[0, 0]])
+         if pred_centroids.shape[0] == 0:
+             pred_centroids = np.array([[0, 0]])
+ 
+         if self.magnification == 40:
+             pairing_radius = 12
+         else:
+             pairing_radius = 6
+         paired, unpaired_true, unpaired_pred = pair_coordinates(
+             true_centroids, pred_centroids, pairing_radius
+         )
+         f1_d, prec_d, rec_d = cell_detection_scores(
+             paired_true=paired[:, 0],
+             paired_pred=paired[:, 1],
+             unpaired_true=unpaired_true,
+             unpaired_pred=unpaired_pred,
+         )
+ 
+         image_metrics = {
+             "image_name": image_name,
+             "binary_dice_score": cell_dice,
+             "binary_jaccard_score": cell_jaccard,
+             "pq_score": pq,
+             "dq_score": dq,
+             "sq_score": sq,
+             "f1_d": f1_d,
+             "prec_d": prec_d,
+             "rec_d": rec_d,
+         }
+ 
+         return image_metrics
+ 
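+     # The pairing radius halves at lower magnification because the pixel
+     # spacing roughly doubles (about 0.25 µm/px at 40x vs. 0.5 µm/px at 20x),
+     # so 12 px and 6 px correspond to a comparable physical matching distance.
+ 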
+     def convert_binary_type(self, instance_types: dict) -> dict:
+         """Clean nuclei detections from type predictions to binary predictions
+ 
+         Args:
+             instance_types (dict): Dictionary with cells
+ 
+         Returns:
+             dict: Cleaned dictionary with just one class
+         """
+         cleaned_instance_types = {}
+         for key, elem in instance_types.items():
+             if elem["type"] == 0:
+                 continue
+             else:
+                 elem["type"] = 0
+                 cleaned_instance_types[key] = elem
+ 
+         return cleaned_instance_types
+ 
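+     # Example: {1: {"type": 2, ...}, 2: {"type": 0, ...}} becomes
+     # {1: {"type": 0, ...}}: background detections (type 0) are dropped and
+     # all remaining cells collapse to a single foreground class.
+ 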
+     def get_cell_predictions(self, predictions: dict) -> dict:
+         """Reshape predictions and calculate instance maps and instance types
+ 
+         Args:
+             predictions (dict): Dictionary with the following keys:
+                 * tissue_types: Logit tissue prediction output. Shape: (B, num_tissue_classes)
+                 * nuclei_binary_map: Logit output for the binary nuclei prediction branch. Shape: (B, 2, H, W)
+                 * hv_map: Logit output for the hv-prediction. Shape: (B, 2, H, W)
+                 * nuclei_type_map: Logit output for the nuclei instance-prediction. Shape: (B, num_nuclei_classes, H, W)
+ 
+         Returns:
+             dict:
+                 * nuclei_binary_map: Softmax binary prediction. Shape: (B, 2, H, W)
+                 * nuclei_type_map: Softmax nuclei type map. Shape: (B, num_nuclei_classes, H, W)
+                 * hv_map: Logit output for the hv-prediction. Shape: (B, 2, H, W)
+                 * tissue_types: Logit tissue prediction output. Shape: (B, num_tissue_classes)
+                 * instance_map: Instance map, each instance has one integer. Shape: (B, H, W)
+                 * instance_types: Instance type dict, cleaned. Keys:
+                     'bbox', 'centroid', 'contour', 'type_prob', 'type'
+         """
+         predictions["nuclei_binary_map"] = F.softmax(
+             predictions["nuclei_binary_map"], dim=1
+         )
+         predictions["nuclei_type_map"] = F.softmax(
+             predictions["nuclei_type_map"], dim=1
+         )
+         (
+             predictions["instance_map"],
+             predictions["instance_types"],
+         ) = self.model.calculate_instance_map(
+             predictions, magnification=self.magnification
+         )
+         predictions["instance_types"] = self.convert_binary_type(
+             predictions["instance_types"][0]
+         )
+ 
+         return predictions
+ 
+     def post_process_patching(self, predictions: dict) -> dict:
+         """Post-process patching by reassembling the (non-overlapping) patch predictions into one big image prediction
+ 
+         Args:
+             predictions (dict): Necessary keys:
+                 * nuclei_binary_map: Logit binary prediction. Shape: (B, 2, 256, 256)
+                 * hv_map: Logit output for the hv-prediction. Shape: (B, 2, H, W)
+                 * nuclei_type_map: Logit output for the nuclei instance-prediction. Shape: (B, num_nuclei_classes, 256, 256)
+         Returns:
+             dict: Returned elements that have been changed:
+                 * nuclei_binary_map: Shape: (1, 2, H, W)
+                 * hv_map: Shape: (1, 2, H, W)
+                 * nuclei_type_map: (1, num_nuclei_classes, H, W)
+         """
+         batch_size = predictions["nuclei_binary_map"].shape[0]
+         num_elems = int(np.sqrt(batch_size))
+         predictions["nuclei_binary_map"] = rearrange(
+             predictions["nuclei_binary_map"],
+             "(i j) d w h -> d (i w) (j h)",
+             i=num_elems,
+             j=num_elems,
+         )
+         predictions["hv_map"] = rearrange(
+             predictions["hv_map"],
+             "(i j) d w h -> d (i w) (j h)",
+             i=num_elems,
+             j=num_elems,
+         )
+         predictions["nuclei_type_map"] = rearrange(
+             predictions["nuclei_type_map"],
+             "(i j) d w h -> d (i w) (j h)",
+             i=num_elems,
+             j=num_elems,
+         )
+ 
+         predictions["nuclei_binary_map"] = torch.unsqueeze(
+             predictions["nuclei_binary_map"], dim=0
+         )
+         predictions["hv_map"] = torch.unsqueeze(predictions["hv_map"], dim=0)
+         predictions["nuclei_type_map"] = torch.unsqueeze(
+             predictions["nuclei_type_map"], dim=0
+         )
+ 
+         return predictions
+ 
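+     # Stitching example: 16 patches of 256 px form a 4x4 grid, so an input of
+     # shape (16, 2, 256, 256) is reassembled into (1, 2, 1024, 1024).
+ 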
+     def post_process_patching_overlap(self, predictions: dict, overlap: int) -> List:
+         """Post-process overlapping patches by merging the overlap. Uses the same merging strategy as the WSI inference pipeline.
+ 
+         Args:
+             predictions (dict): Predictions with necessary keys:
+                 * nuclei_binary_map: Binary nuclei prediction, Shape: (B, 2, H, W)
+                 * nuclei_type_map: Nuclei type prediction, Shape: (B, num_nuclei_classes, H, W)
+                 * hv_map: HV map predictions. Shape: (B, 2, H, W)
+             overlap (int): Used overlap as integer
+ 
+         Returns:
+             List: Cleaned (merged) cell list with each entry being one detected cell, given as a dictionary.
+         """
+         predictions["nuclei_binary_map"] = F.softmax(
+             predictions["nuclei_binary_map"], dim=1
+         )
+         predictions["nuclei_type_map"] = F.softmax(
+             predictions["nuclei_type_map"], dim=1
+         )
+         (
+             predictions["instance_map"],
+             predictions["instance_types"],
+         ) = self.model.calculate_instance_map(
+             predictions, magnification=self.magnification
+         )
+         predictions = self.merge_predictions(predictions, overlap)
+ 
+         return predictions
+ 
+     def merge_predictions(self, predictions: dict, overlap: int) -> list:
+         """Merge overlapping cell predictions
+ 
+         Args:
+             predictions (dict): Predictions with necessary keys:
+                 * nuclei_binary_map: Binary nuclei prediction, Shape: (B, 2, H, W)
+                 * instance_types: Instance type dictionary with cell entries
+             overlap (int): Used overlap as integer
+ 
+         Returns:
+             list: Cleaned (merged) cell list with each entry being one detected cell, given as a dictionary.
+         """
+         cell_list = []
+         decomposed_patch_num = int(np.sqrt(predictions["nuclei_binary_map"].shape[0]))
+ 
+         for i in range(decomposed_patch_num):
+             for j in range(decomposed_patch_num):
+                 x_global = i * 256 - i * overlap
+                 y_global = j * 256 - j * overlap
+                 patch_instance_types = predictions["instance_types"][
+                     i * decomposed_patch_num + j
+                 ]
+                 for cell in patch_instance_types.values():
+                     if cell["type"] == 0:
+                         continue
+                     offset_global = np.array([x_global, y_global])
+                     centroid_global = cell["centroid"] + np.flip(offset_global)
+                     contour_global = cell["contour"] + np.flip(offset_global)
+                     bbox_global = cell["bbox"] + offset_global
+                     cell_dict = {
+                         "bbox": bbox_global.tolist(),
+                         "centroid": centroid_global.tolist(),
+                         "contour": contour_global.tolist(),
+                         "type_prob": cell["type_prob"],
+                         "type": cell["type"],
+                         "patch_coordinates": [
+                             i,  # row
+                             j,  # col
+                         ],
+                         "cell_status": get_cell_position_marging(cell["bbox"], 256, 64),
+                         "offset_global": offset_global.tolist(),
+                     }
+                     if np.max(cell["bbox"]) == 256 or np.min(cell["bbox"]) == 0:
+                         position = get_cell_position(cell["bbox"], 256)
+                         cell_dict["edge_position"] = True
+                         cell_dict["edge_information"] = {}
+                         cell_dict["edge_information"]["position"] = position
+                         cell_dict["edge_information"]["edge_patches"] = get_edge_patch(
+                             position, i, j  # row, col
+                         )
+                     else:
+                         cell_dict["edge_position"] = False
+                     cell_list.append(cell_dict)
+         self.logger.info(f"Detected cells before cleaning: {len(cell_list)}")
+         cell_processor = CellPostProcessor(cell_list, self.logger)
+         cleaned_cells = cell_processor.post_process_cells()
+         cell_list = [cell_list[idx_c] for idx_c in cleaned_cells.index.values]
+         self.logger.info(f"Detected cells after cleaning: {len(cell_list)}")
+ 
+         return cell_list
+ 
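+     # Overlap arithmetic: with overlap=64 the effective patch stride is
+     # 256 - 64 = 192 px, so a 1024 px MoNuSeg image decomposes into a 5x5 grid
+     # (positions 0, 192, 384, 576 and 768; 768 + 256 = 1024) whose duplicated
+     # edge cells are removed by the CellPostProcessor above.
+ 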
+     def calculate_step_metric_overlap(
+         self, cell_list: List[dict], gt: dict, image_name: List[str]
+     ) -> Tuple[dict, dict]:
+         """Calculate step metric and return merged predictions for plotting
+ 
+         Args:
+             cell_list (List[dict]): List with cell dicts
+             gt (dict): Ground-truth dictionary
+             image_name (List[str]): Image name as a list with just one entry
+ 
+         Returns:
+             Tuple[dict, dict]:
+                 dict: Image metrics for one MoNuSeg image. Keys are:
+                     * image_name
+                     * binary_dice_score
+                     * binary_jaccard_score
+                     * pq_score
+                     * dq_score
+                     * sq_score
+                     * f1_d
+                     * prec_d
+                     * rec_d
+                 dict: Predictions, reshaped for one image and for plotting
+                     * nuclei_binary_map: Shape (1, 2, 1024, 1024) or (1, 2, 512, 512)
+                     * instance_map: Shape (1, 1024, 1024) or (1, 512, 512)
+                     * instance_types: Dict for each nucleus
+         """
+         predictions = {}
+         h, w = gt["nuclei_binary_map"].shape[1:]
+         instance_type_map = np.zeros((h, w), dtype=np.int32)
+ 
+         for instance, cell in enumerate(cell_list):
+             contour = np.array(cell["contour"])[None, :, :]
+             cv2.fillPoly(instance_type_map, contour, instance)
+ 
+         predictions["instance_map"] = torch.Tensor(instance_type_map)
+         instance_maps_gt = gt["instance_map"].detach().cpu()
+ 
+         pred_arr = np.clip(instance_type_map, 0, 1)
+         target_binary_map = gt["nuclei_binary_map"].to(self.device).squeeze()
+         predictions["nuclei_binary_map"] = pred_arr
+ 
+         predictions["instance_types"] = cell_list
+ 
+         cell_dice = (
+             dice(
+                 preds=torch.Tensor(pred_arr).to(self.device),
+                 target=target_binary_map,
+                 ignore_index=0,
+             )
+             .detach()
+             .cpu()
+         )
+         cell_jaccard = (
+             binary_jaccard_index(
+                 preds=torch.Tensor(pred_arr).to(self.device),
+                 target=target_binary_map,
+             )
+             .detach()
+             .cpu()
+         )
+         remapped_instance_pred = remap_label(predictions["instance_map"])[None, :, :]
+         remapped_gt = remap_label(instance_maps_gt)
+         [dq, sq, pq], _ = get_fast_pq(true=remapped_gt, pred=remapped_instance_pred)
+ 
+         # detection scores
+         true_centroids = np.array(
+             [v["centroid"] for k, v in gt["instance_types"][0].items()]
+         )
+         pred_centroids = np.array([v["centroid"] for v in cell_list])
+         if true_centroids.shape[0] == 0:
+             true_centroids = np.array([[0, 0]])
+         if pred_centroids.shape[0] == 0:
+             pred_centroids = np.array([[0, 0]])
+ 
+         if self.magnification == 40:
+             pairing_radius = 12
+         else:
+             pairing_radius = 6
+         paired, unpaired_true, unpaired_pred = pair_coordinates(
+             true_centroids, pred_centroids, pairing_radius
+         )
+         f1_d, prec_d, rec_d = cell_detection_scores(
+             paired_true=paired[:, 0],
+             paired_pred=paired[:, 1],
+             unpaired_true=unpaired_true,
+             unpaired_pred=unpaired_pred,
+         )
+ 
+         image_metrics = {
+             "image_name": image_name,
+             "binary_dice_score": cell_dice,
+             "binary_jaccard_score": cell_jaccard,
+             "pq_score": pq,
+             "dq_score": dq,
+             "sq_score": sq,
+             "f1_d": f1_d,
+             "prec_d": prec_d,
+             "rec_d": rec_d,
+         }
+ 
+         # align to common shapes
+         cleaned_instance_types = {
+             k + 1: v for k, v in enumerate(predictions["instance_types"])
+         }
+         for cell, results in cleaned_instance_types.items():
+             results["contour"] = np.array(results["contour"])
+             cleaned_instance_types[cell] = results
+         predictions["instance_types"] = cleaned_instance_types
+         predictions["instance_map"] = predictions["instance_map"][None, :, :]
+         predictions["nuclei_binary_map"] = F.one_hot(
+             torch.Tensor(predictions["nuclei_binary_map"]).type(torch.int64),
+             num_classes=2,
+         ).permute(2, 0, 1)[None, :, :, :]
+ 
+         return image_metrics, predictions
+ 
+     def plot_results(
+         self,
+         img: torch.Tensor,
+         predictions: dict,
+     ) -> Image.Image:
+         """Plot MoNuSeg results
+ 
+         Args:
+             img (torch.Tensor): Image as torch.Tensor, with shape (1, 3, 1024, 1024) or (1, 3, 512, 512)
+             predictions (dict): Prediction dictionary. Necessary keys:
+                 * nuclei_binary_map: Shape (1, 2, 1024, 1024) or (1, 2, 512, 512)
+                 * instance_map: Shape (1, 1024, 1024) or (1, 512, 512)
+                 * instance_types: Instance type dict
+ 
+         Returns:
+             Image.Image: Composed prediction panel (also saved to pred_img.png)
+         """
+         predictions["nuclei_binary_map"] = predictions["nuclei_binary_map"].permute(
+             0, 2, 3, 1
+         )
+ 
+         h = predictions["instance_map"].shape[1]
+         w = predictions["instance_map"].shape[2]
+ 
+         # process image and other maps
+         sample_image = img.permute(0, 2, 3, 1).contiguous().cpu().numpy()
+ 
+         pred_sample_binary_map = (
+             predictions["nuclei_binary_map"][:, :, :, 1].detach().cpu().numpy()
+         )[0]
+         pred_sample_instance_maps = (
+             predictions["instance_map"].detach().cpu().numpy()[0]
+         )
+ 
+         binary_cmap = plt.get_cmap("Greys_r")
+         instance_map = plt.get_cmap("viridis")
+ 
+         # invert the normalization of the sample images
+         transform_settings = self.run_conf["transformations"]
+         if "normalize" in transform_settings:
+             mean = transform_settings["normalize"].get("mean", (0.5, 0.5, 0.5))
+             std = transform_settings["normalize"].get("std", (0.5, 0.5, 0.5))
+         else:
+             mean = (0.5, 0.5, 0.5)
+             std = (0.5, 0.5, 0.5)
+         inv_normalize = transforms.Normalize(
+             mean=[-0.5 / mean[0], -0.5 / mean[1], -0.5 / mean[2]],
+             std=[1 / std[0], 1 / std[1], 1 / std[2]],
+         )
+         inv_samples = inv_normalize(torch.tensor(sample_image).permute(0, 3, 1, 2))
+         sample_image = inv_samples.permute(0, 2, 3, 1).detach().cpu().numpy()[0]
+ 
+         # start overlaying on image
+         placeholder = np.zeros((h, 4 * w, 3))
+         # original image
+         placeholder[:h, :w, :3] = sample_image
+         # binary prediction
+         placeholder[:h, w : 2 * w, :3] = rgba2rgb(
+             binary_cmap(pred_sample_binary_map * 255)
+         )
+         # instance predictions
+         placeholder[:h, 2 * w : 3 * w, :3] = rgba2rgb(
+             instance_map(
+                 (pred_sample_instance_maps - np.min(pred_sample_instance_maps))
+                 / (
+                     np.max(pred_sample_instance_maps)
+                     - np.min(pred_sample_instance_maps) + 1e-10
+                 )
+             )
+         )
+         # predicted contours
+         pred_contours_polygon = [
+             v["contour"] for v in predictions["instance_types"].values()
+         ]
+         pred_contours_polygon = [
+             list(zip(poly[:, 0], poly[:, 1])) for poly in pred_contours_polygon
+         ]
+         pred_contour_colors_polygon = [
+             "#70c6ff" for i in range(len(pred_contours_polygon))
+         ]
+         pred_cell_image = Image.fromarray(
+             (sample_image * 255).astype(np.uint8)
+         ).convert("RGB")
+         pred_drawing = ImageDraw.Draw(pred_cell_image)
+         add_patch = lambda poly, color: pred_drawing.polygon(
+             poly, outline=color, width=2
+         )
+         [
+             add_patch(poly, c)
+             for poly, c in zip(pred_contours_polygon, pred_contour_colors_polygon)
+         ]
+         placeholder[:h, 3 * w : 4 * w, :3] = np.asarray(pred_cell_image) / 255
+ 
+         # plotting
+         test_image = Image.fromarray((placeholder * 255).astype(np.uint8))
+         fig, axs = plt.subplots(figsize=(3, 2), dpi=1200)
+         axs.imshow(placeholder)
+         axs.set_xticks(np.arange(w / 2, 4 * w, w))
+         axs.set_xticklabels(
+             [
+                 "Image",
+                 "Binary-Cells",
+                 "Instances",
+                 "Contours",
+             ],
+             fontsize=6,
+         )
+         axs.xaxis.tick_top()
+ 
+         axs.set_yticks([h / 2])
+         axs.set_yticklabels(["Pred."], fontsize=6)
+         axs.tick_params(axis="both", which="both", length=0)
+         grid_x = np.arange(w, 3 * w, w)
+         grid_y = np.arange(h, 2 * h, h)
+ 
+         for x_seg in grid_x:
+             axs.axvline(x_seg, color="black")
+         for y_seg in grid_y:
+             axs.axhline(y_seg, color="black")
+ 
+         fig.suptitle("Patch predictions for input image", fontsize=6)
+         fig.tight_layout()
+         fig.savefig("pred_img.png")
+         plt.close()
+ 
+         return test_image
+ 
+ # CLI
+ class InferenceCellViTMoNuSegParser:
+     def __init__(self) -> None:
+         parser = argparse.ArgumentParser(
+             formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+             description="Perform CellViT inference for the MoNuSeg dataset",
+         )
+ 
+         parser.add_argument(
+             "--model",
+             type=str,
+             help="Model checkpoint file that is used for inference",
+             default="./model_best.pth",
+         )
+         parser.add_argument(
+             "--dataset",
+             type=str,
+             help="Path to MoNuSeg dataset.",
+             default="/data/lunbinzeng/datasets/monuseg/testing/",
+         )
+         parser.add_argument(
+             "--outdir",
+             type=str,
+             help="Path to output directory to store results.",
+             default="/data/lunbinzeng/results/lkcell/small/2024-04-22T232903_CellViT-unireplknet-fold1-final/monuseg/inference/",
+         )
+         parser.add_argument(
+             "--gpu", type=int, help="CUDA GPU ID for inference.", default=0
+         )
+         parser.add_argument(
+             "--magnification",
+             type=int,
+             help="Dataset magnification. Either 20 or 40.",
+             choices=[20, 40],
+             default=20,
+         )
+         parser.add_argument(
+             "--patching",
+             type=bool,
+             help="Patch to 256px images.",
+             default=False,
+         )
+         parser.add_argument(
+             "--overlap",
+             type=int,
+             help="Patch overlap, only valid together with patching",
+             default=0,
+         )
+         parser.add_argument(
+             "--plots",
+             type=bool,
+             help="Generate result plots.",
+             default=True,
+         )
+ 
+         self.parser = parser
+ 
+     def parse_arguments(self) -> dict:
+         opt = self.parser.parse_args()
+         return vars(opt)
+ 
+ 
+ if __name__ == "__main__":
+     configuration_parser = InferenceCellViTMoNuSegParser()
+     configuration = configuration_parser.parse_arguments()
+     print(configuration)
+ 
+     inf = MoNuSegInference(
+         model_path=configuration["model"],
+         dataset_path=configuration["dataset"],
+         outdir=configuration["outdir"],
+         gpu=configuration["gpu"],
+         patching=configuration["patching"],
+         magnification=configuration["magnification"],
+         overlap=configuration["overlap"],
+     )
+     inf.run_inference(generate_plots=configuration["plots"])
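+ 
+ # Example invocation (paths are placeholders):
+ #   python inference_cellvit_experiment_monuseg.py --model ./model_best.pth \
+ #       --dataset /path/to/monuseg/testing/ --outdir ./results/ \
+ #       --gpu 0 --magnification 40 --overlap 0
+ # Caveat: argparse's type=bool treats any non-empty string as True, so
+ # "--patching False" would still enable patching; leave the flag unset (or
+ # pass an empty string) to keep the default.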
cell_segmentation/inference/inference_cellvit_experiment_pannuke.py ADDED
@@ -0,0 +1,1157 @@
+ # -*- coding: utf-8 -*-
+ # CellViT Inference Method for Patch-Wise Inference on a test set
+ # Without merging WSI
+ #
+ # Aim is to calculate metrics as defined for the PanNuke dataset
+ #
+ # @ Fabian Hörst, [email protected]
+ # Institute for Artificial Intelligence in Medicine,
+ # University Medicine Essen
+ 
+ import argparse
+ import inspect
+ import os
+ import sys
+ 
+ currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
+ parentdir = os.path.dirname(currentdir)
+ sys.path.insert(0, parentdir)
+ parentdir = os.path.dirname(parentdir)
+ sys.path.insert(0, parentdir)
+ 
+ from base_ml.base_experiment import BaseExperiment
+ 
+ BaseExperiment.seed_run(1232)
+ 
+ import json
+ from pathlib import Path
+ from typing import List, Tuple, Union
+ 
+ import albumentations as A
+ import numpy as np
+ import torch
+ import torch.nn.functional as F
+ import tqdm
+ import yaml
+ from matplotlib import pyplot as plt
+ from PIL import Image, ImageDraw
+ from skimage.color import rgba2rgb
+ from sklearn.metrics import accuracy_score
+ from tabulate import tabulate
+ from torch.utils.data import DataLoader
+ from torchmetrics.functional import dice
+ from torchmetrics.functional.classification import binary_jaccard_index
+ from torchvision import transforms
+ 
+ from cell_segmentation.datasets.dataset_coordinator import select_dataset
+ from models.segmentation.cell_segmentation.cellvit import CellViT, DataclassHVStorage
+ from cell_segmentation.utils.metrics import (
+     cell_detection_scores,
+     cell_type_detection_scores,
+     get_fast_pq,
+     remap_label,
+     binarize,
+ )
+ from cell_segmentation.utils.post_proc_cellvit import calculate_instances
+ from cell_segmentation.utils.tools import cropping_center, pair_coordinates
+ from utils.logger import Logger
+ 
+ 
+ class InferenceCellViT:
+     def __init__(
+         self,
+         run_dir: Union[Path, str],
+         gpu: int,
+         magnification: int = 40,
+         checkpoint_name: str = "model_best.pth",
+     ) -> None:
+         """Patch-wise inference for CellViT models
+ 
+         Args:
+             run_dir (Union[Path, str]): logging directory with checkpoints and configs
+             gpu (int): CUDA GPU device to use for inference
+             magnification (int, optional): Dataset magnification. Defaults to 40.
+             checkpoint_name (str, optional): Name of the model checkpoint to load. Defaults to model_best.pth
+         """
+         self.run_dir = Path(run_dir)
+         self.device = "cpu"
+         self.run_conf: dict = None
+         self.logger: Logger = None
+         self.magnification = magnification
+         self.checkpoint_name = checkpoint_name
+ 
+         self.__load_run_conf()
+         self.__instantiate_logger()
+         self.__setup_amp()
+ 
+         self.num_classes = self.run_conf["data"]["num_nuclei_classes"]
+ 
+     def __load_run_conf(self) -> None:
+         """Load the config.yaml file with the run setup
+ 
+         Be careful with loading and usage, since original None values in the run configuration are not stored when dumped to a yaml file.
+         If you want to check whether a key is undefined, first check if the key exists in the dict.
+         """
+         with open((self.run_dir / "config.yaml").resolve(), "r") as run_config_file:
+             yaml_config = yaml.safe_load(run_config_file)
+             self.run_conf = dict(yaml_config)
+ 
+     def __load_dataset_setup(self, dataset_path: Union[Path, str]) -> None:
+         """Load the configuration of the cell segmentation dataset.
+ 
+         The dataset must have a dataset_config.yaml file in its dataset path with the following entries:
+             * tissue_types: describing the present tissue types with corresponding integer
+             * nuclei_types: describing the present nuclei types with corresponding integer
+ 
+         Args:
+             dataset_path (Union[Path, str]): Path to dataset folder
+         """
+         dataset_config_path = Path(dataset_path) / "dataset_config.yaml"
+         with open(dataset_config_path, "r") as dataset_config_file:
+             yaml_config = yaml.safe_load(dataset_config_file)
+             self.dataset_config = dict(yaml_config)
+ 
+     def __instantiate_logger(self) -> None:
+         """Instantiate logger
+ 
+         Logger is using no formatters. Logs are stored in the run directory under the filename: inference.log
+         """
+         logger = Logger(
+             level=self.run_conf["logging"]["level"].upper(),
+             log_dir=Path(self.run_dir).resolve(),
+             comment="inference",
+             use_timestamp=False,
+             formatter="%(message)s",
+         )
+         self.logger = logger.create_logger()
+ 
+     def __check_eval_model(self) -> None:
+         """Check if there is a best-model pytorch file"""
+         assert (self.run_dir / "checkpoints" / self.checkpoint_name).is_file()
+ 
+     def __setup_amp(self) -> None:
+         """Set up automatic mixed precision (amp) for inference."""
+         self.mixed_precision = self.run_conf["training"].get("mixed_precision", False)
+ 
+     def get_model(
+         self, model_type: str
+     ) -> CellViT:
+         """Return the trained model for inference
+ 
+         Args:
+             model_type (str): Name of the model. Must be one of:
+                 CellViT, CellViTShared, CellViT256, CellViT256Shared, CellViTSAM, CellViTSAMShared
+ 
+         Returns:
+             CellViT: Model
+         """
+         implemented_models = [
+             "CellViT",
+         ]
+         if model_type not in implemented_models:
+             raise NotImplementedError(
+                 f"Unknown model type. Please select one of {implemented_models}"
+             )
+         if model_type in ["CellViT", "CellViTShared"]:
+             if model_type == "CellViT":
+                 model_class = CellViT
+ 
+             model = model_class(
+                 model256_path=self.run_conf["model"].get("pretrained_encoder"),
+                 num_nuclei_classes=self.run_conf["data"]["num_nuclei_classes"],
+                 num_tissue_classes=self.run_conf["data"]["num_tissue_classes"],
+                 in_channels=self.run_conf["model"].get("input_channels", 3),
+                 # embed_dim=self.run_conf["model"]["embed_dim"],
+                 # depth=self.run_conf["model"]["depth"],
+                 # num_heads=self.run_conf["model"]["num_heads"],
+                 # extract_layers=self.run_conf["model"]["extract_layers"],
+                 # regression_loss=self.run_conf["model"].get("regression_loss", False),
+             )
+ 
+         return model
+ 
+     def setup_patch_inference(
+         self, test_folds: List[int] = None
+     ) -> Tuple[
+         DataLoader,
+         dict,
+     ]:
+         """Set up patch inference by defining a patch-wise dataloader
+ 
+         Args:
+             test_folds (List[int], optional): Test folds to use. Otherwise the folds defined in config.yaml (in run_dir) are loaded. Defaults to None.
+ 
+         Returns:
+             Tuple[DataLoader, dict]:
+                 DataLoader: Inference DataLoader
+                 dict: Dataset configuration. Keys are:
+                     * "tissue_types": describing the present tissue types with corresponding integer
+                     * "nuclei_types": describing the present nuclei types with corresponding integer
+         """
+         # get dataset
+         if test_folds is None:
+             if "test_folds" in self.run_conf["data"]:
+                 if self.run_conf["data"]["test_folds"] is None:
+                     self.logger.info(
+                         "There was no test set provided. We now use the validation dataset for testing"
+                     )
+                     self.run_conf["data"]["test_folds"] = self.run_conf["data"][
+                         "val_folds"
+                     ]
+             else:
+                 self.logger.info(
+                     "There was no test set provided. We now use the validation dataset for testing"
+                 )
+                 self.run_conf["data"]["test_folds"] = self.run_conf["data"]["val_folds"]
+         else:
+             self.run_conf["data"]["test_folds"] = self.run_conf["data"]["val_folds"]
+         self.logger.info(
+             f"Performing Inference on test set: {self.run_conf['data']['test_folds']}"
+         )
+ 
+         # build inference transforms from the run configuration
+         transform_settings = self.run_conf["transformations"]
+         if "normalize" in transform_settings:
+             mean = transform_settings["normalize"].get("mean", (0.5, 0.5, 0.5))
+             std = transform_settings["normalize"].get("std", (0.5, 0.5, 0.5))
+         else:
+             mean = (0.5, 0.5, 0.5)
+             std = (0.5, 0.5, 0.5)
+         inference_transforms = A.Compose([A.Normalize(mean=mean, std=std)])
+ 
+         inference_dataset = select_dataset(
+             dataset_name=self.run_conf["data"]["dataset"],
+             split="test",
+             dataset_config=self.run_conf["data"],
+             transforms=inference_transforms,
+         )
+ 
+         inference_dataloader = DataLoader(
+             inference_dataset,
+             batch_size=1,
+             num_workers=12,
+             pin_memory=False,
+             shuffle=False,
+         )
+ 
+         return inference_dataloader, self.dataset_config
+ 
+     def run_patch_inference(
+         self,
+         model: CellViT,
+         inference_dataloader: DataLoader,
+         dataset_config: dict,
+         generate_plots: bool = False,
+     ) -> None:
+         """Run patch inference with a given setup
+ 
+         Args:
+             model (CellViT): Model to use for inference
+             inference_dataloader (DataLoader): Inference Dataloader. Must return a batch with the following structure:
+                 * Images (torch.Tensor)
+                 * Masks (dict)
+                 * Tissue types as str
+                 * Image name as str
+             dataset_config (dict): Dataset configuration. Required keys are:
+                 * "tissue_types": describing the present tissue types with corresponding integer
+                 * "nuclei_types": describing the present nuclei types with corresponding integer
+             generate_plots (bool, optional): If inference plots should be generated. Defaults to False.
+         """
+         # put model in eval mode
+         model.to(device=self.device)
+         model.eval()
+ 
+         # setup score trackers
+         image_names = []  # image names as str
+         binary_dice_scores = []  # binary dice scores per image
+         binary_jaccard_scores = []  # binary jaccard scores per image
+         pq_scores = []  # pq-scores per image
+         dq_scores = []  # dq-scores per image
+         sq_scores = []  # sq-scores per image
+         cell_type_pq_scores = []  # pq-scores per cell type and image
+         cell_type_dq_scores = []  # dq-scores per cell type and image
+         cell_type_sq_scores = []  # sq-scores per cell type and image
+         tissue_pred = []  # tissue predictions for each image
+         tissue_gt = []  # ground truth tissue image class
+         tissue_types_inf = []  # string repr of ground truth tissue image class
+ 
+         paired_all_global = []  # unique matched index pairs
+         unpaired_true_all_global = (
+             []
+         )  # the index must exist in `true_inst_type_all` and be unique
+         unpaired_pred_all_global = (
+             []
+         )  # the index must exist in `pred_inst_type_all` and be unique
+         true_inst_type_all_global = []  # each index is 1 independent data point
+         pred_inst_type_all_global = []  # each index is 1 independent data point
+ 
+         # for detection scores
+         true_idx_offset = 0
+         pred_idx_offset = 0
+ 
+         inference_loop = tqdm.tqdm(
+             enumerate(inference_dataloader), total=len(inference_dataloader)
+         )
+ 
+         with torch.no_grad():
+             for batch_idx, batch in inference_loop:
+                 batch_metrics = self.inference_step(
+                     model, batch, generate_plots=generate_plots
+                 )
+                 # unpack batch_metrics
+                 image_names = image_names + batch_metrics["image_names"]
+ 
+                 # dice scores
+                 binary_dice_scores = (
+                     binary_dice_scores + batch_metrics["binary_dice_scores"]
+                 )
+                 binary_jaccard_scores = (
+                     binary_jaccard_scores + batch_metrics["binary_jaccard_scores"]
+                 )
+ 
+                 # pq scores
+                 pq_scores = pq_scores + batch_metrics["pq_scores"]
+                 dq_scores = dq_scores + batch_metrics["dq_scores"]
+                 sq_scores = sq_scores + batch_metrics["sq_scores"]
+                 tissue_types_inf = tissue_types_inf + batch_metrics["tissue_types"]
+                 cell_type_pq_scores = (
+                     cell_type_pq_scores + batch_metrics["cell_type_pq_scores"]
+                 )
+                 cell_type_dq_scores = (
+                     cell_type_dq_scores + batch_metrics["cell_type_dq_scores"]
+                 )
+                 cell_type_sq_scores = (
+                     cell_type_sq_scores + batch_metrics["cell_type_sq_scores"]
+                 )
+                 tissue_pred.append(batch_metrics["tissue_pred"])
+                 tissue_gt.append(batch_metrics["tissue_gt"])
+ 
+                 # detection scores
+                 true_idx_offset = (
+                     true_idx_offset + true_inst_type_all_global[-1].shape[0]
+                     if batch_idx != 0
+                     else 0
+                 )
+                 pred_idx_offset = (
+                     pred_idx_offset + pred_inst_type_all_global[-1].shape[0]
+                     if batch_idx != 0
+                     else 0
+                 )
+                 true_inst_type_all_global.append(batch_metrics["true_inst_type_all"])
+                 pred_inst_type_all_global.append(batch_metrics["pred_inst_type_all"])
+                 # increment the pairing index statistic
+                 batch_metrics["paired_all"][:, 0] += true_idx_offset
+                 batch_metrics["paired_all"][:, 1] += pred_idx_offset
+                 paired_all_global.append(batch_metrics["paired_all"])
+ 
+                 batch_metrics["unpaired_true_all"] += true_idx_offset
+                 batch_metrics["unpaired_pred_all"] += pred_idx_offset
+                 unpaired_true_all_global.append(batch_metrics["unpaired_true_all"])
+                 unpaired_pred_all_global.append(batch_metrics["unpaired_pred_all"])
+ 
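+         # Offset bookkeeping example: if the first image contributed 3
+         # ground-truth nuclei, the matched indices of the second image are
+         # shifted by true_idx_offset = 3, so every row of paired_all stays
+         # unique across the whole dataset.
+ 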
+         # assemble batches to datasets (global)
+         tissue_types_inf = [t.lower() for t in tissue_types_inf]
+ 
+         paired_all = np.concatenate(paired_all_global, axis=0)
+         unpaired_true_all = np.concatenate(unpaired_true_all_global, axis=0)
+         unpaired_pred_all = np.concatenate(unpaired_pred_all_global, axis=0)
+         true_inst_type_all = np.concatenate(true_inst_type_all_global, axis=0)
+         pred_inst_type_all = np.concatenate(pred_inst_type_all_global, axis=0)
+         paired_true_type = true_inst_type_all[paired_all[:, 0]]
+         paired_pred_type = pred_inst_type_all[paired_all[:, 1]]
+         unpaired_true_type = true_inst_type_all[unpaired_true_all]
+         unpaired_pred_type = pred_inst_type_all[unpaired_pred_all]
+ 
+         binary_dice_scores = np.array(binary_dice_scores)
+         binary_jaccard_scores = np.array(binary_jaccard_scores)
+         pq_scores = np.array(pq_scores)
+         dq_scores = np.array(dq_scores)
+         sq_scores = np.array(sq_scores)
+ 
+         tissue_detection_accuracy = accuracy_score(
+             y_true=np.concatenate(tissue_gt), y_pred=np.concatenate(tissue_pred)
+         )
+         f1_d, prec_d, rec_d = cell_detection_scores(
+             paired_true=paired_true_type,
+             paired_pred=paired_pred_type,
+             unpaired_true=unpaired_true_type,
+             unpaired_pred=unpaired_pred_type,
+         )
+         dataset_metrics = {
+             "Binary-Cell-Dice-Mean": float(np.nanmean(binary_dice_scores)),
+             "Binary-Cell-Jacard-Mean": float(np.nanmean(binary_jaccard_scores)),
+             "Tissue-Multiclass-Accuracy": tissue_detection_accuracy,
+             "bPQ": float(np.nanmean(pq_scores)),
+             "bDQ": float(np.nanmean(dq_scores)),
+             "bSQ": float(np.nanmean(sq_scores)),
+             "mPQ": float(np.nanmean([np.nanmean(pq) for pq in cell_type_pq_scores])),
+             "mDQ": float(np.nanmean([np.nanmean(dq) for dq in cell_type_dq_scores])),
+             "mSQ": float(np.nanmean([np.nanmean(sq) for sq in cell_type_sq_scores])),
+             "f1_detection": float(f1_d),
+             "precision_detection": float(prec_d),
+             "recall_detection": float(rec_d),
+         }
+ 
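+         # bPQ averages the class-agnostic PQ over all images, while mPQ first
+         # averages PQ over the nuclei classes of each image and then over the
+         # images (the nested np.nanmean calls above).
+ 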
+         # calculate tissue metrics
+         tissue_types = dataset_config["tissue_types"]
+         tissue_metrics = {}
+         for tissue in tissue_types.keys():
+             tissue = tissue.lower()
+             tissue_ids = np.where(np.asarray(tissue_types_inf) == tissue)
+             tissue_metrics[f"{tissue}"] = {}
+             tissue_metrics[f"{tissue}"]["Dice"] = float(
+                 np.nanmean(binary_dice_scores[tissue_ids])
+             )
+             tissue_metrics[f"{tissue}"]["Jaccard"] = float(
+                 np.nanmean(binary_jaccard_scores[tissue_ids])
+             )
+             tissue_metrics[f"{tissue}"]["mPQ"] = float(
+                 np.nanmean(
+                     [np.nanmean(pq) for pq in np.array(cell_type_pq_scores)[tissue_ids]]
+                 )
+             )
+             tissue_metrics[f"{tissue}"]["bPQ"] = float(
+                 np.nanmean(pq_scores[tissue_ids])
+             )
+ 
+         # calculate nuclei metrics
+         nuclei_types = dataset_config["nuclei_types"]
+         nuclei_metrics_d = {}
+         nuclei_metrics_pq = {}
+         nuclei_metrics_dq = {}
+         nuclei_metrics_sq = {}
+         for nuc_name, nuc_type in nuclei_types.items():
+             if nuc_name.lower() == "background":
+                 continue
+             nuclei_metrics_pq[nuc_name] = np.nanmean(
+                 [pq[nuc_type] for pq in cell_type_pq_scores]
+             )
+             nuclei_metrics_dq[nuc_name] = np.nanmean(
+                 [dq[nuc_type] for dq in cell_type_dq_scores]
+             )
+             nuclei_metrics_sq[nuc_name] = np.nanmean(
+                 [sq[nuc_type] for sq in cell_type_sq_scores]
+             )
+             f1_cell, prec_cell, rec_cell = cell_type_detection_scores(
+                 paired_true_type,
+                 paired_pred_type,
+                 unpaired_true_type,
+                 unpaired_pred_type,
+                 nuc_type,
+             )
+             nuclei_metrics_d[nuc_name] = {
+                 "f1_cell": f1_cell,
+                 "prec_cell": prec_cell,
+                 "rec_cell": rec_cell,
+             }
+ 
+         # print final results
+         # binary
+         self.logger.info(f"{20*'*'} Binary Dataset metrics {20*'*'}")
+         [self.logger.info(f"{f'{k}:': <25} {v}") for k, v in dataset_metrics.items()]
+         # tissue (the tissue-level PQ values are bPQ values)
+         self.logger.info(f"{20*'*'} Tissue metrics {20*'*'}")
+         flattened_tissue = []
+         for key in tissue_metrics:
+             flattened_tissue.append(
+                 [
+                     key,
+                     tissue_metrics[key]["Dice"],
+                     tissue_metrics[key]["Jaccard"],
+                     tissue_metrics[key]["mPQ"],
+                     tissue_metrics[key]["bPQ"],
+                 ]
+             )
+         self.logger.info(
+             tabulate(
+                 flattened_tissue, headers=["Tissue", "Dice", "Jaccard", "mPQ", "bPQ"]
+             )
+         )
+         # nuclei types
+         self.logger.info(f"{20*'*'} Nuclei Type Metrics {20*'*'}")
+         flattened_nuclei_type = []
+         for key in nuclei_metrics_pq:
+             flattened_nuclei_type.append(
+                 [
+                     key,
+                     nuclei_metrics_dq[key],
+                     nuclei_metrics_sq[key],
+                     nuclei_metrics_pq[key],
+                 ]
+             )
+         self.logger.info(
+             tabulate(flattened_nuclei_type, headers=["Nuclei Type", "DQ", "SQ", "PQ"])
+         )
+         # nuclei detection metrics
+         self.logger.info(f"{20*'*'} Nuclei Detection Metrics {20*'*'}")
+         flattened_detection = []
+         for key in nuclei_metrics_d:
+             flattened_detection.append(
+                 [
+                     key,
+                     nuclei_metrics_d[key]["prec_cell"],
+                     nuclei_metrics_d[key]["rec_cell"],
+                     nuclei_metrics_d[key]["f1_cell"],
+                 ]
+             )
+         self.logger.info(
+             tabulate(
+                 flattened_detection,
+                 headers=["Nuclei Type", "Precision", "Recall", "F1"],
+             )
+         )
+ 
+         # save per-image metrics
+         image_metrics = {}
+         for idx, image_name in enumerate(image_names):
+             image_metrics[image_name] = {
+                 "Dice": float(binary_dice_scores[idx]),
+                 "Jaccard": float(binary_jaccard_scores[idx]),
+                 "bPQ": float(pq_scores[idx]),
+             }
+         all_metrics = {
+             "dataset": dataset_metrics,
+             "tissue_metrics": tissue_metrics,
+             "image_metrics": image_metrics,
+             "nuclei_metrics_pq": nuclei_metrics_pq,
+             "nuclei_metrics_d": nuclei_metrics_d,
+         }
+ 
+         # saving
+         with open(str(self.run_dir / "inference_results.json"), "w") as outfile:
+             json.dump(all_metrics, outfile, indent=2)
+ 
523
+ def inference_step(
524
+ self,
525
+ model: CellViT,
526
+ batch: tuple,
527
+ generate_plots: bool = False,
528
+ ) -> dict:
529
+ """Inference step for a patch-wise batch
530
+
531
+ Args:
532
+ model (CellViT): Model to use for inference
533
+ batch (tuple): Batch with the following structure:
534
+ * Images (torch.Tensor)
535
+ * Masks (dict)
536
+ * Tissue types as str
537
+ * Image name as str
538
+ generate_plots (bool, optional): If inference plots should be generated. Defaults to False.
539
+ """
540
+ # unpack batch; for the tensor shapes, compare with the train_step method
541
+ imgs = batch[0].to(self.device)
542
+ masks = batch[1]
543
+ tissue_types = list(batch[2])
544
+ image_names = list(batch[3])
545
+
546
+ model.zero_grad()
547
+ if self.mixed_precision:
548
+ with torch.autocast(device_type="cuda", dtype=torch.float16):
549
+ predictions = model.forward(imgs)
550
+ else:
551
+ predictions = model.forward(imgs)
552
+ predictions = self.unpack_predictions(predictions=predictions, model=model)
553
+ gt = self.unpack_masks(masks=masks, tissue_types=tissue_types, model=model)
554
+
555
+ # scores
556
+ batch_metrics, scores = self.calculate_step_metric(predictions, gt, image_names)
557
+ batch_metrics["tissue_types"] = tissue_types
558
+ if generate_plots:
559
+ self.plot_results(
560
+ imgs=imgs,
561
+ predictions=predictions,
562
+ ground_truth=gt,
563
+ img_names=image_names,
564
+ num_nuclei_classes=self.num_classes,
565
+ outdir=Path(self.run_dir / "inference_predictions"),
566
+ scores=scores,
567
+ )
568
+
569
+ return batch_metrics
570
+
571
+ def run_single_image_inference(
572
+ self, model: CellViT, image: np.ndarray, generate_plots: bool = True
573
+ ) -> None:
574
+ """Run inference for a single image; plot_results renders and saves the prediction figure to disk."""
575
+ # set image transforms
576
+ transform_settings = self.run_conf["transformations"]
577
+ if "normalize" in transform_settings:
578
+ mean = transform_settings["normalize"].get("mean", (0.5, 0.5, 0.5))
579
+ std = transform_settings["normalize"].get("std", (0.5, 0.5, 0.5))
580
+ else:
581
+ mean = (0.5, 0.5, 0.5)
582
+ std = (0.5, 0.5, 0.5)
583
+ transforms = A.Compose([A.Normalize(mean=mean, std=std)])
584
+
585
+ transformed_img = transforms(image=image)["image"]
586
+ image = torch.from_numpy(transformed_img).permute(2, 0, 1).unsqueeze(0).float()
587
+ imgs = image.to(self.device)
588
+
589
+ model.zero_grad()
590
+ predictions = model.forward(imgs)
591
+ predictions = self.unpack_predictions(predictions=predictions, model=model)
592
+
593
+
594
+
595
+ image_output = self.plot_results(
596
+ imgs=imgs,
597
+ predictions=predictions,
598
+ num_nuclei_classes=self.num_classes,
599
+ outdir=Path(self.run_dir),
600
+ )
601
+
602
+ return image_output
603
+
604
+
605
+
606
+
607
+ def unpack_predictions(
608
+ self, predictions: dict, model: CellViT
609
+ ) -> DataclassHVStorage:
610
+ """Unpack the given predictions. Main focus lays on reshaping and postprocessing predictions, e.g. separating instances
611
+
612
+ Args:
613
+ predictions (dict): Dictionary with the following keys:
614
+ * tissue_types: Logit tissue prediction output. Shape: (batch_size, num_tissue_classes)
615
+ * nuclei_binary_map: Logit output for binary nuclei prediction branch. Shape: (batch_size, H, W, 2)
616
+ * hv_map: Logit output for hv-prediction. Shape: (batch_size, H, W, 2)
617
+ * nuclei_type_map: Logit output for nuclei instance-prediction. Shape: (batch_size, num_nuclei_classes, H, W)
618
+ model (CellViT): Current model
619
+
620
+ Returns:
621
+ DataclassHVStorage: Processed network output
622
+
623
+ """
624
+ predictions["tissue_types"] = predictions["tissue_types"].to(self.device)
625
+ predictions["nuclei_binary_map"] = F.softmax(
626
+ predictions["nuclei_binary_map"], dim=1
627
+ ) # shape: (batch_size, 2, H, W)
628
+ predictions["nuclei_type_map"] = F.softmax(
629
+ predictions["nuclei_type_map"], dim=1
630
+ ) # shape: (batch_size, num_nuclei_classes, H, W)
631
+ (
632
+ predictions["instance_map"],
633
+ predictions["instance_types"],
634
+ ) = model.calculate_instance_map(
635
+ predictions, magnification=self.magnification
636
+ ) # shape: (batch_size, H', W')
637
+ predictions["instance_types_nuclei"] = model.generate_instance_nuclei_map(
638
+ predictions["instance_map"], predictions["instance_types"]
639
+ ).permute(0, 3, 1, 2).to(
640
+ self.device
641
+ ) # shape: (batch_size, num_nuclei_classes, H, W)
642
+ predictions = DataclassHVStorage(
643
+ nuclei_binary_map=predictions["nuclei_binary_map"], #[64, 2, 256, 256]
644
+ hv_map=predictions["hv_map"], #[64, 2, 256, 256]
645
+ nuclei_type_map=predictions["nuclei_type_map"], #[64, 6, 256, 256]
646
+ tissue_types=predictions["tissue_types"], #[64,19]
647
+ instance_map=predictions["instance_map"], #[64, 256, 256]
648
+ instance_types=predictions["instance_types"], #list of 64 tensors, each tensor is [256,256]
649
+ instance_types_nuclei=predictions["instance_types_nuclei"], #[64,256,256,6]
650
+ batch_size=predictions["tissue_types"].shape[0],#64
651
+ )
652
+
653
+ return predictions
654
+
655
+ def unpack_masks(
656
+ self, masks: dict, tissue_types: list, model: CellViT
657
+ ) -> DataclassHVStorage:
658
+ # get ground truth values, perform one hot encoding for segmentation maps
659
+ gt_nuclei_binary_map_onehot = (
660
+ F.one_hot(masks["nuclei_binary_map"], num_classes=2)
661
+ ).type(
662
+ torch.float32
663
+ ) # background, nuclei #[64, 256,256,2]
664
+ nuclei_type_maps = torch.squeeze(masks["nuclei_type_map"]).type(torch.int64) #[64,256,256]
665
+ gt_nuclei_type_maps_onehot = F.one_hot(
666
+ nuclei_type_maps, num_classes=self.num_classes
667
+ ).type(
668
+ torch.float32
669
+ ) # background + nuclei types [64, 256, 256, 6]
670
+
671
+ # assemble ground truth dictionary
672
+ gt = {
673
+ "nuclei_type_map": gt_nuclei_type_maps_onehot.permute(0, 3, 1, 2).to(
674
+ self.device
675
+ ), # shape: (batch_size, H, W, num_nuclei_classes) #[64,256,256,6] ->[64,6,256,256]
676
+ "nuclei_binary_map": gt_nuclei_binary_map_onehot.permute(0, 3, 1, 2).to(
677
+ self.device
678
+ ), # shape: (batch_size, H, W, 2) #[64,256,256,2] ->[64,2,256,256]
679
+ "hv_map": masks["hv_map"].to(self.device), # shape: (batch_size, H, W, 2)原来的是错的 [64, 2, 256, 256]
680
+ "instance_map": masks["instance_map"].to(
681
+ self.device
682
+ ), # shape: (batch_size, H, W) -> each instance has one integer (64,256,256)
683
+ "instance_types_nuclei": (
684
+ gt_nuclei_type_maps_onehot * masks["instance_map"][..., None]
685
+ )
686
+ .permute(0, 3, 1, 2)
687
+ .to(
688
+ self.device
689
+ ), # shape: (batch_size, num_nuclei_classes, H, W) -> instance has one integer, for each nuclei class (64,256,256,6)
690
+ "tissue_types": torch.Tensor(
691
+ [self.dataset_config["tissue_types"][t] for t in tissue_types]
692
+ )
693
+ .type(torch.LongTensor)
694
+ .to(self.device), # shape: batch_size 64
695
+ }
696
+ gt["instance_types"] = calculate_instances(
697
+ gt["nuclei_type_map"], gt["instance_map"]
698
+ )
699
+ gt = DataclassHVStorage(**gt, batch_size=gt["tissue_types"].shape[0])
700
+ return gt
701
+
702
+ def calculate_step_metric(
703
+ self,
704
+ predictions: DataclassHVStorage,
705
+ gt: DataclassHVStorage,
706
+ image_names: List[str],
707
+ ) -> Tuple[dict, list]:
708
+ """Calculate the metrics for the validation step
709
+
710
+ Args:
711
+ predictions (DataclassHVStorage): Processed network output
712
+ gt (DataclassHVStorage): Ground truth values
713
+ image_names (list(str)): List with image names
714
+
715
+ Returns:
716
+ Tuple[dict, list]:
717
+ * dict: Dictionary with metrics. Structure not fixed yet
718
+ * list with cell_dice, cell_jaccard and pq for each image
719
+ """
720
+ predictions = predictions.get_dict()
721
+ gt = gt.get_dict()
722
+
723
+ # preparation and device movement
724
+ predictions["tissue_types_classes"] = F.softmax(
725
+ predictions["tissue_types"], dim=-1
726
+ )
727
+ pred_tissue = (
728
+ torch.argmax(predictions["tissue_types_classes"], dim=-1)
729
+ .detach()
730
+ .cpu()
731
+ .numpy()
732
+ .astype(np.uint8)
733
+ )
734
+ predictions["instance_map"] = predictions["instance_map"].detach().cpu()
735
+ predictions["instance_types_nuclei"] = (
736
+ predictions["instance_types_nuclei"].detach().cpu().numpy().astype("int32")
737
+ ) # shape: (batch_size, num_nuclei_classes, H, W) [64,256,256,6]
738
+ instance_maps_gt = gt["instance_map"].detach().cpu() #[64,256,256]
739
+ gt["tissue_types"] = gt["tissue_types"].detach().cpu().numpy().astype(np.uint8)
740
+ gt["nuclei_binary_map"] = torch.argmax(gt["nuclei_binary_map"], dim=1).type(
741
+ torch.uint8
742
+ )
743
+ gt["instance_types_nuclei"] = (
744
+ gt["instance_types_nuclei"].detach().cpu().numpy().astype("int32")
745
+ ) # shape: (batch_size, num_nuclei_classes, H, W) [64,6,256,256] -- note: this shape differs from the corresponding predictions tensor above
746
+
747
+ # segmentation scores
748
+ binary_dice_scores = [] # binary dice scores per image
749
+ binary_jaccard_scores = [] # binary jaccard scores per image
750
+ pq_scores = [] # pq-scores per image
751
+ dq_scores = [] # dq-scores per image
752
+ sq_scores = [] # sq_scores per image
753
+ cell_type_pq_scores = [] # pq-scores per cell type and image
754
+ cell_type_dq_scores = [] # dq-scores per cell type and image
755
+ cell_type_sq_scores = [] # sq-scores per cell type and image
756
+ scores = [] # all scores in one list
757
+
758
+ # detection scores
759
+ paired_all = [] # unique matched index pair
760
+ unpaired_true_all = (
761
+ []
762
+ ) # the index must exist in `true_inst_type_all` and unique
763
+ unpaired_pred_all = (
764
+ []
765
+ ) # the index must exist in `pred_inst_type_all` and unique
766
+ true_inst_type_all = [] # each index is 1 independent data point
767
+ pred_inst_type_all = [] # each index is 1 independent data point
768
+
769
+ # for detections scores
770
+ true_idx_offset = 0
771
+ pred_idx_offset = 0
772
+
773
+ for i in range(len(pred_tissue)):
774
+ # binary dice score: Score for cell detection per image, without background
775
+ pred_binary_map = torch.argmax(predictions["nuclei_binary_map"][i], dim=0)
776
+ target_binary_map = gt["nuclei_binary_map"][i]
777
+ cell_dice = (
778
+ dice(preds=pred_binary_map, target=target_binary_map, ignore_index=0)
779
+ .detach()
780
+ .cpu()
781
+ )
782
+ binary_dice_scores.append(float(cell_dice))
783
+
784
+ # binary aji
785
+ cell_jaccard = (
786
+ binary_jaccard_index(
787
+ preds=pred_binary_map,
788
+ target=target_binary_map,
789
+ )
790
+ .detach()
791
+ .cpu()
792
+ )
793
+ binary_jaccard_scores.append(float(cell_jaccard))
794
+
795
+ # pq values
796
+ if len(np.unique(instance_maps_gt[i])) == 1:
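+ # image without any GT instance: PQ is undefined -> store NaN
+ # (np.nanmean over the dataset skips these entries)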
797
+ dq, sq, pq = np.nan, np.nan, np.nan
798
+ else:
799
+ remapped_instance_pred = binarize(
800
+ predictions["instance_types_nuclei"][i][1:].transpose(1, 2, 0)
801
+ ) # instance map of shape (H, W), built from the per-type channels (background channel dropped)
802
+ remapped_gt = remap_label(instance_maps_gt[i]) #(256,256)
803
+ # remapped_instance_pred = binarize(predictions["instance_types_nuclei"][i].transpose(2,1,0)[1:]) #[64,256,256,6]
804
+
805
+ [dq, sq, pq], _ = get_fast_pq(
806
+ true=remapped_gt, pred=remapped_instance_pred
807
+ ) # true is the GT instance map and pred the predicted instance map; both must have the same shape, here (256, 256)
808
+ pq_scores.append(pq)
809
+ dq_scores.append(dq)
810
+ sq_scores.append(sq)
811
+ scores.append(
812
+ [
813
+ cell_dice.detach().cpu().numpy(),
814
+ cell_jaccard.detach().cpu().numpy(),
815
+ pq,
816
+ ]
817
+ )
818
+
819
+ # pq values per class (with class 0 being background -> should be skipped in the future)
820
+ nuclei_type_pq = []
821
+ nuclei_type_dq = []
822
+ nuclei_type_sq = []
823
+ for j in range(0, self.num_classes):
824
+ pred_nuclei_instance_class = remap_label(
825
+ predictions["instance_types_nuclei"][i][j, ...]
826
+ )
827
+ target_nuclei_instance_class = remap_label(
828
+ gt["instance_types_nuclei"][i][j, ...]
829
+ )
830
+
831
+ # if ground truth is empty, skip from calculation
832
+ if len(np.unique(target_nuclei_instance_class)) == 1:
833
+ pq_tmp = np.nan
834
+ dq_tmp = np.nan
835
+ sq_tmp = np.nan
836
+ else:
837
+ [dq_tmp, sq_tmp, pq_tmp], _ = get_fast_pq(
838
+ pred_nuclei_instance_class,
839
+ target_nuclei_instance_class,
840
+ match_iou=0.5,
841
+ )
842
+ nuclei_type_pq.append(pq_tmp)
843
+ nuclei_type_dq.append(dq_tmp)
844
+ nuclei_type_sq.append(sq_tmp)
845
+
846
+ # detection scores
847
+ true_centroids = np.array(
848
+ [v["centroid"] for k, v in gt["instance_types"][i].items()]
849
+ )
850
+ true_instance_type = np.array(
851
+ [v["type"] for k, v in gt["instance_types"][i].items()]
852
+ )
853
+ pred_centroids = np.array(
854
+ [v["centroid"] for k, v in predictions["instance_types"][i].items()]
855
+ )
856
+ pred_instance_type = np.array(
857
+ [v["type"] for k, v in predictions["instance_types"][i].items()]
858
+ )
859
+
860
+ if true_centroids.shape[0] == 0:
861
+ true_centroids = np.array([[0, 0]])
862
+ true_instance_type = np.array([0])
863
+ if pred_centroids.shape[0] == 0:
864
+ pred_centroids = np.array([[0, 0]])
865
+ pred_instance_type = np.array([0])
866
+ if self.magnification == 40:
867
+ pairing_radius = 12
868
+ else:
869
+ pairing_radius = 6
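+ # centroid pairing radius in pixels, scaled with the magnification
+ # (12 px at 40x corresponds to 6 px at 20x)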
870
+ paired, unpaired_true, unpaired_pred = pair_coordinates(
871
+ true_centroids, pred_centroids, pairing_radius
872
+ )
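+ # shift the per-image indices into a dataset-global index space so that the
+ # paired/unpaired lists of all images can be concatenated for detection F1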
873
+ true_idx_offset = (
874
+ true_idx_offset + true_inst_type_all[-1].shape[0] if i != 0 else 0
875
+ )
876
+ pred_idx_offset = (
877
+ pred_idx_offset + pred_inst_type_all[-1].shape[0] if i != 0 else 0
878
+ )
879
+ true_inst_type_all.append(true_instance_type)
880
+ pred_inst_type_all.append(pred_instance_type)
881
+
882
+ # increment the pairing index statistic
883
+ if paired.shape[0] != 0: # ! sanity
884
+ paired[:, 0] += true_idx_offset
885
+ paired[:, 1] += pred_idx_offset
886
+ paired_all.append(paired)
887
+
888
+ unpaired_true += true_idx_offset
889
+ unpaired_pred += pred_idx_offset
890
+ unpaired_true_all.append(unpaired_true)
891
+ unpaired_pred_all.append(unpaired_pred)
892
+
893
+ cell_type_pq_scores.append(nuclei_type_pq)
894
+ cell_type_dq_scores.append(nuclei_type_dq)
895
+ cell_type_sq_scores.append(nuclei_type_sq)
896
+
897
+ paired_all = np.concatenate(paired_all, axis=0)
898
+ unpaired_true_all = np.concatenate(unpaired_true_all, axis=0)
899
+ unpaired_pred_all = np.concatenate(unpaired_pred_all, axis=0)
900
+ true_inst_type_all = np.concatenate(true_inst_type_all, axis=0)
901
+ pred_inst_type_all = np.concatenate(pred_inst_type_all, axis=0)
902
+
903
+ batch_metrics = {
904
+ "image_names": image_names,
905
+ "binary_dice_scores": binary_dice_scores,
906
+ "binary_jaccard_scores": binary_jaccard_scores,
907
+ "pq_scores": pq_scores,
908
+ "dq_scores": dq_scores,
909
+ "sq_scores": sq_scores,
910
+ "cell_type_pq_scores": cell_type_pq_scores,
911
+ "cell_type_dq_scores": cell_type_dq_scores,
912
+ "cell_type_sq_scores": cell_type_sq_scores,
913
+ "tissue_pred": pred_tissue,
914
+ "tissue_gt": gt["tissue_types"],
915
+ "paired_all": paired_all,
916
+ "unpaired_true_all": unpaired_true_all,
917
+ "unpaired_pred_all": unpaired_pred_all,
918
+ "true_inst_type_all": true_inst_type_all,
919
+ "pred_inst_type_all": pred_inst_type_all,
920
+ }
921
+
922
+ return batch_metrics, scores
923
+
924
+ def plot_results(
925
+ self,
926
+ imgs: Union[torch.Tensor, np.ndarray],
927
+ predictions: dict,
928
+ num_nuclei_classes: int,
929
+ outdir: Union[Path, str],
930
+ ) -> None:
931
+ # TODO: Adapt docstring and function; ground_truth, img_names and scores documented below are not parameters of this single-image variant, and the shapes do not yet match ours
932
+ """Generate example plot with image, binary_pred, hv-map and instance map from prediction and ground-truth
933
+
934
+ Args:
935
+ imgs (Union[torch.Tensor, np.ndarray]): Images to process, a random number (num_images) is selected from this stack
936
+ Shape: (batch_size, 3, H', W')
937
+ predictions (dict): Predictions of models. Keys:
938
+ "nuclei_type_map": Shape: (batch_size, H', W', num_nuclei)
939
+ "nuclei_binary_map": Shape: (batch_size, H', W', 2)
940
+ "hv_map": Shape: (batch_size, H', W', 2)
941
+ "instance_map": Shape: (batch_size, H', W')
942
+ ground_truth (dict): Ground truth values. Keys:
943
+ "nuclei_type_map": Shape: (batch_size, H', W', num_nuclei)
944
+ "nuclei_binary_map": Shape: (batch_size, H', W', 2)
945
+ "hv_map": Shape: (batch_size, H', W', 2)
946
+ "instance_map": Shape: (batch_size, H', W')
947
+ img_names (List): Names of images as list
948
+ num_nuclei_classes (int): Number of total nuclei classes including background
949
+ outdir (Union[Path, str]): Output directory where images should be stored
950
+ scores (List[List[float]], optional): List with scores for each image.
951
+ Each list entry is a list with 3 scores: Dice, Jaccard and bPQ for the image.
952
+ Defaults to None.
953
+ """
954
+ outdir = Path(outdir)
955
+ outdir.mkdir(exist_ok=True, parents=True)
956
+
957
+ # permute for gt and predictions
958
+ predictions.hv_map = predictions.hv_map.permute(0, 2, 3, 1)
959
+ predictions.nuclei_binary_map = predictions.nuclei_binary_map.permute(0, 2, 3, 1)
960
+ predictions.nuclei_type_map = predictions.nuclei_type_map.permute(0, 2, 3, 1)
961
+
962
+ h = predictions.hv_map.shape[1]
963
+ w = predictions.hv_map.shape[2]
964
+
965
+ # convert to rgb and crop to selection
966
+ sample_images = (
967
+ imgs.permute(0, 2, 3, 1).contiguous().cpu().numpy()
968
+ ) # convert to rgb
969
+ sample_images = cropping_center(sample_images, (h, w), True)
970
+
971
+ pred_sample_binary_map = (
972
+ predictions.nuclei_binary_map[:, :, :, 1].detach().cpu().numpy()
973
+ )
974
+ pred_sample_hv_map = predictions.hv_map.detach().cpu().numpy()
975
+ pred_sample_instance_maps = predictions.instance_map.detach().cpu().numpy()
976
+ pred_sample_type_maps = (
977
+ torch.argmax(predictions.nuclei_type_map, dim=-1).detach().cpu().numpy()
978
+ )
979
+
980
+ # create colormaps
981
+ hv_cmap = plt.get_cmap("jet")
982
+ binary_cmap = plt.get_cmap("jet")
983
+ instance_map = plt.get_cmap("viridis")
984
+ cell_colors = ["#ffffff", "#ff0000", "#00ff00", "#1e00ff", "#feff00", "#ffbf00"]
985
+
986
+ # invert the normalization of the sample images
987
+ transform_settings = self.run_conf["transformations"]
988
+ if "normalize" in transform_settings:
989
+ mean = transform_settings["normalize"].get("mean", (0.5, 0.5, 0.5))
990
+ std = transform_settings["normalize"].get("std", (0.5, 0.5, 0.5))
991
+ else:
992
+ mean = (0.5, 0.5, 0.5)
993
+ std = (0.5, 0.5, 0.5)
994
+ inv_normalize = transforms.Normalize(
995
+ mean=[-mean[0] / std[0], -mean[1] / std[1], -mean[2] / std[2]],
996
+ std=[1 / std[0], 1 / std[1], 1 / std[2]],
997
+ )
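+ # Normalize(mean=m, std=s) maps x to (x - m) / s; its inverse is therefore
+ # Normalize(mean=-m/s, std=1/s), which maps y back to y * s + m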
998
+ inv_samples = inv_normalize(torch.tensor(sample_images).permute(0, 3, 1, 2))
999
+ sample_images = inv_samples.permute(0, 2, 3, 1).detach().cpu().numpy()
1000
+
1001
+ for i in range(len(imgs)):
1002
+ fig, axs = plt.subplots(figsize=(6, 2), dpi=300)
1003
+ placeholder = np.zeros((h, 7 * w, 3))
1004
+ # orig image
1005
+ placeholder[:h, :w, :3] = sample_images[i]
1006
+ # binary prediction
1007
+ placeholder[: h, w : 2 * w, :3] = rgba2rgb(
1008
+ binary_cmap(pred_sample_binary_map[i])
1009
+ ) # *255?
1010
+ # hv maps
1011
+ placeholder[: h, 2 * w : 3 * w, :3] = rgba2rgb(
1012
+ hv_cmap((pred_sample_hv_map[i, :, :, 0] + 1) / 2)
1013
+ )
1014
+ placeholder[: h, 3 * w : 4 * w, :3] = rgba2rgb(
1015
+ hv_cmap((pred_sample_hv_map[i, :, :, 1] + 1) / 2)
1016
+ )
1017
+ # instance_predictions
1018
+ placeholder[: h, 4 * w : 5 * w, :3] = rgba2rgb(
1019
+ instance_map(
1020
+ (
1021
+ pred_sample_instance_maps[i]
1022
+ - np.min(pred_sample_instance_maps[i])
1023
+ )
1024
+ / (
1025
+ np.max(pred_sample_instance_maps[i])
1026
+ - np.min(pred_sample_instance_maps[i]) + 1e-10
1027
+ )
1028
+ )
1029
+ )
1030
+ # type_predictions
1031
+ placeholder[: h, 5 * w : 6 * w, :3] = rgba2rgb(
1032
+ binary_cmap(pred_sample_type_maps[i] / num_nuclei_classes)
1033
+ )
1034
+
1035
+ # contours
1036
+ # pred
1037
+ pred_contours_polygon = [
1038
+ v["contour"] for v in predictions.instance_types[i].values()
1039
+ ]
1040
+ pred_contours_polygon = [
1041
+ list(zip(poly[:, 0], poly[:, 1])) for poly in pred_contours_polygon
1042
+ ]
1043
+ pred_contour_colors_polygon = [
1044
+ cell_colors[v["type"]]
1045
+ for v in predictions.instance_types[i].values()
1046
+ ]
1047
+ pred_cell_image = Image.fromarray(
1048
+ (sample_images[i] * 255).astype(np.uint8)
1049
+ ).convert("RGB")
1050
+ pred_drawing = ImageDraw.Draw(pred_cell_image)
1051
+ add_patch = lambda poly, color: pred_drawing.polygon(
1052
+ poly, outline=color, width=2
1053
+ )
1054
+ [
1055
+ add_patch(poly, c)
1056
+ for poly, c in zip(pred_contours_polygon, pred_contour_colors_polygon)
1057
+ ]
1058
+ pred_cell_image.save("raw_pred.png")
1059
+ placeholder[: h, 6 * w : 7 * w, :3] = (
1060
+ np.asarray(pred_cell_image) / 255
1061
+ )
1062
+
1063
+ # plotting
1064
+ axs.imshow(placeholder)
1065
+ axs.set_xticks(np.arange(w / 2, 7 * w, w))
1066
+ axs.set_xticklabels(
1067
+ [
1068
+ "Image",
1069
+ "Binary-Cells",
1070
+ "HV-Map-0",
1071
+ "HV-Map-1",
1072
+ "Instances",
1073
+ "Nuclei-Pred",
1074
+ "Countours",
1075
+ ],
1076
+ fontsize=6,
1077
+ )
1078
+ axs.xaxis.tick_top()
1079
+
1080
+ axs.set_yticks([h / 2])
1081
+ axs.set_yticklabels(["Pred."], fontsize=6)
1082
+ axs.tick_params(axis="both", which="both", length=0)
1083
+ grid_x = np.arange(w, 6 * w, w)
1084
+ grid_y = np.arange(h, 2 * h, h)
1085
+
1086
+ for x_seg in grid_x:
1087
+ axs.axvline(x_seg, color="black")
1088
+ for y_seg in grid_y:
1089
+ axs.axhline(y_seg, color="black")
1090
+
1091
+ fig.suptitle(f"Predictions for input image")
1092
+ fig.tight_layout()
1093
+ fig.savefig("pred_img.png")
1094
+ plt.close()
1095
+
1096
+
1097
+ # CLI
1098
+ class InferenceCellViTParser:
1099
+ def __init__(self) -> None:
1100
+ parser = argparse.ArgumentParser(
1101
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter,
1102
+ description="Perform CellViT inference for given run-directory with model checkpoints and logs",
1103
+ )
1104
+
1105
+ parser.add_argument(
1106
+ "--run_dir",
1107
+ type=str,
1108
+ help="Logging directory of a training run.",
1109
+ default="./",
1110
+ )
1111
+ parser.add_argument(
1112
+ "--checkpoint_name",
1113
+ type=str,
1114
+ help="Name of the checkpoint. Either select 'best_checkpoint.pth',"
1115
+ "'latest_checkpoint.pth' or one of the intermediate checkpoint names,"
1116
+ "e.g., 'checkpoint_100.pth'",
1117
+ default="model_best.pth",
1118
+ )
1119
+ parser.add_argument(
1120
+ "--gpu", type=int, help="Cuda-GPU ID for inference", default=0
1121
+ )
1122
+ parser.add_argument(
1123
+ "--magnification",
1124
+ type=int,
1125
+ help="Dataset Magnification. Either 20 or 40. Default: 40",
1126
+ choices=[20, 40],
1127
+ default=40,
1128
+ )
1129
+ parser.add_argument(
1130
+ "--plots",
1131
+ action="store_true",
1132
+ help="Generate inference plots in run_dir",
1133
+ default=True,
1134
+ )
1135
+
1136
+ self.parser = parser
1137
+
1138
+ def parse_arguments(self) -> dict:
1139
+ opt = self.parser.parse_args()
1140
+ return vars(opt)
1141
+
1142
+
1143
+ if __name__ == "__main__":
1144
+ configuration_parser = InferenceCellViTParser()
1145
+ configuration = configuration_parser.parse_arguments()
1146
+ print(configuration)
1147
+ inf = InferenceCellViT(
1148
+ run_dir=configuration["run_dir"],
1149
+ checkpoint_name=configuration["checkpoint_name"],
1150
+ gpu=configuration["gpu"],
1151
+ magnification=configuration["magnification"],
1152
+ )
1153
+ model, dataloader, conf = inf.setup_patch_inference()
1154
+
1155
+ inf.run_patch_inference(
1156
+ model, dataloader, conf, generate_plots=configuration["plots"]
1157
+ )
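A typical invocation of this inference script, assuming a finished training run directory (the run path below is illustrative, not from the repo; all flags are defined in the parser above):

    python cell_segmentation/inference/inference_cellvit_experiment_pannuke.py \
        --run_dir ./runs/pannuke_fold0 \
        --checkpoint_name model_best.pth \
        --gpu 0 --magnification 40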
cell_segmentation/run_cellvit.py ADDED
@@ -0,0 +1,103 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Running an Experiment Using CellViT cell segmentation network
3
+ #
4
+ # @ Fabian Hörst, [email protected]
5
+ # Institute for Artificial Intelligence in Medicine,
6
+ # University Medicine Essen
7
+
8
+ import inspect
9
+ import os
10
+ import sys
11
+
12
+ currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
13
+ parentdir = os.path.dirname(currentdir)
14
+ sys.path.insert(0, parentdir)
15
+
16
+ import wandb
17
+
18
+ from base_ml.base_cli import ExperimentBaseParser
19
+ from cell_segmentation.experiments.experiment_cellvit_pannuke import (
20
+ ExperimentCellVitPanNuke,
21
+ )
22
+ from cell_segmentation.experiments.experiment_cellvit_conic import (
23
+ ExperimentCellViTCoNic,
24
+ )
25
+
26
+ from cell_segmentation.inference.inference_cellvit_experiment_pannuke import (
27
+ InferenceCellViT,
28
+ )
29
+
30
+ if __name__ == "__main__":
31
+ # Parse arguments
32
+ configuration_parser = ExperimentBaseParser()
33
+ configuration = configuration_parser.parse_arguments()
34
+
35
+ if configuration["data"]["dataset"].lower() == "pannuke":
36
+ experiment_class = ExperimentCellVitPanNuke
37
+ elif configuration["data"]["dataset"].lower() == "conic":
38
+ experiment_class = ExperimentCellViTCoNic
+ else:
+ # guard: without this, experiment_class would be undefined for unknown datasets
+ raise NotImplementedError(f"Unknown dataset: {configuration['data']['dataset']}")
39
+ # Setup experiment
40
+ if "checkpoint" in configuration:
41
+ # continue checkpoint
42
+ experiment = experiment_class(
43
+ default_conf=configuration, checkpoint=configuration["checkpoint"]
44
+ )
45
+ outdir = experiment.run_experiment()
46
+ inference = InferenceCellViT(
47
+ run_dir=outdir,
48
+ gpu=configuration["gpu"],
49
+ checkpoint_name=configuration["eval_checkpoint"],
50
+ magnification=configuration["data"].get("magnification", 40),
51
+ )
52
+ (
53
+ trained_model,
54
+ inference_dataloader,
55
+ dataset_config,
56
+ ) = inference.setup_patch_inference()
57
+ inference.run_patch_inference(
58
+ trained_model, inference_dataloader, dataset_config, generate_plots=False
59
+ )
60
+ else:
61
+ experiment = experiment_class(default_conf=configuration)
62
+ if configuration["run_sweep"] is True:
63
+ # run new sweep
64
+ sweep_configuration = experiment_class.extract_sweep_arguments(
65
+ configuration
66
+ )
67
+ os.environ["WANDB_DIR"] = os.path.abspath(
68
+ configuration["logging"]["wandb_dir"]
69
+ )
70
+ sweep_id = wandb.sweep(
71
+ sweep=sweep_configuration, project=configuration["logging"]["project"]
72
+ )
73
+ wandb.agent(sweep_id=sweep_id, function=experiment.run_experiment)
74
+ elif "agent" in configuration and configuration["agent"] is not None:
75
+ # attach an agent to an already existing sweep; run_sweep is forced to true here
76
+ configuration["run_sweep"] = True
77
+ os.environ["WANDB_DIR"] = os.path.abspath(
78
+ configuration["logging"]["wandb_dir"]
79
+ )
80
+ wandb.agent(
81
+ sweep_id=configuration["agent"], function=experiment.run_experiment
82
+ )
83
+ else:
84
+ # regular run without a sweep
85
+ outdir = experiment.run_experiment()
86
+ inference = InferenceCellViT(
87
+ run_dir=outdir,
88
+ gpu=configuration["gpu"],
89
+ checkpoint_name=configuration["eval_checkpoint"],
90
+ magnification=configuration["data"].get("magnification", 40),
91
+ )
92
+ (
93
+ trained_model,
94
+ inference_dataloader,
95
+ dataset_config,
96
+ ) = inference.setup_patch_inference()
97
+ inference.run_patch_inference(
98
+ trained_model,
99
+ inference_dataloader,
100
+ dataset_config,
101
+ generate_plots=False,
102
+ )
103
+ wandb.finish()
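A sketch of how this entry point is typically launched. The --config flag is an assumption about ExperimentBaseParser (base_ml/base_cli.py, not shown in this section), and the config path is illustrative:

    python cell_segmentation/run_cellvit.py --config ./configs/pannuke_example.yaml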
cell_segmentation/trainer/__init__.py ADDED
@@ -0,0 +1,6 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Trainer for each network type
3
+ #
4
+ # @ Fabian Hörst, [email protected]
5
+ # Institute for Artifical Intelligence in Medicine,
6
+ # University Medicine Essen
cell_segmentation/trainer/trainer_cellvit.py ADDED
@@ -0,0 +1,1092 @@
1
+ # -*- coding: utf-8 -*-
2
+ # CellViT Trainer Class
3
+ #
4
+ # @ Fabian Hörst, [email protected]
5
+ # Institute for Artificial Intelligence in Medicine,
6
+ # University Medicine Essen
7
+
8
+ import logging
9
+ from pathlib import Path
10
+ from typing import Tuple, Union
11
+
12
+ import numpy as np
13
+ import torch
14
+ import torch.nn.functional as F
15
+ import tqdm
16
+ import math
17
+ import csv
18
+
19
+ # import wandb
20
+ from matplotlib import pyplot as plt
21
+ from skimage.color import rgba2rgb
22
+ from sklearn.metrics import accuracy_score
23
+ from torch.optim import Optimizer
24
+ from torch.optim.lr_scheduler import _LRScheduler
25
+ from torch.utils.data import DataLoader
26
+ from torchmetrics.functional import dice
27
+ from torchmetrics.functional.classification import binary_jaccard_index
28
+
29
+ from base_ml.base_early_stopping import EarlyStopping
30
+ from base_ml.base_trainer import BaseTrainer
31
+ from models.segmentation.cell_segmentation.cellvit import DataclassHVStorage
32
+ from cell_segmentation.utils.metrics import get_fast_pq, remap_label
33
+ from cell_segmentation.utils.tools import cropping_center
34
+ from models.segmentation.cell_segmentation.cellvit import CellViT
35
+ from utils.tools import AverageMeter
36
+ from timm.utils import ModelEma
37
+ from torch.cuda.amp import GradScaler, autocast
38
+
39
+ class CellViTTrainer(BaseTrainer):
40
+ """CellViT trainer class
41
+
42
+ Args:
43
+ model (CellViT): CellViT model that should be trained
44
+ loss_fn_dict (dict): Dictionary with loss functions for each branch with a dictionary of loss functions.
45
+ Name of branch as top-level key, followed by a dictionary with loss name, loss fn and weighting factor
46
+ Example:
47
+ {
48
+ "nuclei_binary_map": {"bce": {loss_fn(Callable), weight_factor(float)}, "dice": {loss_fn(Callable), weight_factor(float)}},
49
+ "hv_map": {"bce": {loss_fn(Callable), weight_factor(float)}, "dice": {loss_fn(Callable), weight_factor(float)}},
50
+ "nuclei_type_map": {"bce": {loss_fn(Callable), weight_factor(float)}, "dice": {loss_fn(Callable), weight_factor(float)}}
51
+ "tissue_types": {"ce": {loss_fn(Callable), weight_factor(float)}}
52
+ }
53
+ Required Keys are:
54
+ * nuclei_binary_map
55
+ * hv_map
56
+ * nuclei_type_map
57
+ * tissue types
58
+ optimizer (Optimizer): Optimizer
59
+ scheduler (_LRScheduler): Learning rate scheduler
60
+ device (str): Cuda device to use, e.g., cuda:0.
61
+ logger (logging.Logger): Logger module
62
+ logdir (Union[Path, str]): Logging directory
63
+ num_classes (int): Number of nuclei classes
64
+ dataset_config (dict): Dataset configuration. Required Keys are:
65
+ * "tissue_types": describing the present tissue types with corresponding integer
66
+ * "nuclei_types": describing the present nuclei types with corresponding integer
67
+ experiment_config (dict): Configuration of this experiment
68
+ early_stopping (EarlyStopping, optional): Early Stopping Class. Defaults to None.
69
+ log_images (bool, optional): If images should be logged to WandB. Defaults to False.
70
+ magnification (int, optional): Image magnification. Please select either 40 or 20. Defaults to 40.
71
+ mixed_precision (bool, optional): If mixed-precision should be used. Defaults to False.
72
+ """
73
+
74
+ def __init__(
75
+ self,
76
+ model: CellViT,
77
+ loss_fn_dict: dict,
78
+ optimizer: Optimizer,
79
+ scheduler: _LRScheduler,
80
+ device: str,
81
+ logger: logging.Logger,
82
+ logdir: Union[Path, str],
83
+ num_classes: int,
84
+ dataset_config: dict,
85
+ experiment_config: dict,
86
+ early_stopping: EarlyStopping = None,
87
+ log_images: bool = False,
88
+ magnification: int = 40,
89
+ mixed_precision: bool = False,
90
+ #model_ema : bool = True,
91
+ ):
92
+ super().__init__(
93
+ model=model,
94
+ loss_fn=None,
95
+ optimizer=optimizer,
96
+ scheduler=scheduler,
97
+ device=device,
98
+ logger=logger,
99
+ logdir=logdir,
100
+ experiment_config=experiment_config,
101
+ early_stopping=early_stopping,
102
+ accum_iter=1,
103
+ log_images=log_images,
104
+ mixed_precision=mixed_precision,
105
+
106
+ )
107
+ self.loss_fn_dict = loss_fn_dict
108
+ self.num_classes = num_classes
109
+ self.dataset_config = dataset_config
110
+ self.tissue_types = dataset_config["tissue_types"]
111
+ self.reverse_tissue_types = {v: k for k, v in self.tissue_types.items()}
112
+ self.nuclei_types = dataset_config["nuclei_types"]
113
+ self.magnification = magnification
114
+ #self.model_ema = model_ema
115
+
116
+ # setup logging objects
117
+ self.loss_avg_tracker = {"Total_Loss": AverageMeter("Total_Loss", ":.4f")}
118
+ for branch, loss_fns in self.loss_fn_dict.items():
119
+ for loss_name in loss_fns:
120
+ self.loss_avg_tracker[f"{branch}_{loss_name}"] = AverageMeter(
121
+ f"{branch}_{loss_name}", ":.4f"
122
+ )
123
+
124
+ self.batch_avg_tissue_acc = AverageMeter("Batch_avg_tissue_ACC", ":4.f")
125
+
126
+ def train_epoch(
127
+ self, epoch: int, train_dataloader: DataLoader, unfreeze_epoch: int = 50
128
+ ) -> Tuple[dict, dict]:
129
+ """Training logic for a training epoch
130
+
131
+ Args:
132
+ epoch (int): Current epoch number
133
+ train_dataloader (DataLoader): Train dataloader
134
+ unfreeze_epoch (int, optional): Epoch from which the encoder is unfrozen. Defaults to 50.
135
+ Returns:
136
+ Tuple[dict, dict]: wandb logging dictionaries
137
+ * Scalar metrics
138
+ * Image metrics
139
+ """
140
+ self.model.train()
141
+ if epoch >= unfreeze_epoch:
142
+ self.model.unfreeze_encoder()
143
+
144
+
145
+ # if self.model_ema and epoch == 0:
146
+ # self.model_ema_instance = ModelEma(
147
+ # model=self.model,
148
+ # decay=0.9999,
149
+ # device='cuda',
150
+ # resume=''
151
+ # )
152
+
153
+ binary_dice_scores = []
154
+ binary_jaccard_scores = []
155
+ tissue_pred = []
156
+ tissue_gt = []
157
+ train_example_img = None
158
+
159
+ # reset metrics
160
+ self.loss_avg_tracker["Total_Loss"].reset()
161
+ for branch, loss_fns in self.loss_fn_dict.items():
162
+ for loss_name in loss_fns:
163
+ self.loss_avg_tracker[f"{branch}_{loss_name}"].reset()
164
+ self.batch_avg_tissue_acc.reset()
165
+
166
+ # randomly select a batch that should be displayed
167
+ if self.log_images:
168
+ select_example_image = int(torch.randint(0, len(train_dataloader), (1,)))
169
+ else:
170
+ select_example_image = None
171
+ train_loop = tqdm.tqdm(enumerate(train_dataloader), total=len(train_dataloader))
172
+
173
+ for batch_idx, batch in train_loop:
174
+ return_example_images = batch_idx == select_example_image
175
+ batch_metrics, example_img = self.train_step(
176
+ batch,
177
+ batch_idx,
178
+ len(train_dataloader),
179
+ return_example_images=return_example_images,
180
+ )
181
+ if example_img is not None:
182
+ train_example_img = example_img
183
+ binary_dice_scores = (
184
+ binary_dice_scores + batch_metrics["binary_dice_scores"]
185
+ )
186
+ binary_jaccard_scores = (
187
+ binary_jaccard_scores + batch_metrics["binary_jaccard_scores"]
188
+ )
189
+ tissue_pred.append(batch_metrics["tissue_pred"])
190
+ tissue_gt.append(batch_metrics["tissue_gt"])
191
+ train_loop.set_postfix(
192
+ {
193
+ "Loss": np.round(self.loss_avg_tracker["Total_Loss"].avg, 3),
194
+ "Dice": np.round(np.nanmean(binary_dice_scores), 3),
195
+ "Pred-Acc": np.round(self.batch_avg_tissue_acc.avg, 3),
196
+ }
197
+ )
198
+
199
+ # calculate global metrics
200
+ binary_dice_scores = np.array(binary_dice_scores)
201
+ binary_jaccard_scores = np.array(binary_jaccard_scores)
202
+ tissue_detection_accuracy = accuracy_score(
203
+ y_true=np.concatenate(tissue_gt), y_pred=np.concatenate(tissue_pred)
204
+ )
205
+
206
+ scalar_metrics = {
207
+ "Loss/Train": self.loss_avg_tracker["Total_Loss"].avg,
208
+ "Binary-Cell-Dice-Mean/Train": np.nanmean(binary_dice_scores),
209
+ "Binary-Cell-Jacard-Mean/Train": np.nanmean(binary_jaccard_scores),
210
+ "Tissue-Multiclass-Accuracy/Train": tissue_detection_accuracy,
211
+ }
212
+
213
+ for branch, loss_fns in self.loss_fn_dict.items():
214
+ for loss_name in loss_fns:
215
+ scalar_metrics[f"{branch}_{loss_name}/Train"] = self.loss_avg_tracker[
216
+ f"{branch}_{loss_name}"
217
+ ].avg
218
+
219
+
220
+ self.logger.info(
221
+ f"{'Training epoch stats:' : <25} "
222
+ f"Loss: {self.loss_avg_tracker['Total_Loss'].avg:.4f} - "
223
+ f"Binary-Cell-Dice: {np.nanmean(binary_dice_scores):.4f} - "
224
+ f"Binary-Cell-Jacard: {np.nanmean(binary_jaccard_scores):.4f} - "
225
+ f"Tissue-MC-Acc.: {tissue_detection_accuracy:.4f}"
226
+ )
227
+
228
+ image_metrics = {"Example-Predictions/Train": train_example_img}
229
+
230
+ return scalar_metrics, image_metrics
231
+
232
+ def train_step(
233
+ self,
234
+ batch: object,
235
+ batch_idx: int,
236
+ num_batches: int,
237
+ return_example_images: bool,
238
+ ) -> Tuple[dict, Union[plt.Figure, None]]:
239
+ """Training step
240
+
241
+ Args:
242
+ batch (object): Training batch, consisting of images ([0]), masks ([1]), tissue_types ([2]) and figure filenames ([3])
243
+ batch_idx (int): Batch index
244
+ num_batches (int): Total number of batches in epoch
245
+ return_example_images (bool): If an example prediction image should be returned
246
+
247
+ Returns:
248
+ Tuple[dict, Union[plt.Figure, None]]:
249
+ * Batch-Metrics: dictionary with the following keys:
250
+ * Example prediction image
251
+ """
252
+ # unpack batch
253
+ imgs = batch[0].to(self.device) # imgs shape: (batch_size, 3, H, W) (16,3,256,256)
254
+ masks = batch[
255
+ 1
256
+ ] # dict: keys: "instance_map", [16,256,256],"nuclei_map",[16,256,256], "nuclei_binary_map",[16,256,256], "hv_map"[16,2,256,256]
257
+ tissue_types = batch[2] # list[str]
258
+ # debug leftover: optionally re-create the GradScaler with a custom init scale
259
+ #scaler = GradScaler(init_scale=2.0)
260
+
261
+ if self.mixed_precision:
262
+ with torch.autocast(device_type="cuda", dtype=torch.float16):
263
+ #with torch.cuda.amp.autocast(False):
264
+ # make predictions
265
+ predictions_ = self.model.forward(imgs) #img.shape=(16,3,256,256) model.forward(imgs) 'tissue_types'(16,19),'nuclei_binary_map'(16,2,128,128),'hv_map'(16,2,128,128),'nuclei_type_map'(16,6,128,128)
266
+
267
+ # reshaping and postprocessing
268
+ predictions = self.unpack_predictions(predictions=predictions_)
269
+ gt = self.unpack_masks(masks=masks, tissue_types=tissue_types)
270
+
271
+ # calculate loss
272
+ total_loss = self.calculate_loss(predictions, gt)
273
+ # if torch.isnan(total_loss):
274
+ # print("nan in loss")
275
+ #if math.isnan(total_loss.item()):
276
+ #print("nan")
277
+ # import pdb; pdb.set_trace()
278
+
279
+ # backward pass
280
+ self.scaler.scale(total_loss).backward()
281
+ # optional: clip gradients by value
282
+ #torch.nn.utils.clip_grad_value_(self.model.parameters(), clip_value=1.0)
283
+ # if torch.any(torch.tensor([torch.any(torch.isnan(param.data)) for param in self.model.parameters()])):
284
+ # print("nan in model parameters")
285
+ if (
286
+ ((batch_idx + 1) % self.accum_iter == 0)
287
+ or ((batch_idx + 1) == num_batches)
288
+ or (self.accum_iter == 1)
289
+ ):
290
+ # self.scaler.unscale_(self.optimizer)
291
+ # torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1.0)
292
+ self.scaler.step(self.optimizer)
293
+ self.scaler.update()
294
+ # if self.model_ema:
295
+ # self.model_ema_instance.update(self.model)
296
+ self.optimizer.zero_grad(set_to_none=True)
297
+ self.model.zero_grad()
298
+ else:
299
+ predictions_ = self.model.forward(imgs)
300
+ predictions = self.unpack_predictions(predictions=predictions_)
301
+ gt = self.unpack_masks(masks=masks, tissue_types=tissue_types)
302
+
303
+ # calculate loss
304
+ total_loss = self.calculate_loss(predictions, gt)
305
+
306
+ total_loss.backward()
307
+ if (
308
+ ((batch_idx + 1) % self.accum_iter == 0)
309
+ or ((batch_idx + 1) == num_batches)
310
+ or (self.accum_iter == 1)
311
+ ):
312
+ self.optimizer.step()
313
+ # if self.model_ema:
314
+ # self.model_ema_instance.update(self.model)
315
+ self.optimizer.zero_grad(set_to_none=True)
316
+ self.model.zero_grad()
317
+ with torch.cuda.device(self.device):
318
+ torch.cuda.empty_cache()
319
+
320
+ batch_metrics = self.calculate_step_metric_train(predictions, gt)
321
+
322
+ if return_example_images:
323
+ return_example_images = self.generate_example_image(
324
+ imgs, predictions, gt, num_images=4, num_nuclei_classes=self.num_classes
325
+ )
326
+ else:
327
+ return_example_images = None
328
+
329
+ return batch_metrics, return_example_images
330
+
331
+ def validation_epoch(
332
+ self, epoch: int, val_dataloader: DataLoader
333
+ ) -> Tuple[dict, dict, float]:
334
+ """Validation logic for a validation epoch
335
+
336
+ Args:
337
+ epoch (int): Current epoch number
338
+ val_dataloader (DataLoader): Validation dataloader
339
+
340
+ Returns:
341
+ Tuple[dict, dict, float]: wandb logging dictionaries
342
+ * Scalar metrics
343
+ * Image metrics
344
+ * Early stopping metric
345
+ """
346
+ self.model.eval()
347
+
348
+ binary_dice_scores = []
349
+ binary_jaccard_scores = []
350
+ pq_scores = []
351
+ cell_type_pq_scores = []
352
+ tissue_pred = []
353
+ tissue_gt = []
354
+ val_example_img = None
355
+
356
+
357
+ # reset metrics
358
+ self.loss_avg_tracker["Total_Loss"].reset()
359
+ for branch, loss_fns in self.loss_fn_dict.items():
360
+ for loss_name in loss_fns:
361
+ self.loss_avg_tracker[f"{branch}_{loss_name}"].reset()
362
+ self.batch_avg_tissue_acc.reset()
363
+
364
+ # randomly select a batch that should be displayed
365
+ if self.log_images:
366
+ select_example_image = int(torch.randint(0, len(val_dataloader), (1,)))
367
+ else:
368
+ select_example_image = None
369
+
370
+ val_loop = tqdm.tqdm(enumerate(val_dataloader), total=len(val_dataloader))
371
+
372
+
373
+ with torch.no_grad():
374
+ for batch_idx, batch in val_loop:
375
+ return_example_images = batch_idx == select_example_image
376
+ batch_metrics, example_img= self.validation_step(
377
+ batch, batch_idx, return_example_images
378
+ )
379
+
380
+ # check whether the running loss average is NaN
381
+ # if np.isnan(self.loss_avg_tracker["Total_Loss"].avg):
382
+ # print("NaN loss for image:", batch_idx)
383
+
384
+
385
+ if example_img is not None:
386
+ val_example_img = example_img
387
+ binary_dice_scores = (
388
+ binary_dice_scores + batch_metrics["binary_dice_scores"]
389
+ )
390
+ binary_jaccard_scores = (
391
+ binary_jaccard_scores + batch_metrics["binary_jaccard_scores"]
392
+ )
393
+ pq_scores = pq_scores + batch_metrics["pq_scores"]
394
+ cell_type_pq_scores = (
395
+ cell_type_pq_scores + batch_metrics["cell_type_pq_scores"]
396
+ )
397
+ tissue_pred.append(batch_metrics["tissue_pred"])
398
+ tissue_gt.append(batch_metrics["tissue_gt"])
399
+ val_loop.set_postfix(
400
+ {
401
+ "Loss": np.round(self.loss_avg_tracker["Total_Loss"].avg, 3),
402
+ "Dice": np.round(np.nanmean(binary_dice_scores), 3),
403
+ "Pred-Acc": np.round(self.batch_avg_tissue_acc.avg, 3),
404
+ }
405
+ )
406
+ tissue_types_val = [
407
+ self.reverse_tissue_types[t].lower() for t in np.concatenate(tissue_gt)
408
+ ]
409
+
410
+ # calculate global metrics
411
+ binary_dice_scores = np.array(binary_dice_scores)
412
+ binary_jaccard_scores = np.array(binary_jaccard_scores)
413
+ pq_scores = np.array(pq_scores)
414
+ tissue_detection_accuracy = accuracy_score(
415
+ y_true=np.concatenate(tissue_gt), y_pred=np.concatenate(tissue_pred)
416
+ )
417
+
418
+ scalar_metrics = {
419
+ "Loss/Validation": self.loss_avg_tracker["Total_Loss"].avg,
420
+ "Binary-Cell-Dice-Mean/Validation": np.nanmean(binary_dice_scores),
421
+ "Binary-Cell-Jacard-Mean/Validation": np.nanmean(binary_jaccard_scores),
422
+ "Tissue-Multiclass-Accuracy/Validation": tissue_detection_accuracy,
423
+ "bPQ/Validation": np.nanmean(pq_scores),
424
+ "mPQ/Validation": np.nanmean(
425
+ [np.nanmean(pq) for pq in cell_type_pq_scores]
426
+ ),
427
+ }
428
+
429
+ for branch, loss_fns in self.loss_fn_dict.items():
430
+ for loss_name in loss_fns:
431
+ scalar_metrics[
432
+ f"{branch}_{loss_name}/Validation"
433
+ ] = self.loss_avg_tracker[f"{branch}_{loss_name}"].avg # the same tracker instances that are updated in the step methods
434
+
435
+ # calculate local metrics
436
+ # per tissue class
437
+ for tissue in self.tissue_types.keys():
438
+ tissue = tissue.lower()
439
+ tissue_ids = np.where(np.asarray(tissue_types_val) == tissue)
440
+ scalar_metrics[f"{tissue}-Dice/Validation"] = np.nanmean(
441
+ binary_dice_scores[tissue_ids]
442
+ )
443
+ scalar_metrics[f"{tissue}-Jaccard/Validation"] = np.nanmean(
444
+ binary_jaccard_scores[tissue_ids]
445
+ )
446
+ scalar_metrics[f"{tissue}-bPQ/Validation"] = np.nanmean(
447
+ pq_scores[tissue_ids]
448
+ )
449
+ scalar_metrics[f"{tissue}-mPQ/Validation"] = np.nanmean(
450
+ [np.nanmean(pq) for pq in np.array(cell_type_pq_scores)[tissue_ids]]
451
+ )
452
+
453
+ # calculate nuclei metrics
454
+ for nuc_name, nuc_type in self.nuclei_types.items():
455
+ if nuc_name.lower() == "background":
456
+ continue
457
+ scalar_metrics[f"{nuc_name}-PQ/Validation"] = np.nanmean(
458
+ [pq[nuc_type] for pq in cell_type_pq_scores]
459
+ )
460
+
461
+ self.logger.info(
462
+ f"{'Validation epoch stats:' : <25} "
463
+ f"Loss: {self.loss_avg_tracker['Total_Loss'].avg:.4f} - "
464
+ f"Binary-Cell-Dice: {np.nanmean(binary_dice_scores):.4f} - "
465
+ f"Binary-Cell-Jacard: {np.nanmean(binary_jaccard_scores):.4f} - "
466
+ f"bPQ-Score: {np.nanmean(pq_scores):.4f} - "
467
+ f"mPQ-Score: {scalar_metrics['mPQ/Validation']:.4f} - "
468
+ f"Tissue-MC-Acc.: {tissue_detection_accuracy:.4f}"
469
+ )
470
+
471
+ image_metrics = {"Example-Predictions/Validation": val_example_img}
472
+
473
+ return scalar_metrics, image_metrics, np.nanmean(pq_scores)
474
+
475
+ def validation_step(
476
+ self,
477
+ batch: object,
478
+ batch_idx: int,
479
+ return_example_images: bool,
480
+ ):
481
+ """Validation step
482
+
483
+ Args:
484
+ batch (object): Training batch, consisting of images ([0]), masks ([1]), tissue_types ([2]) and figure filenames ([3])
485
+ batch_idx (int): Batch index
486
+ return_example_images (bool): If an example prediction image should be returned
487
+
488
+ Returns:
489
+ Tuple[dict, Union[plt.Figure, None]]:
490
+ * Batch-Metrics: dictionary, structure not fixed yet
491
+ * Example prediction image
492
+ """
493
+ # unpack batch; for the tensor shapes, compare with the train_step method
494
+ imgs = batch[0].to(self.device)
495
+ masks = batch[1]
496
+ tissue_types = batch[2]
497
+ # nan_loss_images = []
498
+ # csv_file = "/data3/ziweicui/PanNuke/cellvit-png/fold1_nan_loss_images.csv"
499
+
500
+
501
+ self.model.zero_grad()
502
+ self.optimizer.zero_grad()
503
+ # with open(csv_file, 'a') as f:
504
+ # csv_write = csv.writer(f)
505
+ if self.mixed_precision:
506
+ with torch.autocast(device_type="cuda", dtype=torch.float16):
507
+ # make predictions
508
+ predictions_ = self.model.forward(imgs)
509
+ # reshaping and postprocessing
510
+ predictions = self.unpack_predictions(predictions=predictions_)
511
+ gt = self.unpack_masks(masks=masks, tissue_types=tissue_types)
512
+ # calculate loss
513
+ _ = self.calculate_loss(predictions, gt)
514
+ # check whether the loss is NaN (debug)
515
+ #loss_value = _.item()
516
+ # if math.isnan(loss_value):
517
+ # print("NaN loss for image:", batch[3])
518
+ #nan_loss_images.append(batch[3])
519
+
520
+
521
+ else:
522
+ predictions_ = self.model.forward(imgs)
523
+ # reshaping and postprocessing
524
+ predictions = self.unpack_predictions(predictions=predictions_)
525
+ gt = self.unpack_masks(masks=masks, tissue_types=tissue_types)
526
+ # calculate loss
527
+ _ = self.calculate_loss(predictions, gt)
528
+ # check whether the loss is NaN
529
+ loss_value = _.item()
530
+ if math.isnan(loss_value):
531
+ print("NaN loss for image:", batch[3])
532
+
533
+
534
+
535
+
536
+ # get metrics for this batch
537
+ batch_metrics = self.calculate_step_metric_validation(predictions, gt)
538
+
539
+ if return_example_images:
540
+ try:
541
+ return_example_images = self.generate_example_image(
542
+ imgs,
543
+ predictions,
544
+ gt,
545
+ num_images=4,
546
+ num_nuclei_classes=self.num_classes,
547
+ )
548
+ except AssertionError:
549
+ self.logger.error(
550
+ "AssertionError for Example Image. Please check. Continue without image."
551
+ )
552
+ return_example_images = None
553
+ else:
554
+ return_example_images = None
555
+
556
+ return batch_metrics, return_example_images
557
+
558
+ def unpack_predictions(self, predictions: dict) -> DataclassHVStorage:
559
+ """Unpack the given predictions. Main focus lays on reshaping and postprocessing predictions, e.g. separating instances
560
+
561
+ Args:
562
+ predictions (dict): Dictionary with the following keys:
563
+ * tissue_types: Logit tissue prediction output. Shape: (batch_size, num_tissue_classes)
564
+ * nuclei_binary_map: Logit output for binary nuclei prediction branch. Shape: (batch_size, 2, H, W)
565
+ * hv_map: Logit output for hv-prediction. Shape: (batch_size, 2, H, W)
566
+ * nuclei_type_map: Logit output for nuclei instance-prediction. Shape: (batch_size, num_nuclei_classes, H, W)
567
+
568
+ Returns:
569
+ DataclassHVStorage: Processed network output
570
+ """
571
+ predictions["tissue_types"] = predictions["tissue_types"].to(self.device)
572
+ predictions["nuclei_binary_map"] = F.softmax(
573
+ predictions["nuclei_binary_map"], dim=1
574
+ ) # shape: (batch_size, 2, H, W)
575
+ predictions["nuclei_type_map"] = F.softmax(
576
+ predictions["nuclei_type_map"], dim=1
577
+ ) # shape: (batch_size, num_nuclei_classes, H, W)
578
+ (
579
+ predictions["instance_map"],
580
+ predictions["instance_types"],
581
+ ) = self.model.calculate_instance_map(
582
+ predictions, self.magnification
583
+ ) # shape: (batch_size, H, W)
584
+ predictions["instance_types_nuclei"] = self.model.generate_instance_nuclei_map(
585
+ predictions["instance_map"], predictions["instance_types"]
586
+ ).to(
587
+ self.device
588
+ ) # shape: (batch_size, num_nuclei_classes, H, W) (32, 256, 256, 6)
589
+
590
+ if "regression_map" not in predictions.keys():
591
+ predictions["regression_map"] = None
592
+
593
+ predictions = DataclassHVStorage(
594
+ nuclei_binary_map=predictions["nuclei_binary_map"],
595
+ hv_map=predictions["hv_map"],
596
+ nuclei_type_map=predictions["nuclei_type_map"],
597
+ tissue_types=predictions["tissue_types"],
598
+ instance_map=predictions["instance_map"],
599
+ instance_types=predictions["instance_types"],
600
+ instance_types_nuclei=predictions["instance_types_nuclei"],
601
+ batch_size=predictions["tissue_types"].shape[0],
602
+ regression_map=predictions["regression_map"],
603
+ num_nuclei_classes=self.num_classes,
604
+ )
605
+
606
+ return predictions
607
+
608
+ def unpack_masks(self, masks: dict, tissue_types: list) -> DataclassHVStorage:
609
+ """Unpack the given masks. Main focus lays on reshaping and postprocessing masks to generate one dict
610
+
611
+ Args:
612
+ masks (dict): Required keys are:
613
+ * instance_map: Pixel-wise nuclear instance segmentations. Shape: (batch_size, H, W)
614
+ * nuclei_binary_map: Binary nuclei segmentations. Shape: (batch_size, H, W)
615
+ * hv_map: HV-Map. Shape: (batch_size, 2, H, W)
616
+ * nuclei_type_map: Nuclei instance-prediction and segmentation (not binary, each instance has own integer).
617
+ Shape: (batch_size, num_nuclei_classes, H, W)
618
+
619
+ tissue_types (list): List of string names of ground-truth tissue types
620
+
621
+ Returns:
622
+ DataclassHVStorage: GT-Results with matching shapes and output types
623
+ """
624
+ # get ground truth values, perform one hot encoding for segmentation maps
625
+ gt_nuclei_binary_map_onehot = (
626
+ F.one_hot(masks["nuclei_binary_map"], num_classes=2)
627
+ ).type(
628
+ torch.float32
629
+ ) # background, nuclei
630
+ #nuclei_type_maps = torch.squeeze(masks["nuclei_type_map"]).type(torch.int64)
631
+ nuclei_type_maps = masks["nuclei_type_map"].type(torch.int64)
632
+ gt_nuclei_type_maps_onehot = F.one_hot(
633
+ nuclei_type_maps, num_classes=self.num_classes
634
+ ).type(
635
+ torch.float32
636
+ ) # background + nuclei types
637
+
638
+ # assemble ground truth dictionary
639
+ gt = {
640
+ "nuclei_type_map": gt_nuclei_type_maps_onehot.permute(0, 3, 1, 2).to(
641
+ self.device
642
+ ), # shape: (batch_size, H, W, num_nuclei_classes)
643
+ "nuclei_binary_map": gt_nuclei_binary_map_onehot.permute(0, 3, 1, 2).to(
644
+ self.device
645
+ ), # shape: (batch_size, H, W, 2)
646
+ "hv_map": masks["hv_map"].to(self.device), # shape: (batch_size,2, H, W)
647
+ "instance_map": masks["instance_map"].to(
648
+ self.device
649
+ ), # shape: (batch_size, H, W) -> each instance has one integer
650
+ "instance_types_nuclei": (
651
+ gt_nuclei_type_maps_onehot * masks["instance_map"][..., None]
652
+ )
653
+ .permute(0, 3, 1, 2)
654
+ .to(
655
+ self.device
656
+ ), # shape: (batch_size, num_nuclei_classes, H, W) -> instance has one integer, for each nuclei class
657
+ "tissue_types": torch.Tensor([self.tissue_types[t] for t in tissue_types])
658
+ .type(torch.LongTensor)
659
+ .to(self.device), # shape: batch_size
660
+ }
661
+ if "regression_map" in masks:
662
+ gt["regression_map"] = masks["regression_map"].to(self.device)
663
+
664
+ gt = DataclassHVStorage(
665
+ **gt,
666
+ batch_size=gt["tissue_types"].shape[0],
667
+ num_nuclei_classes=self.num_classes,
668
+ )
669
+ return gt
670
+
671
+ def calculate_loss(
672
+ self, predictions: DataclassHVStorage, gt: DataclassHVStorage
673
+ ) -> torch.Tensor:
674
+ """Calculate the loss
675
+
676
+ Args:
677
+ predictions (DataclassHVStorage): Predictions
678
+ gt (DataclassHVStorage): Ground-Truth values
679
+
680
+ Returns:
681
+ torch.Tensor: Loss
682
+ """
683
+ predictions = predictions.get_dict()
684
+ gt = gt.get_dict()
685
+
686
+ total_loss = 0
687
+
688
+ for branch, pred in predictions.items():
689
+ if branch in [
690
+ "instance_map",
691
+ "instance_types",
692
+ "instance_types_nuclei",
693
+ ]:
694
+ continue
695
+ if branch not in self.loss_fn_dict:
696
+ continue
697
+ branch_loss_fns = self.loss_fn_dict[branch]
698
+ for loss_name, loss_setting in branch_loss_fns.items():
699
+ loss_fn = loss_setting["loss_fn"]
700
+ weight = loss_setting["weight"]
701
+ if loss_name == "msge":
702
+ loss_value = loss_fn(
703
+ input=pred,
704
+ target=gt[branch],
705
+ focus=gt["nuclei_binary_map"],
706
+ device=self.device,
707
+ )
708
+ else:
709
+ loss_value = loss_fn(input=pred, target=gt[branch])
710
+ total_loss = total_loss + weight * loss_value
711
+ self.loss_avg_tracker[f"{branch}_{loss_name}"].update(
712
+ loss_value.detach().cpu().numpy()
713
+ )
714
+ self.loss_avg_tracker["Total_Loss"].update(total_loss.detach().cpu().numpy())
715
+
716
+ return total_loss
717
+
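
calculate_loss expects self.loss_fn_dict to map each prediction branch to named loss settings. A hedged sketch of that structure (the actual dict is assembled elsewhere from the loss section of config.yaml; the entries below are only illustrative):

    import torch.nn as nn

    loss_fn_dict = {
        "tissue_types": {
            "ce": {"loss_fn": nn.CrossEntropyLoss(), "weight": 0.1},
        },
        "hv_map": {
            "mse": {"loss_fn": nn.MSELoss(), "weight": 2.5},
        },
    }
    # calculate_loss accumulates weight * loss_fn(input=pred, target=gt[branch])
    # per branch and tracks each value in self.loss_avg_tracker.
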
718
+ def calculate_step_metric_train(
719
+ self, predictions: DataclassHVStorage, gt: DataclassHVStorage
720
+ ) -> dict:
721
+ """Calculate the metrics for the training step
722
+
723
+ Args:
724
+ predictions (DataclassHVStorage): Processed network output
725
+ gt (DataclassHVStorage): Ground truth values
726
+ Returns:
727
+ dict: Dictionary with metrics. Keys:
728
+ binary_dice_scores, binary_jaccard_scores, tissue_pred, tissue_gt
729
+ """
730
+ predictions = predictions.get_dict()
731
+ gt = gt.get_dict()
732
+
733
+ # Tissue type logits to probs and argmax to get class
734
+ predictions["tissue_types_classes"] = F.softmax(
735
+ predictions["tissue_types"], dim=-1
736
+ )
737
+ pred_tissue = (
738
+ torch.argmax(predictions["tissue_types_classes"], dim=-1)
739
+ .detach()
740
+ .cpu()
741
+ .numpy()
742
+ .astype(np.uint8)
743
+ )
744
+ predictions["instance_map"] = predictions["instance_map"].detach().cpu()
745
+ predictions["instance_types_nuclei"] = (
746
+ predictions["instance_types_nuclei"].detach().cpu().numpy().astype("int32")
747
+ )
748
+ gt["tissue_types"] = gt["tissue_types"].detach().cpu().numpy().astype(np.uint8)
749
+ gt["nuclei_binary_map"] = torch.argmax(gt["nuclei_binary_map"], dim=1).type(
750
+ torch.uint8
751
+ )
752
+ gt["instance_types_nuclei"] = (
753
+ gt["instance_types_nuclei"].detach().cpu().numpy().astype("int32")
754
+ )
755
+
756
+ tissue_detection_accuracy = accuracy_score(
757
+ y_true=gt["tissue_types"], y_pred=pred_tissue
758
+ )
759
+ self.batch_avg_tissue_acc.update(tissue_detection_accuracy)
760
+
761
+ binary_dice_scores = []
762
+ binary_jaccard_scores = []
763
+
764
+ for i in range(len(pred_tissue)):
765
+ # binary dice score: Score for cell detection per image, without background
766
+ pred_binary_map = torch.argmax(predictions["nuclei_binary_map"][i], dim=0)
767
+ target_binary_map = gt["nuclei_binary_map"][i]
768
+ cell_dice = (
769
+ dice(preds=pred_binary_map, target=target_binary_map, ignore_index=0)
770
+ .detach()
771
+ .cpu()
772
+ )
773
+ binary_dice_scores.append(float(cell_dice))
774
+
775
+ # binary aji
776
+ cell_jaccard = (
777
+ binary_jaccard_index(
778
+ preds=pred_binary_map,
779
+ target=target_binary_map,
780
+ )
781
+ .detach()
782
+ .cpu()
783
+ )
784
+ binary_jaccard_scores.append(float(cell_jaccard))
785
+
786
+ batch_metrics = {
787
+ "binary_dice_scores": binary_dice_scores,
788
+ "binary_jaccard_scores": binary_jaccard_scores,
789
+ "tissue_pred": pred_tissue,
790
+ "tissue_gt": gt["tissue_types"],
791
+ }
792
+
793
+ return batch_metrics
794
+
795
+ def calculate_step_metric_validation(self, predictions: DataclassHVStorage, gt: DataclassHVStorage) -> dict:
796
+ """Calculate the metrics for the training step
797
+
798
+ Args:
799
+ predictions (DataclassHVStorage): Processed network output
800
+ gt (DataclassHVStorage): Ground truth values
801
+ Returns:
802
+ dict: Dictionary with metrics. Keys:
803
+ binary_dice_scores, binary_jaccard_scores, tissue_pred, tissue_gt
804
+ """
805
+ predictions = predictions.get_dict()
806
+ gt = gt.get_dict()
807
+
808
+ # Tissue type logits to probs and argmax to get class
809
+ predictions["tissue_types_classes"] = F.softmax(
810
+ predictions["tissue_types"], dim=-1
811
+ )
812
+ pred_tissue = (
813
+ torch.argmax(predictions["tissue_types_classes"], dim=-1)
814
+ .detach()
815
+ .cpu()
816
+ .numpy()
817
+ .astype(np.uint8)
818
+ )
819
+ predictions["instance_map"] = predictions["instance_map"].detach().cpu()
820
+ predictions["instance_types_nuclei"] = (
821
+ predictions["instance_types_nuclei"].detach().cpu().numpy().astype("int32")
822
+ )
823
+ # reorder instance_types_nuclei from (batch_size, H, W, num_classes) to (batch_size, num_classes, H, W) for the per-class PQ below
824
+ predictions["instance_types_nuclei"] = predictions["instance_types_nuclei"].transpose(0, 3, 1, 2)
825
+ instance_maps_gt = gt["instance_map"].detach().cpu()
826
+ gt["tissue_types"] = gt["tissue_types"].detach().cpu().numpy().astype(np.uint8)
827
+ gt["nuclei_binary_map"] = torch.argmax(gt["nuclei_binary_map"], dim=1).type(
828
+ torch.uint8
829
+ )
830
+ gt["instance_types_nuclei"] = (
831
+ gt["instance_types_nuclei"].detach().cpu().numpy().astype("int32")
832
+ )
833
+
834
+ tissue_detection_accuracy = accuracy_score(
835
+ y_true=gt["tissue_types"], y_pred=pred_tissue
836
+ )
837
+ self.batch_avg_tissue_acc.update(tissue_detection_accuracy)
838
+
839
+ binary_dice_scores = []
840
+ binary_jaccard_scores = []
841
+ cell_type_pq_scores = []
842
+ pq_scores = []
843
+
844
+ for i in range(len(pred_tissue)):
845
+ # binary dice score: Score for cell detection per image, without background
846
+ pred_binary_map = torch.argmax(predictions["nuclei_binary_map"][i], dim=0)
847
+ target_binary_map = gt["nuclei_binary_map"][i]
848
+ cell_dice = (
849
+ dice(preds=pred_binary_map, target=target_binary_map, ignore_index=0)
850
+ .detach()
851
+ .cpu()
852
+ )
853
+ binary_dice_scores.append(float(cell_dice))
854
+
855
+ # binary aji
856
+ cell_jaccard = (
857
+ binary_jaccard_index(
858
+ preds=pred_binary_map,
859
+ target=target_binary_map,
860
+ )
861
+ .detach()
862
+ .cpu()
863
+ )
864
+ binary_jaccard_scores.append(float(cell_jaccard))
865
+ # pq values
866
+ remapped_instance_pred = remap_label(predictions["instance_map"][i])
867
+ remapped_gt = remap_label(instance_maps_gt[i])
868
+ [_, _, pq], _ = get_fast_pq(true=remapped_gt, pred=remapped_instance_pred)
869
+ pq_scores.append(pq)
870
+
871
+ #pq values per class (skip background)
872
+ nuclei_type_pq = []
873
+ for j in range(0, self.num_classes):
874
+ pred_nuclei_instance_class = remap_label(
875
+ predictions["instance_types_nuclei"][i][j, ...]
876
+ )
877
+ target_nuclei_instance_class = remap_label(
878
+ gt["instance_types_nuclei"][i][j, ...]
879
+ )
880
+
881
+ # if ground truth is empty, skip from calculation
882
+ if len(np.unique(target_nuclei_instance_class)) == 1:
883
+ pq_tmp = np.nan
884
+ else:
885
+ [_, _, pq_tmp], _ = get_fast_pq(
886
+ pred_nuclei_instance_class,
887
+ target_nuclei_instance_class,
888
+ match_iou=0.5,
889
+ )
890
+ nuclei_type_pq.append(pq_tmp)
891
+
892
+ cell_type_pq_scores.append(nuclei_type_pq)
893
+
894
+ batch_metrics = {
895
+ "binary_dice_scores": binary_dice_scores,
896
+ "binary_jaccard_scores": binary_jaccard_scores,
897
+ "pq_scores": pq_scores,
898
+ "cell_type_pq_scores": cell_type_pq_scores,
899
+ "tissue_pred": pred_tissue,
900
+ "tissue_gt": gt["tissue_types"],
901
+ }
902
+
903
+ return batch_metrics
904
+
905
+ @staticmethod
906
+ def generate_example_image(
907
+ imgs: Union[torch.Tensor, np.ndarray],
908
+ predictions: DataclassHVStorage,
909
+ gt: DataclassHVStorage,
910
+ num_nuclei_classes: int,
911
+ num_images: int = 2,
912
+ ) -> plt.Figure:
913
+ """Generate example plot with image, binary_pred, hv-map and instance map from prediction and ground-truth
914
+
915
+ Args:
916
+ imgs (Union[torch.Tensor, np.ndarray]): Images to process, a random number (num_images) is selected from this stack
917
+ Shape: (batch_size, 3, H', W')
918
+ predictions (DataclassHVStorage): Predictions
919
+ gt (DataclassHVStorage): gt
920
+ num_nuclei_classes (int): Number of total nuclei classes including background
921
+ num_images (int, optional): Number of example patches to display. Defaults to 2.
922
+
923
+ Returns:
924
+ plt.Figure: Figure with example patches
925
+ """
926
+ predictions = predictions.get_dict()
927
+ gt = gt.get_dict()
928
+
929
+ assert num_images <= imgs.shape[0]
930
+ num_images = 4  # NOTE: hardcoded to 4, overriding the num_images argument
931
+
932
+ predictions["nuclei_binary_map"] = predictions["nuclei_binary_map"].permute(
933
+ 0, 2, 3, 1
934
+ )
935
+ predictions["hv_map"] = predictions["hv_map"].permute(0, 2, 3, 1)
936
+ predictions["nuclei_type_map"] = predictions["nuclei_type_map"].permute(
937
+ 0, 2, 3, 1
938
+ )
939
+ predictions["instance_types_nuclei"] = predictions[
940
+ "instance_types_nuclei"
941
+ ].transpose(0, 2, 3, 1)
942
+
943
+ gt["hv_map"] = gt["hv_map"].permute(0, 2, 3, 1)
944
+ gt["nuclei_type_map"] = gt["nuclei_type_map"].permute(0, 2, 3, 1)
945
+ predictions["instance_types_nuclei"] = predictions[
946
+ "instance_types_nuclei"
947
+ ].transpose(0, 2, 3, 1)
948
+
949
+ h = gt["hv_map"].shape[1]
950
+ w = gt["hv_map"].shape[2]
951
+
952
+ sample_indices = torch.randint(0, imgs.shape[0], (num_images,))
953
+ # convert to rgb and crop to selection
954
+ sample_images = (
955
+ imgs[sample_indices].permute(0, 2, 3, 1).contiguous().cpu().numpy()
956
+ ) # convert to rgb
957
+ sample_images = cropping_center(sample_images, (h, w), True)
958
+
959
+ # get predictions
960
+ pred_sample_binary_map = (
961
+ predictions["nuclei_binary_map"][sample_indices, :, :, 1]
962
+ .detach()
963
+ .cpu()
964
+ .numpy()
965
+ )
966
+ pred_sample_hv_map = (
967
+ predictions["hv_map"][sample_indices].detach().cpu().numpy()
968
+ )
969
+ pred_sample_instance_maps = (
970
+ predictions["instance_map"][sample_indices].detach().cpu().numpy()
971
+ )
972
+ pred_sample_type_maps = (
973
+ torch.argmax(predictions["nuclei_type_map"][sample_indices], dim=-1)
974
+ .detach()
975
+ .cpu()
976
+ .numpy()
977
+ )
978
+
979
+ # get ground truth labels
980
+ gt_sample_binary_map = (
981
+ gt["nuclei_binary_map"][sample_indices].detach().cpu().numpy()
982
+ )
983
+ gt_sample_hv_map = gt["hv_map"][sample_indices].detach().cpu().numpy()
984
+ gt_sample_instance_map = (
985
+ gt["instance_map"][sample_indices].detach().cpu().numpy()
986
+ )
987
+ gt_sample_type_map = (
988
+ torch.argmax(gt["nuclei_type_map"][sample_indices], dim=-1)
989
+ .detach()
990
+ .cpu()
991
+ .numpy()
992
+ )
993
+
994
+ # create colormaps
995
+ hv_cmap = plt.get_cmap("jet")
996
+ binary_cmap = plt.get_cmap("jet")
997
+ instance_map = plt.get_cmap("viridis")
998
+
999
+ # setup plot
1000
+ fig, axs = plt.subplots(num_images, figsize=(6, 2 * num_images), dpi=150)
1001
+
1002
+ for i in range(num_images):
1003
+ placeholder = np.zeros((2 * h, 6 * w, 3))
1004
+ # orig image
1005
+ placeholder[:h, :w, :3] = sample_images[i]
1006
+ placeholder[h : 2 * h, :w, :3] = sample_images[i]
1007
+ # binary prediction
1008
+ placeholder[:h, w : 2 * w, :3] = rgba2rgb(
1009
+ binary_cmap(gt_sample_binary_map[i] * 255)
1010
+ )
1011
+ placeholder[h : 2 * h, w : 2 * w, :3] = rgba2rgb(
1012
+ binary_cmap(pred_sample_binary_map[i])
1013
+ ) # *255?
1014
+ # hv maps
1015
+ placeholder[:h, 2 * w : 3 * w, :3] = rgba2rgb(
1016
+ hv_cmap((gt_sample_hv_map[i, :, :, 0] + 1) / 2)
1017
+ )
1018
+ placeholder[h : 2 * h, 2 * w : 3 * w, :3] = rgba2rgb(
1019
+ hv_cmap((pred_sample_hv_map[i, :, :, 0] + 1) / 2)
1020
+ )
1021
+ placeholder[:h, 3 * w : 4 * w, :3] = rgba2rgb(
1022
+ hv_cmap((gt_sample_hv_map[i, :, :, 1] + 1) / 2)
1023
+ )
1024
+ placeholder[h : 2 * h, 3 * w : 4 * w, :3] = rgba2rgb(
1025
+ hv_cmap((pred_sample_hv_map[i, :, :, 1] + 1) / 2)
1026
+ )
1027
+ # instance_predictions
1028
+ placeholder[:h, 4 * w : 5 * w, :3] = rgba2rgb(
1029
+ instance_map(
1030
+ (gt_sample_instance_map[i] - np.min(gt_sample_instance_map[i]))
1031
+ / (
1032
+ np.max(gt_sample_instance_map[i])
1033
+ - np.min(gt_sample_instance_map[i]) + 1e-10
1034
+ )
1035
+ )
1036
+ )
1037
+ placeholder[h : 2 * h, 4 * w : 5 * w, :3] = rgba2rgb(
1038
+ instance_map(
1039
+ (
1040
+ pred_sample_instance_maps[i]
1041
+ - np.min(pred_sample_instance_maps[i])
1042
+ )
1043
+ / (
1044
+ np.max(pred_sample_instance_maps[i])
1045
+ - np.min(pred_sample_instance_maps[i]) + 1e-10
1046
+ )
1047
+ )
1048
+ )
1049
+ # type_predictions
1050
+ placeholder[:h, 5 * w : 6 * w, :3] = rgba2rgb(
1051
+ binary_cmap(gt_sample_type_map[i] / num_nuclei_classes)
1052
+ )
1053
+ placeholder[h : 2 * h, 5 * w : 6 * w, :3] = rgba2rgb(
1054
+ binary_cmap(pred_sample_type_maps[i] / num_nuclei_classes)
1055
+ )
1056
+
1057
+ # plotting
1058
+ axs[i].imshow(placeholder)
1059
+ axs[i].set_xticks([], [])
1060
+
1061
+ # plot labels in first row
1062
+ if i == 0:
1063
+ axs[i].set_xticks(np.arange(w / 2, 6 * w, w))
1064
+ axs[i].set_xticklabels(
1065
+ [
1066
+ "Image",
1067
+ "Binary-Cells",
1068
+ "HV-Map-0",
1069
+ "HV-Map-1",
1070
+ "Cell Instances",
1071
+ "Nuclei-Instances",
1072
+ ],
1073
+ fontsize=6,
1074
+ )
1075
+ axs[i].xaxis.tick_top()
1076
+
1077
+ axs[i].set_yticks(np.arange(h / 2, 2 * h, h))
1078
+ axs[i].set_yticklabels(["GT", "Pred."], fontsize=6)
1079
+ axs[i].tick_params(axis="both", which="both", length=0)
1080
+ grid_x = np.arange(w, 6 * w, w)
1081
+ grid_y = np.arange(h, 2 * h, h)
1082
+
1083
+ for x_seg in grid_x:
1084
+ axs[i].axvline(x_seg, color="black")
1085
+ for y_seg in grid_y:
1086
+ axs[i].axhline(y_seg, color="black")
1087
+
1088
+ fig.suptitle(f"Patch Predictions for {num_images} Examples")
1089
+
1090
+ fig.tight_layout()
1091
+
1092
+ return fig
cell_segmentation/utils/__init__.py ADDED
@@ -0,0 +1,6 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Utils
3
+ #
4
+ # @ Fabian Hörst, [email protected]
5
+ # Institute for Artificial Intelligence in Medicine,
6
+ # University Medicine Essen
cell_segmentation/utils/metrics.py ADDED
@@ -0,0 +1,276 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Implemented Metrics for Cell detection
3
+ #
4
+ # This code is based on the following repository: https://github.com/TissueImageAnalytics/PanNuke-metrics
5
+ #
6
+ # Implemented metrics are:
7
+ #
8
+ # Instance Segmentation Metrics
9
+ # Binary PQ
10
+ # Multiclass PQ
11
+ # Neoplastic PQ
12
+ # Non-Neoplastic PQ
13
+ # Inflammatory PQ
14
+ # Connective PQ
15
+ # Dead PQ
17
+ #
18
+ # Detection and Classification Metrics
19
+ # Precision, Recall, F1
20
+ #
21
+ # Other
22
+ # dice1, dice2, aji, aji_plus
23
+ #
24
+ # Binary PQ (bPQ): Assumes all nuclei belong to same class and reports the average PQ across tissue types.
25
+ # Multi-Class PQ (mPQ): Reports the average PQ across the classes and tissue types.
26
+ # Neoplastic PQ: Reports the PQ for the neoplastic class on all tissues.
27
+ # Non-Neoplastic PQ: Reports the PQ for the non-neoplastic class on all tissues.
28
+ # Inflammatory PQ: Reports the PQ for the inflammatory class on all tissues.
29
+ # Connective PQ: Reports the PQ for the connective class on all tissues.
30
+ # Dead PQ: Reports the PQ for the dead class on all tissues.
31
+ #
32
+ # @ Fabian Hörst, [email protected]
33
+ # Institute for Artificial Intelligence in Medicine,
34
+ # University Medicine Essen
35
+
36
+ from typing import List
37
+ import numpy as np
38
+ from scipy.optimize import linear_sum_assignment
39
+
40
+
41
+ def get_fast_pq(true, pred, match_iou=0.5):
42
+ """
43
+ `match_iou` is the IoU threshold level to determine the pairing between
44
+ GT instances `p` and prediction instances `g`. `p` and `g` are a pair
45
+ if IoU > `match_iou`. However, a pair of `p` and `g` must be unique
46
+ (1 prediction instance to 1 GT instance mapping).
47
+
48
+ If `match_iou` < 0.5, Munkres assignment (solving minimum weight matching
49
+ in bipartite graphs) is calculated to find the maximal amount of unique pairings.
50
+
51
+ If `match_iou` >= 0.5, all IoU(p,g) > 0.5 pairing is proven to be unique and
52
+ the number of pairs is also maximal.
53
+
54
+ Fast computation requires instance IDs to be in contiguous ordering,
56
+ i.e. [1, 2, 3, 4] not [2, 3, 6, 10]. Please call `remap_label` beforehand;
57
+ the `by_size` flag has no effect on the result.
57
+
58
+ Returns:
59
+ [dq, sq, pq]: measurement statistic
60
+
61
+ [paired_true, paired_pred, unpaired_true, unpaired_pred]:
62
+ pairing information to perform measurement
63
+
64
+ """
65
+ assert match_iou >= 0.0, "Cant' be negative"
66
+
67
+ true = np.copy(true)  # ground-truth instance map, shape (H, W)
68
+ pred = np.copy(pred)  # predicted instance map, shape (H, W)
69
+ true_id_list = list(np.unique(true))
70
+ pred_id_list = list(np.unique(pred))  # instance ids present in the prediction
71
+
72
+ # if there is no background, fixing by adding it
73
+ if 0 not in pred_id_list:
74
+ pred_id_list = [0] + pred_id_list
75
+
76
+ true_masks = [
77
+ None,
78
+ ]
79
+ for t in true_id_list[1:]:
80
+ t_mask = np.array(true == t, np.uint8)
81
+ true_masks.append(t_mask)  # binary mask for each ground-truth instance, shape (H, W)
82
+
83
+ pred_masks = [
84
+ None,
85
+ ]
86
+ for p in pred_id_list[1:]:
87
+ p_mask = np.array(pred == p, np.uint8)
88
+ pred_masks.append(p_mask)  # binary mask for each predicted instance, shape (H, W)
89
+
90
+ # prefill with value
91
+ pairwise_iou = np.zeros(
92
+ [len(true_id_list) - 1, len(pred_id_list) - 1], dtype=np.float64
93
+ )
94
+
95
+ # caching pairwise iou for all instances
96
+ for true_id in true_id_list[1:]:  # 0-th is background
97
+ t_mask = true_masks[true_id]  # binary mask of the current ground-truth instance
98
+ pred_true_overlap = pred[t_mask > 0]  # predicted instance ids overlapping this GT instance
103
+ pred_true_overlap_id = np.unique(pred_true_overlap)
104
+ pred_true_overlap_id = list(pred_true_overlap_id)
105
+ for pred_id in pred_true_overlap_id:
106
+ if pred_id == 0: # ignore
107
+ continue # overlaping background
108
+ p_mask = pred_masks[pred_id]
109
+ total = (t_mask + p_mask).sum()
110
+ inter = (t_mask * p_mask).sum()
111
+ iou = inter / (total - inter)
112
+ pairwise_iou[true_id - 1, pred_id - 1] = iou
113
+ #
114
+ if match_iou >= 0.5:
115
+ paired_iou = pairwise_iou[pairwise_iou > match_iou]
116
+ pairwise_iou[pairwise_iou <= match_iou] = 0.0
117
+ paired_true, paired_pred = np.nonzero(pairwise_iou)
118
+ paired_iou = pairwise_iou[paired_true, paired_pred]
119
+ paired_true += 1 # index is instance id - 1
120
+ paired_pred += 1 # hence return back to original
121
+ else: # * Exhaustive maximal unique pairing
122
+ #### Munkres pairing with scipy library
123
+ # the algorithm returns (row indices, matched column indices);
124
+ # if there are multiple identical costs in a row, the index of the first occurrence
125
+ # is returned, thus the unique pairing is ensured
126
+ # inverse pair to get high IoU as minimum
127
+ paired_true, paired_pred = linear_sum_assignment(-pairwise_iou)
128
+ ### extract the paired cost and remove invalid pair
129
+ paired_iou = pairwise_iou[paired_true, paired_pred]
130
+
131
+ # now select those above threshold level
132
+ # paired with iou = 0.0 i.e no intersection => FP or FN
133
+ paired_true = list(paired_true[paired_iou > match_iou] + 1)
134
+ paired_pred = list(paired_pred[paired_iou > match_iou] + 1)
135
+ paired_iou = paired_iou[paired_iou > match_iou]
136
+
137
+ # get the actual FP and FN
138
+ unpaired_true = [idx for idx in true_id_list[1:] if idx not in paired_true]
139
+ unpaired_pred = [idx for idx in pred_id_list[1:] if idx not in paired_pred]
140
+ # print(paired_iou.shape, paired_true.shape, len(unpaired_true), len(unpaired_pred))
141
+
142
+ #
143
+ tp = len(paired_true)
144
+ fp = len(unpaired_pred)
145
+ fn = len(unpaired_true)
146
+ # get the F1-score i.e DQ
147
+ dq = tp / (tp + 0.5 * fp + 0.5 * fn + 1.0e-6) # good practice?
148
+ # get the SQ; no pair has 0 iou, so this does not impact the result
149
+ sq = paired_iou.sum() / (tp + 1.0e-6)
150
+
151
+ return [dq, sq, dq * sq], [paired_true, paired_pred, unpaired_true, unpaired_pred]
152
+
153
+
154
+ #####
155
+
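
A toy usage sketch for get_fast_pq (arrays invented for illustration; remap_label below makes the instance ids contiguous first, as the docstring requires):

    import numpy as np

    true = np.zeros((4, 4), dtype=np.int32)
    pred = np.zeros((4, 4), dtype=np.int32)
    true[1:3, 1:3] = 5   # one GT instance with a non-contiguous id
    pred[1:3, 1:3] = 2   # one predicted instance with perfect overlap

    [dq, sq, pq], _ = get_fast_pq(remap_label(true), remap_label(pred))
    # dq, sq and pq are all ~1.0 for this perfect match
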
156
+
157
+ def remap_label(pred, by_size=False):
158
+ """
159
+ Rename all instance id so that the id is contiguous i.e [0, 1, 2, 3]
160
+ not [0, 2, 4, 6]. The ordering of instances (which one comes first)
161
+ is preserved unless by_size=True, then the instances will be reordered
162
+ so that bigger nuclei get smaller IDs.
163
+
164
+ Args:
165
+ pred : the 2d array contain instances where each instances is marked
166
+ by non-zero integer
167
+ by_size : if True, larger nuclei are renamed to smaller ids (sorted on top)
168
+ """
169
+ pred_id = list(np.unique(pred))
170
+ if 0 in pred_id:
171
+ pred_id.remove(0)
172
+ if len(pred_id) == 0:
173
+ return pred # no label
174
+ if by_size:
175
+ pred_size = []
176
+ for inst_id in pred_id:
177
+ size = (pred == inst_id).sum()
178
+ pred_size.append(size)
179
+ # sort the id by size in descending order
180
+ pair_list = zip(pred_id, pred_size)
181
+ pair_list = sorted(pair_list, key=lambda x: x[1], reverse=True)
182
+ pred_id, pred_size = zip(*pair_list)
183
+
184
+ new_pred = np.zeros(pred.shape, np.int32)
185
+ for idx, inst_id in enumerate(pred_id):
186
+ new_pred[pred == inst_id] = idx + 1
187
+ return new_pred
188
+
189
+
190
+ ####
191
+
192
+
193
+ def binarize(x):
194
+ """
195
+ convert multichannel (multiclass) instance segmentation tensor
196
+ to binary instance segmentation (bg and nuclei),
197
+
198
+ :param x: B*B*C (for PanNuke 256*256*5 )
199
+ :return: Instance segmentation map (collapses the multi-channel mask into a single-channel instance mask)
200
+ """
201
+ #x = np.transpose(x, (1, 2, 0)) #[256,256,5]
202
+
203
+ out = np.zeros([x.shape[0], x.shape[1]])  # initialize output, shape (H, W)
204
+ count = 1
205
+ for i in range(x.shape[2]):  # iterate over channels
206
+ x_ch = x[:, :, i]  # mask of channel i, shape (H, W)
207
+ unique_vals = np.unique(x_ch)  # unique instance ids in this channel
208
+ unique_vals = unique_vals.tolist()
209
+ unique_vals.remove(0)  # drop the background id
210
+ for j in unique_vals:  # iterate over the instance ids of this channel
211
+ x_tmp = x_ch == j  # boolean mask of the current instance
212
+ x_tmp_c = 1 - x_tmp  # complement of the instance mask
213
+ out *= x_tmp_c  # clear the pixels of the current instance
214
+ out += count * x_tmp  # write the new running instance id
215
+ count += 1
216
+ out = out.astype("int32")
217
+ return out
218
+
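
A toy sketch of binarize (values invented): two class channels, each holding one instance, collapse into one instance-labelled map:

    import numpy as np

    x = np.zeros((4, 4, 2))
    x[0:2, 0:2, 0] = 7   # instance in channel 0
    x[2:4, 2:4, 1] = 3   # instance in channel 1

    inst = binarize(x)
    # inst[0:2, 0:2] == 1, inst[2:4, 2:4] == 2, background stays 0
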
219
+
220
+ def get_tissue_idx(tissue_indices, idx):
221
+ for i in range(len(tissue_indices)):
222
+ if tissue_indices[i].count(idx) == 1:
223
+ tiss_idx = i
224
+ return tiss_idx
225
+
226
+
227
+ def cell_detection_scores(
228
+ paired_true, paired_pred, unpaired_true, unpaired_pred, w: List = [1, 1]
229
+ ):
230
+ tp_d = paired_pred.shape[0]
231
+ fp_d = unpaired_pred.shape[0]
232
+ fn_d = unpaired_true.shape[0]
233
+
234
+ # tp_tn_dt = (paired_pred == paired_true).sum()
235
+ # fp_fn_dt = (paired_pred != paired_true).sum()
236
+ prec_d = tp_d / (tp_d + fp_d)
237
+ rec_d = tp_d / (tp_d + fn_d)
238
+
239
+ f1_d = 2 * tp_d / (2 * tp_d + w[0] * fp_d + w[1] * fn_d)
240
+
241
+ return f1_d, prec_d, rec_d
242
+
243
+
244
+ def cell_type_detection_scores(
245
+ paired_true,
246
+ paired_pred,
247
+ unpaired_true,
248
+ unpaired_pred,
249
+ type_id,
250
+ w: List = [2, 2, 1, 1],
251
+ exhaustive: bool = True,
252
+ ):
253
+ type_samples = (paired_true == type_id) | (paired_pred == type_id)
254
+
255
+ paired_true = paired_true[type_samples]
256
+ paired_pred = paired_pred[type_samples]
257
+
258
+ tp_dt = ((paired_true == type_id) & (paired_pred == type_id)).sum()
259
+ tn_dt = ((paired_true != type_id) & (paired_pred != type_id)).sum()
260
+ fp_dt = ((paired_true != type_id) & (paired_pred == type_id)).sum()
261
+ fn_dt = ((paired_true == type_id) & (paired_pred != type_id)).sum()
262
+
263
+ if not exhaustive:
264
+ ignore = (paired_true == -1).sum()
265
+ fp_dt -= ignore
266
+
267
+ fp_d = (unpaired_pred == type_id).sum() #
268
+ fn_d = (unpaired_true == type_id).sum()
269
+
270
+ prec_type = (tp_dt + tn_dt) / (tp_dt + tn_dt + w[0] * fp_dt + w[2] * fp_d)
271
+ rec_type = (tp_dt + tn_dt) / (tp_dt + tn_dt + w[1] * fn_dt + w[3] * fn_d)
272
+
273
+ f1_type = (2 * (tp_dt + tn_dt)) / (
274
+ 2 * (tp_dt + tn_dt) + w[0] * fp_dt + w[1] * fn_dt + w[2] * fp_d + w[3] * fn_d
275
+ )
276
+ return f1_type, prec_type, rec_type
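
A hedged example for the detection scores (index arrays invented; in practice they come from pair_coordinates in cell_segmentation/utils/tools.py):

    import numpy as np

    paired_true = np.array([1, 2, 3])   # 3 matched GT cells
    paired_pred = np.array([1, 2, 3])
    unpaired_true = np.array([4])       # 1 missed GT cell -> FN
    unpaired_pred = np.array([])        # no spurious detection -> FP = 0

    f1_d, prec_d, rec_d = cell_detection_scores(
        paired_true, paired_pred, unpaired_true, unpaired_pred
    )
    # prec_d = 1.0, rec_d = 0.75, f1_d = 6/7 ~ 0.857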
cell_segmentation/utils/post_proc_cellvit.py ADDED
@@ -0,0 +1,328 @@
1
+ # -*- coding: utf-8 -*-
2
+ # PostProcessing Pipeline
3
+ #
4
+ # Adapted from HoverNet
5
+ # HoverNet Network (https://doi.org/10.1016/j.media.2019.101563)
6
+ # Code Snippet adapted from HoverNet implementation (https://github.com/vqdang/hover_net)
7
+ #
8
+ # @ Fabian Hörst, [email protected]
9
+ # Institute for Artificial Intelligence in Medicine,
10
+ # University Medicine Essen
11
+
12
+
13
+ import warnings
14
+ from typing import Tuple, Literal,List
15
+
16
+ import cv2
17
+ import numpy as np
18
+ from scipy.ndimage import measurements
19
+ from scipy.ndimage.morphology import binary_fill_holes
20
+ from skimage.segmentation import watershed
21
+ import torch
22
+
23
+ from .tools import get_bounding_box, remove_small_objects
24
+
25
+
26
+ def noop(*args, **kargs):
27
+ pass
28
+
29
+
30
+ warnings.warn = noop
31
+
32
+
33
+ class DetectionCellPostProcessor:
34
+ def __init__(
35
+ self,
36
+ nr_types: int = None,
37
+ magnification: Literal[20, 40] = 40,
38
+ gt: bool = False,
39
+ ) -> None:
40
+ """DetectionCellPostProcessor for postprocessing prediction maps and get detected cells
41
+
42
+ Args:
43
+ nr_types (int, optional): Number of cell types, including background (background = 0). Defaults to None.
44
+ magnification (Literal[20, 40], optional): Which magnification the data has. Defaults to 40.
45
+ gt (bool, optional): If this is gt data (used that we do not suppress tiny cells that may be noise in a prediction map).
46
+ Defaults to False.
47
+
48
+ Raises:
49
+ NotImplementedError: Unknown magnification
50
+ """
51
+ self.nr_types = nr_types
52
+ self.magnification = magnification
53
+ self.gt = gt
54
+
55
+ if magnification == 40:
56
+ self.object_size = 10
57
+ self.k_size = 21
58
+ elif magnification == 20:
59
+ self.object_size = 3 # 3 or 40, we used 5
60
+ self.k_size = 11 # 11 or 41, we used 13
61
+ else:
62
+ raise NotImplementedError("Unknown magnification")
63
+ if gt: # to not supress something in gt!
64
+ self.object_size = 100
65
+ self.k_size = 21
66
+
67
+ def post_process_cell_segmentation(
68
+ self,
69
+ pred_map: np.ndarray,
70
+ ) -> Tuple[np.ndarray, dict]:
71
+ """Post processing of one image tile
72
+
73
+ Args:
74
+ pred_map (np.ndarray): Combined output of tp, np and hv branches, in the same order. Shape: (H, W, 4)
75
+
76
+ Returns:
77
+ Tuple[np.ndarray, dict]:
78
+ np.ndarray: Instance map for one image. Each nuclei has own integer. Shape: (H, W)
79
+ dict: Instance dictionary. Main Key is the nuclei instance number (int), with a dict as value.
80
+ For each instance, the dictionary contains the keys: bbox (bounding box), centroid (centroid coordinates),
81
+ contour, type_prob (probability), type (nuclei type)
82
+ """
83
+ if self.nr_types is not None:
84
+ pred_type = pred_map[..., :1]
85
+ pred_inst = pred_map[..., 1:]
86
+ pred_type = pred_type.astype(np.int32)
87
+ else:
88
+ pred_inst = pred_map
89
+
90
+ pred_inst = np.squeeze(pred_inst)
91
+ pred_inst = self.__proc_np_hv(
92
+ pred_inst, object_size=self.object_size, ksize=self.k_size
93
+ )
94
+
95
+ inst_id_list = np.unique(pred_inst)[1:] # exlcude background
96
+ inst_info_dict = {}
97
+ for inst_id in inst_id_list:
98
+ inst_map = pred_inst == inst_id
99
+ rmin, rmax, cmin, cmax = get_bounding_box(inst_map)
100
+ inst_bbox = np.array([[rmin, cmin], [rmax, cmax]])
101
+ inst_map = inst_map[
102
+ inst_bbox[0][0] : inst_bbox[1][0], inst_bbox[0][1] : inst_bbox[1][1]
103
+ ]
104
+ inst_map = inst_map.astype(np.uint8)
105
+ inst_moment = cv2.moments(inst_map)
106
+ inst_contour = cv2.findContours(
107
+ inst_map, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
108
+ )
109
+ # * opencv protocol format may break
110
+ inst_contour = np.squeeze(inst_contour[0][0].astype("int32"))
111
+ # < 3 points dont make a contour, so skip, likely artifact too
112
+ # as the contours obtained via approximation => too small or sthg
113
+ if inst_contour.shape[0] < 3:
114
+ continue
115
+ if len(inst_contour.shape) != 2:
116
+ continue # ! check for trickery shape
117
+ inst_centroid = [
118
+ (inst_moment["m10"] / inst_moment["m00"]),
119
+ (inst_moment["m01"] / inst_moment["m00"]),
120
+ ]
121
+ inst_centroid = np.array(inst_centroid)
122
+ inst_contour[:, 0] += inst_bbox[0][1] # X
123
+ inst_contour[:, 1] += inst_bbox[0][0] # Y
124
+ inst_centroid[0] += inst_bbox[0][1] # X
125
+ inst_centroid[1] += inst_bbox[0][0] # Y
126
+ inst_info_dict[inst_id] = { # inst_id should start at 1
127
+ "bbox": inst_bbox,
128
+ "centroid": inst_centroid,
129
+ "contour": inst_contour,
130
+ "type_prob": None,
131
+ "type": None,
132
+ }
133
+
134
+ #### * Get class of each instance id, stored at index id-1 (inst_id = number of deteced nucleus)
135
+ for inst_id in list(inst_info_dict.keys()):
136
+ rmin, cmin, rmax, cmax = (inst_info_dict[inst_id]["bbox"]).flatten()
137
+ inst_map_crop = pred_inst[rmin:rmax, cmin:cmax]
138
+ inst_type_crop = pred_type[rmin:rmax, cmin:cmax]
139
+ inst_map_crop = inst_map_crop == inst_id
140
+ inst_type = inst_type_crop[inst_map_crop]
141
+ type_list, type_pixels = np.unique(inst_type, return_counts=True)
142
+ type_list = list(zip(type_list, type_pixels))
143
+ type_list = sorted(type_list, key=lambda x: x[1], reverse=True)
144
+ inst_type = type_list[0][0]
145
+ if inst_type == 0: # ! pick the 2nd most dominant if exist
146
+ if len(type_list) > 1:
147
+ inst_type = type_list[1][0]
148
+ type_dict = {v[0]: v[1] for v in type_list}
149
+ type_prob = type_dict[inst_type] / (np.sum(inst_map_crop) + 1.0e-6)
150
+ inst_info_dict[inst_id]["type"] = int(inst_type)
151
+ inst_info_dict[inst_id]["type_prob"] = float(type_prob)
152
+
153
+ return pred_inst, inst_info_dict
154
+
155
+ def __proc_np_hv(
156
+ self, pred: np.ndarray, object_size: int = 10, ksize: int = 21
157
+ ) -> np.ndarray:
158
+ """Process Nuclei Prediction with XY Coordinate Map and generate instance map (each instance has unique integer)
159
+
160
+ Separate Instances (also overlapping ones) from binary nuclei map and hv map by using morphological operations and watershed
161
+
162
+ Args:
163
+ pred (np.ndarray): Prediction output, assuming. Shape: (H, W, 3)
164
+ * channel 0 contain probability map of nuclei
165
+ * channel 1 containing the regressed X-map
166
+ * channel 2 containing the regressed Y-map
167
+ object_size (int, optional): Smallest oject size for filtering. Defaults to 10
168
+ k_size (int, optional): Sobel Kernel size. Defaults to 21
169
+ Returns:
170
+ np.ndarray: Instance map for one image. Each nuclei has own integer. Shape: (H, W)
171
+ """
172
+ pred = np.array(pred, dtype=np.float32)
173
+
174
+ blb_raw = pred[..., 0]
175
+ h_dir_raw = pred[..., 1]
176
+ v_dir_raw = pred[..., 2]
177
+
178
+ # processing
179
+ blb = np.array(blb_raw >= 0.5, dtype=np.int32)
180
+
181
+ blb = measurements.label(blb)[0] # ndimage.label(blb)[0]
182
+ blb = remove_small_objects(blb, min_size=10) # 10
183
+ blb[blb > 0] = 1 # background is 0 already
184
+
185
+ h_dir = cv2.normalize(
186
+ h_dir_raw,
187
+ None,
188
+ alpha=0,
189
+ beta=1,
190
+ norm_type=cv2.NORM_MINMAX,
191
+ dtype=cv2.CV_32F,
192
+ )
193
+ v_dir = cv2.normalize(
194
+ v_dir_raw,
195
+ None,
196
+ alpha=0,
197
+ beta=1,
198
+ norm_type=cv2.NORM_MINMAX,
199
+ dtype=cv2.CV_32F,
200
+ )
201
+
202
+ # ksize = int((20 * scale_factor) + 1) # 21 vs 41
203
+ # obj_size = math.ceil(10 * (scale_factor**2)) #10 vs 40
204
+
205
+ sobelh = cv2.Sobel(h_dir, cv2.CV_64F, 1, 0, ksize=ksize)
206
+ sobelv = cv2.Sobel(v_dir, cv2.CV_64F, 0, 1, ksize=ksize)
207
+
208
+ sobelh = 1 - (
209
+ cv2.normalize(
210
+ sobelh,
211
+ None,
212
+ alpha=0,
213
+ beta=1,
214
+ norm_type=cv2.NORM_MINMAX,
215
+ dtype=cv2.CV_32F,
216
+ )
217
+ )
218
+ sobelv = 1 - (
219
+ cv2.normalize(
220
+ sobelv,
221
+ None,
222
+ alpha=0,
223
+ beta=1,
224
+ norm_type=cv2.NORM_MINMAX,
225
+ dtype=cv2.CV_32F,
226
+ )
227
+ )
228
+
229
+ overall = np.maximum(sobelh, sobelv)
230
+ overall = overall - (1 - blb)
231
+ overall[overall < 0] = 0
232
+
233
+ dist = (1.0 - overall) * blb
234
+ ## nuclei values form mountains so inverse to get basins
235
+ dist = -cv2.GaussianBlur(dist, (3, 3), 0)
236
+
237
+ overall = np.array(overall >= 0.4, dtype=np.int32)
238
+
239
+ marker = blb - overall
240
+ marker[marker < 0] = 0
241
+ marker = binary_fill_holes(marker).astype("uint8")
242
+ kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
243
+ marker = cv2.morphologyEx(marker, cv2.MORPH_OPEN, kernel)
244
+ marker = measurements.label(marker)[0]
245
+ marker = remove_small_objects(marker, min_size=object_size)
246
+
247
+ proced_pred = watershed(dist, markers=marker, mask=blb)
248
+
249
+ return proced_pred
250
+
251
+
252
+ def calculate_instances(
253
+ pred_types: torch.Tensor, pred_insts: torch.Tensor
254
+ ) -> List[dict]:
255
+ """Best used for GT
256
+
257
+ Args:
258
+ pred_types (torch.Tensor): Binary or type map ground-truth.
259
+ Shape must be (B, C, H, W) with C=1 for binary or num_nuclei_types for multi-class.
260
+ pred_insts (torch.Tensor): Ground-Truth instance map with shape (B, H, W)
261
+
262
+ Returns:
263
+ list[dict]: Dictionary with nuclei informations, output similar to post_process_cell_segmentation
264
+ """
265
+ type_preds = []
266
+ pred_types = pred_types.permute(0, 2, 3, 1)
267
+ for i in range(pred_types.shape[0]):
268
+ pred_type = torch.argmax(pred_types, dim=-1)[i].detach().cpu().numpy()
269
+ pred_inst = pred_insts[i].detach().cpu().numpy()
270
+ inst_id_list = np.unique(pred_inst)[1:] # exlcude background
271
+ inst_info_dict = {}
272
+ for inst_id in inst_id_list:
273
+ inst_map = pred_inst == inst_id
274
+ rmin, rmax, cmin, cmax = get_bounding_box(inst_map)
275
+ inst_bbox = np.array([[rmin, cmin], [rmax, cmax]])
276
+ inst_map = inst_map[
277
+ inst_bbox[0][0] : inst_bbox[1][0], inst_bbox[0][1] : inst_bbox[1][1]
278
+ ]
279
+ inst_map = inst_map.astype(np.uint8)
280
+ inst_moment = cv2.moments(inst_map)
281
+ inst_contour = cv2.findContours(
282
+ inst_map, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
283
+ )
284
+ # * opencv protocol format may break
285
+ inst_contour = np.squeeze(inst_contour[0][0].astype("int32"))
286
+ # < 3 points dont make a contour, so skip, likely artifact too
287
+ # as the contours obtained via approximation => too small or sthg
288
+ if inst_contour.shape[0] < 3:
289
+ continue
290
+ if len(inst_contour.shape) != 2:
291
+ continue # ! check for trickery shape
292
+ inst_centroid = [
293
+ (inst_moment["m10"] / inst_moment["m00"]),
294
+ (inst_moment["m01"] / inst_moment["m00"]),
295
+ ]
296
+ inst_centroid = np.array(inst_centroid)
297
+ inst_contour[:, 0] += inst_bbox[0][1] # X
298
+ inst_contour[:, 1] += inst_bbox[0][0] # Y
299
+ inst_centroid[0] += inst_bbox[0][1] # X
300
+ inst_centroid[1] += inst_bbox[0][0] # Y
301
+ inst_info_dict[inst_id] = { # inst_id should start at 1
302
+ "bbox": inst_bbox,
303
+ "centroid": inst_centroid,
304
+ "contour": inst_contour,
305
+ "type_prob": None,
306
+ "type": None,
307
+ }
308
+ #### * Get class of each instance id, stored at index id-1 (inst_id = number of deteced nucleus)
309
+ for inst_id in list(inst_info_dict.keys()):
310
+ rmin, cmin, rmax, cmax = (inst_info_dict[inst_id]["bbox"]).flatten()
311
+ inst_map_crop = pred_inst[rmin:rmax, cmin:cmax]
312
+ inst_type_crop = pred_type[rmin:rmax, cmin:cmax]
313
+ inst_map_crop = inst_map_crop == inst_id
314
+ inst_type = inst_type_crop[inst_map_crop]
315
+ type_list, type_pixels = np.unique(inst_type, return_counts=True)
316
+ type_list = list(zip(type_list, type_pixels))
317
+ type_list = sorted(type_list, key=lambda x: x[1], reverse=True)
318
+ inst_type = type_list[0][0]
319
+ if inst_type == 0: # ! pick the 2nd most dominant if exist
320
+ if len(type_list) > 1:
321
+ inst_type = type_list[1][0]
322
+ type_dict = {v[0]: v[1] for v in type_list}
323
+ type_prob = type_dict[inst_type] / (np.sum(inst_map_crop) + 1.0e-6)
324
+ inst_info_dict[inst_id]["type"] = int(inst_type)
325
+ inst_info_dict[inst_id]["type_prob"] = float(type_prob)
326
+ type_preds.append(inst_info_dict)
327
+
328
+ return type_preds
cell_segmentation/utils/template_geojson.py ADDED
@@ -0,0 +1,52 @@
1
+ # -*- coding: utf-8 -*-
2
+ # GeoJson templates
3
+ #
4
+ # @ Fabian Hörst, [email protected]
5
+ # Institute for Artificial Intelligence in Medicine,
6
+ # University Medicine Essen
7
+
8
+
9
+ def get_template_point() -> dict:
10
+ """Return a template for a Point geojson object
11
+
12
+ Returns:
13
+ dict: Template
14
+ """
15
+ template_point = {
16
+ "type": "Feature",
17
+ "id": "TODO",
18
+ "geometry": {
19
+ "type": "MultiPoint",
20
+ "coordinates": [
21
+ [],
22
+ ],
23
+ },
24
+ "properties": {
25
+ "objectType": "annotation",
26
+ "classification": {"name": "TODO", "color": []},
27
+ },
28
+ }
29
+ return template_point
30
+
31
+
32
+ def get_template_segmentation() -> dict:
33
+ """Return a template for a MultiPolygon geojson object
34
+
35
+ Returns:
36
+ dict: Template
37
+ """
38
+ template_multipolygon = {
39
+ "type": "Feature",
40
+ "id": "TODO",
41
+ "geometry": {
42
+ "type": "MultiPolygon",
43
+ "coordinates": [
44
+ [],
45
+ ],
46
+ },
47
+ "properties": {
48
+ "objectType": "annotation",
49
+ "classification": {"name": "TODO", "color": []},
50
+ },
51
+ }
52
+ return template_multipolygon
cell_segmentation/utils/tools.py ADDED
@@ -0,0 +1,400 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Helpful functions Pipeline
3
+ #
4
+ # Adapted from HoverNet
5
+ # HoverNet Network (https://doi.org/10.1016/j.media.2019.101563)
6
+ # Code Snippet adapted from HoverNet implementation (https://github.com/vqdang/hover_net)
7
+ #
8
+ # @ Fabian Hörst, [email protected]
9
+ # Institute for Artificial Intelligence in Medicine,
10
+ # University Medicine Essen
11
+
12
+
13
+ import math
14
+ from typing import Tuple
15
+
16
+ import numpy as np
17
+ import scipy
18
+ from numba import njit, prange
19
+ from scipy import ndimage
20
+ from scipy.optimize import linear_sum_assignment
21
+ from skimage.draw import polygon
22
+
23
+
24
+ def get_bounding_box(img):
25
+ """Get bounding box coordinate information."""
26
+ rows = np.any(img, axis=1)
27
+ cols = np.any(img, axis=0)
28
+ rmin, rmax = np.where(rows)[0][[0, -1]]
29
+ cmin, cmax = np.where(cols)[0][[0, -1]]
30
+ # due to python indexing, need to add 1 to max
31
+ # else accessing will be 1px in the box, not out
32
+ rmax += 1
33
+ cmax += 1
34
+ return [rmin, rmax, cmin, cmax]
35
+
36
+
37
+ @njit
38
+ def cropping_center(x, crop_shape, batch=False):
39
+ """Crop an input image at the centre.
40
+
41
+ Args:
42
+ x: input array
43
+ crop_shape: dimensions of cropped array
44
+
45
+ Returns:
46
+ x: cropped array
47
+
48
+ """
49
+ orig_shape = x.shape
50
+ if not batch:
51
+ h0 = int((orig_shape[0] - crop_shape[0]) * 0.5)
52
+ w0 = int((orig_shape[1] - crop_shape[1]) * 0.5)
53
+ x = x[h0 : h0 + crop_shape[0], w0 : w0 + crop_shape[1], ...]
54
+ else:
55
+ h0 = int((orig_shape[1] - crop_shape[0]) * 0.5)
56
+ w0 = int((orig_shape[2] - crop_shape[1]) * 0.5)
57
+ x = x[:, h0 : h0 + crop_shape[0], w0 : w0 + crop_shape[1], ...]
58
+ return x
59
+
60
+
61
+ def remove_small_objects(pred, min_size=64, connectivity=1):
62
+ """Remove connected components smaller than the specified size.
63
+
64
+ This function is taken from skimage.morphology.remove_small_objects, but the warning
65
+ is removed when a single label is provided.
66
+
67
+ Args:
68
+ pred: input labelled array
69
+ min_size: minimum size of instance in output array
70
+ connectivity: The connectivity defining the neighborhood of a pixel.
71
+
72
+ Returns:
73
+ out: output array with instances removed under min_size
74
+
75
+ """
76
+ out = pred
77
+
78
+ if min_size == 0: # shortcut for efficiency
79
+ return out
80
+
81
+ if out.dtype == bool:
82
+ selem = ndimage.generate_binary_structure(pred.ndim, connectivity)
83
+ ccs = np.zeros_like(pred, dtype=np.int32)
84
+ ndimage.label(pred, selem, output=ccs)
85
+ else:
86
+ ccs = out
87
+
88
+ try:
89
+ component_sizes = np.bincount(ccs.ravel())
90
+ except ValueError:
91
+ raise ValueError(
92
+ "Negative value labels are not supported. Try "
93
+ "relabeling the input with `scipy.ndimage.label` or "
94
+ "`skimage.morphology.label`."
95
+ )
96
+
97
+ too_small = component_sizes < min_size
98
+ too_small_mask = too_small[ccs]
99
+ out[too_small_mask] = 0
100
+
101
+ return out
102
+
103
+
104
+ def pair_coordinates(
105
+ setA: np.ndarray, setB: np.ndarray, radius: float
106
+ ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
107
+ """Use the Munkres or Kuhn-Munkres algorithm to find the most optimal
108
+ unique pairing (largest possible match) when pairing points in set B
109
+ against points in set A, using distance as cost function.
110
+
111
+ Args:
112
+ setA (np.ndarray): np.array (float32) of size Nx2 contains the of XY coordinate
113
+ of N different points
114
+ setB (np.ndarray): np.array (float32) of size Nx2 contains the of XY coordinate
115
+ of N different points
116
+ radius (float): valid area around a point in setA to consider
117
+ a given coordinate in setB a candidate for match
118
+
119
+ Returns:
120
+ Tuple[np.ndarray, np.ndarray, np.ndarray]:
121
+ pairing: pairing is an array of indices
122
+ where point at index pairing[0] in set A paired with point
123
+ in set B at index pairing[1]
124
+ unparedA: remaining point in set A unpaired
125
+ unparedB: remaining point in set B unpaired
126
+ """
127
+ # * Euclidean distance as the cost matrix
128
+ pair_distance = scipy.spatial.distance.cdist(setA, setB, metric="euclidean")
129
+
130
+ # * Munkres pairing with scipy library
131
+ # the algorithm return (row indices, matched column indices)
132
+ # if there is multiple same cost in a row, index of first occurence
133
+ # is return, thus the unique pairing is ensured
134
+ indicesA, paired_indicesB = linear_sum_assignment(pair_distance)
135
+
136
+ # extract the paired cost and remove instances
137
+ # outside of designated radius
138
+ pair_cost = pair_distance[indicesA, paired_indicesB]
139
+
140
+ pairedA = indicesA[pair_cost <= radius]
141
+ pairedB = paired_indicesB[pair_cost <= radius]
142
+
143
+ pairing = np.concatenate([pairedA[:, None], pairedB[:, None]], axis=-1)
144
+ unpairedA = np.delete(np.arange(setA.shape[0]), pairedA)
145
+ unpairedB = np.delete(np.arange(setB.shape[0]), pairedB)
146
+
147
+ return pairing, unpairedA, unpairedB
148
+
149
+
150
+ def fix_duplicates(inst_map: np.ndarray) -> np.ndarray:
151
+ """Re-label duplicated instances in an instance labelled mask.
152
+
153
+ Parameters
154
+ ----------
155
+ inst_map : np.ndarray
156
+ Instance labelled mask. Shape (H, W).
157
+
158
+ Returns
159
+ -------
160
+ np.ndarray:
161
+ The instance labelled mask without duplicated indices.
162
+ Shape (H, W).
163
+ """
164
+ current_max_id = np.amax(inst_map)
165
+ inst_list = list(np.unique(inst_map))
166
+ if 0 in inst_list:
167
+ inst_list.remove(0)
168
+
169
+ for inst_id in inst_list:
170
+ inst = np.array(inst_map == inst_id, np.uint8)
171
+ remapped_ids = ndimage.label(inst)[0]
172
+ remapped_ids[remapped_ids > 1] += current_max_id
173
+ inst_map[remapped_ids > 1] = remapped_ids[remapped_ids > 1]
174
+ current_max_id = np.amax(inst_map)
175
+
176
+ return inst_map
177
+
178
+
179
+ def polygons_to_label_coord(
180
+ coord: np.ndarray, shape: Tuple[int, int], labels: np.ndarray = None
181
+ ) -> np.ndarray:
182
+ """Render polygons to image given a shape.
183
+
184
+ Parameters
185
+ ----------
186
+ coord.shape : np.ndarray
187
+ Shape: (n_polys, n_rays)
188
+ shape : Tuple[int, int]
189
+ Shape of the output mask.
190
+ labels : np.ndarray, optional
191
+ Sorted indices of the centroids.
192
+
193
+ Returns
194
+ -------
195
+ np.ndarray:
196
+ Instance labelled mask. Shape: (H, W).
197
+ """
198
+ coord = np.asarray(coord)
199
+ if labels is None:
200
+ labels = np.arange(len(coord))
201
+
202
+ assert coord.ndim == 3 and coord.shape[1] == 2 and len(coord) == len(labels)
203
+
204
+ lbl = np.zeros(shape, np.int32)
205
+
206
+ for i, c in zip(labels, coord):
207
+ rr, cc = polygon(*c, shape)
208
+ lbl[rr, cc] = i + 1
209
+
210
+ return lbl
211
+
212
+
213
+ def ray_angles(n_rays: int = 32):
214
+ """Get linearly spaced angles for rays."""
215
+ return np.linspace(0, 2 * np.pi, n_rays, endpoint=False)
216
+
217
+
218
+ def dist_to_coord(
219
+ dist: np.ndarray, points: np.ndarray, scale_dist: Tuple[int, int] = (1, 1)
220
+ ) -> np.ndarray:
221
+ """Convert list of distances and centroids from polar to cartesian coordinates.
222
+
223
+ Parameters
224
+ ----------
225
+ dist : np.ndarray
226
+ The centerpoint pixels of the radial distance map. Shape (n_polys, n_rays).
227
+ points : np.ndarray
228
+ The centroids of the instances. Shape: (n_polys, 2).
229
+ scale_dist : Tuple[int, int], default=(1, 1)
230
+ Scaling factor.
231
+
232
+ Returns
233
+ -------
234
+ np.ndarray:
235
+ Cartesian cooridnates of the polygons. Shape (n_polys, 2, n_rays).
236
+ """
237
+ dist = np.asarray(dist)
238
+ points = np.asarray(points)
239
+ assert (
240
+ dist.ndim == 2
241
+ and points.ndim == 2
242
+ and len(dist) == len(points)
243
+ and points.shape[1] == 2
244
+ and len(scale_dist) == 2
245
+ )
246
+ n_rays = dist.shape[1]
247
+ phis = ray_angles(n_rays)
248
+ coord = (dist[:, np.newaxis] * np.array([np.sin(phis), np.cos(phis)])).astype(
249
+ np.float32
250
+ )
251
+ coord *= np.asarray(scale_dist).reshape(1, 2, 1)
252
+ coord += points[..., np.newaxis]
253
+ return coord
254
+
255
+
256
+ def polygons_to_label(
257
+ dist: np.ndarray,
258
+ points: np.ndarray,
259
+ shape: Tuple[int, int],
260
+ prob: np.ndarray = None,
261
+ thresh: float = -np.inf,
262
+ scale_dist: Tuple[int, int] = (1, 1),
263
+ ) -> np.ndarray:
264
+ """Convert distances and center points to instance labelled mask.
265
+
266
+ Parameters
267
+ ----------
268
+ dist : np.ndarray
269
+ The centerpoint pixels of the radial distance map. Shape (n_polys, n_rays).
270
+ points : np.ndarray
271
+ The centroids of the instances. Shape: (n_polys, 2).
272
+ shape : Tuple[int, int]:
273
+ Shape of the output mask.
274
+ prob : np.ndarray, optional
275
+ The centerpoint pixels of the regressed distance transform.
276
+ Shape: (n_polys, n_rays).
277
+ thresh : float, default=-np.inf
278
+ Threshold for the regressed distance transform.
279
+ scale_dist : Tuple[int, int], default=(1, 1)
280
+ Scaling factor.
281
+
282
+ Returns
283
+ -------
284
+ np.ndarray:
285
+ Instance labelled mask. Shape (H, W).
286
+ """
287
+ dist = np.asarray(dist)
288
+ points = np.asarray(points)
289
+ prob = np.inf * np.ones(len(points)) if prob is None else np.asarray(prob)
290
+
291
+ assert dist.ndim == 2 and points.ndim == 2 and len(dist) == len(points)
292
+ assert len(points) == len(prob) and points.shape[1] == 2 and prob.ndim == 1
293
+
294
+ ind = prob > thresh
295
+ points = points[ind]
296
+ dist = dist[ind]
297
+ prob = prob[ind]
298
+
299
+ ind = np.argsort(prob, kind="stable")
300
+ points = points[ind]
301
+ dist = dist[ind]
302
+
303
+ coord = dist_to_coord(dist, points, scale_dist=scale_dist)
304
+
305
+ return polygons_to_label_coord(coord, shape=shape, labels=ind)
306
+
307
+
308
+ @njit(cache=True, fastmath=True)
309
+ def intersection(boxA: np.ndarray, boxB: np.ndarray):
310
+ """Compute area of intersection of two boxes.
311
+
312
+ Parameters
313
+ ----------
314
+ boxA : np.ndarray
315
+ First boxes
316
+ boxB : np.ndarray
317
+ Second box
318
+
319
+ Returns
320
+ -------
321
+ float64:
322
+ Area of intersection
323
+ """
324
+ xA = max(boxA[..., 0], boxB[..., 0])
325
+ xB = min(boxA[..., 2], boxB[..., 2])
326
+ dx = xB - xA
327
+ if dx <= 0:
328
+ return 0.0
329
+
330
+ yA = max(boxA[..., 1], boxB[..., 1])
331
+ yB = min(boxA[..., 3], boxB[..., 3])
332
+ dy = yB - yA
333
+ if dy <= 0.0:
334
+ return 0.0
335
+
336
+ return dx * dy
337
+
338
+
339
+ @njit(parallel=True)
340
+ def get_bboxes(
341
+ dist: np.ndarray, points: np.ndarray
342
+ ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, int]:
343
+ """Get bounding boxes from the non-zero pixels of the radial distance maps.
344
+
345
+ This is basically a translation from the stardist repo cpp code to python
346
+
347
+ NOTE: jit compiled and parallelized with numba.
348
+
349
+ Parameters
350
+ ----------
351
+ dist : np.ndarray
352
+ The non-zero values of the radial distance maps. Shape: (n_nonzero, n_rays).
353
+ points : np.ndarray
354
+ The yx-coordinates of the non-zero points. Shape (n_nonzero, 2).
355
+
356
+ Returns
357
+ -------
358
+ Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, int]:
359
+ Returns the x0, y0, x1, y1 bbox coordinates, bbox areas and the maximum
360
+ radial distance in the image.
361
+ """
362
+ n_polys = dist.shape[0]
363
+ n_rays = dist.shape[1]
364
+
365
+ bbox_x1 = np.zeros(n_polys)
366
+ bbox_x2 = np.zeros(n_polys)
367
+ bbox_y1 = np.zeros(n_polys)
368
+ bbox_y2 = np.zeros(n_polys)
369
+
370
+ areas = np.zeros(n_polys)
371
+ angle_pi = 2 * math.pi / n_rays
372
+ max_dist = 0
373
+
374
+ for i in prange(n_polys):
375
+ max_radius_outer = 0
376
+ py = points[i, 0]
377
+ px = points[i, 1]
378
+
379
+ for k in range(n_rays):
380
+ d = dist[i, k]
381
+ y = py + d * np.sin(angle_pi * k)
382
+ x = px + d * np.cos(angle_pi * k)
383
+
384
+ if k == 0:
385
+ bbox_x1[i] = x
386
+ bbox_x2[i] = x
387
+ bbox_y1[i] = y
388
+ bbox_y2[i] = y
389
+ else:
390
+ bbox_x1[i] = min(x, bbox_x1[i])
391
+ bbox_x2[i] = max(x, bbox_x2[i])
392
+ bbox_y1[i] = min(y, bbox_y1[i])
393
+ bbox_y2[i] = max(y, bbox_y2[i])
394
+
395
+ max_radius_outer = max(d, max_radius_outer)
396
+
397
+ areas[i] = (bbox_x2[i] - bbox_x1[i]) * (bbox_y2[i] - bbox_y1[i])
398
+ max_dist = max(max_dist, max_radius_outer)
399
+
400
+ return bbox_x1, bbox_y1, bbox_x2, bbox_y2, areas, max_dist
config.yaml ADDED
@@ -0,0 +1,158 @@
1
+ CUDA_VISIBLE_DEVICES: 3
2
+ logging:
3
+ log_dir: /data5/ziweicui/cellvit256-unireplknet-n
4
+ mode: online
5
+ project: Cell-Segmentation
6
+ notes: CellViT-256
7
+ log_comment: CellViT-256-resnet50-tiny
8
+ tags:
9
+ - Fold-1
10
+ - ViT256
11
+ wandb_dir: /data5/ziweicui/UniRepLKNet-optimizerconfig-unetdecoder-inputconv/results
12
+ level: Debug
13
+ group: CellViT256
14
+ run_id: anifw9ux
15
+ wandb_file: anifw9ux
16
+ random_seed: 19
17
+ gpu: 0
18
+ data:
19
+ dataset: PanNuke
20
+ dataset_path: /data5/ziweicui/cellvit-png
21
+ train_folds:
22
+ - 0
23
+ val_folds:
24
+ - 1
25
+ test_folds:
26
+ - 2
27
+ num_nuclei_classes: 6
28
+ num_tissue_classes: 19
29
+ model:
30
+ backbone: default
31
+ pretrained_encoder: /data5/ziweicui/semi_supervised_resnet50-08389792.pth
32
+ shared_skip_connections: true
33
+ loss:
34
+ nuclei_binary_map:
35
+ focaltverskyloss:
36
+ loss_fn: FocalTverskyLoss
37
+ weight: 1
38
+ dice:
39
+ loss_fn: dice_loss
40
+ weight: 1
41
+ hv_map:
42
+ mse:
43
+ loss_fn: mse_loss_maps
44
+ weight: 2.5
45
+ msge:
46
+ loss_fn: msge_loss_maps
47
+ weight: 8
48
+ nuclei_type_map:
49
+ bce:
50
+ loss_fn: xentropy_loss
51
+ weight: 0.5
52
+ dice:
53
+ loss_fn: dice_loss
54
+ weight: 0.2
55
+ mcfocaltverskyloss:
56
+ loss_fn: MCFocalTverskyLoss
57
+ weight: 0.5
58
+ args:
59
+ num_classes: 6
60
+ tissue_types:
61
+ ce:
62
+ loss_fn: CrossEntropyLoss
63
+ weight: 0.1
64
+ training:
65
+ drop_rate: 0
66
+ attn_drop_rate: 0.1
67
+ drop_path_rate: 0.1
68
+ batch_size: 32
69
+ epochs: 130
70
+ optimizer: AdamW
71
+ early_stopping_patience: 130
72
+ scheduler:
73
+ scheduler_type: cosine
74
+ hyperparameters:
75
+ #gamma: 0.85
76
+ eta_min: 1e-5
77
+ optimizer_hyperparameter:
78
+ # betas:
79
+ # - 0.85
80
+ # - 0.95
81
+ #lr: 0.004
82
+ opt_lower: 'AdamW'
83
+ lr: 0.0008
84
+ opt_betas: [0.85,0.95]
85
+ weight_decay: 0.05
86
+ opt_eps: 0.00000008
87
+ unfreeze_epoch: 25
88
+ sampling_gamma: 0.85
89
+ sampling_strategy: cell+tissue
90
+ mixed_precision: true
91
+ transformations:
92
+ randomrotate90:
93
+ p: 0.5
94
+ horizontalflip:
95
+ p: 0.5
96
+ verticalflip:
97
+ p: 0.5
98
+ downscale:
99
+ p: 0.15
100
+ scale: 0.5
101
+ blur:
102
+ p: 0.2
103
+ blur_limit: 10
104
+ gaussnoise:
105
+ p: 0.25
106
+ var_limit: 50
107
+ colorjitter:
108
+ p: 0.2
109
+ scale_setting: 0.25
110
+ scale_color: 0.1
111
+ superpixels:
112
+ p: 0.1
113
+ zoomblur:
114
+ p: 0.1
115
+ randomsizedcrop:
116
+ p: 0.1
117
+ elastictransform:
118
+ p: 0.2
119
+ normalize:
120
+ mean:
121
+ - 0.5
122
+ - 0.5
123
+ - 0.5
124
+ std:
125
+ - 0.5
126
+ - 0.5
127
+ - 0.5
128
+ eval_checkpoint: latest_checkpoint.pth
129
+ dataset_config:
130
+ tissue_types:
131
+ Adrenal_gland: 0
132
+ Bile-duct: 1
133
+ Bladder: 2
134
+ Breast: 3
135
+ Cervix: 4
136
+ Colon: 5
137
+ Esophagus: 6
138
+ HeadNeck: 7
139
+ Kidney: 8
140
+ Liver: 9
141
+ Lung: 10
142
+ Ovarian: 11
143
+ Pancreatic: 12
144
+ Prostate: 13
145
+ Skin: 14
146
+ Stomach: 15
147
+ Testis: 16
148
+ Thyroid: 17
149
+ Uterus: 18
150
+ nuclei_types:
151
+ Background: 0
152
+ Neoplastic: 1
153
+ Inflammatory: 2
154
+ Connective: 3
155
+ Dead: 4
156
+ Epithelial: 5
157
+ run_sweep: false
158
+ agent: null
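
A minimal sketch of how such a config can be read back (assumed usage; the keys match the file above):

    import yaml

    with open("config.yaml", "r") as f:
        config = yaml.safe_load(f)

    print(config["loss"]["hv_map"]["msge"]["weight"])  # -> 8
    print(config["data"]["num_nuclei_classes"])        # -> 6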
datamodel/__init__.py ADDED
@@ -0,0 +1,6 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Data models
3
+ #
4
+ # @ Fabian Hörst, [email protected]
5
+ # Institute for Artificial Intelligence in Medicine,
6
+ # University Medicine Essen
datamodel/graph_datamodel.py ADDED
@@ -0,0 +1,29 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Graph Data model
3
+ #
4
+ # For more information, please check out docs/readmes/graphs.md
5
+ #
6
+ # @ Fabian Hörst, [email protected]
7
+ # Institute for Artificial Intelligence in Medicine,
8
+ # University Medicine Essen
9
+
10
+ from dataclasses import dataclass
11
+
12
+ import torch
13
+
14
+
15
+ @dataclass
16
+ class GraphDataWSI:
17
+ """Dataclass for Graph Data
18
+
19
+ Args:
20
+ x (torch.Tensor): Node feature matrix with shape (num_nodes, num_nodes_features)
21
+ positions(torch.Tensor): Each of the objects defined in x has a physical position in a Cartesian coordinate system,
22
+ be it detected cells or extracted patches. That's why we store the 2D position here, globally for the WSI.
23
+ Shape (num_nodes, 2)
24
+ metadata (dict, optional): Metadata about the object is stored here. Defaults to None
25
+ """
26
+
27
+ x: torch.Tensor
28
+ positions: torch.Tensor
29
+ metadata: dict
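
As a quick usage sketch of this dataclass (shapes and metadata below are made up for illustration):

```python
import torch

from datamodel.graph_datamodel import GraphDataWSI

# Hypothetical example: 100 detected cells with 64-dimensional features.
graph = GraphDataWSI(
    x=torch.randn(100, 64),               # node features (num_nodes, num_node_features)
    positions=torch.rand(100, 2) * 1024,  # global 2D positions on the WSI
    metadata={"wsi_name": "slide1"},
)
print(graph.x.shape, graph.positions.shape)  # torch.Size([100, 64]) torch.Size([100, 2])
```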
datamodel/wsi_datamodel.py ADDED
@@ -0,0 +1,193 @@
+ # -*- coding: utf-8 -*-
+ # WSI Model
+ #
+ # @ Fabian Hörst, [email protected]
+ # Institute for Artificial Intelligence in Medicine,
+ # University Medicine Essen
+
+
+ import json
+ from pathlib import Path
+ from typing import Union, List, Callable, Tuple
+
+ from dataclasses import dataclass, field
+ import numpy as np
+ import yaml
+ import logging
+ import torch
+ from PIL import Image
+
+
+ @dataclass
+ class WSI:
+     """WSI object
+
+     Args:
+         name (str): WSI name
+         patient (str): Patient name
+         slide_path (Union[str, Path]): Full path to the WSI file.
+         patched_slide_path (Union[str, Path], optional): Full path to preprocessed WSI files (patches). Defaults to None.
+         embedding_name (Union[str, Path], optional): Defaults to None.
+         label (Union[str, int, float, np.ndarray], optional): Label of the WSI. Defaults to None.
+         logger (logging.Logger, optional): Logger module for logging information. Defaults to None.
+     """
+
+     name: str
+     patient: str
+     slide_path: Union[str, Path]
+     patched_slide_path: Union[str, Path] = None
+     embedding_name: Union[str, Path] = None
+     label: Union[str, int, float, np.ndarray] = None
+     logger: logging.Logger = None
+
+     # unset attributes used in this class
+     metadata: dict = field(init=False, repr=False)
+     all_patch_metadata: List[dict] = field(init=False, repr=False)
+     patches_list: List = field(init=False, repr=False)
+     patch_transform: Callable = field(init=False, repr=False)
+
+     # name without ending (e.g. slide1 instead of slide1.svs)
+     def __post_init__(self):
+         """Post-processing of the object"""
+         super().__init__()
+         # define parameters that are used, but not defined at startup
+
+         # convert string to path
+         self.slide_path = Path(self.slide_path).resolve()
+         if self.patched_slide_path is not None:
+             self.patched_slide_path = Path(self.patched_slide_path).resolve()
+         # load metadata
+         self._get_metadata()
+         self._get_wsi_patch_metadata()
+         self.patch_transform = None  # hardcode to None (should not be a parameter, but should be defined)
+
+         if self.logger is not None:
+             self.logger.debug(self.__repr__())
+
+     def _get_metadata(self) -> None:
+         """Load metadata yaml file"""
+         self.metadata_path = self.patched_slide_path / "metadata.yaml"
+         with open(self.metadata_path.resolve(), "r") as metadata_yaml:
+             try:
+                 self.metadata = yaml.safe_load(metadata_yaml)
+             except yaml.YAMLError as exc:
+                 print(exc)
+         self.metadata["label_map_inverse"] = {
+             v: k for k, v in self.metadata["label_map"].items()
+         }
+
+     def _get_wsi_patch_metadata(self) -> None:
+         """Load patch_metadata json file and convert to dict and lists"""
+         with open(self.patched_slide_path / "patch_metadata.json", "r") as json_file:
+             metadata = json.load(json_file)
+         self.patches_list = [str(list(elem.keys())[0]) for elem in metadata]
+         self.all_patch_metadata = {
+             str(list(elem.keys())[0]): elem[str(list(elem.keys())[0])]
+             for elem in metadata
+         }
+
+     def load_patch_metadata(self, patch_name: str) -> dict:
+         """Return the metadata of a patch with given name (including patch suffix, e.g., wsi_1_1.png)
+
+         This function assumes that the metadata path is a subpath of the patched dataset path
+
+         Args:
+             patch_name (str): Name of patch
+
+         Returns:
+             dict: metadata
+         """
+         patch_metadata_path = self.all_patch_metadata[patch_name]["metadata_path"]
+         patch_metadata_path = self.patched_slide_path / patch_metadata_path
+
+         # open
+         with open(patch_metadata_path, "r") as metadata_yaml:
+             patch_metadata = yaml.safe_load(metadata_yaml)
+         patch_metadata["name"] = patch_name
+
+         return patch_metadata
+
+     def set_patch_transform(self, transform: Callable) -> None:
+         """Set the transformation function to process a patch
+
+         Args:
+             transform (Callable): Transformation function
+         """
+         self.patch_transform = transform
+
+     # patch processing
+     def process_patch_image(
+         self, patch_name: str, transform: Callable = None
+     ) -> Tuple[torch.Tensor, dict]:
+         """Process one patch: Load from disk, apply transformation if needed. ToTensor is applied automatically
+
+         Args:
+             patch_name (Path): Name of patch to load, including patch suffix, e.g., wsi_1_1.png
+             transform (Callable, optional): Optional Patch-Transformation
+         Returns:
+             Tuple[torch.Tensor, dict]:
+
+             * torch.Tensor: patch as torch.tensor (:,:,3)
+             * dict: patch metadata as dictionary
+         """
+         patch = Image.open(self.patched_slide_path / "patches" / patch_name)
+         if transform:
+             patch = transform(patch)
+
+         metadata = self.load_patch_metadata(patch_name)
+         return patch, metadata
+
+     def get_number_patches(self) -> int:
+         """Return the number of patches for this WSI
+
+         Returns:
+             int: number of patches
+         """
+         return int(len(self.patches_list))
+
+     def get_patches(
+         self, transform: Callable = None
+     ) -> Tuple[torch.Tensor, list, list]:
+         """Get all patches for one image
+
+         Args:
+             transform (Callable, optional): Optional Patch-Transformation
+
+         Returns:
+             Tuple[torch.Tensor, list]:
+
+             * patched image: Shape of torch.Tensor(num_patches, 3, :, :)
+             * coordinates as list metadata_dictionary
+
+         """
+         if self.logger is not None:
+             self.logger.warning(f"Loading {self.get_number_patches()} patches!")
+         patches = []
+         metadata = []
+         for patch in self.patches_list:
+             transformed_patch, meta = self.process_patch_image(patch, transform)
+             patches.append(transformed_patch)
+             metadata.append(meta)
+         patches = torch.stack(patches)
+
+         return patches, metadata
+
+     def load_embedding(self) -> torch.Tensor:
+         """Load embedding from subfolder patched_slide_path/embedding/
+
+         Raises:
+             FileNotFoundError: If embedding is not given
+
+         Returns:
+             torch.Tensor: WSI embedding
+         """
+         embedding_path = (
+             self.patched_slide_path / "embeddings" / f"{self.embedding_name}.pt"
+         )
+         if embedding_path.is_file():
+             embedding = torch.load(embedding_path)
+             return embedding
+         else:
+             raise FileNotFoundError(
+                 f"Embeddings for WSI {self.slide_path} cannot be found in path {embedding_path}"
+             )
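
A minimal usage sketch for the WSI class, assuming a slide that has already been preprocessed so that `patched_slide_path` contains `metadata.yaml`, `patch_metadata.json` and a `patches/` subfolder (all paths below are hypothetical):

```python
from datamodel.wsi_datamodel import WSI

wsi = WSI(
    name="slide1",
    patient="patient_01",
    slide_path="/data/wsi/slide1.svs",              # hypothetical path
    patched_slide_path="/data/wsi_patched/slide1",  # must hold the metadata files
)
print(wsi.get_number_patches())

# Load one patch together with its metadata; note that without a transform,
# process_patch_image returns the raw PIL image rather than a tensor.
patch, meta = wsi.process_patch_image(wsi.patches_list[0])
```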
docs/datasets/PanNuke/dataset_config.yaml ADDED
@@ -0,0 +1,28 @@
+ tissue_types:
+   "Adrenal_gland": 0
+   "Bile-duct": 1
+   "Bladder": 2
+   "Breast": 3
+   "Cervix": 4
+   "Colon": 5
+   "Esophagus": 6
+   "HeadNeck": 7
+   "Kidney": 8
+   "Liver": 9
+   "Lung": 10
+   "Ovarian": 11
+   "Pancreatic": 12
+   "Prostate": 13
+   "Skin": 14
+   "Stomach": 15
+   "Testis": 16
+   "Thyroid": 17
+   "Uterus": 18
+
+ nuclei_types:
+   "Background": 0
+   "Neoplastic": 1
+   "Inflammatory": 2
+   "Connective": 3
+   "Dead": 4
+   "Epithelial": 5
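
This mapping mirrors the `dataset_config` block embedded in `config.yaml` above. A short sketch of how it can be loaded and inverted for decoding predictions:

```python
import yaml

# Load the PanNuke class maps and build the inverse lookup
# (integer id -> class name) used when decoding model outputs.
with open("docs/datasets/PanNuke/dataset_config.yaml", "r") as f:
    dataset_config = yaml.safe_load(f)

nuclei_types = dataset_config["nuclei_types"]          # {"Background": 0, ...}
id_to_nucleus = {v: k for k, v in nuclei_types.items()}
print(id_to_nucleus[1])  # -> "Neoplastic"
```

The per-image counts in `fold0/cell_count.csv` below use the same five foreground classes as its columns.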
docs/datasets/PanNuke/fold0/cell_count.csv ADDED
@@ -0,0 +1,2657 @@
1
+ Image,Neoplastic,Inflammatory,Connective,Dead,Epithelial
2
+ 0_0.png,4,2,2,0,0
3
+ 0_1.png,8,1,1,0,0
4
+ 0_10.png,17,0,1,0,0
5
+ 0_100.png,10,0,11,0,0
6
+ 0_1000.png,0,0,2,0,0
7
+ 0_1001.png,0,0,7,0,0
8
+ 0_1002.png,0,0,5,0,0
9
+ 0_1003.png,0,0,6,0,0
10
+ 0_1004.png,0,0,5,0,0
11
+ 0_1005.png,0,0,5,0,0
12
+ 0_1006.png,0,0,7,0,0
13
+ 0_1007.png,0,0,5,0,0
14
+ 0_1008.png,0,0,17,0,0
15
+ 0_1009.png,0,0,12,0,0
16
+ 0_101.png,10,5,10,0,0
17
+ 0_1010.png,0,0,15,0,0
18
+ 0_1011.png,0,0,6,0,0
19
+ 0_1012.png,0,0,10,0,0
20
+ 0_1013.png,0,0,1,0,0
21
+ 0_1014.png,0,0,12,0,0
22
+ 0_1015.png,0,0,2,0,0
23
+ 0_1016.png,0,0,8,0,0
24
+ 0_1017.png,0,1,9,0,0
25
+ 0_1018.png,17,27,3,0,0
26
+ 0_1019.png,20,35,0,0,0
27
+ 0_102.png,11,0,13,0,0
28
+ 0_1020.png,17,42,1,0,0
29
+ 0_1021.png,26,32,7,0,0
30
+ 0_1022.png,22,18,0,0,0
31
+ 0_1023.png,20,33,4,0,0
32
+ 0_1024.png,29,27,4,0,0
33
+ 0_1025.png,10,0,0,28,0
34
+ 0_1026.png,35,0,6,34,0
35
+ 0_1027.png,28,0,6,34,0
36
+ 0_1028.png,15,0,9,25,0
37
+ 0_1029.png,35,0,6,50,0
38
+ 0_103.png,0,0,8,0,15
39
+ 0_1030.png,18,0,17,47,0
40
+ 0_1031.png,39,0,5,25,0
41
+ 0_1032.png,15,0,11,15,0
42
+ 0_1033.png,0,0,0,55,0
43
+ 0_1034.png,3,0,4,58,0
44
+ 0_1035.png,21,5,11,46,0
45
+ 0_1036.png,15,0,19,35,0
46
+ 0_1037.png,33,0,23,28,0
47
+ 0_1038.png,57,0,3,21,0
48
+ 0_1039.png,0,0,1,48,0
49
+ 0_104.png,0,0,8,0,17
50
+ 0_1040.png,33,0,1,34,0
51
+ 0_1041.png,0,0,0,53,0
52
+ 0_1042.png,9,0,12,0,0
53
+ 0_1043.png,5,0,17,0,0
54
+ 0_1044.png,3,0,15,0,0
55
+ 0_1045.png,22,0,5,0,0
56
+ 0_1046.png,24,0,0,0,0
57
+ 0_1047.png,1,1,17,0,0
58
+ 0_1048.png,12,0,9,0,0
59
+ 0_1049.png,24,0,1,0,0
60
+ 0_105.png,0,0,7,0,24
61
+ 0_1050.png,28,0,0,0,0
62
+ 0_1051.png,5,0,16,0,0
63
+ 0_1052.png,7,0,17,0,0
64
+ 0_1053.png,0,0,8,0,0
65
+ 0_1054.png,21,0,4,0,0
66
+ 0_1055.png,2,0,25,0,0
67
+ 0_1056.png,18,0,12,0,0
68
+ 0_1057.png,30,0,1,0,0
69
+ 0_1058.png,9,0,19,0,0
70
+ 0_1059.png,13,0,19,0,0
71
+ 0_106.png,0,0,4,0,30
72
+ 0_1060.png,34,0,0,0,0
73
+ 0_1061.png,0,74,104,0,0
74
+ 0_1062.png,0,113,60,0,23
75
+ 0_1063.png,0,193,75,0,8
76
+ 0_1064.png,18,0,0,0,0
77
+ 0_1065.png,11,1,4,0,0
78
+ 0_1066.png,4,3,4,0,0
79
+ 0_1067.png,0,0,6,0,0
80
+ 0_1068.png,0,3,9,0,0
81
+ 0_1069.png,0,7,16,0,0
82
+ 0_107.png,0,2,9,0,19
83
+ 0_1070.png,3,9,16,0,0
84
+ 0_1071.png,9,2,7,0,0
85
+ 0_1072.png,0,12,9,0,0
86
+ 0_1073.png,0,15,12,0,0
87
+ 0_1074.png,13,0,6,0,0
88
+ 0_1075.png,1,5,20,0,0
89
+ 0_1076.png,1,7,16,0,0
90
+ 0_1077.png,0,6,17,0,0
91
+ 0_1078.png,16,1,6,0,0
92
+ 0_1079.png,0,6,12,0,0
93
+ 0_108.png,0,0,11,0,9
94
+ 0_1080.png,7,3,6,0,0
95
+ 0_1081.png,17,0,4,0,0
96
+ 0_1082.png,11,1,1,0,0
97
+ 0_1083.png,0,4,12,0,0
98
+ 0_1084.png,0,0,11,0,0
99
+ 0_1085.png,14,0,2,0,0
100
+ 0_1086.png,0,93,2,0,0
101
+ 0_1087.png,0,63,3,0,0
102
+ 0_1088.png,0,0,5,0,27
103
+ 0_1089.png,0,0,2,0,34
104
+ 0_109.png,0,0,13,0,13
105
+ 0_1090.png,0,0,3,0,37
106
+ 0_1091.png,0,0,0,0,4
107
+ 0_1092.png,0,0,0,0,8
108
+ 0_1093.png,0,0,0,0,14
109
+ 0_1094.png,0,0,0,0,2
110
+ 0_1095.png,0,2,13,0,24
111
+ 0_1096.png,0,4,36,0,10
112
+ 0_1097.png,0,0,5,0,0
113
+ 0_1098.png,0,0,5,0,0
114
+ 0_1099.png,1,0,25,0,0
115
+ 0_11.png,9,0,5,0,0
116
+ 0_110.png,0,0,7,0,32
117
+ 0_1100.png,0,0,3,0,0
118
+ 0_1101.png,0,3,5,0,0
119
+ 0_1102.png,3,4,5,0,0
120
+ 0_1103.png,14,2,23,0,0
121
+ 0_1104.png,0,0,4,0,0
122
+ 0_1105.png,0,0,9,0,0
123
+ 0_1106.png,0,1,4,0,0
124
+ 0_1107.png,7,0,0,0,0
125
+ 0_1108.png,20,1,1,0,0
126
+ 0_1109.png,0,1,4,0,0
127
+ 0_111.png,0,0,5,0,29
128
+ 0_1110.png,0,1,2,0,0
129
+ 0_1111.png,18,3,9,0,0
130
+ 0_1112.png,25,0,7,0,0
131
+ 0_1113.png,19,2,2,0,0
132
+ 0_1114.png,14,0,9,0,0
133
+ 0_1115.png,0,0,9,0,0
134
+ 0_1116.png,0,0,9,0,0
135
+ 0_1117.png,0,1,7,0,0
136
+ 0_1118.png,0,0,12,0,0
137
+ 0_1119.png,0,0,13,0,0
138
+ 0_112.png,38,0,2,0,0
139
+ 0_1120.png,0,0,4,0,0
140
+ 0_1121.png,0,0,8,0,0
141
+ 0_1122.png,0,0,4,0,0
142
+ 0_1123.png,0,0,5,0,0
143
+ 0_1124.png,0,0,3,0,0
144
+ 0_1125.png,0,0,12,0,0
145
+ 0_1126.png,0,0,11,0,0
146
+ 0_1127.png,0,0,18,0,0
147
+ 0_1128.png,0,0,14,0,0
148
+ 0_1129.png,0,0,16,0,0
149
+ 0_113.png,28,0,2,0,0
150
+ 0_1130.png,0,0,4,0,0
151
+ 0_1131.png,0,0,9,0,0
152
+ 0_1132.png,0,0,9,0,0
153
+ 0_1133.png,21,0,9,0,0
154
+ 0_1134.png,37,0,0,0,0
155
+ 0_1135.png,26,0,6,0,0
156
+ 0_1136.png,34,0,0,0,0
157
+ 0_1137.png,32,0,2,0,0
158
+ 0_1138.png,35,0,2,0,0
159
+ 0_1139.png,33,0,2,0,0
160
+ 0_114.png,28,0,3,0,0
161
+ 0_1140.png,17,0,15,0,0
162
+ 0_1141.png,28,0,2,0,0
163
+ 0_1142.png,38,0,0,0,0
164
+ 0_1143.png,9,0,32,0,0
165
+ 0_1144.png,19,0,25,0,0
166
+ 0_1145.png,29,0,10,0,0
167
+ 0_1146.png,21,0,13,0,0
168
+ 0_1147.png,0,0,0,0,0
169
+ 0_1148.png,0,0,0,0,0
170
+ 0_1149.png,0,0,0,0,0
171
+ 0_115.png,26,0,7,0,0
172
+ 0_1150.png,0,3,0,0,0
173
+ 0_1151.png,0,5,0,0,0
174
+ 0_1152.png,0,0,0,0,0
175
+ 0_1153.png,0,4,0,0,0
176
+ 0_1154.png,0,1,0,0,0
177
+ 0_1155.png,0,0,0,0,0
178
+ 0_1156.png,0,5,0,0,0
179
+ 0_1157.png,0,1,1,0,0
180
+ 0_1158.png,0,0,0,0,0
181
+ 0_1159.png,0,2,0,0,0
182
+ 0_116.png,49,1,0,0,0
183
+ 0_1160.png,0,0,0,0,0
184
+ 0_1161.png,0,0,0,0,0
185
+ 0_1162.png,0,3,0,0,0
186
+ 0_1163.png,0,7,1,0,0
187
+ 0_1164.png,0,0,0,0,0
188
+ 0_1165.png,0,1,0,0,0
189
+ 0_1166.png,0,0,0,0,0
190
+ 0_1167.png,0,1,0,0,0
191
+ 0_1168.png,0,2,0,0,0
192
+ 0_1169.png,0,5,0,0,0
193
+ 0_117.png,0,69,3,0,0
194
+ 0_1170.png,0,2,0,0,0
195
+ 0_1171.png,26,1,6,0,0
196
+ 0_1172.png,19,0,3,0,0
197
+ 0_1173.png,19,0,4,0,0
198
+ 0_1174.png,22,2,2,0,0
199
+ 0_1175.png,26,1,4,0,0
200
+ 0_1176.png,24,1,5,0,0
201
+ 0_1177.png,19,0,3,0,0
202
+ 0_1178.png,15,0,2,0,0
203
+ 0_1179.png,26,1,3,0,0
204
+ 0_118.png,0,36,14,0,0
205
+ 0_1180.png,20,2,4,0,0
206
+ 0_1181.png,21,0,7,0,0
207
+ 0_1182.png,20,1,4,0,0
208
+ 0_1183.png,17,1,5,0,0
209
+ 0_1184.png,21,1,3,0,0
210
+ 0_1185.png,16,0,4,0,0
211
+ 0_1186.png,1,0,8,0,0
212
+ 0_1187.png,16,1,8,0,0
213
+ 0_1188.png,10,0,6,0,0
214
+ 0_1189.png,0,0,1,0,0
215
+ 0_119.png,9,48,3,0,0
216
+ 0_1190.png,0,0,6,0,0
217
+ 0_1191.png,6,0,0,0,0
218
+ 0_1192.png,7,0,0,0,0
219
+ 0_1193.png,11,0,6,0,0
220
+ 0_1194.png,11,0,0,0,0
221
+ 0_1195.png,11,0,1,0,0
222
+ 0_1196.png,12,0,0,0,0
223
+ 0_1197.png,13,0,0,0,0
224
+ 0_1198.png,15,0,0,0,0
225
+ 0_1199.png,15,0,0,0,0
226
+ 0_12.png,7,0,2,0,0
227
+ 0_120.png,2,58,3,0,0
228
+ 0_1200.png,12,0,2,0,0
229
+ 0_1201.png,9,1,3,0,0
230
+ 0_1202.png,16,0,2,0,0
231
+ 0_1203.png,9,0,4,0,0
232
+ 0_1204.png,6,0,2,0,0
233
+ 0_1205.png,6,0,0,0,0
234
+ 0_1206.png,12,0,0,0,0
235
+ 0_1207.png,10,3,0,0,0
236
+ 0_1208.png,12,0,2,0,0
237
+ 0_1209.png,14,0,0,0,0
238
+ 0_121.png,29,2,5,0,0
239
+ 0_1210.png,14,0,0,0,0
240
+ 0_1211.png,7,0,0,0,0
241
+ 0_1212.png,10,0,6,0,0
242
+ 0_1213.png,6,0,4,0,0
243
+ 0_1214.png,6,0,1,0,0
244
+ 0_1215.png,8,0,3,0,0
245
+ 0_1216.png,6,0,3,0,0
246
+ 0_1217.png,10,2,0,0,0
247
+ 0_1218.png,19,0,2,0,0
248
+ 0_1219.png,8,0,0,0,0
249
+ 0_122.png,28,9,2,0,0
250
+ 0_1220.png,10,0,0,0,0
251
+ 0_1221.png,9,0,4,0,0
252
+ 0_1222.png,14,0,0,0,0
253
+ 0_1223.png,15,0,0,0,0
254
+ 0_1224.png,13,0,0,0,0
255
+ 0_1225.png,10,0,1,0,0
256
+ 0_1226.png,11,0,1,0,0
257
+ 0_1227.png,15,0,0,0,0
258
+ 0_1228.png,10,0,0,0,0
259
+ 0_1229.png,11,0,1,0,0
260
+ 0_123.png,0,1,11,0,18
261
+ 0_1230.png,12,0,0,0,0
262
+ 0_1231.png,15,1,1,0,0
263
+ 0_1232.png,20,0,0,0,0
264
+ 0_1233.png,6,2,8,0,0
265
+ 0_1234.png,14,1,0,0,0
266
+ 0_1235.png,9,0,3,0,0
267
+ 0_1236.png,13,0,4,0,0
268
+ 0_1237.png,2,0,11,0,0
269
+ 0_1238.png,0,0,1,0,12
270
+ 0_1239.png,0,0,4,0,4
271
+ 0_124.png,0,0,8,0,29
272
+ 0_1240.png,0,4,3,0,10
273
+ 0_1241.png,0,0,0,0,5
274
+ 0_1242.png,0,0,0,0,8
275
+ 0_1243.png,0,4,6,0,7
276
+ 0_1244.png,0,1,0,0,6
277
+ 0_1245.png,0,6,14,0,12
278
+ 0_1246.png,0,0,11,0,0
279
+ 0_1247.png,0,0,9,0,0
280
+ 0_1248.png,4,0,4,0,0
281
+ 0_1249.png,13,0,2,0,0
282
+ 0_125.png,0,1,6,0,29
283
+ 0_1250.png,15,0,5,0,0
284
+ 0_1251.png,0,0,11,0,0
285
+ 0_1252.png,21,0,0,0,0
286
+ 0_1253.png,11,0,9,0,0
287
+ 0_1254.png,2,0,7,0,0
288
+ 0_1255.png,25,1,1,0,0
289
+ 0_1256.png,0,0,12,0,0
290
+ 0_1257.png,2,0,11,0,0
291
+ 0_1258.png,21,1,1,0,0
292
+ 0_1259.png,18,0,3,0,0
293
+ 0_126.png,0,2,12,0,11
294
+ 0_1260.png,3,0,5,0,0
295
+ 0_1261.png,14,0,3,0,0
296
+ 0_1262.png,11,0,7,0,0
297
+ 0_1263.png,0,0,6,0,0
298
+ 0_1264.png,0,0,0,0,25
299
+ 0_1265.png,0,1,2,0,14
300
+ 0_1266.png,0,0,1,0,14
301
+ 0_1267.png,0,1,3,0,21
302
+ 0_1268.png,1,0,2,0,23
303
+ 0_1269.png,0,0,3,0,26
304
+ 0_127.png,0,0,10,0,22
305
+ 0_1270.png,0,0,2,0,19
306
+ 0_1271.png,0,0,3,0,21
307
+ 0_1272.png,0,0,5,0,34
308
+ 0_1273.png,0,4,10,0,17
309
+ 0_1274.png,0,2,6,0,23
310
+ 0_1275.png,0,2,6,0,23
311
+ 0_1276.png,0,1,3,0,37
312
+ 0_1277.png,0,1,5,0,27
313
+ 0_1278.png,0,4,10,0,5
314
+ 0_1279.png,0,10,9,0,33
315
+ 0_128.png,0,2,10,0,27
316
+ 0_1280.png,0,21,24,0,14
317
+ 0_1281.png,0,0,29,0,0
318
+ 0_1282.png,0,0,12,0,0
319
+ 0_1283.png,0,0,11,0,0
320
+ 0_1284.png,0,7,12,0,0
321
+ 0_1285.png,0,1,13,0,0
322
+ 0_1286.png,0,6,14,0,0
323
+ 0_1287.png,0,4,16,0,0
324
+ 0_1288.png,0,0,10,0,0
325
+ 0_1289.png,0,0,17,0,0
326
+ 0_129.png,0,2,12,0,35
327
+ 0_1290.png,1,1,7,0,0
328
+ 0_1291.png,29,0,5,0,0
329
+ 0_1292.png,6,0,4,0,0
330
+ 0_1293.png,0,1,8,0,0
331
+ 0_1294.png,16,1,3,0,0
332
+ 0_1295.png,0,1,18,0,0
333
+ 0_1296.png,0,1,26,0,0
334
+ 0_1297.png,17,0,1,0,0
335
+ 0_1298.png,0,1,24,0,0
336
+ 0_1299.png,0,1,14,0,0
337
+ 0_13.png,3,0,6,0,0
338
+ 0_130.png,0,1,12,0,6
339
+ 0_1300.png,32,1,3,0,0
340
+ 0_1301.png,21,1,3,0,0
341
+ 0_1302.png,12,2,9,0,0
342
+ 0_1303.png,11,1,2,0,0
343
+ 0_1304.png,0,0,12,0,0
344
+ 0_1305.png,12,7,0,0,0
345
+ 0_1306.png,13,1,0,0,0
346
+ 0_1307.png,1,1,12,0,0
347
+ 0_1308.png,10,0,7,0,0
348
+ 0_1309.png,19,1,0,0,0
349
+ 0_131.png,0,2,2,0,45
350
+ 0_1310.png,9,0,6,0,0
351
+ 0_1311.png,5,0,13,0,0
352
+ 0_1312.png,9,0,3,0,0
353
+ 0_1313.png,13,2,1,0,0
354
+ 0_1314.png,16,0,2,0,0
355
+ 0_1315.png,13,0,7,0,0
356
+ 0_1316.png,13,0,1,0,0
357
+ 0_1317.png,17,2,2,0,0
358
+ 0_1318.png,13,1,5,0,0
359
+ 0_1319.png,14,8,4,0,0
360
+ 0_132.png,0,0,17,0,13
361
+ 0_1320.png,21,1,3,0,0
362
+ 0_1321.png,19,2,0,0,0
363
+ 0_1322.png,24,3,0,0,0
364
+ 0_1323.png,18,5,2,0,0
365
+ 0_1324.png,16,4,0,0,0
366
+ 0_1325.png,13,5,0,0,0
367
+ 0_1326.png,14,3,3,0,0
368
+ 0_1327.png,18,3,0,0,0
369
+ 0_1328.png,18,7,0,0,0
370
+ 0_1329.png,16,4,2,0,0
371
+ 0_133.png,0,1,3,0,1
372
+ 0_1330.png,7,9,4,0,0
373
+ 0_1331.png,21,9,0,0,0
374
+ 0_1332.png,22,4,0,0,0
375
+ 0_1333.png,11,7,2,0,0
376
+ 0_1334.png,18,5,6,0,0
377
+ 0_1335.png,18,6,0,0,0
378
+ 0_1336.png,15,3,0,0,0
379
+ 0_1337.png,4,13,14,0,0
380
+ 0_1338.png,22,5,0,0,0
381
+ 0_1339.png,20,10,1,0,0
382
+ 0_134.png,0,0,5,0,8
383
+ 0_1340.png,8,9,11,0,0
384
+ 0_1341.png,18,4,3,0,0
385
+ 0_1342.png,22,9,6,0,0
386
+ 0_1343.png,19,3,4,0,0
387
+ 0_1344.png,32,1,1,1,0
388
+ 0_1345.png,52,0,0,0,0
389
+ 0_1346.png,0,6,0,0,0
390
+ 0_1347.png,17,9,2,0,0
391
+ 0_1348.png,61,0,0,0,0
392
+ 0_1349.png,39,0,0,7,0
393
+ 0_135.png,0,1,5,0,30
394
+ 0_1350.png,0,14,3,0,0
395
+ 0_1351.png,48,1,0,0,0
396
+ 0_1352.png,74,0,0,0,0
397
+ 0_1353.png,0,15,5,0,0
398
+ 0_1354.png,0,17,3,0,0
399
+ 0_1355.png,0,9,3,0,0
400
+ 0_1356.png,0,35,4,0,0
401
+ 0_1357.png,0,27,0,0,0
402
+ 0_1358.png,0,23,4,0,0
403
+ 0_1359.png,1,10,3,0,0
404
+ 0_136.png,0,3,14,0,15
405
+ 0_1360.png,24,8,2,0,0
406
+ 0_1361.png,0,13,7,0,0
407
+ 0_1362.png,0,16,7,0,0
408
+ 0_1363.png,0,10,4,0,0
409
+ 0_1364.png,0,13,4,0,0
410
+ 0_1365.png,0,10,7,0,0
411
+ 0_1366.png,0,9,5,0,0
412
+ 0_1367.png,0,19,6,0,0
413
+ 0_1368.png,0,19,2,0,0
414
+ 0_1369.png,14,14,3,0,0
415
+ 0_137.png,0,1,4,0,18
416
+ 0_1370.png,5,1,7,0,0
417
+ 0_1371.png,0,2,9,0,0
418
+ 0_1372.png,15,1,2,0,0
419
+ 0_1373.png,3,0,10,0,0
420
+ 0_1374.png,22,2,5,0,0
421
+ 0_1375.png,8,1,6,0,0
422
+ 0_1376.png,8,2,1,0,0
423
+ 0_1377.png,15,2,15,0,0
424
+ 0_1378.png,5,2,11,0,0
425
+ 0_1379.png,11,1,2,0,0
426
+ 0_138.png,0,0,7,0,29
427
+ 0_1380.png,7,2,4,0,0
428
+ 0_1381.png,14,1,0,0,0
429
+ 0_1382.png,7,4,7,0,0
430
+ 0_1383.png,9,8,8,0,0
431
+ 0_1384.png,15,2,4,0,0
432
+ 0_1385.png,9,0,11,0,0
433
+ 0_1386.png,0,1,12,0,0
434
+ 0_1387.png,11,2,7,0,0
435
+ 0_1388.png,21,0,0,0,0
436
+ 0_1389.png,16,0,0,0,0
437
+ 0_139.png,0,0,2,0,38
438
+ 0_1390.png,13,0,0,0,0
439
+ 0_1391.png,22,0,1,0,0
440
+ 0_1392.png,7,0,0,0,0
441
+ 0_1393.png,15,0,1,0,0
442
+ 0_1394.png,15,0,1,0,0
443
+ 0_1395.png,16,0,0,0,0
444
+ 0_1396.png,19,0,0,0,0
445
+ 0_1397.png,17,0,0,0,0
446
+ 0_1398.png,13,0,0,0,0
447
+ 0_1399.png,20,0,2,0,0
448
+ 0_14.png,5,1,3,0,0
449
+ 0_140.png,0,1,9,0,0
450
+ 0_1400.png,5,0,0,0,0
451
+ 0_1401.png,21,0,0,0,0
452
+ 0_1402.png,24,0,0,0,0
453
+ 0_1403.png,11,0,0,0,0
454
+ 0_1404.png,14,0,0,0,0
455
+ 0_1405.png,7,0,0,0,0
456
+ 0_1406.png,17,0,0,0,0
457
+ 0_1407.png,12,0,1,0,0
458
+ 0_1408.png,11,0,1,0,0
459
+ 0_1409.png,20,0,0,0,0
460
+ 0_141.png,9,0,2,0,0
461
+ 0_1410.png,13,0,3,0,0
462
+ 0_1411.png,23,0,0,0,0
463
+ 0_1412.png,16,2,2,0,0
464
+ 0_1413.png,21,4,1,0,0
465
+ 0_1414.png,2,3,7,0,0
466
+ 0_1415.png,17,9,3,0,0
467
+ 0_1416.png,9,7,2,0,0
468
+ 0_1417.png,22,0,2,0,0
469
+ 0_1418.png,1,12,6,0,0
470
+ 0_1419.png,0,2,18,0,0
471
+ 0_142.png,18,0,5,0,0
472
+ 0_1420.png,9,1,4,0,0
473
+ 0_1421.png,0,6,13,0,0
474
+ 0_1422.png,0,4,19,0,0
475
+ 0_1423.png,5,2,13,0,0
476
+ 0_1424.png,0,4,12,0,0
477
+ 0_1425.png,0,3,14,0,15
478
+ 0_1426.png,0,1,0,0,25
479
+ 0_1427.png,0,1,4,0,38
480
+ 0_1428.png,0,2,4,0,36
481
+ 0_1429.png,0,0,0,0,14
482
+ 0_143.png,26,1,3,0,0
483
+ 0_1430.png,0,0,0,0,3
484
+ 0_1431.png,0,5,7,0,15
485
+ 0_1432.png,0,0,0,0,0
486
+ 0_1433.png,0,0,0,0,0
487
+ 0_1434.png,0,0,1,0,19
488
+ 0_1435.png,0,4,18,0,2
489
+ 0_1436.png,0,4,12,0,20
490
+ 0_1437.png,0,0,0,0,32
491
+ 0_1438.png,0,7,6,0,22
492
+ 0_1439.png,0,8,16,0,0
493
+ 0_144.png,17,1,2,0,0
494
+ 0_1440.png,5,10,12,0,0
495
+ 0_1441.png,10,12,4,0,0
496
+ 0_1442.png,12,15,9,0,0
497
+ 0_1443.png,0,2,16,0,0
498
+ 0_1444.png,0,1,25,0,9
499
+ 0_1445.png,0,1,18,0,15
500
+ 0_1446.png,39,0,2,0,0
501
+ 0_1447.png,8,0,5,0,0
502
+ 0_1448.png,34,0,2,0,0
503
+ 0_1449.png,23,0,7,0,0
504
+ 0_145.png,6,0,3,0,0
505
+ 0_1450.png,21,0,7,0,0
506
+ 0_1451.png,13,0,9,0,0
507
+ 0_1452.png,9,0,11,0,0
508
+ 0_1453.png,13,0,6,0,0
509
+ 0_1454.png,14,0,2,0,0
510
+ 0_1455.png,6,1,11,0,0
511
+ 0_1456.png,50,0,0,0,0
512
+ 0_1457.png,16,0,6,0,0
513
+ 0_1458.png,18,0,11,0,0
514
+ 0_1459.png,0,1,2,0,0
515
+ 0_146.png,14,2,2,0,0
516
+ 0_1460.png,0,3,13,0,0
517
+ 0_1461.png,0,0,20,0,0
518
+ 0_1462.png,18,12,8,0,0
519
+ 0_1463.png,0,27,16,0,0
520
+ 0_1464.png,0,13,24,0,0
521
+ 0_1465.png,46,2,1,0,0
522
+ 0_1466.png,0,24,15,0,0
523
+ 0_1467.png,23,1,4,0,0
524
+ 0_1468.png,30,0,2,0,0
525
+ 0_1469.png,30,0,3,0,0
526
+ 0_147.png,12,0,1,0,0
527
+ 0_1470.png,30,1,0,0,0
528
+ 0_1471.png,31,0,0,0,0
529
+ 0_1472.png,5,5,17,0,0
530
+ 0_1473.png,20,26,4,0,0
531
+ 0_1474.png,23,0,2,0,0
532
+ 0_1475.png,10,1,5,0,0
533
+ 0_1476.png,6,1,5,0,0
534
+ 0_1477.png,5,13,13,0,0
535
+ 0_1478.png,0,19,14,0,0
536
+ 0_1479.png,0,21,14,0,22
537
+ 0_148.png,0,0,2,0,15
538
+ 0_1480.png,0,3,9,0,34
539
+ 0_1481.png,67,0,0,3,0
540
+ 0_1482.png,7,0,0,68,0
541
+ 0_1483.png,0,14,35,0,20
542
+ 0_1484.png,0,5,5,0,14
543
+ 0_1485.png,0,31,56,0,0
544
+ 0_1486.png,0,97,57,0,0
545
+ 0_1487.png,0,104,85,0,0
546
+ 0_1488.png,45,1,1,0,0
547
+ 0_1489.png,28,1,3,0,0
548
+ 0_149.png,0,0,0,0,37
549
+ 0_1490.png,59,1,0,0,0
550
+ 0_1491.png,54,0,1,0,0
551
+ 0_1492.png,27,2,7,0,0
552
+ 0_1493.png,33,0,2,0,0
553
+ 0_1494.png,0,10,50,0,0
554
+ 0_1495.png,0,10,50,0,0
555
+ 0_1496.png,0,0,23,0,0
556
+ 0_1497.png,0,0,20,0,0
557
+ 0_1498.png,0,0,12,0,0
558
+ 0_1499.png,0,0,12,0,0
559
+ 0_15.png,4,0,8,0,0
560
+ 0_150.png,0,0,11,0,27
561
+ 0_1500.png,47,0,3,0,0
562
+ 0_1501.png,4,1,18,0,0
563
+ 0_1502.png,32,0,1,0,0
564
+ 0_1503.png,30,0,0,2,0
565
+ 0_1504.png,3,7,49,0,0
566
+ 0_1505.png,1,14,32,0,0
567
+ 0_1506.png,8,26,57,0,0
568
+ 0_1507.png,0,5,28,0,0
569
+ 0_1508.png,54,5,11,0,0
570
+ 0_1509.png,0,5,18,0,0
571
+ 0_151.png,0,3,4,0,19
572
+ 0_1510.png,0,3,21,0,0
573
+ 0_1511.png,0,0,14,0,0
574
+ 0_1512.png,0,0,0,0,0
575
+ 0_1513.png,79,0,0,0,0
576
+ 0_1514.png,59,0,3,0,0
577
+ 0_1515.png,32,0,0,0,0
578
+ 0_1516.png,59,1,1,0,0
579
+ 0_1517.png,51,2,2,0,0
580
+ 0_1518.png,0,7,23,0,0
581
+ 0_1519.png,0,9,34,0,0
582
+ 0_152.png,0,1,0,0,33
583
+ 0_1520.png,28,0,2,0,0
584
+ 0_1521.png,35,0,6,0,0
585
+ 0_1522.png,32,0,8,0,0
586
+ 0_1523.png,24,0,1,0,0
587
+ 0_1524.png,23,2,0,0,0
588
+ 0_1525.png,23,0,0,0,0
589
+ 0_1526.png,0,8,15,0,0
590
+ 0_1527.png,0,1,0,0,0
591
+ 0_1528.png,0,0,4,0,0
592
+ 0_1529.png,0,8,6,0,0
593
+ 0_153.png,12,4,0,0,0
594
+ 0_1530.png,0,1,1,0,0
595
+ 0_1531.png,0,2,14,0,0
596
+ 0_1532.png,7,0,1,0,0
597
+ 0_1533.png,15,0,0,0,0
598
+ 0_1534.png,11,0,0,0,0
599
+ 0_1535.png,15,0,0,0,0
600
+ 0_1536.png,11,1,0,0,0
601
+ 0_1537.png,11,0,0,0,0
602
+ 0_1538.png,9,0,0,0,0
603
+ 0_1539.png,8,0,0,0,0
604
+ 0_154.png,1,9,5,0,0
605
+ 0_1540.png,23,0,0,0,0
606
+ 0_1541.png,17,0,2,0,0
607
+ 0_1542.png,0,0,0,0,3
608
+ 0_1543.png,0,0,2,0,3
609
+ 0_1544.png,0,0,4,0,1
610
+ 0_1545.png,0,0,4,0,0
611
+ 0_1546.png,0,0,4,0,0
612
+ 0_1547.png,0,0,7,0,0
613
+ 0_1548.png,0,4,28,0,0
614
+ 0_1549.png,6,0,0,0,0
615
+ 0_155.png,8,3,1,0,0
616
+ 0_1550.png,7,0,0,0,0
617
+ 0_1551.png,6,0,0,0,0
618
+ 0_1552.png,11,0,0,0,0
619
+ 0_1553.png,11,0,2,0,0
620
+ 0_1554.png,8,0,0,0,0
621
+ 0_1555.png,8,0,4,0,0
622
+ 0_1556.png,9,0,0,0,0
623
+ 0_1557.png,12,2,4,0,0
624
+ 0_1558.png,19,2,1,0,0
625
+ 0_1559.png,14,0,4,0,0
626
+ 0_156.png,4,7,5,0,0
627
+ 0_1560.png,17,3,4,0,0
628
+ 0_1561.png,7,2,2,0,0
629
+ 0_1562.png,11,1,1,0,0
630
+ 0_1563.png,14,0,6,0,0
631
+ 0_1564.png,18,1,2,0,0
632
+ 0_1565.png,8,0,7,0,0
633
+ 0_1566.png,10,0,8,0,0
634
+ 0_1567.png,1,1,1,0,0
635
+ 0_1568.png,5,0,3,0,0
636
+ 0_1569.png,2,1,0,0,0
637
+ 0_157.png,6,5,4,0,0
638
+ 0_1570.png,23,1,2,0,0
639
+ 0_1571.png,15,0,4,0,0
640
+ 0_1572.png,2,0,9,0,0
641
+ 0_1573.png,18,1,4,0,0
642
+ 0_1574.png,0,0,2,0,0
643
+ 0_1575.png,0,0,4,0,0
644
+ 0_1576.png,0,0,1,0,0
645
+ 0_1577.png,0,0,2,0,0
646
+ 0_1578.png,0,0,0,0,0
647
+ 0_1579.png,0,0,4,0,0
648
+ 0_158.png,0,1,5,0,0
649
+ 0_1580.png,0,9,21,0,0
650
+ 0_1581.png,0,11,21,0,0
651
+ 0_1582.png,0,7,18,0,1
652
+ 0_1583.png,0,5,15,0,1
653
+ 0_1584.png,0,20,14,0,14
654
+ 0_1585.png,2,0,14,0,0
655
+ 0_1586.png,2,0,9,0,0
656
+ 0_1587.png,1,4,9,0,0
657
+ 0_1588.png,5,2,16,0,0
658
+ 0_1589.png,2,3,10,0,0
659
+ 0_159.png,0,1,9,0,8
660
+ 0_1590.png,2,0,7,0,0
661
+ 0_1591.png,14,0,2,0,0
662
+ 0_1592.png,9,0,6,0,0
663
+ 0_1593.png,0,3,15,0,0
664
+ 0_1594.png,0,0,10,0,0
665
+ 0_1595.png,0,2,22,0,0
666
+ 0_1596.png,0,5,5,0,9
667
+ 0_1597.png,0,10,8,0,2
668
+ 0_1598.png,0,3,4,0,7
669
+ 0_1599.png,0,0,7,0,10
670
+ 0_16.png,7,1,4,0,0
671
+ 0_160.png,0,1,11,0,9
672
+ 0_1600.png,0,0,1,0,9
673
+ 0_1601.png,0,5,5,0,10
674
+ 0_1602.png,0,4,4,0,14
675
+ 0_1603.png,0,0,6,0,9
676
+ 0_1604.png,0,5,10,0,0
677
+ 0_1605.png,0,4,22,0,0
678
+ 0_1606.png,0,10,33,0,0
679
+ 0_1607.png,21,0,7,0,0
680
+ 0_1608.png,21,0,14,0,0
681
+ 0_1609.png,21,1,8,0,0
682
+ 0_161.png,0,2,1,0,30
683
+ 0_1610.png,16,1,16,0,0
684
+ 0_1611.png,20,1,14,0,0
685
+ 0_1612.png,27,1,9,0,0
686
+ 0_1613.png,23,0,11,0,0
687
+ 0_1614.png,0,0,0,0,0
688
+ 0_1615.png,0,0,0,0,0
689
+ 0_1616.png,0,0,0,0,0
690
+ 0_1617.png,0,0,0,0,0
691
+ 0_1618.png,0,0,0,0,0
692
+ 0_1619.png,0,0,0,0,0
693
+ 0_162.png,0,2,6,0,24
694
+ 0_1620.png,0,0,0,0,0
695
+ 0_1621.png,8,11,12,0,0
696
+ 0_1622.png,21,8,7,0,0
697
+ 0_1623.png,11,14,22,0,0
698
+ 0_1624.png,22,6,7,0,0
699
+ 0_1625.png,28,4,2,0,0
700
+ 0_1626.png,28,6,0,0,0
701
+ 0_1627.png,13,17,2,0,0
702
+ 0_1628.png,0,2,3,0,0
703
+ 0_1629.png,0,0,0,0,0
704
+ 0_163.png,0,1,7,0,15
705
+ 0_1630.png,0,3,9,0,0
706
+ 0_1631.png,0,0,2,0,0
707
+ 0_1632.png,0,0,0,0,0
708
+ 0_1633.png,6,2,0,0,0
709
+ 0_1634.png,4,19,10,0,0
710
+ 0_1635.png,13,3,0,0,0
711
+ 0_1636.png,9,15,2,0,0
712
+ 0_1637.png,1,89,7,0,0
713
+ 0_1638.png,0,63,13,0,0
714
+ 0_1639.png,18,0,1,0,0
715
+ 0_164.png,0,1,4,0,2
716
+ 0_1640.png,12,1,3,0,0
717
+ 0_1641.png,12,1,1,0,0
718
+ 0_1642.png,24,0,0,0,0
719
+ 0_1643.png,1,2,12,0,0
720
+ 0_1644.png,0,9,17,0,0
721
+ 0_1645.png,0,6,8,0,0
722
+ 0_1646.png,0,6,11,0,0
723
+ 0_1647.png,0,13,17,0,0
724
+ 0_1648.png,4,31,17,0,0
725
+ 0_1649.png,3,25,11,0,0
726
+ 0_165.png,0,1,14,0,5
727
+ 0_1650.png,0,9,17,0,0
728
+ 0_1651.png,0,29,9,0,0
729
+ 0_1652.png,0,5,17,0,0
730
+ 0_1653.png,0,3,18,0,0
731
+ 0_1654.png,0,12,12,0,0
732
+ 0_1655.png,0,1,8,0,0
733
+ 0_1656.png,0,1,6,0,10
734
+ 0_1657.png,0,0,20,0,1
735
+ 0_1658.png,0,1,18,0,0
736
+ 0_1659.png,0,4,12,0,0
737
+ 0_166.png,0,0,5,0,31
738
+ 0_1660.png,23,1,4,0,0
739
+ 0_1661.png,18,0,0,0,0
740
+ 0_1662.png,30,0,0,0,0
741
+ 0_1663.png,0,1,11,0,0
742
+ 0_1664.png,0,0,5,0,0
743
+ 0_1665.png,0,0,8,0,0
744
+ 0_1666.png,0,1,16,0,0
745
+ 0_1667.png,0,0,7,0,0
746
+ 0_1668.png,0,0,8,0,0
747
+ 0_1669.png,0,0,12,0,0
748
+ 0_167.png,0,2,7,0,30
749
+ 0_1670.png,0,0,13,0,0
750
+ 0_1671.png,0,0,8,0,0
751
+ 0_1672.png,0,0,8,0,0
752
+ 0_1673.png,0,0,8,0,0
753
+ 0_1674.png,0,0,7,0,0
754
+ 0_1675.png,0,2,2,0,0
755
+ 0_1676.png,0,0,9,0,0
756
+ 0_1677.png,0,0,8,0,0
757
+ 0_1678.png,0,1,3,0,0
758
+ 0_1679.png,0,0,2,0,0
759
+ 0_168.png,0,4,12,0,15
760
+ 0_1680.png,0,0,1,0,0
761
+ 0_1681.png,0,3,4,0,0
762
+ 0_1682.png,0,0,2,0,0
763
+ 0_1683.png,0,0,0,0,20
764
+ 0_1684.png,0,0,6,0,28
765
+ 0_1685.png,0,2,10,0,2
766
+ 0_1686.png,0,2,18,0,0
767
+ 0_1687.png,8,5,13,0,0
768
+ 0_1688.png,15,0,0,0,0
769
+ 0_1689.png,0,3,22,0,0
770
+ 0_169.png,0,1,10,0,18
771
+ 0_1690.png,0,8,24,0,0
772
+ 0_1691.png,2,10,13,0,0
773
+ 0_1692.png,3,6,9,0,0
774
+ 0_1693.png,10,7,4,0,0
775
+ 0_1694.png,5,1,17,0,0
776
+ 0_1695.png,4,0,15,0,0
777
+ 0_1696.png,18,0,0,0,0
778
+ 0_1697.png,11,1,0,6,0
779
+ 0_1698.png,27,0,0,0,0
780
+ 0_1699.png,24,0,0,0,0
781
+ 0_17.png,10,1,6,0,0
782
+ 0_170.png,0,1,3,0,27
783
+ 0_1700.png,22,1,0,0,0
784
+ 0_1701.png,22,0,0,0,0
785
+ 0_1702.png,21,0,0,0,0
786
+ 0_1703.png,16,0,0,0,0
787
+ 0_1704.png,0,0,0,0,0
788
+ 0_1705.png,0,0,0,0,0
789
+ 0_1706.png,0,1,1,0,0
790
+ 0_1707.png,0,0,0,0,0
791
+ 0_1708.png,0,0,0,0,0
792
+ 0_1709.png,0,0,0,0,0
793
+ 0_171.png,0,0,7,0,20
794
+ 0_1710.png,15,0,0,0,0
795
+ 0_1711.png,12,0,0,0,0
796
+ 0_1712.png,13,0,2,0,0
797
+ 0_1713.png,19,0,0,0,0
798
+ 0_1714.png,21,0,0,0,0
799
+ 0_1715.png,19,0,2,0,0
800
+ 0_1716.png,19,1,1,0,0
801
+ 0_1717.png,19,1,0,0,0
802
+ 0_1718.png,0,0,8,0,0
803
+ 0_1719.png,0,1,8,0,0
804
+ 0_172.png,4,1,1,0,0
805
+ 0_1720.png,0,0,9,0,0
806
+ 0_1721.png,0,0,6,0,0
807
+ 0_1722.png,0,0,12,0,0
808
+ 0_1723.png,0,0,0,0,0
809
+ 0_1724.png,0,0,0,0,0
810
+ 0_1725.png,0,0,0,0,0
811
+ 0_1726.png,79,0,3,0,0
812
+ 0_1727.png,83,0,8,0,0
813
+ 0_1728.png,77,0,13,0,0
814
+ 0_1729.png,91,1,1,0,0
815
+ 0_173.png,1,1,2,0,0
816
+ 0_1730.png,99,0,4,0,0
817
+ 0_1731.png,79,1,5,0,0
818
+ 0_1732.png,12,5,19,0,0
819
+ 0_1733.png,17,2,9,0,0
820
+ 0_1734.png,21,3,16,0,0
821
+ 0_1735.png,17,0,11,0,0
822
+ 0_1736.png,26,0,16,0,0
823
+ 0_1737.png,38,0,1,0,0
824
+ 0_1738.png,26,0,11,0,0
825
+ 0_1739.png,6,2,12,0,0
826
+ 0_174.png,0,1,3,0,2
827
+ 0_1740.png,12,1,12,0,0
828
+ 0_1741.png,12,1,12,0,0
829
+ 0_1742.png,28,2,3,0,0
830
+ 0_1743.png,45,0,2,0,0
831
+ 0_1744.png,20,0,5,0,0
832
+ 0_1745.png,31,0,9,0,0
833
+ 0_1746.png,10,0,13,0,0
834
+ 0_1747.png,18,1,7,0,0
835
+ 0_1748.png,0,0,0,0,0
836
+ 0_1749.png,0,0,0,0,0
837
+ 0_175.png,0,0,7,0,30
838
+ 0_1750.png,0,0,0,0,0
839
+ 0_1751.png,0,0,0,0,0
840
+ 0_1752.png,0,0,0,0,0
841
+ 0_1753.png,0,0,0,0,0
842
+ 0_1754.png,47,0,0,0,0
843
+ 0_1755.png,38,0,0,7,0
844
+ 0_1756.png,4,0,0,19,0
845
+ 0_1757.png,0,3,4,16,0
846
+ 0_1758.png,41,1,0,0,0
847
+ 0_1759.png,35,1,0,0,0
848
+ 0_176.png,0,0,2,0,28
849
+ 0_1760.png,56,1,1,0,0
850
+ 0_1761.png,58,2,0,0,0
851
+ 0_1762.png,65,2,0,0,0
852
+ 0_1763.png,63,1,4,0,0
853
+ 0_1764.png,17,20,9,0,0
854
+ 0_1765.png,0,2,36,0,0
855
+ 0_1766.png,6,5,8,0,0
856
+ 0_1767.png,0,5,50,0,0
857
+ 0_1768.png,8,5,17,0,0
858
+ 0_1769.png,39,0,0,0,0
859
+ 0_177.png,0,2,8,0,18
860
+ 0_1770.png,26,2,6,0,0
861
+ 0_1771.png,17,2,4,0,0
862
+ 0_1772.png,39,0,0,0,0
863
+ 0_1773.png,8,0,19,0,0
864
+ 0_1774.png,8,1,5,0,0
865
+ 0_1775.png,0,3,4,0,0
866
+ 0_1776.png,0,0,7,0,0
867
+ 0_1777.png,0,0,16,0,0
868
+ 0_1778.png,0,0,8,0,0
869
+ 0_1779.png,0,3,21,0,0
870
+ 0_178.png,0,1,2,1,36
871
+ 0_1780.png,0,0,8,0,0
872
+ 0_1781.png,0,3,18,0,0
873
+ 0_1782.png,0,1,9,0,0
874
+ 0_1783.png,0,1,10,0,0
875
+ 0_1784.png,0,2,9,0,0
876
+ 0_1785.png,0,2,17,0,0
877
+ 0_1786.png,0,0,14,0,0
878
+ 0_1787.png,41,3,1,0,0
879
+ 0_1788.png,47,1,0,0,0
880
+ 0_1789.png,9,4,6,0,0
881
+ 0_179.png,0,2,2,0,56
882
+ 0_1790.png,30,6,8,0,0
883
+ 0_1791.png,32,1,0,0,0
884
+ 0_1792.png,5,3,8,0,0
885
+ 0_1793.png,37,3,5,0,0
886
+ 0_1794.png,28,0,0,0,0
887
+ 0_1795.png,49,2,0,0,0
888
+ 0_1796.png,34,2,0,0,0
889
+ 0_1797.png,45,2,0,0,0
890
+ 0_1798.png,28,3,1,0,0
891
+ 0_1799.png,27,2,2,0,0
892
+ 0_18.png,4,1,2,0,0
893
+ 0_180.png,2,2,5,0,0
894
+ 0_1800.png,28,1,0,0,0
895
+ 0_1801.png,24,2,1,0,0
896
+ 0_1802.png,35,6,1,0,0
897
+ 0_1803.png,25,3,2,0,0
898
+ 0_1804.png,0,10,39,0,0
899
+ 0_1805.png,0,0,33,0,0
900
+ 0_1806.png,4,59,4,0,0
901
+ 0_1807.png,0,15,33,0,0
902
+ 0_1808.png,0,0,32,0,0
903
+ 0_1809.png,17,12,1,0,0
904
+ 0_181.png,1,0,8,0,0
905
+ 0_1810.png,11,32,4,0,0
906
+ 0_1811.png,18,9,2,0,0
907
+ 0_1812.png,27,8,0,0,0
908
+ 0_1813.png,26,22,3,0,0
909
+ 0_1814.png,24,3,0,0,0
910
+ 0_1815.png,40,0,0,0,0
911
+ 0_1816.png,14,0,0,0,0
912
+ 0_1817.png,34,0,0,0,0
913
+ 0_1818.png,41,5,0,0,0
914
+ 0_1819.png,20,3,0,0,0
915
+ 0_182.png,1,0,2,0,0
916
+ 0_1820.png,43,6,0,0,0
917
+ 0_1821.png,34,0,0,0,0
918
+ 0_1822.png,0,2,1,0,6
919
+ 0_1823.png,0,3,6,0,17
920
+ 0_1824.png,0,11,7,0,0
921
+ 0_1825.png,0,11,2,0,0
922
+ 0_1826.png,0,1,5,0,19
923
+ 0_1827.png,0,2,3,0,23
924
+ 0_1828.png,0,2,6,0,18
925
+ 0_1829.png,0,3,12,0,8
926
+ 0_183.png,0,0,14,0,0
927
+ 0_1830.png,0,0,0,0,12
928
+ 0_1831.png,0,4,9,0,11
929
+ 0_1832.png,0,9,4,0,10
930
+ 0_1833.png,0,9,7,0,11
931
+ 0_1834.png,0,4,2,0,17
932
+ 0_1835.png,0,0,4,0,15
933
+ 0_1836.png,0,0,2,0,15
934
+ 0_1837.png,0,3,6,0,8
935
+ 0_1838.png,0,0,0,0,8
936
+ 0_1839.png,0,1,0,0,3
937
+ 0_184.png,0,2,17,0,26
938
+ 0_1840.png,0,2,5,0,18
939
+ 0_1841.png,0,0,0,0,21
940
+ 0_1842.png,0,1,2,0,11
941
+ 0_1843.png,0,4,7,0,10
942
+ 0_1844.png,0,2,4,0,17
943
+ 0_1845.png,0,5,2,0,8
944
+ 0_1846.png,0,0,1,0,10
945
+ 0_1847.png,0,0,0,0,2
946
+ 0_1848.png,0,9,3,0,11
947
+ 0_1849.png,0,0,0,0,11
948
+ 0_185.png,0,6,2,0,34
949
+ 0_1850.png,0,13,8,0,0
950
+ 0_1851.png,1,3,11,0,4
951
+ 0_1852.png,0,15,8,0,0
952
+ 0_1853.png,0,18,10,0,0
953
+ 0_1854.png,0,18,11,0,7
954
+ 0_1855.png,0,0,0,0,9
955
+ 0_1856.png,0,3,3,0,12
956
+ 0_1857.png,0,7,10,0,8
957
+ 0_1858.png,2,14,9,0,0
958
+ 0_1859.png,0,0,0,0,0
959
+ 0_186.png,0,0,3,0,47
960
+ 0_1860.png,0,18,4,0,5
961
+ 0_1861.png,0,3,9,0,17
962
+ 0_1862.png,0,2,1,0,12
963
+ 0_1863.png,0,6,7,0,16
964
+ 0_1864.png,0,0,0,0,0
965
+ 0_1865.png,0,3,4,0,4
966
+ 0_1866.png,0,3,6,0,4
967
+ 0_1867.png,0,5,3,0,1
968
+ 0_1868.png,0,4,5,0,0
969
+ 0_1869.png,0,3,1,0,0
970
+ 0_187.png,0,0,9,0,40
971
+ 0_1870.png,0,0,0,0,0
972
+ 0_1871.png,0,0,2,0,4
973
+ 0_1872.png,0,3,4,0,1
974
+ 0_1873.png,0,0,0,0,8
975
+ 0_1874.png,0,0,4,0,12
976
+ 0_1875.png,0,0,2,0,7
977
+ 0_1876.png,0,0,3,0,1
978
+ 0_1877.png,0,1,4,0,3
979
+ 0_1878.png,0,3,3,0,0
980
+ 0_1879.png,0,1,6,0,7
981
+ 0_188.png,0,2,2,0,42
982
+ 0_1880.png,0,0,0,0,0
983
+ 0_1881.png,0,0,8,0,3
984
+ 0_1882.png,0,0,3,0,8
985
+ 0_1883.png,0,2,3,0,0
986
+ 0_1884.png,0,0,2,0,0
987
+ 0_1885.png,0,0,0,0,6
988
+ 0_1886.png,0,0,13,0,0
989
+ 0_1887.png,0,0,5,0,6
990
+ 0_1888.png,0,1,5,0,0
991
+ 0_1889.png,0,1,1,0,0
992
+ 0_189.png,0,1,13,0,36
993
+ 0_1890.png,0,0,1,0,0
994
+ 0_1891.png,0,1,4,0,0
995
+ 0_1892.png,0,0,1,0,5
996
+ 0_1893.png,0,0,2,0,11
997
+ 0_1894.png,0,0,5,0,1
998
+ 0_1895.png,0,0,10,0,0
999
+ 0_1896.png,4,0,0,0,0
1000
+ 0_1897.png,0,0,0,0,7
1001
+ 0_1898.png,0,0,0,0,6
1002
+ 0_1899.png,0,1,4,0,11
1003
+ 0_19.png,2,3,8,0,0
1004
+ 0_190.png,0,2,4,0,28
1005
+ 0_1900.png,3,0,0,0,0
1006
+ 0_1901.png,1,0,0,0,7
1007
+ 0_1902.png,0,0,2,0,23
1008
+ 0_1903.png,0,1,4,0,8
1009
+ 0_1904.png,0,0,0,0,31
1010
+ 0_1905.png,0,1,6,0,15
1011
+ 0_1906.png,0,18,8,0,4
1012
+ 0_1907.png,0,4,2,0,18
1013
+ 0_1908.png,0,3,7,0,11
1014
+ 0_1909.png,0,0,0,0,2
1015
+ 0_191.png,0,1,4,0,18
1016
+ 0_1910.png,0,4,8,0,17
1017
+ 0_1911.png,0,0,0,0,8
1018
+ 0_1912.png,0,0,3,0,12
1019
+ 0_1913.png,0,0,0,0,8
1020
+ 0_1914.png,0,3,5,0,7
1021
+ 0_1915.png,0,2,4,0,20
1022
+ 0_1916.png,0,0,0,0,12
1023
+ 0_1917.png,0,0,3,0,19
1024
+ 0_1918.png,0,0,0,0,5
1025
+ 0_1919.png,0,4,4,0,7
1026
+ 0_192.png,0,0,0,0,45
1027
+ 0_1920.png,0,5,10,0,16
1028
+ 0_1921.png,0,2,13,0,13
1029
+ 0_1922.png,0,0,2,0,0
1030
+ 0_1923.png,0,0,0,0,0
1031
+ 0_1924.png,0,0,11,0,0
1032
+ 0_1925.png,0,0,9,0,0
1033
+ 0_1926.png,0,0,5,0,0
1034
+ 0_1927.png,0,1,14,0,0
1035
+ 0_1928.png,0,0,0,0,6
1036
+ 0_1929.png,0,1,5,0,10
1037
+ 0_193.png,0,0,0,0,23
1038
+ 0_1930.png,0,2,20,0,0
1039
+ 0_1931.png,0,4,28,0,0
1040
+ 0_1932.png,0,0,13,0,15
1041
+ 0_1933.png,0,0,0,0,5
1042
+ 0_1934.png,0,1,2,0,13
1043
+ 0_1935.png,0,3,26,0,3
1044
+ 0_1936.png,0,0,0,0,1
1045
+ 0_1937.png,0,2,19,0,0
1046
+ 0_1938.png,0,8,11,0,7
1047
+ 0_1939.png,0,0,0,0,0
1048
+ 0_194.png,2,0,2,0,0
1049
+ 0_1940.png,0,0,0,0,0
1050
+ 0_1941.png,0,0,0,0,4
1051
+ 0_1942.png,0,0,0,0,13
1052
+ 0_1943.png,0,7,9,0,14
1053
+ 0_1944.png,0,13,25,0,0
1054
+ 0_1945.png,0,0,0,0,0
1055
+ 0_1946.png,0,0,0,0,0
1056
+ 0_1947.png,0,4,1,0,25
1057
+ 0_1948.png,0,0,0,0,4
1058
+ 0_1949.png,0,0,0,0,7
1059
+ 0_195.png,11,0,1,0,0
1060
+ 0_1950.png,0,6,4,0,9
1061
+ 0_1951.png,0,0,0,0,8
1062
+ 0_1952.png,0,0,0,0,5
1063
+ 0_1953.png,0,3,5,0,14
1064
+ 0_1954.png,0,13,3,0,4
1065
+ 0_1955.png,0,7,5,0,11
1066
+ 0_1956.png,0,0,0,0,12
1067
+ 0_1957.png,0,0,0,0,4
1068
+ 0_1958.png,0,19,3,0,3
1069
+ 0_1959.png,0,1,0,0,8
1070
+ 0_196.png,6,0,6,0,0
1071
+ 0_1960.png,0,8,3,0,8
1072
+ 0_1961.png,0,20,2,0,5
1073
+ 0_1962.png,0,11,3,0,10
1074
+ 0_1963.png,0,8,2,0,7
1075
+ 0_1964.png,0,0,0,0,6
1076
+ 0_1965.png,0,1,0,0,7
1077
+ 0_1966.png,0,0,0,0,0
1078
+ 0_1967.png,0,0,0,0,0
1079
+ 0_1968.png,0,0,0,0,0
1080
+ 0_1969.png,0,0,0,0,0
1081
+ 0_197.png,23,0,5,0,0
1082
+ 0_1970.png,0,0,0,0,0
1083
+ 0_1971.png,0,0,0,0,0
1084
+ 0_1972.png,0,0,0,0,0
1085
+ 0_1973.png,0,0,0,0,0
1086
+ 0_1974.png,0,0,0,0,0
1087
+ 0_1975.png,0,0,0,0,0
1088
+ 0_1976.png,0,0,0,0,0
1089
+ 0_1977.png,0,0,0,0,0
1090
+ 0_1978.png,0,0,0,0,0
1091
+ 0_1979.png,0,0,0,0,0
1092
+ 0_198.png,8,1,3,0,1
1093
+ 0_1980.png,0,8,0,0,13
1094
+ 0_1981.png,0,22,1,0,9
1095
+ 0_1982.png,4,17,1,0,9
1096
+ 0_1983.png,0,19,2,0,6
1097
+ 0_1984.png,0,24,0,0,5
1098
+ 0_1985.png,0,1,0,0,17
1099
+ 0_1986.png,0,22,2,0,10
1100
+ 0_1987.png,0,8,0,0,13
1101
+ 0_1988.png,0,8,0,0,16
1102
+ 0_1989.png,0,11,2,0,5
1103
+ 0_199.png,4,0,7,0,0
1104
+ 0_1990.png,0,1,0,0,9
1105
+ 0_1991.png,0,24,1,0,3
1106
+ 0_1992.png,0,18,0,0,10
1107
+ 0_1993.png,0,27,3,0,4
1108
+ 0_1994.png,0,9,1,0,0
1109
+ 0_1995.png,0,9,0,0,12
1110
+ 0_1996.png,0,9,0,0,9
1111
+ 0_1997.png,0,1,0,0,13
1112
+ 0_1998.png,0,14,1,0,8
1113
+ 0_1999.png,0,7,6,0,5
1114
+ 0_2.png,8,0,3,0,0
1115
+ 0_20.png,10,0,0,0,0
1116
+ 0_200.png,0,2,3,0,22
1117
+ 0_2000.png,0,8,8,0,0
1118
+ 0_2001.png,0,0,1,0,12
1119
+ 0_2002.png,0,0,0,0,10
1120
+ 0_2003.png,0,7,14,0,0
1121
+ 0_2004.png,0,6,4,0,10
1122
+ 0_2005.png,0,13,8,0,0
1123
+ 0_2006.png,0,0,1,0,12
1124
+ 0_2007.png,0,0,0,0,0
1125
+ 0_2008.png,0,11,4,0,6
1126
+ 0_2009.png,0,0,0,0,0
1127
+ 0_201.png,0,0,5,0,51
1128
+ 0_2010.png,0,2,3,0,7
1129
+ 0_2011.png,0,0,1,0,7
1130
+ 0_2012.png,0,5,9,0,5
1131
+ 0_2013.png,0,2,4,0,4
1132
+ 0_2014.png,0,0,0,0,14
1133
+ 0_2015.png,0,0,0,0,7
1134
+ 0_2016.png,0,11,7,0,0
1135
+ 0_2017.png,0,3,6,0,5
1136
+ 0_2018.png,0,0,0,0,15
1137
+ 0_2019.png,0,0,0,0,0
1138
+ 0_202.png,0,3,12,0,12
1139
+ 0_2020.png,0,0,0,0,0
1140
+ 0_2021.png,0,1,7,0,0
1141
+ 0_2022.png,0,0,0,0,0
1142
+ 0_2023.png,0,2,19,0,0
1143
+ 0_2024.png,0,0,18,0,0
1144
+ 0_2025.png,0,0,10,0,0
1145
+ 0_2026.png,0,2,30,0,0
1146
+ 0_2027.png,0,0,8,0,0
1147
+ 0_2028.png,0,0,17,0,0
1148
+ 0_2029.png,0,0,14,0,0
1149
+ 0_203.png,0,6,7,0,0
1150
+ 0_2030.png,0,0,17,0,0
1151
+ 0_2031.png,0,2,19,0,0
1152
+ 0_2032.png,0,7,28,0,0
1153
+ 0_2033.png,0,2,14,0,0
1154
+ 0_2034.png,0,0,15,0,0
1155
+ 0_2035.png,0,0,12,0,0
1156
+ 0_2036.png,0,0,17,0,0
1157
+ 0_2037.png,0,0,20,0,0
1158
+ 0_2038.png,0,3,12,0,0
1159
+ 0_2039.png,0,1,13,0,0
1160
+ 0_204.png,0,2,8,0,38
1161
+ 0_2040.png,0,1,9,0,0
1162
+ 0_2041.png,0,6,20,0,0
1163
+ 0_2042.png,0,3,18,0,0
1164
+ 0_2043.png,0,0,15,0,5
1165
+ 0_2044.png,0,0,7,0,9
1166
+ 0_2045.png,0,0,1,0,11
1167
+ 0_2046.png,0,0,5,0,12
1168
+ 0_2047.png,0,0,9,0,12
1169
+ 0_2048.png,0,3,0,0,1
1170
+ 0_2049.png,0,0,6,0,10
1171
+ 0_205.png,0,0,2,0,42
1172
+ 0_2050.png,0,0,14,0,12
1173
+ 0_2051.png,0,0,13,0,4
1174
+ 0_2052.png,0,0,4,0,18
1175
+ 0_2053.png,0,1,14,0,6
1176
+ 0_2054.png,0,0,8,0,11
1177
+ 0_2055.png,0,0,17,0,10
1178
+ 0_2056.png,0,0,8,0,23
1179
+ 0_2057.png,19,0,0,1,0
1180
+ 0_2058.png,12,0,0,18,0
1181
+ 0_2059.png,11,1,0,4,0
1182
+ 0_206.png,0,1,6,0,30
1183
+ 0_2060.png,14,0,8,0,0
1184
+ 0_2061.png,28,0,0,0,0
1185
+ 0_2062.png,20,1,0,0,0
1186
+ 0_2063.png,33,2,0,0,0
1187
+ 0_2064.png,48,0,0,0,0
1188
+ 0_2065.png,39,3,0,0,0
1189
+ 0_2066.png,0,0,10,0,0
1190
+ 0_2067.png,39,9,1,0,0
1191
+ 0_2068.png,28,11,4,0,0
1192
+ 0_2069.png,16,4,0,0,0
1193
+ 0_207.png,0,0,2,0,30
1194
+ 0_2070.png,19,0,4,0,0
1195
+ 0_2071.png,15,0,4,1,0
1196
+ 0_2072.png,23,0,1,0,0
1197
+ 0_2073.png,15,1,1,0,0
1198
+ 0_2074.png,8,0,15,0,0
1199
+ 0_2075.png,9,1,7,0,0
1200
+ 0_2076.png,22,1,3,8,0
1201
+ 0_2077.png,10,1,10,0,0
1202
+ 0_2078.png,20,0,1,2,0
1203
+ 0_2079.png,0,2,17,0,0
1204
+ 0_208.png,0,1,8,0,21
1205
+ 0_2080.png,0,0,22,0,0
1206
+ 0_2081.png,0,3,19,0,0
1207
+ 0_2082.png,0,3,13,0,0
1208
+ 0_2083.png,0,0,18,0,0
1209
+ 0_2084.png,0,4,28,0,0
1210
+ 0_2085.png,0,4,33,0,0
1211
+ 0_2086.png,21,0,1,0,0
1212
+ 0_2087.png,13,0,10,0,0
1213
+ 0_2088.png,0,8,20,1,0
1214
+ 0_2089.png,1,2,14,0,0
1215
+ 0_209.png,0,4,8,0,15
1216
+ 0_2090.png,0,5,16,0,0
1217
+ 0_2091.png,0,0,19,0,0
1218
+ 0_2092.png,0,0,24,0,0
1219
+ 0_2093.png,0,0,20,0,0
1220
+ 0_2094.png,0,0,25,0,0
1221
+ 0_2095.png,0,0,26,0,0
1222
+ 0_2096.png,0,0,20,0,0
1223
+ 0_2097.png,0,0,22,0,0
1224
+ 0_2098.png,0,0,0,0,0
1225
+ 0_2099.png,13,0,6,0,0
1226
+ 0_21.png,9,0,2,0,0
1227
+ 0_210.png,0,2,3,0,36
1228
+ 0_2100.png,22,1,5,0,0
1229
+ 0_2101.png,21,1,1,0,0
1230
+ 0_2102.png,18,0,2,0,0
1231
+ 0_2103.png,0,0,11,0,0
1232
+ 0_2104.png,9,0,6,0,0
1233
+ 0_2105.png,5,2,17,0,0
1234
+ 0_2106.png,0,10,22,0,0
1235
+ 0_2107.png,4,4,11,0,0
1236
+ 0_2108.png,0,0,0,0,0
1237
+ 0_2109.png,0,0,0,0,0
1238
+ 0_211.png,0,3,14,0,2
1239
+ 0_2110.png,0,0,0,0,0
1240
+ 0_2111.png,0,0,0,0,0
1241
+ 0_2112.png,15,0,0,0,0
1242
+ 0_2113.png,18,0,0,0,0
1243
+ 0_2114.png,2,0,5,0,0
1244
+ 0_2115.png,0,0,0,0,0
1245
+ 0_2116.png,3,1,9,0,0
1246
+ 0_2117.png,9,0,2,0,0
1247
+ 0_2118.png,0,91,3,0,0
1248
+ 0_2119.png,0,83,5,0,0
1249
+ 0_212.png,0,0,5,0,46
1250
+ 0_2120.png,0,91,3,0,0
1251
+ 0_2121.png,0,90,2,0,0
1252
+ 0_2122.png,18,3,4,0,0
1253
+ 0_2123.png,17,4,3,0,0
1254
+ 0_2124.png,25,2,0,0,0
1255
+ 0_2125.png,0,4,7,0,0
1256
+ 0_2126.png,0,1,8,0,0
1257
+ 0_2127.png,0,0,0,0,8
1258
+ 0_2128.png,0,0,0,0,15
1259
+ 0_2129.png,0,0,0,0,13
1260
+ 0_213.png,0,3,10,0,32
1261
+ 0_2130.png,0,0,0,0,16
1262
+ 0_2131.png,0,0,0,0,0
1263
+ 0_2132.png,0,0,0,0,0
1264
+ 0_2133.png,0,0,0,0,0
1265
+ 0_2134.png,0,0,0,0,0
1266
+ 0_2135.png,0,0,0,0,0
1267
+ 0_2136.png,0,0,5,0,0
1268
+ 0_2137.png,0,0,0,0,0
1269
+ 0_2138.png,0,0,13,0,0
1270
+ 0_2139.png,0,0,25,0,0
1271
+ 0_214.png,0,2,11,0,29
1272
+ 0_2140.png,0,0,21,0,0
1273
+ 0_2141.png,0,0,16,0,0
1274
+ 0_2142.png,0,0,15,0,0
1275
+ 0_2143.png,0,0,18,0,0
1276
+ 0_2144.png,0,0,18,0,0
1277
+ 0_2145.png,0,0,18,0,0
1278
+ 0_2146.png,0,0,8,0,3
1279
+ 0_2147.png,0,0,1,0,4
1280
+ 0_2148.png,0,0,8,0,4
1281
+ 0_2149.png,0,0,8,0,2
1282
+ 0_215.png,0,3,13,0,22
1283
+ 0_2150.png,0,0,13,0,0
1284
+ 0_2151.png,0,1,10,0,0
1285
+ 0_2152.png,0,2,14,0,0
1286
+ 0_2153.png,49,0,0,0,0
1287
+ 0_2154.png,38,0,0,0,0
1288
+ 0_2155.png,0,27,16,0,0
1289
+ 0_2156.png,1,5,6,0,0
1290
+ 0_2157.png,0,6,7,0,0
1291
+ 0_2158.png,0,4,5,0,0
1292
+ 0_2159.png,19,0,0,0,0
1293
+ 0_216.png,0,0,5,0,16
1294
+ 0_2160.png,31,0,2,0,0
1295
+ 0_2161.png,4,5,0,8,0
1296
+ 0_2162.png,29,3,0,0,0
1297
+ 0_2163.png,0,0,0,0,0
1298
+ 0_2164.png,0,0,0,0,0
1299
+ 0_2165.png,0,0,0,0,0
1300
+ 0_2166.png,39,0,2,0,0
1301
+ 0_2167.png,38,2,0,0,0
1302
+ 0_2168.png,2,0,6,0,0
1303
+ 0_2169.png,24,0,0,0,0
1304
+ 0_217.png,0,0,7,0,23
1305
+ 0_2170.png,9,0,1,0,0
1306
+ 0_2171.png,16,0,8,0,0
1307
+ 0_2172.png,21,0,3,0,0
1308
+ 0_2173.png,25,0,0,0,0
1309
+ 0_2174.png,0,0,0,0,0
1310
+ 0_2175.png,0,1,0,0,0
1311
+ 0_2176.png,0,0,0,0,0
1312
+ 0_2177.png,0,0,9,0,0
1313
+ 0_2178.png,0,1,11,0,0
1314
+ 0_2179.png,0,0,11,0,0
1315
+ 0_218.png,0,1,11,0,15
1316
+ 0_2180.png,0,0,11,0,0
1317
+ 0_2181.png,0,0,9,0,0
1318
+ 0_2182.png,22,2,7,0,0
1319
+ 0_2183.png,19,0,2,0,0
1320
+ 0_2184.png,15,4,3,0,0
1321
+ 0_2185.png,0,0,7,0,9
1322
+ 0_2186.png,0,0,3,0,11
1323
+ 0_2187.png,0,0,6,0,7
1324
+ 0_2188.png,0,0,8,0,13
1325
+ 0_2189.png,22,0,0,0,0
1326
+ 0_219.png,0,11,5,0,22
1327
+ 0_2190.png,15,2,0,0,0
1328
+ 0_2191.png,8,0,0,0,0
1329
+ 0_2192.png,15,4,6,0,0
1330
+ 0_2193.png,1,7,28,0,0
1331
+ 0_2194.png,9,0,0,0,0
1332
+ 0_2195.png,6,1,1,0,0
1333
+ 0_2196.png,8,0,4,0,0
1334
+ 0_2197.png,14,0,1,0,0
1335
+ 0_2198.png,13,0,3,0,0
1336
+ 0_2199.png,13,0,1,0,0
1337
+ 0_22.png,8,0,0,0,0
1338
+ 0_220.png,0,0,0,0,36
1339
+ 0_2200.png,0,9,7,0,0
1340
+ 0_2201.png,0,3,12,0,0
1341
+ 0_2202.png,0,0,0,0,0
1342
+ 0_2203.png,17,4,1,0,0
1343
+ 0_2204.png,8,3,0,0,0
1344
+ 0_2205.png,16,2,1,0,0
1345
+ 0_2206.png,14,3,5,0,0
1346
+ 0_2207.png,0,5,36,0,0
1347
+ 0_2208.png,16,2,0,0,0
1348
+ 0_2209.png,0,0,3,0,68
1349
+ 0_221.png,0,0,7,0,25
1350
+ 0_2210.png,0,3,2,0,60
1351
+ 0_2211.png,0,3,1,0,59
1352
+ 0_2212.png,0,1,3,0,57
1353
+ 0_2213.png,0,0,0,0,10
1354
+ 0_2214.png,0,0,2,0,12
1355
+ 0_2215.png,0,0,8,0,13
1356
+ 0_2216.png,0,0,3,0,12
1357
+ 0_2217.png,0,0,1,0,17
1358
+ 0_2218.png,0,0,1,0,14
1359
+ 0_2219.png,17,1,1,0,0
1360
+ 0_222.png,0,0,4,0,32
1361
+ 0_2220.png,29,1,2,0,0
1362
+ 0_2221.png,23,0,0,0,0
1363
+ 0_2222.png,18,2,2,0,0
1364
+ 0_2223.png,20,0,3,0,0
1365
+ 0_2224.png,8,3,2,0,0
1366
+ 0_2225.png,24,0,0,0,0
1367
+ 0_2226.png,30,0,0,0,0
1368
+ 0_2227.png,0,2,15,0,0
1369
+ 0_2228.png,0,0,12,0,0
1370
+ 0_2229.png,0,1,12,0,0
1371
+ 0_223.png,0,0,2,0,51
1372
+ 0_2230.png,0,0,14,0,0
1373
+ 0_2231.png,23,1,0,0,0
1374
+ 0_2232.png,19,5,2,0,0
1375
+ 0_2233.png,25,0,0,0,0
1376
+ 0_2234.png,17,4,10,0,0
1377
+ 0_2235.png,18,0,0,0,0
1378
+ 0_2236.png,13,1,12,0,0
1379
+ 0_2237.png,0,0,27,0,0
1380
+ 0_2238.png,11,1,8,0,0
1381
+ 0_2239.png,3,0,13,0,0
1382
+ 0_224.png,0,2,1,0,35
1383
+ 0_2240.png,14,0,2,0,0
1384
+ 0_2241.png,3,0,13,0,0
1385
+ 0_2242.png,0,2,0,0,12
1386
+ 0_2243.png,0,0,0,0,11
1387
+ 0_2244.png,0,2,0,0,23
1388
+ 0_2245.png,0,0,15,0,0
1389
+ 0_2246.png,0,2,17,0,0
1390
+ 0_2247.png,12,3,9,0,0
1391
+ 0_2248.png,3,4,13,0,0
1392
+ 0_2249.png,20,1,1,0,0
1393
+ 0_225.png,0,1,4,0,20
1394
+ 0_2250.png,22,0,6,0,0
1395
+ 0_2251.png,14,2,4,0,0
1396
+ 0_2252.png,15,1,2,0,0
1397
+ 0_2253.png,18,0,0,0,0
1398
+ 0_2254.png,20,2,0,0,0
1399
+ 0_2255.png,23,2,1,0,0
1400
+ 0_2256.png,14,0,4,0,0
1401
+ 0_2257.png,19,2,5,0,0
1402
+ 0_2258.png,28,0,0,0,0
1403
+ 0_2259.png,24,1,1,0,0
1404
+ 0_226.png,0,0,7,0,8
1405
+ 0_2260.png,28,0,0,0,0
1406
+ 0_2261.png,18,3,2,0,0
1407
+ 0_2262.png,13,2,2,0,0
1408
+ 0_2263.png,0,0,0,0,0
1409
+ 0_2264.png,0,0,0,0,0
1410
+ 0_2265.png,0,0,0,0,0
1411
+ 0_2266.png,3,1,1,0,0
1412
+ 0_2267.png,0,0,0,0,0
1413
+ 0_2268.png,0,10,8,0,0
1414
+ 0_2269.png,0,3,11,0,0
1415
+ 0_227.png,0,1,5,0,37
1416
+ 0_2270.png,0,2,16,0,0
1417
+ 0_2271.png,0,5,14,0,0
1418
+ 0_2272.png,0,2,6,0,0
1419
+ 0_2273.png,0,0,42,0,0
1420
+ 0_2274.png,0,1,34,0,0
1421
+ 0_2275.png,0,1,39,0,0
1422
+ 0_2276.png,0,0,42,0,0
1423
+ 0_2277.png,0,0,37,0,33
1424
+ 0_2278.png,0,1,5,0,22
1425
+ 0_2279.png,0,1,4,0,11
1426
+ 0_228.png,0,2,7,0,31
1427
+ 0_2280.png,0,0,5,0,13
1428
+ 0_2281.png,0,4,32,0,1
1429
+ 0_2282.png,0,1,9,0,35
1430
+ 0_2283.png,0,1,40,0,4
1431
+ 0_2284.png,29,0,0,0,0
1432
+ 0_2285.png,6,1,0,0,0
1433
+ 0_2286.png,16,0,0,0,0
1434
+ 0_2287.png,8,1,6,0,0
1435
+ 0_2288.png,29,1,0,0,0
1436
+ 0_2289.png,22,0,4,0,0
1437
+ 0_229.png,0,2,10,0,26
1438
+ 0_2290.png,20,1,5,0,0
1439
+ 0_2291.png,1,1,15,0,0
1440
+ 0_2292.png,6,3,7,0,0
1441
+ 0_2293.png,12,4,16,0,0
1442
+ 0_2294.png,11,4,12,0,0
1443
+ 0_2295.png,23,5,6,0,0
1444
+ 0_2296.png,21,11,10,0,0
1445
+ 0_2297.png,20,20,7,0,0
1446
+ 0_2298.png,12,6,15,0,0
1447
+ 0_2299.png,21,5,7,0,0
1448
+ 0_23.png,7,0,5,0,0
1449
+ 0_230.png,0,1,8,0,32
1450
+ 0_2300.png,2,6,8,0,0
1451
+ 0_2301.png,16,0,0,0,0
1452
+ 0_2302.png,22,0,2,0,0
1453
+ 0_2303.png,19,0,0,0,0
1454
+ 0_2304.png,29,0,0,0,0
1455
+ 0_2305.png,29,0,0,0,0
1456
+ 0_2306.png,30,1,0,0,0
1457
+ 0_2307.png,25,2,0,0,0
1458
+ 0_2308.png,32,0,0,0,0
1459
+ 0_2309.png,40,0,0,0,0
1460
+ 0_231.png,0,1,11,0,11
1461
+ 0_2310.png,32,0,0,0,0
1462
+ 0_2311.png,30,0,0,0,0
1463
+ 0_2312.png,34,0,0,0,0
1464
+ 0_2313.png,0,4,7,0,8
1465
+ 0_2314.png,0,3,1,0,0
1466
+ 0_2315.png,0,0,5,0,12
1467
+ 0_2316.png,0,4,6,0,10
1468
+ 0_2317.png,0,0,8,0,9
1469
+ 0_2318.png,0,2,4,0,19
1470
+ 0_2319.png,6,0,0,0,0
1471
+ 0_232.png,0,2,4,0,27
1472
+ 0_2320.png,2,0,2,0,0
1473
+ 0_2321.png,9,0,1,0,0
1474
+ 0_2322.png,3,1,10,0,0
1475
+ 0_2323.png,0,0,14,0,0
1476
+ 0_2324.png,0,2,11,0,0
1477
+ 0_2325.png,8,1,20,0,0
1478
+ 0_2326.png,10,7,18,0,0
1479
+ 0_2327.png,23,0,3,0,0
1480
+ 0_2328.png,30,0,5,0,0
1481
+ 0_2329.png,9,0,21,0,0
1482
+ 0_233.png,0,0,0,0,37
1483
+ 0_2330.png,29,0,0,0,0
1484
+ 0_2331.png,2,1,21,0,0
1485
+ 0_2332.png,0,3,22,0,0
1486
+ 0_2333.png,0,12,22,0,0
1487
+ 0_2334.png,0,5,35,0,0
1488
+ 0_2335.png,0,1,31,0,0
1489
+ 0_2336.png,0,5,20,0,0
1490
+ 0_2337.png,0,13,18,0,0
1491
+ 0_2338.png,0,8,14,0,0
1492
+ 0_2339.png,0,5,5,0,27
1493
+ 0_234.png,1,0,3,0,0
1494
+ 0_2340.png,0,10,15,0,31
1495
+ 0_2341.png,0,1,3,0,0
1496
+ 0_2342.png,0,3,8,0,0
1497
+ 0_2343.png,0,1,6,0,0
1498
+ 0_2344.png,0,0,15,0,0
1499
+ 0_2345.png,0,2,10,0,0
1500
+ 0_2346.png,0,1,7,0,0
1501
+ 0_2347.png,0,0,14,0,0
1502
+ 0_2348.png,0,0,14,0,0
1503
+ 0_2349.png,0,0,9,0,0
1504
+ 0_235.png,9,0,3,0,0
1505
+ 0_2350.png,0,0,17,0,0
1506
+ 0_2351.png,0,0,13,0,0
1507
+ 0_2352.png,0,0,10,0,0
1508
+ 0_2353.png,0,0,6,0,0
1509
+ 0_2354.png,0,3,14,0,0
1510
+ 0_2355.png,0,4,6,0,0
1511
+ 0_2356.png,0,3,19,0,0
1512
+ 0_2357.png,6,1,19,0,0
1513
+ 0_2358.png,17,0,0,0,0
1514
+ 0_2359.png,16,0,2,0,0
1515
+ 0_236.png,0,0,4,0,0
1516
+ 0_2360.png,0,0,5,0,35
1517
+ 0_2361.png,0,0,22,0,12
1518
+ 0_2362.png,0,0,9,0,26
1519
+ 0_2363.png,0,0,3,0,11
1520
+ 0_2364.png,0,0,14,0,22
1521
+ 0_2365.png,10,0,1,0,0
1522
+ 0_2366.png,15,0,1,0,0
1523
+ 0_2367.png,15,0,3,0,0
1524
+ 0_2368.png,12,0,2,0,0
1525
+ 0_2369.png,9,0,3,0,0
1526
+ 0_237.png,5,0,1,0,0
1527
+ 0_2370.png,5,0,2,0,0
1528
+ 0_2371.png,16,0,1,0,0
1529
+ 0_2372.png,28,1,4,0,4
1530
+ 0_2373.png,0,0,22,0,0
1531
+ 0_2374.png,25,0,7,0,7
1532
+ 0_2375.png,0,0,13,0,7
1533
+ 0_2376.png,0,0,13,0,0
1534
+ 0_2377.png,0,0,9,0,0
1535
+ 0_2378.png,0,0,12,0,0
1536
+ 0_2379.png,0,3,18,0,0
1537
+ 0_238.png,16,1,1,0,0
1538
+ 0_2380.png,0,0,7,0,0
1539
+ 0_2381.png,0,0,11,0,0
1540
+ 0_2382.png,0,1,11,0,0
1541
+ 0_2383.png,1,0,5,0,0
1542
+ 0_2384.png,48,0,0,0,0
1543
+ 0_2385.png,11,0,10,0,0
1544
+ 0_2386.png,18,0,13,0,0
1545
+ 0_2387.png,41,0,0,0,0
1546
+ 0_2388.png,44,0,0,0,0
1547
+ 0_2389.png,45,0,0,0,0
1548
+ 0_239.png,0,0,4,0,0
1549
+ 0_2390.png,37,0,0,0,0
1550
+ 0_2391.png,18,0,6,0,0
1551
+ 0_2392.png,23,0,3,0,0
1552
+ 0_2393.png,0,0,3,0,0
1553
+ 0_2394.png,0,0,2,0,0
1554
+ 0_2395.png,0,0,5,0,0
1555
+ 0_2396.png,0,0,5,0,0
1556
+ 0_2397.png,0,0,4,0,0
1557
+ 0_2398.png,0,0,4,0,0
1558
+ 0_2399.png,0,0,10,0,0
1559
+ 0_24.png,10,0,0,0,0
1560
+ 0_240.png,0,5,7,0,23
1561
+ 0_2400.png,16,0,11,0,0
1562
+ 0_2401.png,7,0,15,0,0
1563
+ 0_2402.png,15,0,9,0,0
1564
+ 0_2403.png,11,0,12,0,0
1565
+ 0_2404.png,5,2,0,0,0
1566
+ 0_2405.png,6,1,0,0,0
1567
+ 0_2406.png,0,0,0,0,0
1568
+ 0_2407.png,0,0,0,0,0
1569
+ 0_2408.png,1,0,0,0,0
1570
+ 0_2409.png,13,0,2,0,0
1571
+ 0_241.png,0,7,11,0,9
1572
+ 0_2410.png,23,0,0,0,0
1573
+ 0_2411.png,14,0,5,0,0
1574
+ 0_2412.png,6,1,13,0,0
1575
+ 0_2413.png,25,21,0,0,0
1576
+ 0_2414.png,38,58,0,0,0
1577
+ 0_2415.png,20,33,0,0,0
1578
+ 0_2416.png,31,11,0,0,0
1579
+ 0_2417.png,0,3,14,0,0
1580
+ 0_2418.png,0,1,18,0,0
1581
+ 0_2419.png,0,6,4,0,0
1582
+ 0_242.png,0,10,8,0,20
1583
+ 0_2420.png,0,3,4,0,0
1584
+ 0_2421.png,55,0,4,0,0
1585
+ 0_2422.png,68,1,1,0,0
1586
+ 0_2423.png,80,2,11,0,0
1587
+ 0_2424.png,71,1,1,0,0
1588
+ 0_2425.png,62,1,5,0,0
1589
+ 0_2426.png,53,0,5,0,0
1590
+ 0_2427.png,62,1,1,0,0
1591
+ 0_2428.png,0,0,6,0,1
1592
+ 0_2429.png,0,0,1,0,0
1593
+ 0_243.png,0,11,8,0,13
1594
+ 0_2430.png,0,3,0,0,28
1595
+ 0_2431.png,0,1,3,0,3
1596
+ 0_2432.png,0,2,10,0,0
1597
+ 0_2433.png,0,1,3,0,0
1598
+ 0_2434.png,0,25,29,0,0
1599
+ 0_2435.png,0,92,27,0,0
1600
+ 0_2436.png,0,89,31,0,0
1601
+ 0_2437.png,0,85,22,0,0
1602
+ 0_2438.png,0,59,24,0,0
1603
+ 0_2439.png,0,91,32,0,0
1604
+ 0_244.png,0,9,12,0,0
1605
+ 0_2440.png,0,109,14,0,0
1606
+ 0_2441.png,16,0,0,0,0
1607
+ 0_2442.png,19,0,0,0,0
1608
+ 0_2443.png,14,0,0,0,0
1609
+ 0_2444.png,19,1,0,0,0
1610
+ 0_2445.png,5,5,0,0,0
1611
+ 0_2446.png,36,1,0,0,0
1612
+ 0_2447.png,28,0,7,0,0
1613
+ 0_2448.png,22,0,15,0,0
1614
+ 0_2449.png,36,1,1,0,0
1615
+ 0_245.png,0,1,6,0,17
1616
+ 0_2450.png,9,3,0,0,0
1617
+ 0_2451.png,14,2,5,0,0
1618
+ 0_2452.png,17,6,0,0,0
1619
+ 0_2453.png,14,2,0,0,0
1620
+ 0_2454.png,29,1,3,0,0
1621
+ 0_2455.png,21,6,2,0,0
1622
+ 0_2456.png,27,8,3,0,0
1623
+ 0_2457.png,18,4,7,0,0
1624
+ 0_2458.png,28,2,2,0,0
1625
+ 0_2459.png,17,10,1,0,0
1626
+ 0_246.png,0,7,6,0,21
1627
+ 0_2460.png,25,14,0,0,0
1628
+ 0_2461.png,0,0,3,0,0
1629
+ 0_2462.png,0,0,0,0,0
1630
+ 0_2463.png,0,0,0,0,0
1631
+ 0_2464.png,0,0,0,0,0
1632
+ 0_2465.png,0,0,0,0,0
1633
+ 0_2466.png,0,0,11,0,0
1634
+ 0_2467.png,0,0,14,0,0
1635
+ 0_2468.png,0,1,10,0,0
1636
+ 0_2469.png,0,0,20,0,0
1637
+ 0_247.png,0,1,3,0,36
1638
+ 0_2470.png,0,0,17,0,0
1639
+ 0_2471.png,32,1,6,3,0
1640
+ 0_2472.png,25,2,4,36,0
1641
+ 0_2473.png,40,1,2,5,0
1642
+ 0_2474.png,11,2,12,5,0
1643
+ 0_2475.png,27,5,1,0,0
1644
+ 0_2476.png,38,3,0,1,0
1645
+ 0_2477.png,17,12,14,1,0
1646
+ 0_2478.png,36,7,2,1,0
1647
+ 0_2479.png,26,12,1,1,0
1648
+ 0_248.png,0,3,5,0,41
1649
+ 0_2480.png,39,3,5,0,0
1650
+ 0_2481.png,21,8,5,1,0
1651
+ 0_2482.png,19,5,11,3,0
1652
+ 0_2483.png,33,0,0,1,0
1653
+ 0_2484.png,26,10,6,5,0
1654
+ 0_2485.png,21,3,9,1,0
1655
+ 0_2486.png,16,5,6,0,0
1656
+ 0_2487.png,0,1,18,0,0
1657
+ 0_2488.png,0,0,7,0,0
1658
+ 0_2489.png,4,0,3,0,0
1659
+ 0_249.png,0,0,3,0,46
1660
+ 0_2490.png,0,0,16,0,0
1661
+ 0_2491.png,1,4,7,0,0
1662
+ 0_2492.png,1,0,12,0,0
1663
+ 0_2493.png,0,1,20,0,0
1664
+ 0_2494.png,26,0,2,0,0
1665
+ 0_2495.png,25,7,10,1,0
1666
+ 0_2496.png,22,6,4,0,0
1667
+ 0_2497.png,0,0,8,0,0
1668
+ 0_2498.png,1,0,12,0,0
1669
+ 0_2499.png,0,0,8,0,0
1670
+ 0_25.png,13,0,1,0,0
1671
+ 0_250.png,0,6,20,0,20
1672
+ 0_2500.png,10,0,2,0,0
1673
+ 0_2501.png,3,0,10,0,0
1674
+ 0_2502.png,8,0,9,0,0
1675
+ 0_2503.png,8,0,5,0,0
1676
+ 0_2504.png,8,26,6,0,0
1677
+ 0_2505.png,18,8,5,1,0
1678
+ 0_2506.png,20,8,0,0,0
1679
+ 0_2507.png,23,7,3,1,0
1680
+ 0_2508.png,13,3,1,1,0
1681
+ 0_2509.png,17,20,8,0,0
1682
+ 0_251.png,0,11,3,0,27
1683
+ 0_2510.png,3,9,17,0,0
1684
+ 0_2511.png,11,13,12,1,0
1685
+ 0_2512.png,0,2,1,0,0
1686
+ 0_2513.png,0,2,1,0,0
1687
+ 0_2514.png,0,2,0,0,0
1688
+ 0_2515.png,0,0,0,0,0
1689
+ 0_2516.png,0,1,2,0,0
1690
+ 0_2517.png,0,1,12,0,0
1691
+ 0_2518.png,3,0,0,0,0
1692
+ 0_2519.png,13,1,0,0,0
1693
+ 0_252.png,0,19,4,0,28
1694
+ 0_2520.png,0,0,8,0,0
1695
+ 0_2521.png,17,1,0,0,0
1696
+ 0_2522.png,18,0,0,0,0
1697
+ 0_2523.png,22,2,1,0,0
1698
+ 0_2524.png,13,2,3,0,0
1699
+ 0_2525.png,9,81,0,0,0
1700
+ 0_2526.png,9,70,10,0,0
1701
+ 0_2527.png,5,30,6,0,0
1702
+ 0_2528.png,14,54,7,0,0
1703
+ 0_2529.png,12,29,7,0,0
1704
+ 0_253.png,0,5,10,0,33
1705
+ 0_2530.png,16,39,4,0,0
1706
+ 0_2531.png,23,1,0,0,0
1707
+ 0_2532.png,19,0,1,0,0
1708
+ 0_2533.png,20,1,0,0,0
1709
+ 0_2534.png,19,2,5,0,0
1710
+ 0_2535.png,19,1,0,0,0
1711
+ 0_2536.png,20,0,0,0,0
1712
+ 0_2537.png,0,3,4,0,14
1713
+ 0_2538.png,0,1,2,0,13
1714
+ 0_2539.png,0,1,5,2,11
1715
+ 0_254.png,0,10,6,0,27
1716
+ 0_2540.png,0,1,0,0,5
1717
+ 0_2541.png,0,2,1,0,14
1718
+ 0_2542.png,0,0,2,0,13
1719
+ 0_2543.png,0,1,0,0,11
1720
+ 0_2544.png,0,0,0,0,24
1721
+ 0_2545.png,0,0,2,0,24
1722
+ 0_2546.png,0,0,0,0,21
1723
+ 0_2547.png,0,0,1,0,16
1724
+ 0_2548.png,0,0,8,0,4
1725
+ 0_2549.png,0,0,0,0,8
1726
+ 0_255.png,0,16,6,0,24
1727
+ 0_2550.png,0,0,0,0,0
1728
+ 0_2551.png,0,0,0,0,0
1729
+ 0_2552.png,0,0,0,0,0
1730
+ 0_2553.png,0,0,4,0,22
1731
+ 0_2554.png,0,0,1,0,20
1732
+ 0_2555.png,0,1,1,0,30
1733
+ 0_2556.png,0,2,2,0,24
1734
+ 0_2557.png,0,0,2,0,34
1735
+ 0_2558.png,0,0,1,0,18
1736
+ 0_2559.png,0,1,4,0,22
1737
+ 0_256.png,0,1,6,0,0
1738
+ 0_2560.png,0,1,5,0,34
1739
+ 0_2561.png,0,0,4,0,14
1740
+ 0_2562.png,0,14,2,0,24
1741
+ 0_2563.png,13,0,1,0,0
1742
+ 0_2564.png,43,0,4,0,0
1743
+ 0_2565.png,36,1,6,0,0
1744
+ 0_2566.png,6,4,11,0,0
1745
+ 0_2567.png,8,6,17,0,0
1746
+ 0_2568.png,3,10,21,0,0
1747
+ 0_2569.png,9,10,13,0,0
1748
+ 0_257.png,0,0,15,0,0
1749
+ 0_2570.png,9,6,12,0,0
1750
+ 0_2571.png,9,9,20,0,0
1751
+ 0_2572.png,0,6,14,0,28
1752
+ 0_2573.png,0,0,1,0,28
1753
+ 0_2574.png,0,1,3,0,21
1754
+ 0_2575.png,0,5,9,0,20
1755
+ 0_2576.png,0,0,2,0,14
1756
+ 0_2577.png,0,1,10,0,29
1757
+ 0_2578.png,0,0,17,0,0
1758
+ 0_2579.png,0,0,23,0,0
1759
+ 0_258.png,0,0,4,0,27
1760
+ 0_2580.png,0,1,7,0,0
1761
+ 0_2581.png,30,0,1,0,0
1762
+ 0_2582.png,1,1,3,0,0
1763
+ 0_2583.png,0,0,30,0,0
1764
+ 0_2584.png,24,2,9,0,0
1765
+ 0_2585.png,35,0,0,0,0
1766
+ 0_2586.png,0,0,29,0,0
1767
+ 0_2587.png,0,1,25,1,0
1768
+ 0_2588.png,0,0,19,0,0
1769
+ 0_2589.png,61,1,2,2,0
1770
+ 0_259.png,0,1,5,0,19
1771
+ 0_2590.png,41,0,0,0,0
1772
+ 0_2591.png,22,1,0,0,0
1773
+ 0_2592.png,0,12,18,0,38
1774
+ 0_2593.png,0,7,27,0,32
1775
+ 0_2594.png,0,34,36,0,23
1776
+ 0_2595.png,0,0,8,0,0
1777
+ 0_2596.png,0,0,8,0,0
1778
+ 0_2597.png,0,0,4,0,0
1779
+ 0_2598.png,0,5,9,0,0
1780
+ 0_2599.png,56,0,4,0,0
1781
+ 0_26.png,4,0,7,0,0
1782
+ 0_260.png,0,0,6,0,22
1783
+ 0_2600.png,0,1,18,0,0
1784
+ 0_2601.png,15,2,0,0,0
1785
+ 0_2602.png,46,2,7,0,0
1786
+ 0_2603.png,55,0,2,0,0
1787
+ 0_2604.png,0,0,17,0,0
1788
+ 0_2605.png,0,4,21,0,0
1789
+ 0_2606.png,0,0,23,0,0
1790
+ 0_2607.png,0,0,10,0,0
1791
+ 0_2608.png,41,4,2,0,0
1792
+ 0_2609.png,0,138,28,0,0
1793
+ 0_261.png,19,1,0,0,0
1794
+ 0_2610.png,0,150,18,0,0
1795
+ 0_2611.png,0,145,20,0,0
1796
+ 0_2612.png,0,145,21,0,0
1797
+ 0_2613.png,53,0,3,16,0
1798
+ 0_2614.png,37,4,0,0,0
1799
+ 0_2615.png,0,8,49,0,0
1800
+ 0_2616.png,0,6,48,0,0
1801
+ 0_2617.png,59,3,15,0,0
1802
+ 0_2618.png,28,4,36,0,0
1803
+ 0_2619.png,0,1,0,0,42
1804
+ 0_262.png,18,0,0,0,0
1805
+ 0_2620.png,0,6,8,0,33
1806
+ 0_2621.png,0,0,1,0,32
1807
+ 0_2622.png,0,3,3,0,34
1808
+ 0_2623.png,0,0,23,0,0
1809
+ 0_2624.png,7,3,50,0,0
1810
+ 0_2625.png,1,1,51,0,0
1811
+ 0_2626.png,0,0,0,0,0
1812
+ 0_2627.png,0,4,6,0,0
1813
+ 0_2628.png,0,0,1,0,0
1814
+ 0_2629.png,0,12,10,0,0
1815
+ 0_263.png,16,0,1,0,0
1816
+ 0_2630.png,0,0,14,0,0
1817
+ 0_2631.png,0,0,1,0,0
1818
+ 0_2632.png,0,2,8,0,0
1819
+ 0_2633.png,22,1,32,0,0
1820
+ 0_2634.png,13,10,42,0,0
1821
+ 0_2635.png,34,2,13,0,0
1822
+ 0_2636.png,0,0,0,0,0
1823
+ 0_2637.png,0,0,2,0,0
1824
+ 0_2638.png,0,0,15,0,0
1825
+ 0_2639.png,0,0,0,0,0
1826
+ 0_264.png,15,0,1,0,0
1827
+ 0_2640.png,0,0,0,0,0
1828
+ 0_2641.png,0,0,3,0,0
1829
+ 0_2642.png,0,29,46,0,11
1830
+ 0_2643.png,0,15,14,0,30
1831
+ 0_2644.png,52,1,0,0,0
1832
+ 0_2645.png,36,0,10,0,0
1833
+ 0_2646.png,47,0,0,10,0
1834
+ 0_2647.png,41,0,5,0,0
1835
+ 0_2648.png,40,0,0,14,0
1836
+ 0_2649.png,19,22,36,0,0
1837
+ 0_265.png,17,0,3,0,0
1838
+ 0_2650.png,27,25,20,0,0
1839
+ 0_2651.png,61,6,11,0,0
1840
+ 0_2652.png,10,24,56,0,0
1841
+ 0_2653.png,0,19,15,0,0
1842
+ 0_2654.png,0,5,22,0,0
1843
+ 0_2655.png,0,10,30,0,0
1844
+ 0_266.png,20,0,0,0,0
1845
+ 0_267.png,19,0,0,0,0
1846
+ 0_268.png,20,0,0,0,0
1847
+ 0_269.png,11,9,5,0,0
1848
+ 0_27.png,4,0,7,0,0
1849
+ 0_270.png,17,7,1,0,0
1850
+ 0_271.png,0,14,21,0,0
1851
+ 0_272.png,2,29,14,0,0
1852
+ 0_273.png,23,0,1,0,0
1853
+ 0_274.png,16,0,1,0,0
1854
+ 0_275.png,6,7,1,0,0
1855
+ 0_276.png,19,0,0,0,0
1856
+ 0_277.png,23,0,0,0,0
1857
+ 0_278.png,16,0,3,0,0
1858
+ 0_279.png,0,4,6,0,4
1859
+ 0_28.png,6,0,10,0,0
1860
+ 0_280.png,0,1,13,0,13
1861
+ 0_281.png,0,2,12,0,25
1862
+ 0_282.png,0,5,13,0,7
1863
+ 0_283.png,12,0,7,0,0
1864
+ 0_284.png,11,2,7,0,0
1865
+ 0_285.png,20,5,3,0,0
1866
+ 0_286.png,5,13,16,0,0
1867
+ 0_287.png,5,12,11,0,0
1868
+ 0_288.png,0,22,8,0,0
1869
+ 0_289.png,0,23,5,0,0
1870
+ 0_29.png,7,0,6,0,0
1871
+ 0_290.png,2,28,14,0,0
1872
+ 0_291.png,0,30,8,0,0
1873
+ 0_292.png,1,15,7,0,0
1874
+ 0_293.png,0,7,9,0,0
1875
+ 0_294.png,0,12,9,0,0
1876
+ 0_295.png,0,45,6,0,0
1877
+ 0_296.png,26,4,2,0,0
1878
+ 0_297.png,0,1,14,0,0
1879
+ 0_298.png,11,2,9,0,0
1880
+ 0_299.png,1,1,10,0,0
1881
+ 0_3.png,10,0,0,0,0
1882
+ 0_30.png,12,0,0,0,0
1883
+ 0_300.png,3,1,8,0,0
1884
+ 0_301.png,9,2,6,0,0
1885
+ 0_302.png,10,1,1,0,0
1886
+ 0_303.png,3,0,10,0,0
1887
+ 0_304.png,0,2,9,0,0
1888
+ 0_305.png,0,1,10,0,0
1889
+ 0_306.png,6,0,7,0,0
1890
+ 0_307.png,10,0,4,0,0
1891
+ 0_308.png,11,0,8,0,0
1892
+ 0_309.png,22,0,4,0,0
1893
+ 0_31.png,0,7,16,0,0
1894
+ 0_310.png,1,5,8,0,0
1895
+ 0_311.png,0,1,7,0,15
1896
+ 0_312.png,0,1,14,0,10
1897
+ 0_313.png,0,1,14,0,9
1898
+ 0_314.png,0,0,11,0,8
1899
+ 0_315.png,0,2,10,0,18
1900
+ 0_316.png,0,0,5,0,34
1901
+ 0_317.png,0,0,15,0,14
1902
+ 0_318.png,0,1,1,0,28
1903
+ 0_319.png,0,1,3,0,20
1904
+ 0_32.png,1,3,8,0,0
1905
+ 0_320.png,0,5,0,0,31
1906
+ 0_321.png,0,6,2,0,17
1907
+ 0_322.png,16,0,0,0,0
1908
+ 0_323.png,7,3,1,0,0
1909
+ 0_324.png,19,3,4,0,0
1910
+ 0_325.png,18,4,1,0,0
1911
+ 0_326.png,15,11,2,0,0
1912
+ 0_327.png,27,3,1,0,0
1913
+ 0_328.png,24,0,0,0,0
1914
+ 0_329.png,38,0,0,0,0
1915
+ 0_33.png,11,0,1,0,0
1916
+ 0_330.png,30,0,0,0,0
1917
+ 0_331.png,26,2,3,0,0
1918
+ 0_332.png,24,3,1,0,0
1919
+ 0_333.png,28,0,5,0,0
1920
+ 0_334.png,28,1,2,0,0
1921
+ 0_335.png,0,24,17,0,0
1922
+ 0_336.png,12,12,10,0,0
1923
+ 0_337.png,4,0,1,0,0
1924
+ 0_338.png,2,0,0,0,0
1925
+ 0_339.png,9,0,1,0,0
1926
+ 0_34.png,2,8,9,0,0
1927
+ 0_340.png,7,0,0,0,0
1928
+ 0_341.png,8,0,0,0,0
1929
+ 0_342.png,0,2,11,0,0
1930
+ 0_343.png,0,6,10,0,0
1931
+ 0_344.png,0,6,5,0,0
1932
+ 0_345.png,0,4,12,0,0
1933
+ 0_346.png,2,0,5,0,0
1934
+ 0_347.png,7,1,3,0,0
1935
+ 0_348.png,5,2,2,0,0
1936
+ 0_349.png,7,0,2,0,0
1937
+ 0_35.png,6,2,3,0,0
1938
+ 0_350.png,23,0,0,0,0
1939
+ 0_351.png,22,0,0,0,0
1940
+ 0_352.png,33,0,0,0,0
1941
+ 0_353.png,33,0,0,0,0
1942
+ 0_354.png,33,0,0,0,0
1943
+ 0_355.png,26,0,0,0,0
1944
+ 0_356.png,3,4,7,0,0
1945
+ 0_357.png,1,0,8,0,0
1946
+ 0_358.png,18,0,2,0,0
1947
+ 0_359.png,1,0,3,0,0
1948
+ 0_36.png,8,0,5,0,0
1949
+ 0_360.png,12,2,4,0,0
1950
+ 0_361.png,8,0,0,0,0
1951
+ 0_362.png,6,1,5,0,0
1952
+ 0_363.png,12,0,1,0,0
1953
+ 0_364.png,12,0,1,0,0
1954
+ 0_365.png,5,0,9,0,0
1955
+ 0_366.png,11,0,4,0,0
1956
+ 0_367.png,19,0,2,0,0
1957
+ 0_368.png,17,0,4,0,0
1958
+ 0_369.png,18,0,1,0,0
1959
+ 0_37.png,2,1,8,0,0
1960
+ 0_370.png,12,0,2,0,0
1961
+ 0_371.png,12,0,0,0,0
1962
+ 0_372.png,7,0,5,0,0
1963
+ 0_373.png,14,0,0,0,0
1964
+ 0_374.png,24,0,0,0,0
1965
+ 0_375.png,11,0,1,0,0
1966
+ 0_376.png,9,1,3,0,0
1967
+ 0_377.png,14,0,2,0,0
1968
+ 0_378.png,11,0,0,0,0
1969
+ 0_379.png,9,0,2,0,0
1970
+ 0_38.png,0,1,6,0,0
1971
+ 0_380.png,11,0,1,0,0
1972
+ 0_381.png,11,0,2,0,0
1973
+ 0_382.png,12,0,0,0,0
1974
+ 0_383.png,12,1,5,0,0
1975
+ 0_384.png,21,1,7,0,0
1976
+ 0_385.png,18,10,6,0,0
1977
+ 0_386.png,18,7,15,0,0
1978
+ 0_387.png,9,1,2,0,0
1979
+ 0_388.png,27,1,0,0,0
1980
+ 0_389.png,5,10,7,0,0
1981
+ 0_39.png,10,0,4,0,0
1982
+ 0_390.png,18,3,0,0,0
1983
+ 0_391.png,9,3,3,0,0
1984
+ 0_392.png,5,10,0,0,0
1985
+ 0_393.png,2,7,9,0,0
1986
+ 0_394.png,9,1,3,0,0
1987
+ 0_395.png,0,6,10,0,0
1988
+ 0_396.png,0,4,4,0,0
1989
+ 0_397.png,0,8,11,0,0
1990
+ 0_398.png,10,2,2,0,0
1991
+ 0_399.png,5,14,2,0,0
1992
+ 0_4.png,3,2,2,0,0
1993
+ 0_40.png,3,1,8,0,0
1994
+ 0_400.png,15,2,1,0,0
1995
+ 0_401.png,7,6,5,0,0
1996
+ 0_402.png,2,3,7,0,0
1997
+ 0_403.png,0,12,9,0,0
1998
+ 0_404.png,9,0,0,0,0
1999
+ 0_405.png,11,0,2,0,0
2000
+ 0_406.png,13,1,1,0,0
2001
+ 0_407.png,4,2,2,0,0
2002
+ 0_408.png,7,2,9,0,0
2003
+ 0_409.png,8,0,7,0,0
2004
+ 0_41.png,0,1,8,0,0
2005
+ 0_410.png,14,1,2,0,0
2006
+ 0_411.png,9,0,12,0,0
2007
+ 0_412.png,0,1,7,0,0
2008
+ 0_413.png,2,1,12,0,0
2009
+ 0_414.png,24,0,2,0,0
2010
+ 0_415.png,15,0,2,0,0
2011
+ 0_416.png,17,1,3,0,0
2012
+ 0_417.png,11,1,1,0,0
2013
+ 0_418.png,0,0,3,0,0
2014
+ 0_419.png,10,3,1,0,1
2015
+ 0_42.png,9,0,5,0,0
2016
+ 0_420.png,16,1,4,0,0
2017
+ 0_421.png,19,0,2,0,0
2018
+ 0_422.png,20,0,1,0,0
2019
+ 0_423.png,6,0,5,0,0
2020
+ 0_424.png,24,0,0,0,0
2021
+ 0_425.png,19,2,0,0,0
2022
+ 0_426.png,17,0,1,0,0
2023
+ 0_427.png,25,2,1,0,0
2024
+ 0_428.png,14,7,8,0,0
2025
+ 0_429.png,16,0,9,0,0
2026
+ 0_43.png,2,0,9,0,0
2027
+ 0_430.png,27,3,4,0,0
2028
+ 0_431.png,10,2,13,0,0
2029
+ 0_432.png,28,0,4,0,0
2030
+ 0_433.png,20,0,0,0,0
2031
+ 0_434.png,26,0,0,0,0
2032
+ 0_435.png,24,0,0,0,0
2033
+ 0_436.png,16,0,1,0,0
2034
+ 0_437.png,21,0,2,0,0
2035
+ 0_438.png,24,0,4,0,0
2036
+ 0_439.png,22,0,1,0,0
2037
+ 0_44.png,2,0,7,0,0
2038
+ 0_440.png,13,0,1,0,0
2039
+ 0_441.png,0,2,9,0,0
2040
+ 0_442.png,4,4,7,0,0
2041
+ 0_443.png,3,1,6,0,0
2042
+ 0_444.png,3,0,18,0,0
2043
+ 0_445.png,0,0,6,0,0
2044
+ 0_446.png,4,3,13,0,0
2045
+ 0_447.png,2,3,6,0,0
2046
+ 0_448.png,6,2,4,0,0
2047
+ 0_449.png,10,2,3,0,0
2048
+ 0_45.png,0,1,2,0,19
2049
+ 0_450.png,7,1,9,0,0
2050
+ 0_451.png,5,2,8,0,0
2051
+ 0_452.png,6,5,8,0,0
2052
+ 0_453.png,5,1,12,0,0
2053
+ 0_454.png,7,5,8,0,0
2054
+ 0_455.png,6,3,9,0,0
2055
+ 0_456.png,0,1,12,0,0
2056
+ 0_457.png,3,3,4,0,0
2057
+ 0_458.png,5,3,11,0,0
2058
+ 0_459.png,16,1,0,0,0
2059
+ 0_46.png,0,1,7,0,21
2060
+ 0_460.png,7,2,3,0,0
2061
+ 0_461.png,3,2,6,0,0
2062
+ 0_462.png,0,2,10,0,0
2063
+ 0_463.png,5,1,2,0,0
2064
+ 0_464.png,6,2,1,0,0
2065
+ 0_465.png,0,0,2,0,0
2066
+ 0_466.png,1,1,5,0,0
2067
+ 0_467.png,5,4,8,0,0
2068
+ 0_468.png,0,0,5,0,0
2069
+ 0_469.png,1,0,6,0,0
2070
+ 0_47.png,0,1,8,0,14
2071
+ 0_470.png,15,3,5,0,0
2072
+ 0_471.png,1,5,8,0,0
2073
+ 0_472.png,0,5,2,0,0
2074
+ 0_473.png,12,3,2,0,0
2075
+ 0_474.png,0,0,15,0,0
2076
+ 0_475.png,2,12,8,0,0
2077
+ 0_476.png,3,2,3,0,0
2078
+ 0_477.png,7,1,7,0,0
2079
+ 0_478.png,10,1,2,0,0
2080
+ 0_479.png,11,1,2,0,0
2081
+ 0_48.png,0,1,4,0,1
2082
+ 0_480.png,11,2,6,0,0
2083
+ 0_481.png,8,1,3,0,0
2084
+ 0_482.png,0,4,10,0,0
2085
+ 0_483.png,9,1,8,0,0
2086
+ 0_484.png,7,3,6,0,0
2087
+ 0_485.png,3,2,10,0,0
2088
+ 0_486.png,7,3,9,0,0
2089
+ 0_487.png,8,3,8,0,0
2090
+ 0_488.png,11,0,3,0,0
2091
+ 0_489.png,11,1,2,0,0
2092
+ 0_49.png,0,0,1,0,28
2093
+ 0_490.png,19,0,3,0,0
2094
+ 0_491.png,21,0,0,0,0
2095
+ 0_492.png,5,5,4,0,0
2096
+ 0_493.png,3,1,8,0,0
2097
+ 0_494.png,6,0,6,0,0
2098
+ 0_495.png,8,0,2,0,0
2099
+ 0_496.png,13,0,2,0,0
2100
+ 0_497.png,20,1,1,0,0
2101
+ 0_498.png,9,0,3,0,0
2102
+ 0_499.png,15,0,1,0,0
2103
+ 0_5.png,2,1,4,0,0
2104
+ 0_50.png,0,0,2,0,13
2105
+ 0_500.png,10,1,5,0,0
2106
+ 0_501.png,0,4,8,0,0
2107
+ 0_502.png,13,1,5,0,0
2108
+ 0_503.png,0,2,10,0,0
2109
+ 0_504.png,8,0,1,0,0
2110
+ 0_505.png,8,0,5,0,0
2111
+ 0_506.png,3,2,9,0,0
2112
+ 0_507.png,26,2,0,0,0
2113
+ 0_508.png,2,5,20,0,0
2114
+ 0_509.png,35,1,1,0,0
2115
+ 0_51.png,4,0,9,0,0
2116
+ 0_510.png,27,0,2,0,0
2117
+ 0_511.png,26,0,0,0,0
2118
+ 0_512.png,6,0,9,0,0
2119
+ 0_513.png,26,0,1,0,0
2120
+ 0_514.png,22,4,4,0,0
2121
+ 0_515.png,8,3,3,0,0
2122
+ 0_516.png,16,1,1,0,0
2123
+ 0_517.png,15,0,4,0,0
2124
+ 0_518.png,11,0,3,0,0
2125
+ 0_519.png,10,1,6,0,0
2126
+ 0_52.png,3,0,1,0,0
2127
+ 0_520.png,5,5,10,0,0
2128
+ 0_521.png,19,0,2,0,0
2129
+ 0_522.png,21,0,0,0,0
2130
+ 0_523.png,18,1,2,0,0
2131
+ 0_524.png,19,6,2,0,0
2132
+ 0_525.png,17,4,1,0,0
2133
+ 0_526.png,21,1,0,0,0
2134
+ 0_527.png,30,0,1,0,0
2135
+ 0_528.png,42,1,0,0,0
2136
+ 0_529.png,25,0,1,0,0
2137
+ 0_53.png,29,0,0,0,0
2138
+ 0_530.png,26,0,0,0,0
2139
+ 0_531.png,29,0,0,0,0
2140
+ 0_532.png,26,3,0,0,0
2141
+ 0_533.png,2,6,17,0,0
2142
+ 0_534.png,13,1,10,0,0
2143
+ 0_535.png,13,1,3,0,0
2144
+ 0_536.png,13,4,5,0,0
2145
+ 0_537.png,0,16,10,0,0
2146
+ 0_538.png,0,13,9,0,0
2147
+ 0_539.png,0,17,7,0,0
2148
+ 0_54.png,4,1,4,0,0
2149
+ 0_540.png,23,0,0,0,0
2150
+ 0_541.png,0,13,7,0,0
2151
+ 0_542.png,23,1,3,0,0
2152
+ 0_543.png,19,8,0,0,0
2153
+ 0_544.png,19,4,5,0,0
2154
+ 0_545.png,25,1,5,0,0
2155
+ 0_546.png,26,2,0,0,0
2156
+ 0_547.png,25,2,1,0,0
2157
+ 0_548.png,16,1,7,0,0
2158
+ 0_549.png,11,5,5,0,0
2159
+ 0_55.png,15,0,5,0,0
2160
+ 0_550.png,16,2,0,0,0
2161
+ 0_551.png,16,0,0,0,0
2162
+ 0_552.png,4,1,16,0,0
2163
+ 0_553.png,12,10,4,0,0
2164
+ 0_554.png,24,0,0,0,0
2165
+ 0_555.png,25,0,1,0,0
2166
+ 0_556.png,10,4,8,0,0
2167
+ 0_557.png,11,3,11,0,0
2168
+ 0_558.png,18,0,0,0,0
2169
+ 0_559.png,12,2,5,0,0
2170
+ 0_56.png,4,0,12,0,0
2171
+ 0_560.png,2,21,12,0,0
2172
+ 0_561.png,14,3,6,0,0
2173
+ 0_562.png,14,1,7,0,0
2174
+ 0_563.png,20,0,2,0,0
2175
+ 0_564.png,2,0,7,0,0
2176
+ 0_565.png,0,1,2,0,28
2177
+ 0_566.png,0,0,8,0,0
2178
+ 0_567.png,0,2,1,0,9
2179
+ 0_568.png,0,0,1,0,17
2180
+ 0_569.png,0,0,4,0,11
2181
+ 0_57.png,0,2,13,0,0
2182
+ 0_570.png,0,2,5,0,9
2183
+ 0_571.png,0,2,2,0,25
2184
+ 0_572.png,0,4,9,0,12
2185
+ 0_573.png,0,5,1,0,22
2186
+ 0_574.png,0,1,1,0,32
2187
+ 0_575.png,19,0,0,0,0
2188
+ 0_576.png,20,0,0,0,0
2189
+ 0_577.png,15,1,1,0,0
2190
+ 0_578.png,20,0,2,0,0
2191
+ 0_579.png,15,0,0,0,0
2192
+ 0_58.png,0,0,1,0,30
2193
+ 0_580.png,20,0,1,0,0
2194
+ 0_581.png,11,0,0,0,0
2195
+ 0_582.png,0,0,5,0,0
2196
+ 0_583.png,0,1,2,0,0
2197
+ 0_584.png,0,0,0,0,0
2198
+ 0_585.png,19,0,0,0,0
2199
+ 0_586.png,0,0,0,0,0
2200
+ 0_587.png,10,0,6,0,0
2201
+ 0_588.png,7,0,9,0,0
2202
+ 0_589.png,10,0,1,0,0
2203
+ 0_59.png,0,2,2,0,23
2204
+ 0_590.png,11,0,2,0,0
2205
+ 0_591.png,42,0,1,0,0
2206
+ 0_592.png,1,0,3,0,0
2207
+ 0_593.png,21,1,0,0,0
2208
+ 0_594.png,11,0,3,0,0
2209
+ 0_595.png,31,0,0,0,0
2210
+ 0_596.png,32,0,0,0,0
2211
+ 0_597.png,19,0,0,0,0
2212
+ 0_598.png,45,1,0,0,0
2213
+ 0_599.png,3,2,1,0,0
2214
+ 0_6.png,5,2,2,0,0
2215
+ 0_60.png,0,0,6,0,18
2216
+ 0_600.png,42,0,0,0,0
2217
+ 0_601.png,15,3,0,0,0
2218
+ 0_602.png,34,0,0,0,0
2219
+ 0_603.png,52,2,0,0,0
2220
+ 0_604.png,0,0,0,0,0
2221
+ 0_605.png,43,0,0,0,0
2222
+ 0_606.png,33,0,0,0,0
2223
+ 0_607.png,11,1,1,0,0
2224
+ 0_608.png,12,1,0,0,0
2225
+ 0_609.png,2,0,2,0,0
2226
+ 0_61.png,0,0,0,0,40
2227
+ 0_610.png,46,2,0,0,0
2228
+ 0_611.png,35,2,0,0,0
2229
+ 0_612.png,12,1,1,0,0
2230
+ 0_613.png,58,0,0,0,0
2231
+ 0_614.png,35,2,1,0,0
2232
+ 0_615.png,54,2,2,0,0
2233
+ 0_616.png,17,1,1,0,0
2234
+ 0_617.png,8,1,1,0,0
2235
+ 0_618.png,16,0,1,0,0
2236
+ 0_619.png,9,2,6,0,0
2237
+ 0_62.png,0,1,8,0,9
2238
+ 0_620.png,0,0,4,0,37
2239
+ 0_621.png,0,1,1,0,46
2240
+ 0_622.png,0,0,2,0,48
2241
+ 0_623.png,0,2,2,0,34
2242
+ 0_624.png,0,4,2,0,34
2243
+ 0_625.png,0,1,6,0,32
2244
+ 0_626.png,0,6,13,0,0
2245
+ 0_627.png,2,2,3,0,0
2246
+ 0_628.png,2,5,6,0,0
2247
+ 0_629.png,1,3,8,0,0
2248
+ 0_63.png,0,0,9,0,11
2249
+ 0_630.png,1,1,5,0,0
2250
+ 0_631.png,3,1,6,0,0
2251
+ 0_632.png,0,5,2,0,0
2252
+ 0_633.png,0,0,3,0,0
2253
+ 0_634.png,0,8,5,0,0
2254
+ 0_635.png,0,4,2,0,0
2255
+ 0_636.png,7,0,5,0,0
2256
+ 0_637.png,2,0,3,0,0
2257
+ 0_638.png,11,1,6,0,0
2258
+ 0_639.png,0,0,4,0,0
2259
+ 0_64.png,0,0,0,0,41
2260
+ 0_640.png,0,12,6,0,0
2261
+ 0_641.png,2,28,3,0,0
2262
+ 0_642.png,1,18,8,0,0
2263
+ 0_643.png,2,3,6,0,0
2264
+ 0_644.png,0,0,1,0,0
2265
+ 0_645.png,2,15,2,0,0
2266
+ 0_646.png,3,6,6,0,0
2267
+ 0_647.png,0,36,2,0,0
2268
+ 0_648.png,0,29,5,0,0
2269
+ 0_649.png,4,19,7,0,0
2270
+ 0_65.png,0,0,14,0,7
2271
+ 0_650.png,14,3,0,0,0
2272
+ 0_651.png,0,30,2,0,0
2273
+ 0_652.png,3,10,12,0,0
2274
+ 0_653.png,15,0,0,0,0
2275
+ 0_654.png,17,0,0,0,0
2276
+ 0_655.png,0,34,4,0,0
2277
+ 0_656.png,0,19,3,0,0
2278
+ 0_657.png,0,12,7,0,0
2279
+ 0_658.png,0,6,3,0,0
2280
+ 0_659.png,0,2,5,0,0
2281
+ 0_66.png,0,0,2,0,39
2282
+ 0_660.png,0,6,4,0,0
2283
+ 0_661.png,19,1,1,0,0
2284
+ 0_662.png,3,8,6,0,0
2285
+ 0_663.png,1,22,13,0,0
2286
+ 0_664.png,7,10,3,0,0
2287
+ 0_665.png,6,12,6,0,0
2288
+ 0_666.png,8,1,3,0,0
2289
+ 0_667.png,11,3,6,0,0
2290
+ 0_668.png,14,1,3,0,0
2291
+ 0_669.png,11,4,1,0,0
2292
+ 0_67.png,0,2,2,0,33
2293
+ 0_670.png,4,5,13,0,0
2294
+ 0_671.png,24,1,0,0,0
2295
+ 0_672.png,25,0,2,0,0
2296
+ 0_673.png,33,0,0,0,0
2297
+ 0_674.png,32,0,0,0,0
2298
+ 0_675.png,12,1,6,0,0
2299
+ 0_676.png,0,1,2,0,48
2300
+ 0_677.png,0,2,3,0,42
2301
+ 0_678.png,0,0,7,0,22
2302
+ 0_679.png,0,1,5,0,42
2303
+ 0_68.png,0,0,5,0,20
2304
+ 0_680.png,20,1,1,0,0
2305
+ 0_681.png,21,0,0,0,0
2306
+ 0_682.png,23,4,0,0,0
2307
+ 0_683.png,12,1,0,0,0
2308
+ 0_684.png,0,32,7,0,0
2309
+ 0_685.png,0,15,12,0,0
2310
+ 0_686.png,7,23,8,0,0
2311
+ 0_687.png,8,7,1,0,0
2312
+ 0_688.png,1,28,7,0,0
2313
+ 0_689.png,7,2,1,0,0
2314
+ 0_69.png,0,1,4,0,13
2315
+ 0_690.png,0,35,5,0,0
2316
+ 0_691.png,3,4,9,0,0
2317
+ 0_692.png,1,12,5,0,0
2318
+ 0_693.png,1,13,15,0,0
2319
+ 0_694.png,10,0,2,0,0
2320
+ 0_695.png,9,0,2,0,0
2321
+ 0_696.png,13,1,3,0,0
2322
+ 0_697.png,7,2,5,0,0
2323
+ 0_698.png,13,0,1,0,0
2324
+ 0_699.png,8,2,6,0,0
2325
+ 0_7.png,8,0,2,0,0
2326
+ 0_70.png,0,0,8,0,15
2327
+ 0_700.png,6,2,5,0,0
2328
+ 0_701.png,13,0,0,0,0
2329
+ 0_702.png,0,0,9,0,0
2330
+ 0_703.png,8,0,1,0,0
2331
+ 0_704.png,12,0,1,0,0
2332
+ 0_705.png,13,0,2,0,0
2333
+ 0_706.png,6,1,2,0,0
2334
+ 0_707.png,12,0,7,0,0
2335
+ 0_708.png,10,0,0,0,0
2336
+ 0_709.png,9,0,1,0,0
2337
+ 0_71.png,0,1,13,0,22
2338
+ 0_710.png,9,0,2,0,0
2339
+ 0_711.png,0,1,5,0,0
2340
+ 0_712.png,14,1,1,0,0
2341
+ 0_713.png,14,0,1,0,0
2342
+ 0_714.png,15,0,3,0,0
2343
+ 0_715.png,6,1,0,0,0
2344
+ 0_716.png,11,0,1,0,0
2345
+ 0_717.png,23,0,0,0,0
2346
+ 0_718.png,9,2,5,0,0
2347
+ 0_719.png,11,0,3,0,0
2348
+ 0_72.png,0,0,3,0,47
2349
+ 0_720.png,13,1,1,0,0
2350
+ 0_721.png,4,0,4,0,0
2351
+ 0_722.png,17,0,0,0,0
2352
+ 0_723.png,17,0,2,0,0
2353
+ 0_724.png,8,3,2,0,0
2354
+ 0_725.png,13,0,1,0,0
2355
+ 0_726.png,14,0,1,0,0
2356
+ 0_727.png,4,1,3,0,0
2357
+ 0_728.png,12,0,2,0,0
2358
+ 0_729.png,9,0,2,0,0
2359
+ 0_73.png,2,0,4,0,0
2360
+ 0_730.png,11,0,1,0,0
2361
+ 0_731.png,14,0,2,0,0
2362
+ 0_732.png,6,1,2,0,0
2363
+ 0_733.png,8,0,5,0,0
2364
+ 0_734.png,10,4,8,0,0
2365
+ 0_735.png,9,0,11,0,0
2366
+ 0_736.png,7,1,10,0,0
2367
+ 0_737.png,5,0,9,0,0
2368
+ 0_738.png,8,16,20,0,0
2369
+ 0_739.png,16,8,7,0,0
2370
+ 0_74.png,1,0,4,0,0
2371
+ 0_740.png,9,19,12,0,0
2372
+ 0_741.png,35,0,1,0,0
2373
+ 0_742.png,27,1,2,0,0
2374
+ 0_743.png,32,0,2,0,0
2375
+ 0_744.png,41,2,3,0,0
2376
+ 0_745.png,0,9,1,0,0
2377
+ 0_746.png,0,4,7,0,0
2378
+ 0_747.png,0,4,6,0,0
2379
+ 0_748.png,0,0,0,0,0
2380
+ 0_749.png,0,24,2,0,0
2381
+ 0_75.png,10,0,2,0,0
2382
+ 0_750.png,0,0,0,0,0
2383
+ 0_751.png,0,15,10,0,0
2384
+ 0_752.png,0,1,1,0,0
2385
+ 0_753.png,0,17,6,0,0
2386
+ 0_754.png,27,1,1,0,0
2387
+ 0_755.png,28,0,0,0,0
2388
+ 0_756.png,24,0,1,0,0
2389
+ 0_757.png,4,0,4,0,0
2390
+ 0_758.png,27,0,0,0,0
2391
+ 0_759.png,36,1,0,0,0
2392
+ 0_76.png,7,1,3,0,0
2393
+ 0_760.png,4,11,14,0,0
2394
+ 0_761.png,0,4,9,0,0
2395
+ 0_762.png,0,1,16,0,0
2396
+ 0_763.png,0,8,11,0,0
2397
+ 0_764.png,0,10,5,0,0
2398
+ 0_765.png,0,9,9,0,0
2399
+ 0_766.png,35,2,0,0,0
2400
+ 0_767.png,31,1,0,0,0
2401
+ 0_768.png,45,2,0,0,0
2402
+ 0_769.png,35,9,0,0,0
2403
+ 0_77.png,0,3,6,0,0
2404
+ 0_770.png,20,0,4,0,0
2405
+ 0_771.png,17,2,5,0,0
2406
+ 0_772.png,39,1,2,0,0
2407
+ 0_773.png,31,2,5,0,0
2408
+ 0_774.png,0,8,2,0,0
2409
+ 0_775.png,3,7,1,0,0
2410
+ 0_776.png,0,6,1,7,0
2411
+ 0_777.png,0,0,3,0,0
2412
+ 0_778.png,0,1,2,0,0
2413
+ 0_779.png,0,0,1,0,0
2414
+ 0_78.png,7,0,2,0,0
2415
+ 0_780.png,0,0,0,0,0
2416
+ 0_781.png,25,1,0,0,0
2417
+ 0_782.png,11,11,3,8,0
2418
+ 0_783.png,20,1,2,0,0
2419
+ 0_784.png,20,11,8,10,0
2420
+ 0_785.png,35,0,2,0,0
2421
+ 0_786.png,50,5,6,0,0
2422
+ 0_787.png,15,0,0,0,0
2423
+ 0_788.png,18,0,1,0,0
2424
+ 0_789.png,19,0,2,0,0
2425
+ 0_79.png,0,3,9,0,0
2426
+ 0_790.png,16,0,0,1,0
2427
+ 0_791.png,30,0,1,0,0
2428
+ 0_792.png,12,0,0,0,0
2429
+ 0_793.png,20,0,1,0,0
2430
+ 0_794.png,8,1,1,0,0
2431
+ 0_795.png,24,0,0,0,0
2432
+ 0_796.png,26,0,0,0,0
2433
+ 0_797.png,0,0,18,0,0
2434
+ 0_798.png,0,3,18,0,0
2435
+ 0_799.png,0,0,23,0,0
2436
+ 0_8.png,2,1,8,0,0
2437
+ 0_80.png,0,2,2,0,0
2438
+ 0_800.png,0,0,13,0,0
2439
+ 0_801.png,0,1,25,0,0
2440
+ 0_802.png,0,3,11,0,0
2441
+ 0_803.png,0,4,11,0,0
2442
+ 0_804.png,0,1,4,0,0
2443
+ 0_805.png,0,6,7,0,0
2444
+ 0_806.png,0,7,16,0,0
2445
+ 0_807.png,0,2,14,0,0
2446
+ 0_808.png,0,1,1,0,0
2447
+ 0_809.png,0,0,1,0,0
2448
+ 0_81.png,2,0,8,0,0
2449
+ 0_810.png,0,1,1,0,0
2450
+ 0_811.png,0,0,0,0,0
2451
+ 0_812.png,0,0,0,0,0
2452
+ 0_813.png,0,0,0,0,0
2453
+ 0_814.png,0,1,1,0,0
2454
+ 0_815.png,0,5,4,0,0
2455
+ 0_816.png,0,2,0,0,0
2456
+ 0_817.png,0,106,2,0,0
2457
+ 0_818.png,0,75,17,0,0
2458
+ 0_819.png,0,114,4,0,0
2459
+ 0_82.png,4,0,7,0,0
2460
+ 0_820.png,0,0,2,0,0
2461
+ 0_821.png,0,1,1,0,0
2462
+ 0_822.png,2,1,7,0,0
2463
+ 0_823.png,20,0,0,0,0
2464
+ 0_824.png,17,1,0,0,0
2465
+ 0_825.png,0,105,0,0,0
2466
+ 0_826.png,0,76,9,0,0
2467
+ 0_827.png,0,82,11,0,0
2468
+ 0_828.png,0,0,0,0,0
2469
+ 0_829.png,0,0,1,0,0
2470
+ 0_83.png,0,0,2,0,23
2471
+ 0_830.png,0,0,0,0,0
2472
+ 0_831.png,0,0,1,0,0
2473
+ 0_832.png,0,0,0,0,0
2474
+ 0_833.png,0,0,0,0,0
2475
+ 0_834.png,15,0,19,0,0
2476
+ 0_835.png,34,0,14,0,0
2477
+ 0_836.png,4,0,43,0,0
2478
+ 0_837.png,0,4,2,0,0
2479
+ 0_838.png,0,3,9,0,0
2480
+ 0_839.png,0,4,6,0,0
2481
+ 0_84.png,0,0,12,0,37
2482
+ 0_840.png,4,9,7,0,0
2483
+ 0_841.png,0,6,12,0,0
2484
+ 0_842.png,0,23,7,0,0
2485
+ 0_843.png,0,14,22,0,0
2486
+ 0_844.png,0,78,2,0,0
2487
+ 0_845.png,0,81,5,0,0
2488
+ 0_846.png,0,10,17,0,0
2489
+ 0_847.png,0,3,21,0,0
2490
+ 0_848.png,0,10,21,0,0
2491
+ 0_849.png,0,3,19,0,0
2492
+ 0_85.png,0,0,11,0,31
2493
+ 0_850.png,0,2,22,0,0
2494
+ 0_851.png,19,2,0,0,0
2495
+ 0_852.png,20,0,0,9,0
2496
+ 0_853.png,32,0,1,0,0
2497
+ 0_854.png,0,3,18,0,0
2498
+ 0_855.png,3,2,15,0,0
2499
+ 0_856.png,11,2,6,0,0
2500
+ 0_857.png,0,4,18,0,0
2501
+ 0_858.png,20,1,10,0,0
2502
+ 0_859.png,12,6,21,0,0
2503
+ 0_86.png,0,1,0,0,65
2504
+ 0_860.png,24,1,3,0,0
2505
+ 0_861.png,34,0,3,0,0
2506
+ 0_862.png,19,0,2,0,0
2507
+ 0_863.png,13,1,11,0,0
2508
+ 0_864.png,0,6,31,0,0
2509
+ 0_865.png,15,3,2,0,0
2510
+ 0_866.png,34,6,9,0,0
2511
+ 0_867.png,19,0,15,0,0
2512
+ 0_868.png,11,0,14,0,0
2513
+ 0_869.png,28,0,0,0,0
2514
+ 0_87.png,0,0,9,0,19
2515
+ 0_870.png,29,0,6,0,0
2516
+ 0_871.png,35,3,0,0,0
2517
+ 0_872.png,26,1,7,0,0
2518
+ 0_873.png,18,0,0,0,0
2519
+ 0_874.png,27,1,10,0,0
2520
+ 0_875.png,37,0,0,0,0
2521
+ 0_876.png,30,0,0,0,0
2522
+ 0_877.png,1,10,17,0,0
2523
+ 0_878.png,10,3,15,0,0
2524
+ 0_879.png,36,0,2,0,0
2525
+ 0_88.png,0,0,3,0,37
2526
+ 0_880.png,27,6,3,0,0
2527
+ 0_881.png,29,0,5,0,0
2528
+ 0_882.png,18,5,10,0,0
2529
+ 0_883.png,21,0,5,0,0
2530
+ 0_884.png,23,0,0,0,0
2531
+ 0_885.png,13,0,11,0,0
2532
+ 0_886.png,22,9,8,0,0
2533
+ 0_887.png,20,0,1,0,0
2534
+ 0_888.png,14,0,4,0,0
2535
+ 0_889.png,37,3,1,0,0
2536
+ 0_89.png,0,0,11,0,12
2537
+ 0_890.png,32,4,6,0,0
2538
+ 0_891.png,29,2,5,0,0
2539
+ 0_892.png,10,24,17,0,0
2540
+ 0_893.png,16,1,8,0,0
2541
+ 0_894.png,34,0,2,0,0
2542
+ 0_895.png,14,2,15,0,0
2543
+ 0_896.png,30,1,8,0,0
2544
+ 0_897.png,0,14,13,0,0
2545
+ 0_898.png,9,0,5,0,0
2546
+ 0_899.png,13,4,1,0,0
2547
+ 0_9.png,5,1,7,0,0
2548
+ 0_90.png,0,0,0,0,52
2549
+ 0_900.png,0,6,9,0,18
2550
+ 0_901.png,1,2,21,0,3
2551
+ 0_902.png,0,5,9,0,31
2552
+ 0_903.png,28,1,4,0,0
2553
+ 0_904.png,20,6,11,0,0
2554
+ 0_905.png,44,14,5,0,0
2555
+ 0_906.png,43,3,5,0,0
2556
+ 0_907.png,27,0,0,0,0
2557
+ 0_908.png,37,0,2,0,0
2558
+ 0_909.png,24,0,3,0,0
2559
+ 0_91.png,0,0,1,0,30
2560
+ 0_910.png,25,1,7,0,0
2561
+ 0_911.png,41,0,0,0,0
2562
+ 0_912.png,22,0,1,0,0
2563
+ 0_913.png,14,0,9,0,0
2564
+ 0_914.png,10,1,6,0,0
2565
+ 0_915.png,18,1,2,0,0
2566
+ 0_916.png,18,0,5,0,0
2567
+ 0_917.png,13,0,1,0,0
2568
+ 0_918.png,22,1,9,0,0
2569
+ 0_919.png,33,0,4,0,0
2570
+ 0_92.png,0,0,1,0,37
2571
+ 0_920.png,19,0,3,0,0
2572
+ 0_921.png,24,0,0,0,0
2573
+ 0_922.png,22,0,5,0,0
2574
+ 0_923.png,19,1,0,0,0
2575
+ 0_924.png,1,14,10,0,0
2576
+ 0_925.png,8,3,3,0,0
2577
+ 0_926.png,15,0,3,0,0
2578
+ 0_927.png,16,0,0,0,0
2579
+ 0_928.png,14,4,8,0,0
2580
+ 0_929.png,7,1,5,0,0
2581
+ 0_93.png,4,1,7,0,0
2582
+ 0_930.png,3,2,15,0,23
2583
+ 0_931.png,1,0,6,0,40
2584
+ 0_932.png,20,1,2,0,4
2585
+ 0_933.png,33,0,2,0,0
2586
+ 0_934.png,29,0,6,0,0
2587
+ 0_935.png,26,0,2,0,0
2588
+ 0_936.png,33,0,1,0,0
2589
+ 0_937.png,9,0,10,0,0
2590
+ 0_938.png,18,0,7,0,0
2591
+ 0_939.png,17,0,6,0,0
2592
+ 0_94.png,5,0,9,0,0
2593
+ 0_940.png,15,0,10,0,0
2594
+ 0_941.png,30,2,1,0,0
2595
+ 0_942.png,39,0,0,0,0
2596
+ 0_943.png,30,1,3,0,0
2597
+ 0_944.png,31,5,2,0,0
2598
+ 0_945.png,29,1,5,0,0
2599
+ 0_946.png,20,2,9,0,0
2600
+ 0_947.png,39,0,4,0,0
2601
+ 0_948.png,10,0,7,0,0
2602
+ 0_949.png,20,0,6,0,0
2603
+ 0_95.png,24,1,0,0,0
2604
+ 0_950.png,20,0,6,0,0
2605
+ 0_951.png,19,0,8,0,0
2606
+ 0_952.png,38,0,2,0,0
2607
+ 0_953.png,65,3,1,0,0
2608
+ 0_954.png,54,17,2,0,0
2609
+ 0_955.png,45,12,0,0,0
2610
+ 0_956.png,27,34,4,0,0
2611
+ 0_957.png,90,0,0,0,0
2612
+ 0_958.png,100,0,0,0,0
2613
+ 0_959.png,119,1,0,0,0
2614
+ 0_96.png,14,0,2,0,0
2615
+ 0_960.png,91,1,0,0,0
2616
+ 0_961.png,50,29,36,0,0
2617
+ 0_962.png,0,112,1,0,0
2618
+ 0_963.png,26,0,0,0,0
2619
+ 0_964.png,30,1,1,0,0
2620
+ 0_965.png,10,0,0,9,0
2621
+ 0_966.png,0,26,23,0,0
2622
+ 0_967.png,0,72,6,0,0
2623
+ 0_968.png,0,91,3,0,0
2624
+ 0_969.png,4,0,7,0,0
2625
+ 0_97.png,13,1,11,0,0
2626
+ 0_970.png,0,0,14,0,0
2627
+ 0_971.png,14,0,0,0,0
2628
+ 0_972.png,3,0,11,0,0
2629
+ 0_973.png,9,0,4,0,0
2630
+ 0_974.png,2,0,7,0,0
2631
+ 0_975.png,13,6,14,0,0
2632
+ 0_976.png,14,1,6,0,0
2633
+ 0_977.png,28,1,2,0,0
2634
+ 0_978.png,34,0,0,0,0
2635
+ 0_979.png,39,0,0,0,0
2636
+ 0_98.png,10,2,4,0,0
2637
+ 0_980.png,56,0,0,0,0
2638
+ 0_981.png,56,0,1,0,0
2639
+ 0_982.png,38,0,0,0,0
2640
+ 0_983.png,45,0,2,0,0
2641
+ 0_984.png,53,1,0,0,0
2642
+ 0_985.png,48,1,0,0,0
2643
+ 0_986.png,43,0,0,0,0
2644
+ 0_987.png,49,0,1,0,0
2645
+ 0_988.png,46,0,0,0,0
2646
+ 0_989.png,48,0,0,0,0
2647
+ 0_99.png,2,5,8,0,0
2648
+ 0_990.png,46,0,0,0,0
2649
+ 0_991.png,53,0,1,0,0
2650
+ 0_992.png,41,0,0,0,0
2651
+ 0_993.png,37,1,0,0,0
2652
+ 0_994.png,42,0,0,0,0
2653
+ 0_995.png,0,0,9,0,0
2654
+ 0_996.png,0,0,0,0,0
2655
+ 0_997.png,0,0,1,0,0
2656
+ 0_998.png,0,0,0,0,0
2657
+ 0_999.png,0,0,3,0,0
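The two fold-0 metadata files added here go together: cell_count.csv holds one row of per-patch nuclei counts, and types.csv (below) maps each patch to its tissue of origin. A minimal loading sketch, assuming pandas and that the five count columns follow PanNuke's nuclei classes (Neoplastic, Inflammatory, Connective, Dead, Epithelial) — the cell_count.csv header row is not visible in this excerpt, so those names are an assumption:

```python
# Sketch: load and join the fold-0 metadata CSVs added in this commit.
# Assumption: cell_count.csv's count columns correspond to PanNuke's five
# nuclei classes (Neoplastic, Inflammatory, Connective, Dead, Epithelial).
import pandas as pd

cell_counts = pd.read_csv("docs/datasets/PanNuke/fold0/cell_count.csv")
tissue_types = pd.read_csv("docs/datasets/PanNuke/fold0/types.csv")  # header: img,type

# Merge on the patch name; the image-name column in cell_count.csv is
# assumed to be its first column (types.csv uses "img").
img_col = cell_counts.columns[0]
merged = cell_counts.merge(tissue_types, left_on=img_col, right_on="img")

# Per-tissue class balance, e.g. to check how skewed each tissue's counts are.
print(merged.groupby("type").sum(numeric_only=True))
```

Joining on the patch name like this is one way to sanity-check the split before training, since per-tissue class balance in PanNuke is highly uneven (compare the dead-cell column against the others above).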
docs/datasets/PanNuke/fold0/types.csv ADDED
@@ -0,0 +1,2657 @@
1
+ img,type
2
+ 0_0.png,Breast
3
+ 0_1.png,Breast
4
+ 0_2.png,Breast
5
+ 0_3.png,Breast
6
+ 0_4.png,Breast
7
+ 0_5.png,Breast
8
+ 0_6.png,Breast
9
+ 0_7.png,Breast
10
+ 0_8.png,Breast
11
+ 0_9.png,Breast
12
+ 0_10.png,Breast
13
+ 0_11.png,Breast
14
+ 0_12.png,Breast
15
+ 0_13.png,Breast
16
+ 0_14.png,Breast
17
+ 0_15.png,Breast
18
+ 0_16.png,Breast
19
+ 0_17.png,Breast
20
+ 0_18.png,Breast
21
+ 0_19.png,Breast
22
+ 0_20.png,Breast
23
+ 0_21.png,Breast
24
+ 0_22.png,Breast
25
+ 0_23.png,Breast
26
+ 0_24.png,Breast
27
+ 0_25.png,Breast
28
+ 0_26.png,Breast
29
+ 0_27.png,Breast
30
+ 0_28.png,Breast
31
+ 0_29.png,Breast
32
+ 0_30.png,Breast
33
+ 0_31.png,Breast
34
+ 0_32.png,Breast
35
+ 0_33.png,Breast
36
+ 0_34.png,Breast
37
+ 0_35.png,Breast
38
+ 0_36.png,Breast
39
+ 0_37.png,Breast
40
+ 0_38.png,Breast
41
+ 0_39.png,Breast
42
+ 0_40.png,Breast
43
+ 0_41.png,Breast
44
+ 0_42.png,Breast
45
+ 0_43.png,Breast
46
+ 0_44.png,Breast
47
+ 0_45.png,Breast
48
+ 0_46.png,Breast
49
+ 0_47.png,Breast
50
+ 0_48.png,Breast
51
+ 0_49.png,Breast
52
+ 0_50.png,Breast
53
+ 0_51.png,Breast
54
+ 0_52.png,Breast
55
+ 0_53.png,Breast
56
+ 0_54.png,Breast
57
+ 0_55.png,Breast
58
+ 0_56.png,Breast
59
+ 0_57.png,Breast
60
+ 0_58.png,Breast
61
+ 0_59.png,Breast
62
+ 0_60.png,Breast
63
+ 0_61.png,Breast
64
+ 0_62.png,Breast
65
+ 0_63.png,Breast
66
+ 0_64.png,Breast
67
+ 0_65.png,Breast
68
+ 0_66.png,Breast
69
+ 0_67.png,Breast
70
+ 0_68.png,Breast
71
+ 0_69.png,Breast
72
+ 0_70.png,Breast
73
+ 0_71.png,Breast
74
+ 0_72.png,Breast
75
+ 0_73.png,Breast
76
+ 0_74.png,Breast
77
+ 0_75.png,Breast
78
+ 0_76.png,Breast
79
+ 0_77.png,Breast
80
+ 0_78.png,Breast
81
+ 0_79.png,Breast
82
+ 0_80.png,Breast
83
+ 0_81.png,Breast
84
+ 0_82.png,Breast
85
+ 0_83.png,Breast
86
+ 0_84.png,Breast
87
+ 0_85.png,Breast
88
+ 0_86.png,Breast
89
+ 0_87.png,Breast
90
+ 0_88.png,Breast
91
+ 0_89.png,Breast
92
+ 0_90.png,Breast
93
+ 0_91.png,Breast
94
+ 0_92.png,Breast
95
+ 0_93.png,Breast
96
+ 0_94.png,Breast
97
+ 0_95.png,Breast
98
+ 0_96.png,Breast
99
+ 0_97.png,Breast
100
+ 0_98.png,Breast
101
+ 0_99.png,Breast
102
+ 0_100.png,Breast
103
+ 0_101.png,Breast
104
+ 0_102.png,Breast
105
+ 0_103.png,Breast
106
+ 0_104.png,Breast
107
+ 0_105.png,Breast
108
+ 0_106.png,Breast
109
+ 0_107.png,Breast
110
+ 0_108.png,Breast
111
+ 0_109.png,Breast
112
+ 0_110.png,Breast
113
+ 0_111.png,Breast
114
+ 0_112.png,Breast
115
+ 0_113.png,Breast
116
+ 0_114.png,Breast
117
+ 0_115.png,Breast
118
+ 0_116.png,Breast
119
+ 0_117.png,Breast
120
+ 0_118.png,Breast
121
+ 0_119.png,Breast
122
+ 0_120.png,Breast
123
+ 0_121.png,Breast
124
+ 0_122.png,Breast
125
+ 0_123.png,Breast
126
+ 0_124.png,Breast
127
+ 0_125.png,Breast
128
+ 0_126.png,Breast
129
+ 0_127.png,Breast
130
+ 0_128.png,Breast
131
+ 0_129.png,Breast
132
+ 0_130.png,Breast
133
+ 0_131.png,Breast
134
+ 0_132.png,Breast
135
+ 0_133.png,Breast
136
+ 0_134.png,Breast
137
+ 0_135.png,Breast
138
+ 0_136.png,Breast
139
+ 0_137.png,Breast
140
+ 0_138.png,Breast
141
+ 0_139.png,Breast
142
+ 0_140.png,Breast
143
+ 0_141.png,Breast
144
+ 0_142.png,Breast
145
+ 0_143.png,Breast
146
+ 0_144.png,Breast
147
+ 0_145.png,Breast
148
+ 0_146.png,Breast
149
+ 0_147.png,Breast
150
+ 0_148.png,Breast
151
+ 0_149.png,Breast
152
+ 0_150.png,Breast
153
+ 0_151.png,Breast
154
+ 0_152.png,Breast
155
+ 0_153.png,Breast
156
+ 0_154.png,Breast
157
+ 0_155.png,Breast
158
+ 0_156.png,Breast
159
+ 0_157.png,Breast
160
+ 0_158.png,Breast
161
+ 0_159.png,Breast
162
+ 0_160.png,Breast
163
+ 0_161.png,Breast
164
+ 0_162.png,Breast
165
+ 0_163.png,Breast
166
+ 0_164.png,Breast
167
+ 0_165.png,Breast
168
+ 0_166.png,Breast
169
+ 0_167.png,Breast
170
+ 0_168.png,Breast
171
+ 0_169.png,Breast
172
+ 0_170.png,Breast
173
+ 0_171.png,Breast
174
+ 0_172.png,Breast
175
+ 0_173.png,Breast
176
+ 0_174.png,Breast
177
+ 0_175.png,Breast
178
+ 0_176.png,Breast
179
+ 0_177.png,Breast
180
+ 0_178.png,Breast
181
+ 0_179.png,Breast
182
+ 0_180.png,Breast
183
+ 0_181.png,Breast
184
+ 0_182.png,Breast
185
+ 0_183.png,Breast
186
+ 0_184.png,Breast
187
+ 0_185.png,Breast
188
+ 0_186.png,Breast
189
+ 0_187.png,Breast
190
+ 0_188.png,Breast
191
+ 0_189.png,Breast
192
+ 0_190.png,Breast
193
+ 0_191.png,Breast
194
+ 0_192.png,Breast
195
+ 0_193.png,Breast
196
+ 0_194.png,Breast
197
+ 0_195.png,Breast
198
+ 0_196.png,Breast
199
+ 0_197.png,Breast
200
+ 0_198.png,Breast
201
+ 0_199.png,Breast
202
+ 0_200.png,Breast
203
+ 0_201.png,Breast
204
+ 0_202.png,Breast
205
+ 0_203.png,Breast
206
+ 0_204.png,Breast
207
+ 0_205.png,Breast
208
+ 0_206.png,Breast
209
+ 0_207.png,Breast
210
+ 0_208.png,Breast
211
+ 0_209.png,Breast
212
+ 0_210.png,Breast
213
+ 0_211.png,Breast
214
+ 0_212.png,Breast
215
+ 0_213.png,Breast
216
+ 0_214.png,Breast
217
+ 0_215.png,Breast
218
+ 0_216.png,Breast
219
+ 0_217.png,Breast
220
+ 0_218.png,Breast
221
+ 0_219.png,Breast
222
+ 0_220.png,Breast
223
+ 0_221.png,Breast
224
+ 0_222.png,Breast
225
+ 0_223.png,Breast
226
+ 0_224.png,Breast
227
+ 0_225.png,Breast
228
+ 0_226.png,Breast
229
+ 0_227.png,Breast
230
+ 0_228.png,Breast
231
+ 0_229.png,Breast
232
+ 0_230.png,Breast
233
+ 0_231.png,Breast
234
+ 0_232.png,Breast
235
+ 0_233.png,Breast
236
+ 0_234.png,Breast
237
+ 0_235.png,Breast
238
+ 0_236.png,Breast
239
+ 0_237.png,Breast
240
+ 0_238.png,Breast
241
+ 0_239.png,Breast
242
+ 0_240.png,Breast
243
+ 0_241.png,Breast
244
+ 0_242.png,Breast
245
+ 0_243.png,Breast
246
+ 0_244.png,Breast
247
+ 0_245.png,Breast
248
+ 0_246.png,Breast
249
+ 0_247.png,Breast
250
+ 0_248.png,Breast
251
+ 0_249.png,Breast
252
+ 0_250.png,Breast
253
+ 0_251.png,Breast
254
+ 0_252.png,Breast
255
+ 0_253.png,Breast
256
+ 0_254.png,Breast
257
+ 0_255.png,Breast
258
+ 0_256.png,Breast
259
+ 0_257.png,Breast
260
+ 0_258.png,Breast
261
+ 0_259.png,Breast
262
+ 0_260.png,Breast
263
+ 0_261.png,Breast
264
+ 0_262.png,Breast
265
+ 0_263.png,Breast
266
+ 0_264.png,Breast
267
+ 0_265.png,Breast
268
+ 0_266.png,Breast
269
+ 0_267.png,Breast
270
+ 0_268.png,Breast
271
+ 0_269.png,Breast
272
+ 0_270.png,Breast
273
+ 0_271.png,Breast
274
+ 0_272.png,Breast
275
+ 0_273.png,Breast
276
+ 0_274.png,Breast
277
+ 0_275.png,Breast
278
+ 0_276.png,Breast
279
+ 0_277.png,Breast
280
+ 0_278.png,Breast
281
+ 0_279.png,Breast
282
+ 0_280.png,Breast
283
+ 0_281.png,Breast
284
+ 0_282.png,Breast
285
+ 0_283.png,Breast
286
+ 0_284.png,Breast
287
+ 0_285.png,Breast
288
+ 0_286.png,Breast
289
+ 0_287.png,Breast
290
+ 0_288.png,Breast
291
+ 0_289.png,Breast
292
+ 0_290.png,Breast
293
+ 0_291.png,Breast
294
+ 0_292.png,Breast
295
+ 0_293.png,Breast
296
+ 0_294.png,Breast
297
+ 0_295.png,Breast
298
+ 0_296.png,Breast
299
+ 0_297.png,Breast
300
+ 0_298.png,Breast
301
+ 0_299.png,Breast
302
+ 0_300.png,Breast
303
+ 0_301.png,Breast
304
+ 0_302.png,Breast
305
+ 0_303.png,Breast
306
+ 0_304.png,Breast
307
+ 0_305.png,Breast
308
+ 0_306.png,Breast
309
+ 0_307.png,Breast
310
+ 0_308.png,Breast
311
+ 0_309.png,Breast
312
+ 0_310.png,Breast
313
+ 0_311.png,Breast
314
+ 0_312.png,Breast
315
+ 0_313.png,Breast
316
+ 0_314.png,Breast
317
+ 0_315.png,Breast
318
+ 0_316.png,Breast
319
+ 0_317.png,Breast
320
+ 0_318.png,Breast
321
+ 0_319.png,Breast
322
+ 0_320.png,Breast
323
+ 0_321.png,Breast
324
+ 0_322.png,Breast
325
+ 0_323.png,Breast
326
+ 0_324.png,Breast
327
+ 0_325.png,Breast
328
+ 0_326.png,Breast
329
+ 0_327.png,Breast
330
+ 0_328.png,Breast
331
+ 0_329.png,Breast
332
+ 0_330.png,Breast
333
+ 0_331.png,Breast
334
+ 0_332.png,Breast
335
+ 0_333.png,Breast
336
+ 0_334.png,Breast
337
+ 0_335.png,Breast
338
+ 0_336.png,Breast
339
+ 0_337.png,Breast
340
+ 0_338.png,Breast
341
+ 0_339.png,Breast
342
+ 0_340.png,Breast
343
+ 0_341.png,Breast
344
+ 0_342.png,Breast
345
+ 0_343.png,Breast
346
+ 0_344.png,Breast
347
+ 0_345.png,Breast
348
+ 0_346.png,Breast
349
+ 0_347.png,Breast
350
+ 0_348.png,Breast
351
+ 0_349.png,Breast
352
+ 0_350.png,Breast
353
+ 0_351.png,Breast
354
+ 0_352.png,Breast
355
+ 0_353.png,Breast
356
+ 0_354.png,Breast
357
+ 0_355.png,Breast
358
+ 0_356.png,Breast
359
+ 0_357.png,Breast
360
+ 0_358.png,Breast
361
+ 0_359.png,Breast
362
+ 0_360.png,Breast
363
+ 0_361.png,Breast
364
+ 0_362.png,Breast
365
+ 0_363.png,Breast
366
+ 0_364.png,Breast
367
+ 0_365.png,Breast
368
+ 0_366.png,Breast
369
+ 0_367.png,Breast
370
+ 0_368.png,Breast
371
+ 0_369.png,Breast
372
+ 0_370.png,Breast
373
+ 0_371.png,Breast
374
+ 0_372.png,Breast
375
+ 0_373.png,Breast
376
+ 0_374.png,Breast
377
+ 0_375.png,Breast
378
+ 0_376.png,Breast
379
+ 0_377.png,Breast
380
+ 0_378.png,Breast
381
+ 0_379.png,Breast
382
+ 0_380.png,Breast
383
+ 0_381.png,Breast
384
+ 0_382.png,Breast
385
+ 0_383.png,Breast
386
+ 0_384.png,Breast
387
+ 0_385.png,Breast
388
+ 0_386.png,Breast
389
+ 0_387.png,Breast
390
+ 0_388.png,Breast
391
+ 0_389.png,Breast
392
+ 0_390.png,Breast
393
+ 0_391.png,Breast
394
+ 0_392.png,Breast
395
+ 0_393.png,Breast
396
+ 0_394.png,Breast
397
+ 0_395.png,Breast
398
+ 0_396.png,Breast
399
+ 0_397.png,Breast
400
+ 0_398.png,Breast
401
+ 0_399.png,Breast
402
+ 0_400.png,Breast
403
+ 0_401.png,Breast
404
+ 0_402.png,Breast
405
+ 0_403.png,Breast
406
+ 0_404.png,Breast
407
+ 0_405.png,Breast
408
+ 0_406.png,Breast
409
+ 0_407.png,Breast
410
+ 0_408.png,Breast
411
+ 0_409.png,Breast
412
+ 0_410.png,Breast
413
+ 0_411.png,Breast
414
+ 0_412.png,Breast
415
+ 0_413.png,Breast
416
+ 0_414.png,Breast
417
+ 0_415.png,Breast
418
+ 0_416.png,Breast
419
+ 0_417.png,Breast
420
+ 0_418.png,Breast
421
+ 0_419.png,Breast
422
+ 0_420.png,Breast
423
+ 0_421.png,Breast
424
+ 0_422.png,Breast
425
+ 0_423.png,Breast
426
+ 0_424.png,Breast
427
+ 0_425.png,Breast
428
+ 0_426.png,Breast
429
+ 0_427.png,Breast
430
+ 0_428.png,Breast
431
+ 0_429.png,Breast
432
+ 0_430.png,Breast
433
+ 0_431.png,Breast
434
+ 0_432.png,Breast
435
+ 0_433.png,Breast
436
+ 0_434.png,Breast
437
+ 0_435.png,Breast
438
+ 0_436.png,Breast
439
+ 0_437.png,Breast
440
+ 0_438.png,Breast
441
+ 0_439.png,Breast
442
+ 0_440.png,Breast
443
+ 0_441.png,Breast
444
+ 0_442.png,Breast
445
+ 0_443.png,Breast
446
+ 0_444.png,Breast
447
+ 0_445.png,Breast
448
+ 0_446.png,Breast
449
+ 0_447.png,Breast
450
+ 0_448.png,Breast
451
+ 0_449.png,Breast
452
+ 0_450.png,Breast
453
+ 0_451.png,Breast
454
+ 0_452.png,Breast
455
+ 0_453.png,Breast
456
+ 0_454.png,Breast
457
+ 0_455.png,Breast
458
+ 0_456.png,Breast
459
+ 0_457.png,Breast
460
+ 0_458.png,Breast
461
+ 0_459.png,Breast
462
+ 0_460.png,Breast
463
+ 0_461.png,Breast
464
+ 0_462.png,Breast
465
+ 0_463.png,Breast
466
+ 0_464.png,Breast
467
+ 0_465.png,Breast
468
+ 0_466.png,Breast
469
+ 0_467.png,Breast
470
+ 0_468.png,Breast
471
+ 0_469.png,Breast
472
+ 0_470.png,Breast
473
+ 0_471.png,Breast
474
+ 0_472.png,Breast
475
+ 0_473.png,Breast
476
+ 0_474.png,Breast
477
+ 0_475.png,Breast
478
+ 0_476.png,Breast
479
+ 0_477.png,Breast
480
+ 0_478.png,Breast
481
+ 0_479.png,Breast
482
+ 0_480.png,Breast
483
+ 0_481.png,Breast
484
+ 0_482.png,Breast
485
+ 0_483.png,Breast
486
+ 0_484.png,Breast
487
+ 0_485.png,Breast
488
+ 0_486.png,Breast
489
+ 0_487.png,Breast
490
+ 0_488.png,Breast
491
+ 0_489.png,Breast
492
+ 0_490.png,Breast
493
+ 0_491.png,Breast
494
+ 0_492.png,Breast
495
+ 0_493.png,Breast
496
+ 0_494.png,Breast
497
+ 0_495.png,Breast
498
+ 0_496.png,Breast
499
+ 0_497.png,Breast
500
+ 0_498.png,Breast
501
+ 0_499.png,Breast
502
+ 0_500.png,Breast
503
+ 0_501.png,Breast
504
+ 0_502.png,Breast
505
+ 0_503.png,Breast
506
+ 0_504.png,Breast
507
+ 0_505.png,Breast
508
+ 0_506.png,Breast
509
+ 0_507.png,Breast
510
+ 0_508.png,Breast
511
+ 0_509.png,Breast
512
+ 0_510.png,Breast
513
+ 0_511.png,Breast
514
+ 0_512.png,Breast
515
+ 0_513.png,Breast
516
+ 0_514.png,Breast
517
+ 0_515.png,Breast
518
+ 0_516.png,Breast
519
+ 0_517.png,Breast
520
+ 0_518.png,Breast
521
+ 0_519.png,Breast
522
+ 0_520.png,Breast
523
+ 0_521.png,Breast
524
+ 0_522.png,Breast
525
+ 0_523.png,Breast
526
+ 0_524.png,Breast
527
+ 0_525.png,Breast
528
+ 0_526.png,Breast
529
+ 0_527.png,Breast
530
+ 0_528.png,Breast
531
+ 0_529.png,Breast
532
+ 0_530.png,Breast
533
+ 0_531.png,Breast
534
+ 0_532.png,Breast
535
+ 0_533.png,Breast
536
+ 0_534.png,Breast
537
+ 0_535.png,Breast
538
+ 0_536.png,Breast
539
+ 0_537.png,Breast
540
+ 0_538.png,Breast
541
+ 0_539.png,Breast
542
+ 0_540.png,Breast
543
+ 0_541.png,Breast
544
+ 0_542.png,Breast
545
+ 0_543.png,Breast
546
+ 0_544.png,Breast
547
+ 0_545.png,Breast
548
+ 0_546.png,Breast
549
+ 0_547.png,Breast
550
+ 0_548.png,Breast
551
+ 0_549.png,Breast
552
+ 0_550.png,Breast
553
+ 0_551.png,Breast
554
+ 0_552.png,Breast
555
+ 0_553.png,Breast
556
+ 0_554.png,Breast
557
+ 0_555.png,Breast
558
+ 0_556.png,Breast
559
+ 0_557.png,Breast
560
+ 0_558.png,Breast
561
+ 0_559.png,Breast
562
+ 0_560.png,Breast
563
+ 0_561.png,Breast
564
+ 0_562.png,Breast
565
+ 0_563.png,Breast
566
+ 0_564.png,Breast
567
+ 0_565.png,Breast
568
+ 0_566.png,Breast
569
+ 0_567.png,Breast
570
+ 0_568.png,Breast
571
+ 0_569.png,Breast
572
+ 0_570.png,Breast
573
+ 0_571.png,Breast
574
+ 0_572.png,Breast
575
+ 0_573.png,Breast
576
+ 0_574.png,Breast
577
+ 0_575.png,Breast
578
+ 0_576.png,Breast
579
+ 0_577.png,Breast
580
+ 0_578.png,Breast
581
+ 0_579.png,Breast
582
+ 0_580.png,Breast
583
+ 0_581.png,Breast
584
+ 0_582.png,Breast
585
+ 0_583.png,Breast
586
+ 0_584.png,Breast
587
+ 0_585.png,Breast
588
+ 0_586.png,Breast
589
+ 0_587.png,Breast
590
+ 0_588.png,Breast
591
+ 0_589.png,Breast
592
+ 0_590.png,Breast
593
+ 0_591.png,Breast
594
+ 0_592.png,Breast
595
+ 0_593.png,Breast
596
+ 0_594.png,Breast
597
+ 0_595.png,Breast
598
+ 0_596.png,Breast
599
+ 0_597.png,Breast
600
+ 0_598.png,Breast
601
+ 0_599.png,Breast
602
+ 0_600.png,Breast
603
+ 0_601.png,Breast
604
+ 0_602.png,Breast
605
+ 0_603.png,Breast
606
+ 0_604.png,Breast
607
+ 0_605.png,Breast
608
+ 0_606.png,Breast
609
+ 0_607.png,Breast
610
+ 0_608.png,Breast
611
+ 0_609.png,Breast
612
+ 0_610.png,Breast
613
+ 0_611.png,Breast
614
+ 0_612.png,Breast
615
+ 0_613.png,Breast
616
+ 0_614.png,Breast
617
+ 0_615.png,Breast
618
+ 0_616.png,Breast
619
+ 0_617.png,Breast
620
+ 0_618.png,Breast
621
+ 0_619.png,Breast
622
+ 0_620.png,Breast
623
+ 0_621.png,Breast
624
+ 0_622.png,Breast
625
+ 0_623.png,Breast
626
+ 0_624.png,Breast
627
+ 0_625.png,Breast
628
+ 0_626.png,Breast
629
+ 0_627.png,Breast
630
+ 0_628.png,Breast
631
+ 0_629.png,Breast
632
+ 0_630.png,Breast
633
+ 0_631.png,Breast
634
+ 0_632.png,Breast
635
+ 0_633.png,Breast
636
+ 0_634.png,Breast
637
+ 0_635.png,Breast
638
+ 0_636.png,Breast
639
+ 0_637.png,Breast
640
+ 0_638.png,Breast
641
+ 0_639.png,Breast
642
+ 0_640.png,Breast
643
+ 0_641.png,Breast
644
+ 0_642.png,Breast
645
+ 0_643.png,Breast
646
+ 0_644.png,Breast
647
+ 0_645.png,Breast
648
+ 0_646.png,Breast
649
+ 0_647.png,Breast
650
+ 0_648.png,Breast
651
+ 0_649.png,Breast
652
+ 0_650.png,Breast
653
+ 0_651.png,Breast
654
+ 0_652.png,Breast
655
+ 0_653.png,Breast
656
+ 0_654.png,Breast
657
+ 0_655.png,Breast
658
+ 0_656.png,Breast
659
+ 0_657.png,Breast
660
+ 0_658.png,Breast
661
+ 0_659.png,Breast
662
+ 0_660.png,Breast
663
+ 0_661.png,Breast
664
+ 0_662.png,Breast
665
+ 0_663.png,Breast
666
+ 0_664.png,Breast
667
+ 0_665.png,Breast
668
+ 0_666.png,Breast
669
+ 0_667.png,Breast
670
+ 0_668.png,Breast
671
+ 0_669.png,Breast
672
+ 0_670.png,Breast
673
+ 0_671.png,Breast
674
+ 0_672.png,Breast
675
+ 0_673.png,Breast
676
+ 0_674.png,Breast
677
+ 0_675.png,Breast
678
+ 0_676.png,Breast
679
+ 0_677.png,Breast
680
+ 0_678.png,Breast
681
+ 0_679.png,Breast
682
+ 0_680.png,Breast
683
+ 0_681.png,Breast
684
+ 0_682.png,Breast
685
+ 0_683.png,Breast
686
+ 0_684.png,Breast
687
+ 0_685.png,Breast
688
+ 0_686.png,Breast
689
+ 0_687.png,Breast
690
+ 0_688.png,Breast
691
+ 0_689.png,Breast
692
+ 0_690.png,Breast
693
+ 0_691.png,Breast
694
+ 0_692.png,Breast
695
+ 0_693.png,Breast
696
+ 0_694.png,Breast
697
+ 0_695.png,Breast
698
+ 0_696.png,Breast
699
+ 0_697.png,Breast
700
+ 0_698.png,Breast
701
+ 0_699.png,Breast
702
+ 0_700.png,Breast
703
+ 0_701.png,Breast
704
+ 0_702.png,Breast
705
+ 0_703.png,Breast
706
+ 0_704.png,Breast
707
+ 0_705.png,Breast
708
+ 0_706.png,Breast
709
+ 0_707.png,Breast
710
+ 0_708.png,Breast
711
+ 0_709.png,Breast
712
+ 0_710.png,Breast
713
+ 0_711.png,Breast
714
+ 0_712.png,Breast
715
+ 0_713.png,Breast
716
+ 0_714.png,Breast
717
+ 0_715.png,Breast
718
+ 0_716.png,Breast
719
+ 0_717.png,Breast
720
+ 0_718.png,Breast
721
+ 0_719.png,Breast
722
+ 0_720.png,Breast
723
+ 0_721.png,Breast
724
+ 0_722.png,Breast
725
+ 0_723.png,Breast
726
+ 0_724.png,Breast
727
+ 0_725.png,Breast
728
+ 0_726.png,Breast
729
+ 0_727.png,Breast
730
+ 0_728.png,Breast
731
+ 0_729.png,Breast
732
+ 0_730.png,Breast
733
+ 0_731.png,Breast
734
+ 0_732.png,Breast
735
+ 0_733.png,Breast
736
+ 0_734.png,Breast
737
+ 0_735.png,Breast
738
+ 0_736.png,Breast
739
+ 0_737.png,Breast
740
+ 0_738.png,Colon
741
+ 0_739.png,Colon
742
+ 0_740.png,Colon
743
+ 0_741.png,Colon
744
+ 0_742.png,Colon
745
+ 0_743.png,Colon
746
+ 0_744.png,Colon
747
+ 0_745.png,Colon
748
+ 0_746.png,Colon
749
+ 0_747.png,Colon
750
+ 0_748.png,Colon
751
+ 0_749.png,Colon
752
+ 0_750.png,Colon
753
+ 0_751.png,Colon
754
+ 0_752.png,Colon
755
+ 0_753.png,Colon
756
+ 0_754.png,Colon
757
+ 0_755.png,Colon
758
+ 0_756.png,Colon
759
+ 0_757.png,Colon
760
+ 0_758.png,Colon
761
+ 0_759.png,Colon
762
+ 0_760.png,Colon
763
+ 0_761.png,Colon
764
+ 0_762.png,Colon
765
+ 0_763.png,Colon
766
+ 0_764.png,Colon
767
+ 0_765.png,Colon
768
+ 0_766.png,Colon
769
+ 0_767.png,Colon
770
+ 0_768.png,Colon
771
+ 0_769.png,Colon
772
+ 0_770.png,Colon
773
+ 0_771.png,Colon
774
+ 0_772.png,Colon
775
+ 0_773.png,Colon
776
+ 0_774.png,Colon
777
+ 0_775.png,Colon
778
+ 0_776.png,Colon
779
+ 0_777.png,Colon
780
+ 0_778.png,Colon
781
+ 0_779.png,Colon
782
+ 0_780.png,Colon
783
+ 0_781.png,Colon
784
+ 0_782.png,Colon
785
+ 0_783.png,Colon
786
+ 0_784.png,Colon
787
+ 0_785.png,Colon
788
+ 0_786.png,Colon
789
+ 0_787.png,Colon
790
+ 0_788.png,Colon
791
+ 0_789.png,Colon
792
+ 0_790.png,Colon
793
+ 0_791.png,Colon
794
+ 0_792.png,Colon
795
+ 0_793.png,Colon
796
+ 0_794.png,Colon
797
+ 0_795.png,Colon
798
+ 0_796.png,Colon
799
+ 0_797.png,Colon
800
+ 0_798.png,Colon
801
+ 0_799.png,Colon
802
+ 0_800.png,Colon
803
+ 0_801.png,Colon
804
+ 0_802.png,Colon
805
+ 0_803.png,Colon
806
+ 0_804.png,Colon
807
+ 0_805.png,Colon
808
+ 0_806.png,Colon
809
+ 0_807.png,Colon
810
+ 0_808.png,Colon
811
+ 0_809.png,Colon
812
+ 0_810.png,Colon
813
+ 0_811.png,Colon
814
+ 0_812.png,Colon
815
+ 0_813.png,Colon
816
+ 0_814.png,Colon
817
+ 0_815.png,Colon
818
+ 0_816.png,Colon
819
+ 0_817.png,Colon
820
+ 0_818.png,Colon
821
+ 0_819.png,Colon
822
+ 0_820.png,Colon
823
+ 0_821.png,Colon
824
+ 0_822.png,Colon
825
+ 0_823.png,Colon
826
+ 0_824.png,Colon
827
+ 0_825.png,Colon
828
+ 0_826.png,Colon
829
+ 0_827.png,Colon
830
+ 0_828.png,Colon
831
+ 0_829.png,Colon
832
+ 0_830.png,Colon
833
+ 0_831.png,Colon
834
+ 0_832.png,Colon
835
+ 0_833.png,Colon
836
+ 0_834.png,Colon
837
+ 0_835.png,Colon
838
+ 0_836.png,Colon
839
+ 0_837.png,Colon
840
+ 0_838.png,Colon
841
+ 0_839.png,Colon
842
+ 0_840.png,Colon
843
+ 0_841.png,Colon
844
+ 0_842.png,Colon
845
+ 0_843.png,Colon
846
+ 0_844.png,Colon
847
+ 0_845.png,Colon
848
+ 0_846.png,Colon
849
+ 0_847.png,Colon
850
+ 0_848.png,Colon
851
+ 0_849.png,Colon
852
+ 0_850.png,Colon
853
+ 0_851.png,Colon
854
+ 0_852.png,Colon
855
+ 0_853.png,Colon
856
+ 0_854.png,Colon
857
+ 0_855.png,Colon
858
+ 0_856.png,Colon
859
+ 0_857.png,Colon
860
+ 0_858.png,Colon
861
+ 0_859.png,Colon
862
+ 0_860.png,Colon
863
+ 0_861.png,Colon
864
+ 0_862.png,Colon
865
+ 0_863.png,Colon
866
+ 0_864.png,Colon
867
+ 0_865.png,Colon
868
+ 0_866.png,Colon
869
+ 0_867.png,Lung
870
+ 0_868.png,Lung
871
+ 0_869.png,Lung
872
+ 0_870.png,Lung
873
+ 0_871.png,Lung
874
+ 0_872.png,Lung
875
+ 0_873.png,Lung
876
+ 0_874.png,Lung
877
+ 0_875.png,Lung
878
+ 0_876.png,Lung
879
+ 0_877.png,Lung
880
+ 0_878.png,Lung
881
+ 0_879.png,Lung
882
+ 0_880.png,Lung
883
+ 0_881.png,Lung
884
+ 0_882.png,Lung
885
+ 0_883.png,Lung
886
+ 0_884.png,Lung
887
+ 0_885.png,Lung
888
+ 0_886.png,Lung
889
+ 0_887.png,Lung
890
+ 0_888.png,Lung
891
+ 0_889.png,Lung
892
+ 0_890.png,Lung
893
+ 0_891.png,Lung
894
+ 0_892.png,Lung
895
+ 0_893.png,Lung
896
+ 0_894.png,Lung
897
+ 0_895.png,Lung
898
+ 0_896.png,Lung
899
+ 0_897.png,Breast
900
+ 0_898.png,Breast
901
+ 0_899.png,Breast
902
+ 0_900.png,Breast
903
+ 0_901.png,Breast
904
+ 0_902.png,Breast
905
+ 0_903.png,Breast
906
+ 0_904.png,Breast
907
+ 0_905.png,Breast
908
+ 0_906.png,Breast
909
+ 0_907.png,Breast
910
+ 0_908.png,Breast
911
+ 0_909.png,Colon
912
+ 0_910.png,Colon
913
+ 0_911.png,Colon
914
+ 0_912.png,Kidney
915
+ 0_913.png,Kidney
916
+ 0_914.png,Kidney
917
+ 0_915.png,Kidney
918
+ 0_916.png,Kidney
919
+ 0_917.png,Kidney
920
+ 0_918.png,Kidney
921
+ 0_919.png,Prostate
922
+ 0_920.png,Prostate
923
+ 0_921.png,Prostate
924
+ 0_922.png,Prostate
925
+ 0_923.png,Bladder
926
+ 0_924.png,Breast
927
+ 0_925.png,Breast
928
+ 0_926.png,Breast
929
+ 0_927.png,Breast
930
+ 0_928.png,Breast
931
+ 0_929.png,Breast
932
+ 0_930.png,Breast
933
+ 0_931.png,Breast
934
+ 0_932.png,Bladder
935
+ 0_933.png,Prostate
936
+ 0_934.png,Prostate
937
+ 0_935.png,Prostate
938
+ 0_936.png,Prostate
939
+ 0_937.png,Prostate
940
+ 0_938.png,Prostate
941
+ 0_939.png,Prostate
942
+ 0_940.png,Prostate
943
+ 0_941.png,Prostate
944
+ 0_942.png,Prostate
945
+ 0_943.png,Prostate
946
+ 0_944.png,Prostate
947
+ 0_945.png,Prostate
948
+ 0_946.png,Prostate
949
+ 0_947.png,Prostate
950
+ 0_948.png,Prostate
951
+ 0_949.png,Prostate
952
+ 0_950.png,Prostate
953
+ 0_951.png,Prostate
954
+ 0_952.png,Prostate
955
+ 0_953.png,Kidney
956
+ 0_954.png,Kidney
957
+ 0_955.png,Kidney
958
+ 0_956.png,Kidney
959
+ 0_957.png,Kidney
960
+ 0_958.png,Kidney
961
+ 0_959.png,Kidney
962
+ 0_960.png,Kidney
963
+ 0_961.png,Kidney
964
+ 0_962.png,Stomach
965
+ 0_963.png,Colon
966
+ 0_964.png,Colon
967
+ 0_965.png,Colon
968
+ 0_966.png,Stomach
969
+ 0_967.png,Stomach
970
+ 0_968.png,Stomach
971
+ 0_969.png,Ovarian
972
+ 0_970.png,Ovarian
973
+ 0_971.png,Ovarian
974
+ 0_972.png,Ovarian
975
+ 0_973.png,Ovarian
976
+ 0_974.png,Ovarian
977
+ 0_975.png,Ovarian
978
+ 0_976.png,Ovarian
979
+ 0_977.png,Ovarian
980
+ 0_978.png,Esophagus
981
+ 0_979.png,Esophagus
982
+ 0_980.png,Esophagus
983
+ 0_981.png,Esophagus
984
+ 0_982.png,Esophagus
985
+ 0_983.png,Esophagus
986
+ 0_984.png,Esophagus
987
+ 0_985.png,Esophagus
988
+ 0_986.png,Esophagus
989
+ 0_987.png,Esophagus
990
+ 0_988.png,Esophagus
991
+ 0_989.png,Esophagus
992
+ 0_990.png,Esophagus
993
+ 0_991.png,Esophagus
994
+ 0_992.png,Esophagus
995
+ 0_993.png,Esophagus
996
+ 0_994.png,Esophagus
997
+ 0_995.png,Esophagus
998
+ 0_996.png,Esophagus
999
+ 0_997.png,Esophagus
1000
+ 0_998.png,Esophagus
1001
+ 0_999.png,Esophagus
1002
+ 0_1000.png,Esophagus
1003
+ 0_1001.png,Esophagus
1004
+ 0_1002.png,Esophagus
1005
+ 0_1003.png,Esophagus
1006
+ 0_1004.png,Esophagus
1007
+ 0_1005.png,Esophagus
1008
+ 0_1006.png,Esophagus
1009
+ 0_1007.png,Esophagus
1010
+ 0_1008.png,Esophagus
1011
+ 0_1009.png,Esophagus
1012
+ 0_1010.png,Esophagus
1013
+ 0_1011.png,Esophagus
1014
+ 0_1012.png,Esophagus
1015
+ 0_1013.png,Esophagus
1016
+ 0_1014.png,Esophagus
1017
+ 0_1015.png,Esophagus
1018
+ 0_1016.png,Esophagus
1019
+ 0_1017.png,Esophagus
1020
+ 0_1018.png,Pancreatic
1021
+ 0_1019.png,Pancreatic
1022
+ 0_1020.png,Pancreatic
1023
+ 0_1021.png,Pancreatic
1024
+ 0_1022.png,Pancreatic
1025
+ 0_1023.png,Pancreatic
1026
+ 0_1024.png,Pancreatic
1027
+ 0_1025.png,Lung
1028
+ 0_1026.png,Lung
1029
+ 0_1027.png,Lung
1030
+ 0_1028.png,Lung
1031
+ 0_1029.png,Lung
1032
+ 0_1030.png,Lung
1033
+ 0_1031.png,Lung
1034
+ 0_1032.png,Lung
1035
+ 0_1033.png,Lung
1036
+ 0_1034.png,Lung
1037
+ 0_1035.png,Lung
1038
+ 0_1036.png,Lung
1039
+ 0_1037.png,Lung
1040
+ 0_1038.png,Lung
1041
+ 0_1039.png,Lung
1042
+ 0_1040.png,Lung
1043
+ 0_1041.png,Lung
1044
+ 0_1042.png,Lung
1045
+ 0_1043.png,Lung
1046
+ 0_1044.png,Lung
1047
+ 0_1045.png,Lung
1048
+ 0_1046.png,Lung
1049
+ 0_1047.png,Lung
1050
+ 0_1048.png,Lung
1051
+ 0_1049.png,Lung
1052
+ 0_1050.png,Lung
1053
+ 0_1051.png,Lung
1054
+ 0_1052.png,Lung
1055
+ 0_1053.png,Lung
1056
+ 0_1054.png,Lung
1057
+ 0_1055.png,Lung
1058
+ 0_1056.png,Lung
1059
+ 0_1057.png,Lung
1060
+ 0_1058.png,Lung
1061
+ 0_1059.png,Lung
1062
+ 0_1060.png,Lung
1063
+ 0_1061.png,Uterus
1064
+ 0_1062.png,Uterus
1065
+ 0_1063.png,Uterus
1066
+ 0_1064.png,Thyroid
1067
+ 0_1065.png,Thyroid
1068
+ 0_1066.png,Thyroid
1069
+ 0_1067.png,Thyroid
1070
+ 0_1068.png,Thyroid
1071
+ 0_1069.png,Thyroid
1072
+ 0_1070.png,Thyroid
1073
+ 0_1071.png,Thyroid
1074
+ 0_1072.png,Thyroid
1075
+ 0_1073.png,Thyroid
1076
+ 0_1074.png,Thyroid
1077
+ 0_1075.png,Thyroid
1078
+ 0_1076.png,Thyroid
1079
+ 0_1077.png,Thyroid
1080
+ 0_1078.png,Thyroid
1081
+ 0_1079.png,Thyroid
1082
+ 0_1080.png,Thyroid
1083
+ 0_1081.png,Thyroid
1084
+ 0_1082.png,Thyroid
1085
+ 0_1083.png,Thyroid
1086
+ 0_1084.png,Thyroid
1087
+ 0_1085.png,Skin
1088
+ 0_1086.png,Skin
1089
+ 0_1087.png,Skin
1090
+ 0_1088.png,Skin
1091
+ 0_1089.png,Skin
1092
+ 0_1090.png,Skin
1093
+ 0_1091.png,Cervix
1094
+ 0_1092.png,Cervix
1095
+ 0_1093.png,Cervix
1096
+ 0_1094.png,Cervix
1097
+ 0_1095.png,Cervix
1098
+ 0_1096.png,Cervix
1099
+ 0_1097.png,Thyroid
1100
+ 0_1098.png,Thyroid
1101
+ 0_1099.png,Thyroid
1102
+ 0_1100.png,Thyroid
1103
+ 0_1101.png,Thyroid
1104
+ 0_1102.png,Thyroid
1105
+ 0_1103.png,Thyroid
1106
+ 0_1104.png,Thyroid
1107
+ 0_1105.png,Thyroid
1108
+ 0_1106.png,Thyroid
1109
+ 0_1107.png,Thyroid
1110
+ 0_1108.png,Thyroid
1111
+ 0_1109.png,Thyroid
1112
+ 0_1110.png,Thyroid
1113
+ 0_1111.png,Thyroid
1114
+ 0_1112.png,Thyroid
1115
+ 0_1113.png,Thyroid
1116
+ 0_1114.png,Thyroid
1117
+ 0_1115.png,Esophagus
1118
+ 0_1116.png,Esophagus
1119
+ 0_1117.png,Esophagus
1120
+ 0_1118.png,Esophagus
1121
+ 0_1119.png,Esophagus
1122
+ 0_1120.png,Esophagus
1123
+ 0_1121.png,Esophagus
1124
+ 0_1122.png,Esophagus
1125
+ 0_1123.png,Esophagus
1126
+ 0_1124.png,Esophagus
1127
+ 0_1125.png,Esophagus
1128
+ 0_1126.png,Esophagus
1129
+ 0_1127.png,Esophagus
1130
+ 0_1128.png,Esophagus
1131
+ 0_1129.png,Esophagus
1132
+ 0_1130.png,Esophagus
1133
+ 0_1131.png,Esophagus
1134
+ 0_1132.png,Esophagus
1135
+ 0_1133.png,Esophagus
1136
+ 0_1134.png,Esophagus
1137
+ 0_1135.png,Esophagus
1138
+ 0_1136.png,Esophagus
1139
+ 0_1137.png,Esophagus
1140
+ 0_1138.png,Esophagus
1141
+ 0_1139.png,Esophagus
1142
+ 0_1140.png,Esophagus
1143
+ 0_1141.png,Esophagus
1144
+ 0_1142.png,Esophagus
1145
+ 0_1143.png,Esophagus
1146
+ 0_1144.png,Esophagus
1147
+ 0_1145.png,Esophagus
1148
+ 0_1146.png,Esophagus
1149
+ 0_1147.png,Cervix
1150
+ 0_1148.png,Cervix
1151
+ 0_1149.png,Cervix
1152
+ 0_1150.png,Cervix
1153
+ 0_1151.png,Cervix
1154
+ 0_1152.png,Cervix
1155
+ 0_1153.png,Cervix
1156
+ 0_1154.png,Cervix
1157
+ 0_1155.png,Cervix
1158
+ 0_1156.png,Cervix
1159
+ 0_1157.png,Cervix
1160
+ 0_1158.png,Cervix
1161
+ 0_1159.png,Cervix
1162
+ 0_1160.png,Cervix
1163
+ 0_1161.png,Cervix
1164
+ 0_1162.png,Cervix
1165
+ 0_1163.png,Cervix
1166
+ 0_1164.png,Cervix
1167
+ 0_1165.png,Cervix
1168
+ 0_1166.png,Cervix
1169
+ 0_1167.png,Cervix
1170
+ 0_1168.png,Cervix
1171
+ 0_1169.png,Cervix
1172
+ 0_1170.png,Cervix
1173
+ 0_1171.png,Adrenal_gland
1174
+ 0_1172.png,Adrenal_gland
1175
+ 0_1173.png,Adrenal_gland
1176
+ 0_1174.png,Adrenal_gland
1177
+ 0_1175.png,Adrenal_gland
1178
+ 0_1176.png,Adrenal_gland
1179
+ 0_1177.png,Adrenal_gland
1180
+ 0_1178.png,Adrenal_gland
1181
+ 0_1179.png,Adrenal_gland
1182
+ 0_1180.png,Adrenal_gland
1183
+ 0_1181.png,Adrenal_gland
1184
+ 0_1182.png,Adrenal_gland
1185
+ 0_1183.png,Adrenal_gland
1186
+ 0_1184.png,Adrenal_gland
1187
+ 0_1185.png,Adrenal_gland
1188
+ 0_1186.png,Adrenal_gland
1189
+ 0_1187.png,Adrenal_gland
1190
+ 0_1188.png,Adrenal_gland
1191
+ 0_1189.png,Adrenal_gland
1192
+ 0_1190.png,Adrenal_gland
1193
+ 0_1191.png,Adrenal_gland
1194
+ 0_1192.png,Adrenal_gland
1195
+ 0_1193.png,Adrenal_gland
1196
+ 0_1194.png,Adrenal_gland
1197
+ 0_1195.png,Adrenal_gland
1198
+ 0_1196.png,Adrenal_gland
1199
+ 0_1197.png,Adrenal_gland
1200
+ 0_1198.png,Adrenal_gland
1201
+ 0_1199.png,Adrenal_gland
1202
+ 0_1200.png,Adrenal_gland
1203
+ 0_1201.png,Adrenal_gland
1204
+ 0_1202.png,Adrenal_gland
1205
+ 0_1203.png,Adrenal_gland
1206
+ 0_1204.png,Adrenal_gland
1207
+ 0_1205.png,Adrenal_gland
1208
+ 0_1206.png,Adrenal_gland
1209
+ 0_1207.png,Adrenal_gland
1210
+ 0_1208.png,Adrenal_gland
1211
+ 0_1209.png,Adrenal_gland
1212
+ 0_1210.png,Adrenal_gland
1213
+ 0_1211.png,Adrenal_gland
1214
+ 0_1212.png,Adrenal_gland
1215
+ 0_1213.png,Adrenal_gland
1216
+ 0_1214.png,Adrenal_gland
1217
+ 0_1215.png,Adrenal_gland
1218
+ 0_1216.png,Adrenal_gland
1219
+ 0_1217.png,Adrenal_gland
1220
+ 0_1218.png,Adrenal_gland
1221
+ 0_1219.png,Esophagus
1222
+ 0_1220.png,Esophagus
1223
+ 0_1221.png,Esophagus
1224
+ 0_1222.png,Esophagus
1225
+ 0_1223.png,Esophagus
1226
+ 0_1224.png,Esophagus
1227
+ 0_1225.png,Esophagus
1228
+ 0_1226.png,Esophagus
1229
+ 0_1227.png,Esophagus
1230
+ 0_1228.png,Esophagus
1231
+ 0_1229.png,Esophagus
1232
+ 0_1230.png,Esophagus
1233
+ 0_1231.png,Esophagus
1234
+ 0_1232.png,Esophagus
1235
+ 0_1233.png,Esophagus
1236
+ 0_1234.png,Esophagus
1237
+ 0_1235.png,Esophagus
1238
+ 0_1236.png,Esophagus
1239
+ 0_1237.png,Esophagus
1240
+ 0_1238.png,Esophagus
1241
+ 0_1239.png,Esophagus
1242
+ 0_1240.png,Esophagus
1243
+ 0_1241.png,Esophagus
1244
+ 0_1242.png,Esophagus
1245
+ 0_1243.png,Esophagus
1246
+ 0_1244.png,Esophagus
1247
+ 0_1245.png,Esophagus
1248
+ 0_1246.png,Esophagus
1249
+ 0_1247.png,Esophagus
1250
+ 0_1248.png,Esophagus
1251
+ 0_1249.png,Esophagus
1252
+ 0_1250.png,Esophagus
1253
+ 0_1251.png,Esophagus
1254
+ 0_1252.png,Esophagus
1255
+ 0_1253.png,Esophagus
1256
+ 0_1254.png,Esophagus
1257
+ 0_1255.png,Esophagus
1258
+ 0_1256.png,Esophagus
1259
+ 0_1257.png,Esophagus
1260
+ 0_1258.png,Esophagus
1261
+ 0_1259.png,Esophagus
1262
+ 0_1260.png,Esophagus
1263
+ 0_1261.png,Esophagus
1264
+ 0_1262.png,Esophagus
1265
+ 0_1263.png,Esophagus
1266
+ 0_1264.png,Adrenal_gland
1267
+ 0_1265.png,Adrenal_gland
1268
+ 0_1266.png,Adrenal_gland
1269
+ 0_1267.png,Adrenal_gland
1270
+ 0_1268.png,Adrenal_gland
1271
+ 0_1269.png,Adrenal_gland
1272
+ 0_1270.png,Adrenal_gland
1273
+ 0_1271.png,Adrenal_gland
1274
+ 0_1272.png,Adrenal_gland
1275
+ 0_1273.png,Adrenal_gland
1276
+ 0_1274.png,Adrenal_gland
1277
+ 0_1275.png,Adrenal_gland
1278
+ 0_1276.png,Adrenal_gland
1279
+ 0_1277.png,Adrenal_gland
1280
+ 0_1278.png,Adrenal_gland
1281
+ 0_1279.png,Pancreatic
1282
+ 0_1280.png,Pancreatic
1283
+ 0_1281.png,Pancreatic
1284
+ 0_1282.png,Pancreatic
1285
+ 0_1283.png,Pancreatic
1286
+ 0_1284.png,Pancreatic
1287
+ 0_1285.png,Pancreatic
1288
+ 0_1286.png,Pancreatic
1289
+ 0_1287.png,Pancreatic
1290
+ 0_1288.png,Pancreatic
1291
+ 0_1289.png,Pancreatic
1292
+ 0_1290.png,Pancreatic
1293
+ 0_1291.png,Pancreatic
1294
+ 0_1292.png,Pancreatic
1295
+ 0_1293.png,Pancreatic
1296
+ 0_1294.png,Pancreatic
1297
+ 0_1295.png,Pancreatic
1298
+ 0_1296.png,Pancreatic
1299
+ 0_1297.png,Pancreatic
1300
+ 0_1298.png,Pancreatic
1301
+ 0_1299.png,Pancreatic
1302
+ 0_1300.png,Pancreatic
1303
+ 0_1301.png,Pancreatic
1304
+ 0_1302.png,Pancreatic
1305
+ 0_1303.png,Adrenal_gland
1306
+ 0_1304.png,Adrenal_gland
1307
+ 0_1305.png,Adrenal_gland
1308
+ 0_1306.png,Adrenal_gland
1309
+ 0_1307.png,Adrenal_gland
1310
+ 0_1308.png,Adrenal_gland
1311
+ 0_1309.png,Adrenal_gland
1312
+ 0_1310.png,Adrenal_gland
1313
+ 0_1311.png,Adrenal_gland
1314
+ 0_1312.png,Adrenal_gland
1315
+ 0_1313.png,Adrenal_gland
1316
+ 0_1314.png,Adrenal_gland
1317
+ 0_1315.png,Adrenal_gland
1318
+ 0_1316.png,Adrenal_gland
1319
+ 0_1317.png,Adrenal_gland
1320
+ 0_1318.png,Adrenal_gland
1321
+ 0_1319.png,Cervix
1322
+ 0_1320.png,Cervix
1323
+ 0_1321.png,Cervix
1324
+ 0_1322.png,Cervix
1325
+ 0_1323.png,Cervix
1326
+ 0_1324.png,Cervix
1327
+ 0_1325.png,Cervix
1328
+ 0_1326.png,Cervix
1329
+ 0_1327.png,Cervix
1330
+ 0_1328.png,Cervix
1331
+ 0_1329.png,Cervix
1332
+ 0_1330.png,Cervix
1333
+ 0_1331.png,Cervix
1334
+ 0_1332.png,Cervix
1335
+ 0_1333.png,Cervix
1336
+ 0_1334.png,Cervix
1337
+ 0_1335.png,Cervix
1338
+ 0_1336.png,Cervix
1339
+ 0_1337.png,Cervix
1340
+ 0_1338.png,Cervix
1341
+ 0_1339.png,Cervix
1342
+ 0_1340.png,Cervix
1343
+ 0_1341.png,Cervix
1344
+ 0_1342.png,Cervix
1345
+ 0_1343.png,Cervix
1346
+ 0_1344.png,Cervix
1347
+ 0_1345.png,Cervix
1348
+ 0_1346.png,Cervix
1349
+ 0_1347.png,Cervix
1350
+ 0_1348.png,Cervix
1351
+ 0_1349.png,Cervix
1352
+ 0_1350.png,Cervix
1353
+ 0_1351.png,Cervix
1354
+ 0_1352.png,Cervix
1355
+ 0_1353.png,Cervix
1356
+ 0_1354.png,Cervix
1357
+ 0_1355.png,Cervix
1358
+ 0_1356.png,Cervix
1359
+ 0_1357.png,Cervix
1360
+ 0_1358.png,Cervix
1361
+ 0_1359.png,Cervix
1362
+ 0_1360.png,Cervix
1363
+ 0_1361.png,Cervix
1364
+ 0_1362.png,Cervix
1365
+ 0_1363.png,Cervix
1366
+ 0_1364.png,Cervix
1367
+ 0_1365.png,Cervix
1368
+ 0_1366.png,Cervix
1369
+ 0_1367.png,Cervix
1370
+ 0_1368.png,Cervix
1371
+ 0_1369.png,Cervix
1372
+ 0_1370.png,Bile-duct
1373
+ 0_1371.png,Bile-duct
1374
+ 0_1372.png,Bile-duct
1375
+ 0_1373.png,Bile-duct
1376
+ 0_1374.png,Bile-duct
1377
+ 0_1375.png,Bile-duct
1378
+ 0_1376.png,Bile-duct
1379
+ 0_1377.png,Bile-duct
1380
+ 0_1378.png,Bile-duct
1381
+ 0_1379.png,Bile-duct
1382
+ 0_1380.png,Bile-duct
1383
+ 0_1381.png,Bile-duct
1384
+ 0_1382.png,Bile-duct
1385
+ 0_1383.png,Bile-duct
1386
+ 0_1384.png,Bile-duct
1387
+ 0_1385.png,Bile-duct
1388
+ 0_1386.png,Bile-duct
1389
+ 0_1387.png,Bile-duct
1390
+ 0_1388.png,Bile-duct
1391
+ 0_1389.png,Bile-duct
1392
+ 0_1390.png,Bile-duct
1393
+ 0_1391.png,Bile-duct
1394
+ 0_1392.png,Bile-duct
1395
+ 0_1393.png,Bile-duct
1396
+ 0_1394.png,Bile-duct
1397
+ 0_1395.png,Bile-duct
1398
+ 0_1396.png,Bile-duct
1399
+ 0_1397.png,Bile-duct
1400
+ 0_1398.png,Bile-duct
1401
+ 0_1399.png,Bile-duct
1402
+ 0_1400.png,Bile-duct
1403
+ 0_1401.png,Bile-duct
1404
+ 0_1402.png,Bile-duct
1405
+ 0_1403.png,Bile-duct
1406
+ 0_1404.png,Bile-duct
1407
+ 0_1405.png,Bile-duct
1408
+ 0_1406.png,Bile-duct
1409
+ 0_1407.png,Bile-duct
1410
+ 0_1408.png,Bile-duct
1411
+ 0_1409.png,Bile-duct
1412
+ 0_1410.png,Bile-duct
1413
+ 0_1411.png,Testis
1414
+ 0_1412.png,Testis
1415
+ 0_1413.png,Testis
1416
+ 0_1414.png,Testis
1417
+ 0_1415.png,Testis
1418
+ 0_1416.png,Testis
1419
+ 0_1417.png,Testis
1420
+ 0_1418.png,Testis
1421
+ 0_1419.png,Testis
1422
+ 0_1420.png,Testis
1423
+ 0_1421.png,Testis
1424
+ 0_1422.png,Testis
1425
+ 0_1423.png,Testis
1426
+ 0_1424.png,Testis
1427
+ 0_1425.png,Testis
1428
+ 0_1426.png,Testis
1429
+ 0_1427.png,Testis
1430
+ 0_1428.png,Testis
1431
+ 0_1429.png,Testis
1432
+ 0_1430.png,Testis
1433
+ 0_1431.png,Testis
1434
+ 0_1432.png,Testis
1435
+ 0_1433.png,Testis
1436
+ 0_1434.png,Testis
1437
+ 0_1435.png,Testis
1438
+ 0_1436.png,Testis
1439
+ 0_1437.png,Testis
1440
+ 0_1438.png,Testis
1441
+ 0_1439.png,Testis
1442
+ 0_1440.png,Testis
1443
+ 0_1441.png,Testis
1444
+ 0_1442.png,Testis
1445
+ 0_1443.png,Testis
1446
+ 0_1444.png,Testis
1447
+ 0_1445.png,Testis
1448
+ 0_1446.png,Bile-duct
1449
+ 0_1447.png,Bile-duct
1450
+ 0_1448.png,Bile-duct
1451
+ 0_1449.png,Bile-duct
1452
+ 0_1450.png,Bile-duct
1453
+ 0_1451.png,Bile-duct
1454
+ 0_1452.png,Bile-duct
1455
+ 0_1453.png,Bile-duct
1456
+ 0_1454.png,Bile-duct
1457
+ 0_1455.png,Bile-duct
1458
+ 0_1456.png,Bile-duct
1459
+ 0_1457.png,Bile-duct
1460
+ 0_1458.png,Bile-duct
1461
+ 0_1459.png,Bile-duct
1462
+ 0_1460.png,Bile-duct
1463
+ 0_1461.png,Bile-duct
1464
+ 0_1462.png,Bile-duct
1465
+ 0_1463.png,Bile-duct
1466
+ 0_1464.png,Bile-duct
1467
+ 0_1465.png,Bile-duct
1468
+ 0_1466.png,Bile-duct
1469
+ 0_1467.png,Bile-duct
1470
+ 0_1468.png,Bile-duct
1471
+ 0_1469.png,Bile-duct
1472
+ 0_1470.png,Bile-duct
1473
+ 0_1471.png,Bile-duct
1474
+ 0_1472.png,Bile-duct
1475
+ 0_1473.png,Bile-duct
1476
+ 0_1474.png,Bile-duct
1477
+ 0_1475.png,Bile-duct
1478
+ 0_1476.png,Bile-duct
1479
+ 0_1477.png,Bile-duct
1480
+ 0_1478.png,Bile-duct
1481
+ 0_1479.png,Colon
1482
+ 0_1480.png,Colon
1483
+ 0_1481.png,Colon
1484
+ 0_1482.png,Colon
1485
+ 0_1483.png,Colon
1486
+ 0_1484.png,Colon
1487
+ 0_1485.png,Colon
1488
+ 0_1486.png,Colon
1489
+ 0_1487.png,Colon
1490
+ 0_1488.png,Colon
1491
+ 0_1489.png,Colon
1492
+ 0_1490.png,Colon
1493
+ 0_1491.png,Colon
1494
+ 0_1492.png,Colon
1495
+ 0_1493.png,Colon
1496
+ 0_1494.png,Colon
1497
+ 0_1495.png,Colon
1498
+ 0_1496.png,Colon
1499
+ 0_1497.png,Colon
1500
+ 0_1498.png,Colon
1501
+ 0_1499.png,Colon
1502
+ 0_1500.png,Colon
1503
+ 0_1501.png,Colon
1504
+ 0_1502.png,Colon
1505
+ 0_1503.png,Colon
1506
+ 0_1504.png,Colon
1507
+ 0_1505.png,Colon
1508
+ 0_1506.png,Colon
1509
+ 0_1507.png,Colon
1510
+ 0_1508.png,Colon
1511
+ 0_1509.png,Colon
1512
+ 0_1510.png,Colon
1513
+ 0_1511.png,Colon
1514
+ 0_1512.png,Colon
1515
+ 0_1513.png,Colon
1516
+ 0_1514.png,Colon
1517
+ 0_1515.png,Colon
1518
+ 0_1516.png,Colon
1519
+ 0_1517.png,Colon
1520
+ 0_1518.png,Colon
1521
+ 0_1519.png,Colon
1522
+ 0_1520.png,Adrenal_gland
1523
+ 0_1521.png,Adrenal_gland
1524
+ 0_1522.png,Adrenal_gland
1525
+ 0_1523.png,Adrenal_gland
1526
+ 0_1524.png,Adrenal_gland
1527
+ 0_1525.png,Adrenal_gland
1528
+ 0_1526.png,Adrenal_gland
1529
+ 0_1527.png,Adrenal_gland
1530
+ 0_1528.png,Adrenal_gland
1531
+ 0_1529.png,Adrenal_gland
1532
+ 0_1530.png,Adrenal_gland
1533
+ 0_1531.png,Adrenal_gland
1534
+ 0_1532.png,Adrenal_gland
1535
+ 0_1533.png,Adrenal_gland
1536
+ 0_1534.png,Adrenal_gland
1537
+ 0_1535.png,Adrenal_gland
1538
+ 0_1536.png,Adrenal_gland
1539
+ 0_1537.png,Adrenal_gland
1540
+ 0_1538.png,Adrenal_gland
1541
+ 0_1539.png,Adrenal_gland
1542
+ 0_1540.png,Adrenal_gland
1543
+ 0_1541.png,Adrenal_gland
1544
+ 0_1542.png,Adrenal_gland
1545
+ 0_1543.png,Adrenal_gland
1546
+ 0_1544.png,Adrenal_gland
1547
+ 0_1545.png,Adrenal_gland
1548
+ 0_1546.png,Adrenal_gland
1549
+ 0_1547.png,Adrenal_gland
1550
+ 0_1548.png,Adrenal_gland
1551
+ 0_1549.png,Adrenal_gland
1552
+ 0_1550.png,Adrenal_gland
1553
+ 0_1551.png,Adrenal_gland
1554
+ 0_1552.png,Adrenal_gland
1555
+ 0_1553.png,Adrenal_gland
1556
+ 0_1554.png,Adrenal_gland
1557
+ 0_1555.png,Adrenal_gland
1558
+ 0_1556.png,Adrenal_gland
1559
+ 0_1557.png,Adrenal_gland
1560
+ 0_1558.png,Adrenal_gland
1561
+ 0_1559.png,Adrenal_gland
1562
+ 0_1560.png,Adrenal_gland
1563
+ 0_1561.png,Adrenal_gland
1564
+ 0_1562.png,Adrenal_gland
1565
+ 0_1563.png,Adrenal_gland
1566
+ 0_1564.png,Adrenal_gland
1567
+ 0_1565.png,Adrenal_gland
1568
+ 0_1566.png,Adrenal_gland
1569
+ 0_1567.png,Adrenal_gland
1570
+ 0_1568.png,Adrenal_gland
1571
+ 0_1569.png,Adrenal_gland
1572
+ 0_1570.png,Adrenal_gland
1573
+ 0_1571.png,Adrenal_gland
1574
+ 0_1572.png,Adrenal_gland
1575
+ 0_1573.png,Adrenal_gland
1576
+ 0_1574.png,Adrenal_gland
1577
+ 0_1575.png,Bile-duct
1578
+ 0_1576.png,Bile-duct
1579
+ 0_1577.png,Bile-duct
1580
+ 0_1578.png,Bile-duct
1581
+ 0_1579.png,Bile-duct
1582
+ 0_1580.png,Bile-duct
1583
+ 0_1581.png,Bile-duct
1584
+ 0_1582.png,Bile-duct
1585
+ 0_1583.png,Bile-duct
1586
+ 0_1584.png,Bile-duct
1587
+ 0_1585.png,Bile-duct
1588
+ 0_1586.png,Bile-duct
1589
+ 0_1587.png,Bile-duct
1590
+ 0_1588.png,Bile-duct
1591
+ 0_1589.png,Bile-duct
1592
+ 0_1590.png,Bile-duct
1593
+ 0_1591.png,Bile-duct
1594
+ 0_1592.png,Bile-duct
1595
+ 0_1593.png,Bile-duct
1596
+ 0_1594.png,Bile-duct
1597
+ 0_1595.png,Bile-duct
1598
+ 0_1596.png,Bile-duct
1599
+ 0_1597.png,Bile-duct
1600
+ 0_1598.png,Bile-duct
1601
+ 0_1599.png,Bile-duct
1602
+ 0_1600.png,Bile-duct
1603
+ 0_1601.png,Bile-duct
1604
+ 0_1602.png,Bile-duct
1605
+ 0_1603.png,Bile-duct
1606
+ 0_1604.png,Bile-duct
1607
+ 0_1605.png,Bile-duct
1608
+ 0_1606.png,Bile-duct
1609
+ 0_1607.png,Bile-duct
1610
+ 0_1608.png,Bile-duct
1611
+ 0_1609.png,Bile-duct
1612
+ 0_1610.png,Bile-duct
1613
+ 0_1611.png,Bile-duct
1614
+ 0_1612.png,Bile-duct
1615
+ 0_1613.png,Bile-duct
1616
+ 0_1614.png,Bile-duct
1617
+ 0_1615.png,Bile-duct
1618
+ 0_1616.png,Bile-duct
1619
+ 0_1617.png,Bile-duct
1620
+ 0_1618.png,Bile-duct
1621
+ 0_1619.png,Bile-duct
1622
+ 0_1620.png,Bile-duct
1623
+ 0_1621.png,Bile-duct
1624
+ 0_1622.png,Bile-duct
1625
+ 0_1623.png,Bile-duct
1626
+ 0_1624.png,Bile-duct
1627
+ 0_1625.png,Bile-duct
1628
+ 0_1626.png,Bile-duct
1629
+ 0_1627.png,Bile-duct
1630
+ 0_1628.png,Bile-duct
1631
+ 0_1629.png,Bile-duct
1632
+ 0_1630.png,Bile-duct
1633
+ 0_1631.png,Bile-duct
1634
+ 0_1632.png,Bile-duct
1635
+ 0_1633.png,Bile-duct
1636
+ 0_1634.png,Bile-duct
1637
+ 0_1635.png,Bile-duct
1638
+ 0_1636.png,Bile-duct
1639
+ 0_1637.png,Bile-duct
1640
+ 0_1638.png,Bile-duct
1641
+ 0_1639.png,Bile-duct
1642
+ 0_1640.png,Bile-duct
1643
+ 0_1641.png,Bile-duct
1644
+ 0_1642.png,Bile-duct
1645
+ 0_1643.png,Bile-duct
1646
+ 0_1644.png,Bile-duct
1647
+ 0_1645.png,Bile-duct
1648
+ 0_1646.png,Bile-duct
1649
+ 0_1647.png,Bile-duct
1650
+ 0_1648.png,Bile-duct
1651
+ 0_1649.png,Bile-duct
1652
+ 0_1650.png,Bile-duct
1653
+ 0_1651.png,Bile-duct
1654
+ 0_1652.png,Bile-duct
1655
+ 0_1653.png,Bile-duct
1656
+ 0_1654.png,Bile-duct
1657
+ 0_1655.png,Bile-duct
1658
+ 0_1656.png,Bile-duct
1659
+ 0_1657.png,Bile-duct
1660
+ 0_1658.png,Bile-duct
1661
+ 0_1659.png,Bile-duct
1662
+ 0_1660.png,Bladder
1663
+ 0_1661.png,Bladder
1664
+ 0_1662.png,Bladder
1665
+ 0_1663.png,Bladder
1666
+ 0_1664.png,Bladder
1667
+ 0_1665.png,Bladder
1668
+ 0_1666.png,Bladder
1669
+ 0_1667.png,Bladder
1670
+ 0_1668.png,Bladder
1671
+ 0_1669.png,Bladder
1672
+ 0_1670.png,Bladder
1673
+ 0_1671.png,Bladder
1674
+ 0_1672.png,Bladder
1675
+ 0_1673.png,Bladder
1676
+ 0_1674.png,Bladder
1677
+ 0_1675.png,Bladder
1678
+ 0_1676.png,Bladder
1679
+ 0_1677.png,Bladder
1680
+ 0_1678.png,Bladder
1681
+ 0_1679.png,Breast
1682
+ 0_1680.png,Breast
1683
+ 0_1681.png,Breast
1684
+ 0_1682.png,Breast
1685
+ 0_1683.png,Breast
1686
+ 0_1684.png,Breast
1687
+ 0_1685.png,Breast
1688
+ 0_1686.png,Breast
1689
+ 0_1687.png,Breast
1690
+ 0_1688.png,Breast
1691
+ 0_1689.png,Breast
1692
+ 0_1690.png,Breast
1693
+ 0_1691.png,Breast
1694
+ 0_1692.png,Breast
1695
+ 0_1693.png,Breast
1696
+ 0_1694.png,Breast
1697
+ 0_1695.png,Breast
1698
+ 0_1696.png,Breast
1699
+ 0_1697.png,Breast
1700
+ 0_1698.png,Breast
1701
+ 0_1699.png,Breast
1702
+ 0_1700.png,Breast
1703
+ 0_1701.png,Breast
1704
+ 0_1702.png,Breast
1705
+ 0_1703.png,Breast
1706
+ 0_1704.png,Breast
1707
+ 0_1705.png,Breast
1708
+ 0_1706.png,Breast
1709
+ 0_1707.png,Breast
1710
+ 0_1708.png,Breast
1711
+ 0_1709.png,Breast
1712
+ 0_1710.png,Breast
1713
+ 0_1711.png,Breast
1714
+ 0_1712.png,Breast
1715
+ 0_1713.png,Breast
1716
+ 0_1714.png,Breast
1717
+ 0_1715.png,Breast
1718
+ 0_1716.png,Breast
1719
+ 0_1717.png,Breast
1720
+ 0_1718.png,Breast
1721
+ 0_1719.png,Breast
1722
+ 0_1720.png,Breast
1723
+ 0_1721.png,Breast
1724
+ 0_1722.png,Breast
1725
+ 0_1723.png,Breast
1726
+ 0_1724.png,Breast
1727
+ 0_1725.png,Breast
1728
+ 0_1726.png,Breast
1729
+ 0_1727.png,Breast
1730
+ 0_1728.png,Breast
1731
+ 0_1729.png,Breast
1732
+ 0_1730.png,Breast
1733
+ 0_1731.png,Breast
1734
+ 0_1732.png,Breast
1735
+ 0_1733.png,Breast
1736
+ 0_1734.png,Breast
1737
+ 0_1735.png,Breast
1738
+ 0_1736.png,Breast
1739
+ 0_1737.png,Breast
1740
+ 0_1738.png,Breast
1741
+ 0_1739.png,Breast
1742
+ 0_1740.png,Breast
1743
+ 0_1741.png,Breast
1744
+ 0_1742.png,Breast
1745
+ 0_1743.png,Breast
1746
+ 0_1744.png,Breast
1747
+ 0_1745.png,Breast
1748
+ 0_1746.png,Breast
1749
+ 0_1747.png,Breast
1750
+ 0_1748.png,Cervix
1751
+ 0_1749.png,Cervix
1752
+ 0_1750.png,Cervix
1753
+ 0_1751.png,Cervix
1754
+ 0_1752.png,Cervix
1755
+ 0_1753.png,Cervix
1756
+ 0_1754.png,Cervix
1757
+ 0_1755.png,Cervix
1758
+ 0_1756.png,Cervix
1759
+ 0_1757.png,Cervix
1760
+ 0_1758.png,Cervix
1761
+ 0_1759.png,Cervix
1762
+ 0_1760.png,Cervix
1763
+ 0_1761.png,Cervix
1764
+ 0_1762.png,Cervix
1765
+ 0_1763.png,Cervix
1766
+ 0_1764.png,Cervix
1767
+ 0_1765.png,Cervix
1768
+ 0_1766.png,Cervix
1769
+ 0_1767.png,Cervix
1770
+ 0_1768.png,Cervix
1771
+ 0_1769.png,Cervix
1772
+ 0_1770.png,Cervix
1773
+ 0_1771.png,Cervix
1774
+ 0_1772.png,Cervix
1775
+ 0_1773.png,Cervix
1776
+ 0_1774.png,Cervix
1777
+ 0_1775.png,Cervix
1778
+ 0_1776.png,Cervix
1779
+ 0_1777.png,Cervix
1780
+ 0_1778.png,Cervix
1781
+ 0_1779.png,Cervix
1782
+ 0_1780.png,Cervix
1783
+ 0_1781.png,Cervix
1784
+ 0_1782.png,Cervix
1785
+ 0_1783.png,Cervix
1786
+ 0_1784.png,Cervix
1787
+ 0_1785.png,Cervix
1788
+ 0_1786.png,Cervix
1789
+ 0_1787.png,Cervix
1790
+ 0_1788.png,Cervix
1791
+ 0_1789.png,Cervix
1792
+ 0_1790.png,Cervix
1793
+ 0_1791.png,Cervix
1794
+ 0_1792.png,Cervix
1795
+ 0_1793.png,Cervix
1796
+ 0_1794.png,Cervix
1797
+ 0_1795.png,Cervix
1798
+ 0_1796.png,Cervix
1799
+ 0_1797.png,Cervix
1800
+ 0_1798.png,Cervix
1801
+ 0_1799.png,Cervix
1802
+ 0_1800.png,Cervix
1803
+ 0_1801.png,Cervix
1804
+ 0_1802.png,Cervix
1805
+ 0_1803.png,Cervix
1806
+ 0_1804.png,Cervix
1807
+ 0_1805.png,Cervix
1808
+ 0_1806.png,Cervix
1809
+ 0_1807.png,Cervix
1810
+ 0_1808.png,Cervix
1811
+ 0_1809.png,Cervix
1812
+ 0_1810.png,Cervix
1813
+ 0_1811.png,Cervix
1814
+ 0_1812.png,Cervix
1815
+ 0_1813.png,Cervix
1816
+ 0_1814.png,Cervix
1817
+ 0_1815.png,Cervix
1818
+ 0_1816.png,Cervix
1819
+ 0_1817.png,Cervix
1820
+ 0_1818.png,Cervix
1821
+ 0_1819.png,Cervix
1822
+ 0_1820.png,Cervix
1823
+ 0_1821.png,Cervix
1824
+ 0_1822.png,Colon
1825
+ 0_1823.png,Colon
1826
+ 0_1824.png,Colon
1827
+ 0_1825.png,Colon
1828
+ 0_1826.png,Colon
1829
+ 0_1827.png,Colon
1830
+ 0_1828.png,Colon
1831
+ 0_1829.png,Colon
1832
+ 0_1830.png,Colon
1833
+ 0_1831.png,Colon
1834
+ 0_1832.png,Colon
1835
+ 0_1833.png,Colon
1836
+ 0_1834.png,Colon
1837
+ 0_1835.png,Colon
1838
+ 0_1836.png,Colon
1839
+ 0_1837.png,Colon
1840
+ 0_1838.png,Colon
1841
+ 0_1839.png,Colon
1842
+ 0_1840.png,Colon
1843
+ 0_1841.png,Colon
1844
+ 0_1842.png,Colon
1845
+ 0_1843.png,Colon
1846
+ 0_1844.png,Colon
1847
+ 0_1845.png,Colon
1848
+ 0_1846.png,Colon
1849
+ 0_1847.png,Colon
1850
+ 0_1848.png,Colon
1851
+ 0_1849.png,Colon
1852
+ 0_1850.png,Colon
1853
+ 0_1851.png,Colon
1854
+ 0_1852.png,Colon
1855
+ 0_1853.png,Colon
1856
+ 0_1854.png,Colon
1857
+ 0_1855.png,Colon
1858
+ 0_1856.png,Colon
1859
+ 0_1857.png,Colon
1860
+ 0_1858.png,Colon
1861
+ 0_1859.png,Colon
1862
+ 0_1860.png,Colon
1863
+ 0_1861.png,Colon
1864
+ 0_1862.png,Colon
1865
+ 0_1863.png,Colon
1866
+ 0_1864.png,Colon
1867
+ 0_1865.png,Colon
1868
+ 0_1866.png,Colon
1869
+ 0_1867.png,Colon
1870
+ 0_1868.png,Colon
1871
+ 0_1869.png,Colon
1872
+ 0_1870.png,Colon
1873
+ 0_1871.png,Colon
1874
+ 0_1872.png,Colon
1875
+ 0_1873.png,Colon
1876
+ 0_1874.png,Colon
1877
+ 0_1875.png,Colon
1878
+ 0_1876.png,Colon
1879
+ 0_1877.png,Colon
1880
+ 0_1878.png,Colon
1881
+ 0_1879.png,Colon
1882
+ 0_1880.png,Colon
1883
+ 0_1881.png,Colon
1884
+ 0_1882.png,Colon
1885
+ 0_1883.png,Colon
1886
+ 0_1884.png,Colon
1887
+ 0_1885.png,Colon
1888
+ 0_1886.png,Colon
1889
+ 0_1887.png,Colon
1890
+ 0_1888.png,Colon
1891
+ 0_1889.png,Colon
1892
+ 0_1890.png,Colon
1893
+ 0_1891.png,Colon
1894
+ 0_1892.png,Colon
1895
+ 0_1893.png,Colon
1896
+ 0_1894.png,Colon
1897
+ 0_1895.png,Colon
1898
+ 0_1896.png,Colon
1899
+ 0_1897.png,Colon
1900
+ 0_1898.png,Colon
1901
+ 0_1899.png,Colon
1902
+ 0_1900.png,Colon
1903
+ 0_1901.png,Colon
1904
+ 0_1902.png,Colon
1905
+ 0_1903.png,Colon
1906
+ 0_1904.png,Colon
1907
+ 0_1905.png,Colon
1908
+ 0_1906.png,Colon
1909
+ 0_1907.png,Colon
1910
+ 0_1908.png,Colon
1911
+ 0_1909.png,Colon
1912
+ 0_1910.png,Colon
1913
+ 0_1911.png,Colon
1914
+ 0_1912.png,Colon
1915
+ 0_1913.png,Colon
1916
+ 0_1914.png,Colon
1917
+ 0_1915.png,Colon
1918
+ 0_1916.png,Colon
1919
+ 0_1917.png,Colon
1920
+ 0_1918.png,Colon
1921
+ 0_1919.png,Colon
1922
+ 0_1920.png,Colon
1923
+ 0_1921.png,Colon
1924
+ 0_1922.png,Colon
1925
+ 0_1923.png,Colon
1926
+ 0_1924.png,Colon
1927
+ 0_1925.png,Colon
1928
+ 0_1926.png,Colon
1929
+ 0_1927.png,Colon
1930
+ 0_1928.png,Colon
1931
+ 0_1929.png,Colon
1932
+ 0_1930.png,Colon
1933
+ 0_1931.png,Colon
1934
+ 0_1932.png,Colon
1935
+ 0_1933.png,Colon
1936
+ 0_1934.png,Colon
1937
+ 0_1935.png,Colon
1938
+ 0_1936.png,Colon
1939
+ 0_1937.png,Colon
1940
+ 0_1938.png,Colon
1941
+ 0_1939.png,Colon
1942
+ 0_1940.png,Colon
1943
+ 0_1941.png,Colon
1944
+ 0_1942.png,Colon
1945
+ 0_1943.png,Colon
1946
+ 0_1944.png,Colon
1947
+ 0_1945.png,Colon
1948
+ 0_1946.png,Colon
1949
+ 0_1947.png,Colon
1950
+ 0_1948.png,Colon
1951
+ 0_1949.png,Colon
1952
+ 0_1950.png,Colon
1953
+ 0_1951.png,Colon
1954
+ 0_1952.png,Colon
1955
+ 0_1953.png,Colon
1956
+ 0_1954.png,Colon
1957
+ 0_1955.png,Colon
1958
+ 0_1956.png,Colon
1959
+ 0_1957.png,Colon
1960
+ 0_1958.png,Colon
1961
+ 0_1959.png,Colon
1962
+ 0_1960.png,Colon
1963
+ 0_1961.png,Colon
1964
+ 0_1962.png,Colon
1965
+ 0_1963.png,Colon
1966
+ 0_1964.png,Colon
1967
+ 0_1965.png,Colon
1968
+ 0_1966.png,Colon
1969
+ 0_1967.png,Colon
1970
+ 0_1968.png,Colon
1971
+ 0_1969.png,Colon
1972
+ 0_1970.png,Colon
1973
+ 0_1971.png,Colon
1974
+ 0_1972.png,Colon
1975
+ 0_1973.png,Colon
1976
+ 0_1974.png,Colon
1977
+ 0_1975.png,Colon
1978
+ 0_1976.png,Colon
1979
+ 0_1977.png,Colon
1980
+ 0_1978.png,Colon
1981
+ 0_1979.png,Colon
1982
+ 0_1980.png,Colon
1983
+ 0_1981.png,Colon
1984
+ 0_1982.png,Colon
1985
+ 0_1983.png,Colon
1986
+ 0_1984.png,Colon
1987
+ 0_1985.png,Colon
1988
+ 0_1986.png,Colon
1989
+ 0_1987.png,Colon
1990
+ 0_1988.png,Colon
1991
+ 0_1989.png,Colon
1992
+ 0_1990.png,Colon
1993
+ 0_1991.png,Colon
1994
+ 0_1992.png,Colon
1995
+ 0_1993.png,Colon
1996
+ 0_1994.png,Colon
1997
+ 0_1995.png,Colon
1998
+ 0_1996.png,Colon
1999
+ 0_1997.png,Colon
2000
+ 0_1998.png,Colon
2001
+ 0_1999.png,Colon
2002
+ 0_2000.png,Colon
2003
+ 0_2001.png,Colon
2004
+ 0_2002.png,Colon
2005
+ 0_2003.png,Colon
2006
+ 0_2004.png,Colon
2007
+ 0_2005.png,Colon
2008
+ 0_2006.png,Colon
2009
+ 0_2007.png,Colon
2010
+ 0_2008.png,Colon
2011
+ 0_2009.png,Colon
2012
+ 0_2010.png,Colon
2013
+ 0_2011.png,Colon
2014
+ 0_2012.png,Colon
2015
+ 0_2013.png,Colon
2016
+ 0_2014.png,Colon
2017
+ 0_2015.png,Colon
2018
+ 0_2016.png,Colon
2019
+ 0_2017.png,Colon
2020
+ 0_2018.png,Colon
2021
+ 0_2019.png,Colon
2022
+ 0_2020.png,Colon
2023
+ 0_2021.png,Colon
2024
+ 0_2022.png,Colon
2025
+ 0_2023.png,Colon
2026
+ 0_2024.png,Colon
2027
+ 0_2025.png,Colon
2028
+ 0_2026.png,Colon
2029
+ 0_2027.png,Colon
2030
+ 0_2028.png,Colon
2031
+ 0_2029.png,Colon
2032
+ 0_2030.png,Colon
2033
+ 0_2031.png,Colon
2034
+ 0_2032.png,Colon
2035
+ 0_2033.png,Colon
2036
+ 0_2034.png,Colon
2037
+ 0_2035.png,Colon
2038
+ 0_2036.png,Colon
2039
+ 0_2037.png,Colon
2040
+ 0_2038.png,Colon
2041
+ 0_2039.png,Colon
2042
+ 0_2040.png,Colon
2043
+ 0_2041.png,Colon
2044
+ 0_2042.png,Colon
2045
+ 0_2043.png,Colon
2046
+ 0_2044.png,Colon
2047
+ 0_2045.png,Colon
2048
+ 0_2046.png,Colon
2049
+ 0_2047.png,Colon
2050
+ 0_2048.png,Colon
2051
+ 0_2049.png,Colon
2052
+ 0_2050.png,Colon
2053
+ 0_2051.png,Colon
2054
+ 0_2052.png,Colon
2055
+ 0_2053.png,Colon
2056
+ 0_2054.png,Colon
2057
+ 0_2055.png,Colon
2058
+ 0_2056.png,Colon
2059
+ 0_2057.png,Colon
2060
+ 0_2058.png,Colon
2061
+ 0_2059.png,Colon
2062
+ 0_2060.png,Esophagus
2063
+ 0_2061.png,Esophagus
2064
+ 0_2062.png,Esophagus
2065
+ 0_2063.png,Esophagus
2066
+ 0_2064.png,Esophagus
2067
+ 0_2065.png,Esophagus
2068
+ 0_2066.png,Esophagus
2069
+ 0_2067.png,Esophagus
2070
+ 0_2068.png,Esophagus
2071
+ 0_2069.png,Esophagus
2072
+ 0_2070.png,Esophagus
2073
+ 0_2071.png,Esophagus
2074
+ 0_2072.png,Esophagus
2075
+ 0_2073.png,Esophagus
2076
+ 0_2074.png,Esophagus
2077
+ 0_2075.png,Esophagus
2078
+ 0_2076.png,Esophagus
2079
+ 0_2077.png,Esophagus
2080
+ 0_2078.png,Esophagus
2081
+ 0_2079.png,Esophagus
2082
+ 0_2080.png,Esophagus
2083
+ 0_2081.png,Esophagus
2084
+ 0_2082.png,Esophagus
2085
+ 0_2083.png,Esophagus
2086
+ 0_2084.png,Esophagus
2087
+ 0_2085.png,Esophagus
2088
+ 0_2086.png,Esophagus
2089
+ 0_2087.png,Esophagus
2090
+ 0_2088.png,Esophagus
2091
+ 0_2089.png,Esophagus
2092
+ 0_2090.png,Esophagus
2093
+ 0_2091.png,Esophagus
2094
+ 0_2092.png,Esophagus
2095
+ 0_2093.png,Esophagus
2096
+ 0_2094.png,Esophagus
2097
+ 0_2095.png,Esophagus
2098
+ 0_2096.png,Esophagus
2099
+ 0_2097.png,Esophagus
2100
+ 0_2098.png,HeadNeck
2101
+ 0_2099.png,HeadNeck
2102
+ 0_2100.png,HeadNeck
2103
+ 0_2101.png,HeadNeck
2104
+ 0_2102.png,HeadNeck
2105
+ 0_2103.png,HeadNeck
2106
+ 0_2104.png,HeadNeck
2107
+ 0_2105.png,HeadNeck
2108
+ 0_2106.png,HeadNeck
2109
+ 0_2107.png,HeadNeck
2110
+ 0_2108.png,HeadNeck
2111
+ 0_2109.png,HeadNeck
2112
+ 0_2110.png,HeadNeck
2113
+ 0_2111.png,HeadNeck
2114
+ 0_2112.png,HeadNeck
2115
+ 0_2113.png,HeadNeck
2116
+ 0_2114.png,HeadNeck
2117
+ 0_2115.png,HeadNeck
2118
+ 0_2116.png,HeadNeck
2119
+ 0_2117.png,HeadNeck
2120
+ 0_2118.png,HeadNeck
2121
+ 0_2119.png,HeadNeck
2122
+ 0_2120.png,HeadNeck
2123
+ 0_2121.png,HeadNeck
2124
+ 0_2122.png,HeadNeck
2125
+ 0_2123.png,HeadNeck
2126
+ 0_2124.png,HeadNeck
2127
+ 0_2125.png,HeadNeck
2128
+ 0_2126.png,HeadNeck
2129
+ 0_2127.png,HeadNeck
2130
+ 0_2128.png,HeadNeck
2131
+ 0_2129.png,HeadNeck
2132
+ 0_2130.png,HeadNeck
2133
+ 0_2131.png,HeadNeck
2134
+ 0_2132.png,HeadNeck
2135
+ 0_2133.png,HeadNeck
2136
+ 0_2134.png,HeadNeck
2137
+ 0_2135.png,HeadNeck
2138
+ 0_2136.png,HeadNeck
2139
+ 0_2137.png,HeadNeck
2140
+ 0_2138.png,HeadNeck
2141
+ 0_2139.png,HeadNeck
2142
+ 0_2140.png,HeadNeck
2143
+ 0_2141.png,HeadNeck
2144
+ 0_2142.png,HeadNeck
2145
+ 0_2143.png,HeadNeck
2146
+ 0_2144.png,HeadNeck
2147
+ 0_2145.png,HeadNeck
2148
+ 0_2146.png,HeadNeck
2149
+ 0_2147.png,HeadNeck
2150
+ 0_2148.png,HeadNeck
2151
+ 0_2149.png,HeadNeck
2152
+ 0_2150.png,HeadNeck
2153
+ 0_2151.png,HeadNeck
2154
+ 0_2152.png,HeadNeck
2155
+ 0_2153.png,HeadNeck
2156
+ 0_2154.png,HeadNeck
2157
+ 0_2155.png,HeadNeck
2158
+ 0_2156.png,HeadNeck
2159
+ 0_2157.png,HeadNeck
2160
+ 0_2158.png,HeadNeck
2161
+ 0_2159.png,HeadNeck
2162
+ 0_2160.png,HeadNeck
2163
+ 0_2161.png,HeadNeck
2164
+ 0_2162.png,HeadNeck
2165
+ 0_2163.png,HeadNeck
2166
+ 0_2164.png,HeadNeck
2167
+ 0_2165.png,HeadNeck
2168
+ 0_2166.png,HeadNeck
2169
+ 0_2167.png,HeadNeck
2170
+ 0_2168.png,Kidney
2171
+ 0_2169.png,Kidney
2172
+ 0_2170.png,Kidney
2173
+ 0_2171.png,Kidney
2174
+ 0_2172.png,Kidney
2175
+ 0_2173.png,Kidney
2176
+ 0_2174.png,Kidney
2177
+ 0_2175.png,Kidney
2178
+ 0_2176.png,Kidney
2179
+ 0_2177.png,Kidney
2180
+ 0_2178.png,Kidney
2181
+ 0_2179.png,Kidney
2182
+ 0_2180.png,Kidney
2183
+ 0_2181.png,Kidney
2184
+ 0_2182.png,Kidney
2185
+ 0_2183.png,Kidney
2186
+ 0_2184.png,Kidney
2187
+ 0_2185.png,Kidney
2188
+ 0_2186.png,Kidney
2189
+ 0_2187.png,Kidney
2190
+ 0_2188.png,Kidney
2191
+ 0_2189.png,Liver
2192
+ 0_2190.png,Liver
2193
+ 0_2191.png,Liver
2194
+ 0_2192.png,Liver
2195
+ 0_2193.png,Liver
2196
+ 0_2194.png,Liver
2197
+ 0_2195.png,Liver
2198
+ 0_2196.png,Liver
2199
+ 0_2197.png,Liver
2200
+ 0_2198.png,Liver
2201
+ 0_2199.png,Liver
2202
+ 0_2200.png,Liver
2203
+ 0_2201.png,Liver
2204
+ 0_2202.png,Liver
2205
+ 0_2203.png,Liver
2206
+ 0_2204.png,Liver
2207
+ 0_2205.png,Liver
2208
+ 0_2206.png,Liver
2209
+ 0_2207.png,Liver
2210
+ 0_2208.png,Liver
2211
+ 0_2209.png,Liver
2212
+ 0_2210.png,Liver
2213
+ 0_2211.png,Liver
2214
+ 0_2212.png,Liver
2215
+ 0_2213.png,Liver
2216
+ 0_2214.png,Liver
2217
+ 0_2215.png,Liver
2218
+ 0_2216.png,Liver
2219
+ 0_2217.png,Liver
2220
+ 0_2218.png,Liver
2221
+ 0_2219.png,Liver
2222
+ 0_2220.png,Liver
2223
+ 0_2221.png,Liver
2224
+ 0_2222.png,Liver
2225
+ 0_2223.png,Liver
2226
+ 0_2224.png,Liver
2227
+ 0_2225.png,Liver
2228
+ 0_2226.png,Liver
2229
+ 0_2227.png,Liver
2230
+ 0_2228.png,Liver
2231
+ 0_2229.png,Liver
2232
+ 0_2230.png,Liver
2233
+ 0_2231.png,Liver
2234
+ 0_2232.png,Liver
2235
+ 0_2233.png,Liver
2236
+ 0_2234.png,Liver
2237
+ 0_2235.png,Liver
2238
+ 0_2236.png,Liver
2239
+ 0_2237.png,Liver
2240
+ 0_2238.png,Liver
2241
+ 0_2239.png,Liver
2242
+ 0_2240.png,Liver
2243
+ 0_2241.png,Liver
2244
+ 0_2242.png,Liver
2245
+ 0_2243.png,Liver
2246
+ 0_2244.png,Liver
2247
+ 0_2245.png,Liver
2248
+ 0_2246.png,Liver
2249
+ 0_2247.png,Liver
2250
+ 0_2248.png,Liver
2251
+ 0_2249.png,Liver
2252
+ 0_2250.png,Liver
2253
+ 0_2251.png,Liver
2254
+ 0_2252.png,Liver
2255
+ 0_2253.png,Liver
2256
+ 0_2254.png,Lung
2257
+ 0_2255.png,Lung
2258
+ 0_2256.png,Lung
2259
+ 0_2257.png,Lung
2260
+ 0_2258.png,Lung
2261
+ 0_2259.png,Lung
2262
+ 0_2260.png,Lung
2263
+ 0_2261.png,Lung
2264
+ 0_2262.png,Lung
2265
+ 0_2263.png,Lung
2266
+ 0_2264.png,Lung
2267
+ 0_2265.png,Lung
2268
+ 0_2266.png,Lung
2269
+ 0_2267.png,Lung
2270
+ 0_2268.png,Ovarian
2271
+ 0_2269.png,Ovarian
2272
+ 0_2270.png,Ovarian
2273
+ 0_2271.png,Ovarian
2274
+ 0_2272.png,Ovarian
2275
+ 0_2273.png,Ovarian
2276
+ 0_2274.png,Ovarian
2277
+ 0_2275.png,Ovarian
2278
+ 0_2276.png,Ovarian
2279
+ 0_2277.png,Ovarian
2280
+ 0_2278.png,Ovarian
2281
+ 0_2279.png,Ovarian
2282
+ 0_2280.png,Ovarian
2283
+ 0_2281.png,Ovarian
2284
+ 0_2282.png,Ovarian
2285
+ 0_2283.png,Ovarian
2286
+ 0_2284.png,Ovarian
2287
+ 0_2285.png,Ovarian
2288
+ 0_2286.png,Ovarian
2289
+ 0_2287.png,Ovarian
2290
+ 0_2288.png,Ovarian
2291
+ 0_2289.png,Ovarian
2292
+ 0_2290.png,Ovarian
2293
+ 0_2291.png,Ovarian
2294
+ 0_2292.png,Ovarian
2295
+ 0_2293.png,Ovarian
2296
+ 0_2294.png,Ovarian
2297
+ 0_2295.png,Ovarian
2298
+ 0_2296.png,Ovarian
2299
+ 0_2297.png,Ovarian
2300
+ 0_2298.png,Ovarian
2301
+ 0_2299.png,Ovarian
2302
+ 0_2300.png,Ovarian
2303
+ 0_2301.png,Ovarian
2304
+ 0_2302.png,Ovarian
2305
+ 0_2303.png,Ovarian
2306
+ 0_2304.png,Ovarian
2307
+ 0_2305.png,Ovarian
2308
+ 0_2306.png,Ovarian
2309
+ 0_2307.png,Ovarian
2310
+ 0_2308.png,Ovarian
2311
+ 0_2309.png,Ovarian
2312
+ 0_2310.png,Ovarian
2313
+ 0_2311.png,Ovarian
2314
+ 0_2312.png,Ovarian
2315
+ 0_2313.png,Pancreatic
2316
+ 0_2314.png,Pancreatic
2317
+ 0_2315.png,Pancreatic
2318
+ 0_2316.png,Pancreatic
2319
+ 0_2317.png,Pancreatic
2320
+ 0_2318.png,Pancreatic
2321
+ 0_2319.png,Pancreatic
2322
+ 0_2320.png,Pancreatic
2323
+ 0_2321.png,Pancreatic
2324
+ 0_2322.png,Pancreatic
2325
+ 0_2323.png,Pancreatic
2326
+ 0_2324.png,Pancreatic
2327
+ 0_2325.png,Pancreatic
2328
+ 0_2326.png,Pancreatic
2329
+ 0_2327.png,Pancreatic
2330
+ 0_2328.png,Pancreatic
2331
+ 0_2329.png,Pancreatic
2332
+ 0_2330.png,Pancreatic
2333
+ 0_2331.png,Pancreatic
2334
+ 0_2332.png,Pancreatic
2335
+ 0_2333.png,Pancreatic
2336
+ 0_2334.png,Pancreatic
2337
+ 0_2335.png,Pancreatic
2338
+ 0_2336.png,Pancreatic
2339
+ 0_2337.png,Pancreatic
2340
+ 0_2338.png,Pancreatic
2341
+ 0_2339.png,Pancreatic
2342
+ 0_2340.png,Pancreatic
2343
+ 0_2341.png,Pancreatic
2344
+ 0_2342.png,Pancreatic
2345
+ 0_2343.png,Pancreatic
2346
+ 0_2344.png,Pancreatic
2347
+ 0_2345.png,Pancreatic
2348
+ 0_2346.png,Pancreatic
2349
+ 0_2347.png,Pancreatic
2350
+ 0_2348.png,Pancreatic
2351
+ 0_2349.png,Pancreatic
2352
+ 0_2350.png,Pancreatic
2353
+ 0_2351.png,Pancreatic
2354
+ 0_2352.png,Pancreatic
2355
+ 0_2353.png,Pancreatic
2356
+ 0_2354.png,Pancreatic
2357
+ 0_2355.png,Pancreatic
2358
+ 0_2356.png,Pancreatic
2359
+ 0_2357.png,Pancreatic
2360
+ 0_2358.png,Pancreatic
2361
+ 0_2359.png,Pancreatic
2362
+ 0_2360.png,Prostate
2363
+ 0_2361.png,Prostate
2364
+ 0_2362.png,Prostate
2365
+ 0_2363.png,Prostate
2366
+ 0_2364.png,Prostate
2367
+ 0_2365.png,Prostate
2368
+ 0_2366.png,Prostate
2369
+ 0_2367.png,Prostate
2370
+ 0_2368.png,Prostate
2371
+ 0_2369.png,Prostate
2372
+ 0_2370.png,Prostate
2373
+ 0_2371.png,Prostate
2374
+ 0_2372.png,Prostate
2375
+ 0_2373.png,Prostate
2376
+ 0_2374.png,Prostate
2377
+ 0_2375.png,Prostate
2378
+ 0_2376.png,Prostate
2379
+ 0_2377.png,Prostate
2380
+ 0_2378.png,Prostate
2381
+ 0_2379.png,Prostate
2382
+ 0_2380.png,Prostate
2383
+ 0_2381.png,Prostate
2384
+ 0_2382.png,Prostate
2385
+ 0_2383.png,Prostate
2386
+ 0_2384.png,Prostate
2387
+ 0_2385.png,Prostate
2388
+ 0_2386.png,Prostate
2389
+ 0_2387.png,Prostate
2390
+ 0_2388.png,Prostate
2391
+ 0_2389.png,Prostate
2392
+ 0_2390.png,Prostate
2393
+ 0_2391.png,Prostate
2394
+ 0_2392.png,Prostate
2395
+ 0_2393.png,Prostate
2396
+ 0_2394.png,Prostate
2397
+ 0_2395.png,Prostate
2398
+ 0_2396.png,Prostate
2399
+ 0_2397.png,Prostate
2400
+ 0_2398.png,Prostate
2401
+ 0_2399.png,Prostate
2402
+ 0_2400.png,Prostate
2403
+ 0_2401.png,Prostate
2404
+ 0_2402.png,Prostate
2405
+ 0_2403.png,Prostate
2406
+ 0_2404.png,Prostate
2407
+ 0_2405.png,Prostate
2408
+ 0_2406.png,Prostate
2409
+ 0_2407.png,Prostate
2410
+ 0_2408.png,Prostate
2411
+ 0_2409.png,Prostate
2412
+ 0_2410.png,Prostate
2413
+ 0_2411.png,Prostate
2414
+ 0_2412.png,Prostate
2415
+ 0_2413.png,Skin
2416
+ 0_2414.png,Skin
2417
+ 0_2415.png,Skin
2418
+ 0_2416.png,Skin
2419
+ 0_2417.png,Skin
2420
+ 0_2418.png,Skin
2421
+ 0_2419.png,Skin
2422
+ 0_2420.png,Skin
2423
+ 0_2421.png,Skin
2424
+ 0_2422.png,Skin
2425
+ 0_2423.png,Skin
2426
+ 0_2424.png,Skin
2427
+ 0_2425.png,Skin
2428
+ 0_2426.png,Skin
2429
+ 0_2427.png,Skin
2430
+ 0_2428.png,Skin
2431
+ 0_2429.png,Skin
2432
+ 0_2430.png,Skin
2433
+ 0_2431.png,Skin
2434
+ 0_2432.png,Skin
2435
+ 0_2433.png,Skin
2436
+ 0_2434.png,Skin
2437
+ 0_2435.png,Skin
2438
+ 0_2436.png,Skin
2439
+ 0_2437.png,Skin
2440
+ 0_2438.png,Skin
2441
+ 0_2439.png,Skin
2442
+ 0_2440.png,Skin
2443
+ 0_2441.png,Skin
2444
+ 0_2442.png,Skin
2445
+ 0_2443.png,Skin
2446
+ 0_2444.png,Skin
2447
+ 0_2445.png,Skin
2448
+ 0_2446.png,Skin
2449
+ 0_2447.png,Skin
2450
+ 0_2448.png,Skin
2451
+ 0_2449.png,Skin
2452
+ 0_2450.png,Skin
2453
+ 0_2451.png,Skin
2454
+ 0_2452.png,Skin
2455
+ 0_2453.png,Skin
2456
+ 0_2454.png,Skin
2457
+ 0_2455.png,Skin
2458
+ 0_2456.png,Skin
2459
+ 0_2457.png,Skin
2460
+ 0_2458.png,Skin
2461
+ 0_2459.png,Skin
2462
+ 0_2460.png,Skin
2463
+ 0_2461.png,Skin
2464
+ 0_2462.png,Skin
2465
+ 0_2463.png,Skin
2466
+ 0_2464.png,Skin
2467
+ 0_2465.png,Skin
2468
+ 0_2466.png,Stomach
2469
+ 0_2467.png,Stomach
2470
+ 0_2468.png,Stomach
2471
+ 0_2469.png,Stomach
2472
+ 0_2470.png,Stomach
2473
+ 0_2471.png,Stomach
2474
+ 0_2472.png,Stomach
2475
+ 0_2473.png,Stomach
2476
+ 0_2474.png,Stomach
2477
+ 0_2475.png,Stomach
2478
+ 0_2476.png,Stomach
2479
+ 0_2477.png,Stomach
2480
+ 0_2478.png,Stomach
2481
+ 0_2479.png,Stomach
2482
+ 0_2480.png,Stomach
2483
+ 0_2481.png,Stomach
2484
+ 0_2482.png,Stomach
2485
+ 0_2483.png,Stomach
2486
+ 0_2484.png,Stomach
2487
+ 0_2485.png,Stomach
2488
+ 0_2486.png,Stomach
2489
+ 0_2487.png,Stomach
2490
+ 0_2488.png,Stomach
2491
+ 0_2489.png,Stomach
2492
+ 0_2490.png,Stomach
2493
+ 0_2491.png,Stomach
2494
+ 0_2492.png,Stomach
2495
+ 0_2493.png,Stomach
2496
+ 0_2494.png,Stomach
2497
+ 0_2495.png,Stomach
2498
+ 0_2496.png,Stomach
2499
+ 0_2497.png,Stomach
2500
+ 0_2498.png,Stomach
2501
+ 0_2499.png,Stomach
2502
+ 0_2500.png,Stomach
2503
+ 0_2501.png,Stomach
2504
+ 0_2502.png,Stomach
2505
+ 0_2503.png,Stomach
2506
+ 0_2504.png,Stomach
2507
+ 0_2505.png,Stomach
2508
+ 0_2506.png,Stomach
2509
+ 0_2507.png,Stomach
2510
+ 0_2508.png,Stomach
2511
+ 0_2509.png,Stomach
2512
+ 0_2510.png,Stomach
2513
+ 0_2511.png,Stomach
2514
+ 0_2512.png,Testis
2515
+ 0_2513.png,Testis
2516
+ 0_2514.png,Testis
2517
+ 0_2515.png,Testis
2518
+ 0_2516.png,Testis
2519
+ 0_2517.png,Testis
2520
+ 0_2518.png,Testis
2521
+ 0_2519.png,Testis
2522
+ 0_2520.png,Testis
2523
+ 0_2521.png,Testis
2524
+ 0_2522.png,Testis
2525
+ 0_2523.png,Testis
2526
+ 0_2524.png,Testis
2527
+ 0_2525.png,Testis
2528
+ 0_2526.png,Testis
2529
+ 0_2527.png,Testis
2530
+ 0_2528.png,Testis
2531
+ 0_2529.png,Testis
2532
+ 0_2530.png,Testis
2533
+ 0_2531.png,Testis
2534
+ 0_2532.png,Testis
2535
+ 0_2533.png,Testis
2536
+ 0_2534.png,Testis
2537
+ 0_2535.png,Testis
2538
+ 0_2536.png,Testis
2539
+ 0_2537.png,Thyroid
2540
+ 0_2538.png,Thyroid
2541
+ 0_2539.png,Thyroid
2542
+ 0_2540.png,Thyroid
2543
+ 0_2541.png,Thyroid
2544
+ 0_2542.png,Thyroid
2545
+ 0_2543.png,Thyroid
2546
+ 0_2544.png,Thyroid
2547
+ 0_2545.png,Thyroid
2548
+ 0_2546.png,Thyroid
2549
+ 0_2547.png,Thyroid
2550
+ 0_2548.png,Thyroid
2551
+ 0_2549.png,Thyroid
2552
+ 0_2550.png,Thyroid
2553
+ 0_2551.png,Thyroid
2554
+ 0_2552.png,Thyroid
2555
+ 0_2553.png,Thyroid
2556
+ 0_2554.png,Thyroid
2557
+ 0_2555.png,Thyroid
2558
+ 0_2556.png,Thyroid
2559
+ 0_2557.png,Thyroid
2560
+ 0_2558.png,Thyroid
2561
+ 0_2559.png,Thyroid
2562
+ 0_2560.png,Thyroid
2563
+ 0_2561.png,Thyroid
2564
+ 0_2562.png,Thyroid
2565
+ 0_2563.png,Thyroid
2566
+ 0_2564.png,Thyroid
2567
+ 0_2565.png,Thyroid
2568
+ 0_2566.png,Thyroid
2569
+ 0_2567.png,Thyroid
2570
+ 0_2568.png,Thyroid
2571
+ 0_2569.png,Thyroid
2572
+ 0_2570.png,Thyroid
2573
+ 0_2571.png,Thyroid
2574
+ 0_2572.png,Thyroid
2575
+ 0_2573.png,Thyroid
2576
+ 0_2574.png,Thyroid
2577
+ 0_2575.png,Thyroid
2578
+ 0_2576.png,Thyroid
2579
+ 0_2577.png,Thyroid
2580
+ 0_2578.png,Uterus
2581
+ 0_2579.png,Uterus
2582
+ 0_2580.png,Uterus
2583
+ 0_2581.png,Uterus
2584
+ 0_2582.png,Uterus
2585
+ 0_2583.png,Uterus
2586
+ 0_2584.png,Uterus
2587
+ 0_2585.png,Uterus
2588
+ 0_2586.png,Uterus
2589
+ 0_2587.png,Uterus
2590
+ 0_2588.png,Uterus
2591
+ 0_2589.png,Uterus
2592
+ 0_2590.png,Uterus
2593
+ 0_2591.png,Uterus
2594
+ 0_2592.png,Colon
2595
+ 0_2593.png,Colon
2596
+ 0_2594.png,Colon
2597
+ 0_2595.png,Colon
2598
+ 0_2596.png,Colon
2599
+ 0_2597.png,Colon
2600
+ 0_2598.png,Colon
2601
+ 0_2599.png,Colon
2602
+ 0_2600.png,Colon
2603
+ 0_2601.png,Colon
2604
+ 0_2602.png,Colon
2605
+ 0_2603.png,Colon
2606
+ 0_2604.png,Colon
2607
+ 0_2605.png,Colon
2608
+ 0_2606.png,Colon
2609
+ 0_2607.png,Colon
2610
+ 0_2608.png,Colon
2611
+ 0_2609.png,Colon
2612
+ 0_2610.png,Colon
2613
+ 0_2611.png,Colon
2614
+ 0_2612.png,Colon
2615
+ 0_2613.png,Colon
2616
+ 0_2614.png,Colon
2617
+ 0_2615.png,Colon
2618
+ 0_2616.png,Colon
2619
+ 0_2617.png,Colon
2620
+ 0_2618.png,Colon
2621
+ 0_2619.png,Colon
2622
+ 0_2620.png,Colon
2623
+ 0_2621.png,Colon
2624
+ 0_2622.png,Colon
2625
+ 0_2623.png,Colon
2626
+ 0_2624.png,Colon
2627
+ 0_2625.png,Colon
2628
+ 0_2626.png,Colon
2629
+ 0_2627.png,Colon
2630
+ 0_2628.png,Colon
2631
+ 0_2629.png,Colon
2632
+ 0_2630.png,Colon
2633
+ 0_2631.png,Colon
2634
+ 0_2632.png,Colon
2635
+ 0_2633.png,Colon
2636
+ 0_2634.png,Colon
2637
+ 0_2635.png,Colon
2638
+ 0_2636.png,Colon
2639
+ 0_2637.png,Colon
2640
+ 0_2638.png,Colon
2641
+ 0_2639.png,Colon
2642
+ 0_2640.png,Colon
2643
+ 0_2641.png,Colon
2644
+ 0_2642.png,Colon
2645
+ 0_2643.png,Colon
2646
+ 0_2644.png,Colon
2647
+ 0_2645.png,Colon
2648
+ 0_2646.png,Colon
2649
+ 0_2647.png,Colon
2650
+ 0_2648.png,Colon
2651
+ 0_2649.png,Colon
2652
+ 0_2650.png,Colon
2653
+ 0_2651.png,Colon
2654
+ 0_2652.png,Colon
2655
+ 0_2653.png,Colon
2656
+ 0_2654.png,Colon
2657
+ 0_2655.png,Colon
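(Editor's note, not part of this commit: a minimal sketch of how the per-fold metadata CSVs added in this diff could be inspected, e.g. to check tissue and nuclei-class balance per fold. It assumes pandas is installed and uses the paths shown in this commit; the header row of types.csv is not visible in this excerpt, so its columns are accessed positionally, while cell_count.csv's header — Image,Neoplastic,Inflammatory,Connective,Dead,Epithelial — is visible below.)

from pathlib import Path

import pandas as pd

fold_dir = Path("docs/datasets/PanNuke/fold0")

# types.csv maps each patch image to its tissue of origin
# (rows look like "0_751.png,Colon" in the diff above).
# Header not shown in this excerpt, so use positional access:
# column 0 = image file name, column 1 = tissue type.
types_df = pd.read_csv(fold_dir / "types.csv")
print(types_df.iloc[:, 1].value_counts())

# cell_count.csv stores per-patch nuclei counts per class; summing
# the numeric columns gives the fold-level class distribution.
counts_df = pd.read_csv(fold_dir / "cell_count.csv")
print(counts_df.sum(numeric_only=True))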
docs/datasets/PanNuke/fold1/cell_count.csv ADDED
@@ -0,0 +1,2524 @@
1
+ Image,Neoplastic,Inflammatory,Connective,Dead,Epithelial
2
+ 1_0.png,6,0,3,0,0
3
+ 1_1.png,1,0,9,0,0
4
+ 1_10.png,7,1,3,0,0
5
+ 1_100.png,0,1,5,0,23
6
+ 1_1000.png,0,0,23,0,0
7
+ 1_1001.png,0,0,13,0,0
8
+ 1_1002.png,0,0,7,0,0
9
+ 1_1003.png,0,0,10,0,0
10
+ 1_1004.png,0,0,8,0,0
11
+ 1_1005.png,0,0,10,0,0
12
+ 1_1006.png,0,0,13,0,0
13
+ 1_1007.png,34,0,8,0,0
14
+ 1_1008.png,36,0,8,0,0
15
+ 1_1009.png,32,0,2,2,0
16
+ 1_101.png,1,0,6,0,17
17
+ 1_1010.png,34,0,1,4,0
18
+ 1_1011.png,33,0,2,0,0
19
+ 1_1012.png,33,0,1,0,0
20
+ 1_1013.png,35,0,5,0,0
21
+ 1_1014.png,22,0,8,0,0
22
+ 1_1015.png,10,0,27,0,0
23
+ 1_1016.png,23,1,10,0,0
24
+ 1_1017.png,4,1,42,0,0
25
+ 1_1018.png,4,1,24,0,0
26
+ 1_1019.png,29,0,1,0,0
27
+ 1_102.png,2,1,6,0,0
28
+ 1_1020.png,0,0,35,0,0
29
+ 1_1021.png,0,0,28,0,0
30
+ 1_1022.png,6,1,30,0,0
31
+ 1_1023.png,0,5,0,0,0
32
+ 1_1024.png,0,2,0,0,0
33
+ 1_1025.png,0,3,2,0,0
34
+ 1_1026.png,0,5,0,0,0
35
+ 1_1027.png,0,0,1,0,0
36
+ 1_1028.png,0,0,0,0,0
37
+ 1_1029.png,0,0,0,0,0
38
+ 1_103.png,0,1,3,0,41
39
+ 1_1030.png,0,7,1,0,0
40
+ 1_1031.png,17,0,1,0,0
41
+ 1_1032.png,18,0,6,0,0
42
+ 1_1033.png,20,0,4,0,0
43
+ 1_1034.png,12,0,5,0,0
44
+ 1_1035.png,26,1,2,0,0
45
+ 1_1036.png,20,0,6,0,0
46
+ 1_1037.png,20,1,2,0,0
47
+ 1_1038.png,19,1,9,0,0
48
+ 1_1039.png,13,0,4,0,0
49
+ 1_104.png,0,0,6,0,32
50
+ 1_1040.png,13,0,3,0,0
51
+ 1_1041.png,17,0,2,0,0
52
+ 1_1042.png,17,0,0,0,0
53
+ 1_1043.png,20,0,1,0,0
54
+ 1_1044.png,22,0,0,0,0
55
+ 1_1045.png,8,0,5,0,0
56
+ 1_1046.png,0,0,4,0,0
57
+ 1_1047.png,6,1,4,0,0
58
+ 1_1048.png,11,0,0,0,0
59
+ 1_1049.png,11,0,1,0,0
60
+ 1_105.png,31,0,1,0,0
61
+ 1_1050.png,8,1,7,0,0
62
+ 1_1051.png,14,0,4,0,0
63
+ 1_1052.png,14,0,4,0,0
64
+ 1_1053.png,15,0,0,0,0
65
+ 1_1054.png,10,0,3,0,0
66
+ 1_1055.png,9,0,1,0,0
67
+ 1_1056.png,6,1,1,0,0
68
+ 1_1057.png,14,0,1,0,0
69
+ 1_1058.png,9,0,3,0,0
70
+ 1_1059.png,12,0,1,0,0
71
+ 1_106.png,24,0,9,0,0
72
+ 1_1060.png,6,1,2,0,0
73
+ 1_1061.png,13,0,1,0,0
74
+ 1_1062.png,18,0,0,0,0
75
+ 1_1063.png,10,0,3,0,0
76
+ 1_1064.png,10,0,3,0,0
77
+ 1_1065.png,8,0,2,0,0
78
+ 1_1066.png,9,0,2,0,0
79
+ 1_1067.png,5,1,1,0,0
80
+ 1_1068.png,10,1,2,0,0
81
+ 1_1069.png,13,0,0,0,0
82
+ 1_107.png,43,0,1,0,0
83
+ 1_1070.png,10,0,1,0,0
84
+ 1_1071.png,17,0,1,0,0
85
+ 1_1072.png,8,0,1,0,0
86
+ 1_1073.png,8,0,0,0,0
87
+ 1_1074.png,15,0,0,0,0
88
+ 1_1075.png,11,0,0,0,0
89
+ 1_1076.png,11,0,1,0,0
90
+ 1_1077.png,12,0,0,0,0
91
+ 1_1078.png,13,0,0,0,0
92
+ 1_1079.png,10,0,0,0,0
93
+ 1_108.png,38,1,0,0,0
94
+ 1_1080.png,8,0,2,0,0
95
+ 1_1081.png,14,0,0,0,0
96
+ 1_1082.png,11,0,0,0,0
97
+ 1_1083.png,10,0,4,0,0
98
+ 1_1084.png,10,1,4,0,0
99
+ 1_1085.png,8,1,8,0,0
100
+ 1_1086.png,2,1,15,0,0
101
+ 1_1087.png,7,0,6,0,0
102
+ 1_1088.png,0,5,5,0,12
103
+ 1_1089.png,0,0,0,0,4
104
+ 1_109.png,0,67,2,0,0
105
+ 1_1090.png,0,3,8,0,13
106
+ 1_1091.png,0,8,4,0,14
107
+ 1_1092.png,0,0,3,0,18
108
+ 1_1093.png,24,0,0,0,0
109
+ 1_1094.png,23,0,1,0,0
110
+ 1_1095.png,5,2,5,0,0
111
+ 1_1096.png,6,0,7,0,0
112
+ 1_1097.png,12,0,5,0,0
113
+ 1_1098.png,13,0,8,0,0
114
+ 1_1099.png,20,0,5,0,0
115
+ 1_11.png,6,1,4,0,0
116
+ 1_110.png,3,35,14,0,0
117
+ 1_1100.png,15,1,3,0,0
118
+ 1_1101.png,6,0,7,0,0
119
+ 1_1102.png,2,0,11,0,0
120
+ 1_1103.png,4,0,10,0,0
121
+ 1_1104.png,1,0,5,0,0
122
+ 1_1105.png,31,0,0,0,0
123
+ 1_1106.png,0,0,9,0,0
124
+ 1_1107.png,6,0,5,0,0
125
+ 1_1108.png,0,1,3,0,8
126
+ 1_1109.png,0,0,1,0,16
127
+ 1_111.png,9,25,10,0,0
128
+ 1_1110.png,0,2,3,0,23
129
+ 1_1111.png,0,0,3,0,20
130
+ 1_1112.png,0,0,4,0,19
131
+ 1_1113.png,0,1,3,0,16
132
+ 1_1114.png,0,1,2,0,18
133
+ 1_1115.png,0,1,4,0,11
134
+ 1_1116.png,0,0,1,0,35
135
+ 1_1117.png,0,1,8,0,16
136
+ 1_1118.png,0,1,2,0,21
137
+ 1_1119.png,0,2,11,0,20
138
+ 1_112.png,3,37,10,0,0
139
+ 1_1120.png,0,2,4,0,36
140
+ 1_1121.png,0,1,14,0,12
141
+ 1_1122.png,0,3,4,0,22
142
+ 1_1123.png,0,3,0,0,29
143
+ 1_1124.png,0,5,5,0,29
144
+ 1_1125.png,0,7,32,0,0
145
+ 1_1126.png,0,12,14,0,32
146
+ 1_1127.png,0,0,13,0,0
147
+ 1_1128.png,0,0,7,0,0
148
+ 1_1129.png,0,0,19,0,0
149
+ 1_113.png,0,1,11,0,27
150
+ 1_1130.png,0,0,14,0,0
151
+ 1_1131.png,0,2,20,0,0
152
+ 1_1132.png,0,0,11,0,0
153
+ 1_1133.png,0,2,15,0,0
154
+ 1_1134.png,4,0,4,0,0
155
+ 1_1135.png,0,0,14,0,0
156
+ 1_1136.png,7,0,6,0,0
157
+ 1_1137.png,0,0,15,0,0
158
+ 1_1138.png,3,1,12,0,0
159
+ 1_1139.png,17,0,7,0,0
160
+ 1_114.png,0,0,0,0,21
161
+ 1_1140.png,9,0,8,0,0
162
+ 1_1141.png,1,0,21,0,0
163
+ 1_1142.png,0,1,19,0,0
164
+ 1_1143.png,25,0,2,0,0
165
+ 1_1144.png,1,0,15,0,0
166
+ 1_1145.png,0,0,19,0,0
167
+ 1_1146.png,22,0,6,0,0
168
+ 1_1147.png,2,2,8,0,0
169
+ 1_1148.png,0,1,11,0,0
170
+ 1_1149.png,0,3,14,0,0
171
+ 1_115.png,0,0,6,0,25
172
+ 1_1150.png,12,0,2,0,0
173
+ 1_1151.png,3,1,9,0,0
174
+ 1_1152.png,1,1,14,0,0
175
+ 1_1153.png,1,0,16,0,0
176
+ 1_1154.png,7,4,0,0,0
177
+ 1_1155.png,6,1,7,0,0
178
+ 1_1156.png,10,1,9,0,0
179
+ 1_1157.png,3,1,3,0,0
180
+ 1_1158.png,17,0,0,0,0
181
+ 1_1159.png,0,1,15,0,0
182
+ 1_116.png,0,0,5,0,34
183
+ 1_1160.png,8,0,5,0,0
184
+ 1_1161.png,10,1,8,0,0
185
+ 1_1162.png,16,0,0,0,0
186
+ 1_1163.png,5,1,5,0,0
187
+ 1_1164.png,18,0,0,0,0
188
+ 1_1165.png,15,1,2,0,0
189
+ 1_1166.png,14,1,8,0,0
190
+ 1_1167.png,18,1,2,0,0
191
+ 1_1168.png,6,2,9,0,0
192
+ 1_1169.png,19,0,4,0,0
193
+ 1_117.png,0,1,1,0,35
194
+ 1_1170.png,16,0,2,0,0
195
+ 1_1171.png,18,2,2,0,0
196
+ 1_1172.png,15,3,0,0,0
197
+ 1_1173.png,27,1,0,0,0
198
+ 1_1174.png,16,5,2,0,0
199
+ 1_1175.png,13,4,0,0,0
200
+ 1_1176.png,17,13,0,0,0
201
+ 1_1177.png,17,13,0,0,0
202
+ 1_1178.png,21,2,0,0,0
203
+ 1_1179.png,15,6,5,0,0
204
+ 1_118.png,0,0,0,0,36
205
+ 1_1180.png,12,8,3,0,0
206
+ 1_1181.png,16,4,1,0,0
207
+ 1_1182.png,22,1,0,0,0
208
+ 1_1183.png,16,3,2,1,0
209
+ 1_1184.png,49,2,1,0,0
210
+ 1_1185.png,55,0,1,0,0
211
+ 1_1186.png,27,0,0,5,0
212
+ 1_1187.png,48,0,0,0,0
213
+ 1_1188.png,41,13,3,0,0
214
+ 1_1189.png,45,0,0,0,0
215
+ 1_119.png,0,2,8,0,19
216
+ 1_1190.png,0,12,4,0,0
217
+ 1_1191.png,0,10,2,0,0
218
+ 1_1192.png,0,17,6,0,0
219
+ 1_1193.png,0,12,7,0,0
220
+ 1_1194.png,5,0,11,0,0
221
+ 1_1195.png,9,0,9,0,0
222
+ 1_1196.png,16,4,0,0,0
223
+ 1_1197.png,9,1,4,0,0
224
+ 1_1198.png,12,4,4,0,0
225
+ 1_1199.png,10,1,4,0,0
226
+ 1_12.png,5,2,3,0,0
227
+ 1_120.png,0,0,3,0,28
228
+ 1_1200.png,10,0,11,0,0
229
+ 1_1201.png,30,5,4,0,0
230
+ 1_1202.png,9,0,0,0,0
231
+ 1_1203.png,11,0,0,0,0
232
+ 1_1204.png,15,0,0,0,0
233
+ 1_1205.png,14,0,0,0,0
234
+ 1_1206.png,23,0,1,0,0
235
+ 1_1207.png,20,0,0,0,0
236
+ 1_1208.png,20,0,0,0,0
237
+ 1_1209.png,18,0,0,0,0
238
+ 1_121.png,0,2,14,0,20
239
+ 1_1210.png,14,0,2,0,0
240
+ 1_1211.png,20,0,3,0,0
241
+ 1_1212.png,20,0,1,0,0
242
+ 1_1213.png,11,1,3,0,0
243
+ 1_1214.png,19,0,0,0,0
244
+ 1_1215.png,19,0,0,0,0
245
+ 1_1216.png,27,0,0,0,0
246
+ 1_1217.png,22,1,0,0,0
247
+ 1_1218.png,2,2,13,0,0
248
+ 1_1219.png,17,0,0,0,0
249
+ 1_122.png,0,2,3,0,37
250
+ 1_1220.png,13,0,0,0,0
251
+ 1_1221.png,20,2,5,0,0
252
+ 1_1222.png,21,0,1,0,0
253
+ 1_1223.png,19,3,5,0,0
254
+ 1_1224.png,11,9,3,0,0
255
+ 1_1225.png,17,3,5,0,0
256
+ 1_1226.png,15,0,9,0,0
257
+ 1_1227.png,7,0,17,0,0
258
+ 1_1228.png,5,2,13,0,0
259
+ 1_1229.png,14,3,6,0,0
260
+ 1_123.png,0,2,3,0,23
261
+ 1_1230.png,16,5,11,0,0
262
+ 1_1231.png,0,0,12,0,0
263
+ 1_1232.png,3,4,13,0,0
264
+ 1_1233.png,0,11,12,0,0
265
+ 1_1234.png,0,3,15,0,0
266
+ 1_1235.png,1,10,10,0,0
267
+ 1_1236.png,0,13,12,0,0
268
+ 1_1237.png,0,1,6,0,25
269
+ 1_1238.png,0,0,4,0,27
270
+ 1_1239.png,0,3,13,0,19
271
+ 1_124.png,1,1,6,0,0
272
+ 1_1240.png,0,4,7,0,17
273
+ 1_1241.png,0,0,0,0,0
274
+ 1_1242.png,0,0,8,0,20
275
+ 1_1243.png,0,0,0,0,3
276
+ 1_1244.png,0,0,0,0,9
277
+ 1_1245.png,0,0,2,0,1
278
+ 1_1246.png,0,1,2,0,23
279
+ 1_1247.png,0,1,7,0,24
280
+ 1_1248.png,0,0,0,0,0
281
+ 1_1249.png,0,0,0,0,0
282
+ 1_125.png,22,0,2,0,0
283
+ 1_1250.png,0,0,0,0,1
284
+ 1_1251.png,0,4,23,0,13
285
+ 1_1252.png,0,9,14,0,14
286
+ 1_1253.png,0,10,15,0,0
287
+ 1_1254.png,0,0,11,0,12
288
+ 1_1255.png,0,0,0,0,2
289
+ 1_1256.png,0,0,5,0,5
290
+ 1_1257.png,0,1,9,0,24
291
+ 1_1258.png,0,17,24,0,0
292
+ 1_1259.png,1,24,13,0,0
293
+ 1_126.png,21,0,1,0,0
294
+ 1_1260.png,0,16,28,0,0
295
+ 1_1261.png,9,18,18,0,0
296
+ 1_1262.png,0,4,10,0,0
297
+ 1_1263.png,0,0,22,0,16
298
+ 1_1264.png,0,0,20,0,3
299
+ 1_1265.png,24,0,1,4,0
300
+ 1_1266.png,19,0,5,2,0
301
+ 1_1267.png,0,0,6,0,0
302
+ 1_1268.png,14,0,9,0,0
303
+ 1_1269.png,29,0,7,0,0
304
+ 1_127.png,8,0,4,0,0
305
+ 1_1270.png,8,0,4,0,0
306
+ 1_1271.png,2,0,14,0,0
307
+ 1_1272.png,0,0,10,0,0
308
+ 1_1273.png,1,0,8,0,0
309
+ 1_1274.png,1,0,13,0,0
310
+ 1_1275.png,0,0,10,0,0
311
+ 1_1276.png,12,0,8,0,0
312
+ 1_1277.png,0,0,6,0,0
313
+ 1_1278.png,0,0,5,0,0
314
+ 1_1279.png,0,0,2,0,0
315
+ 1_128.png,3,0,2,0,0
316
+ 1_1280.png,0,0,27,0,0
317
+ 1_1281.png,0,8,18,0,0
318
+ 1_1282.png,11,7,15,0,0
319
+ 1_1283.png,15,4,12,0,0
320
+ 1_1284.png,7,18,13,0,0
321
+ 1_1285.png,0,30,13,0,0
322
+ 1_1286.png,42,1,0,0,0
323
+ 1_1287.png,3,29,9,0,0
324
+ 1_1288.png,6,2,18,0,0
325
+ 1_1289.png,15,0,3,0,0
326
+ 1_129.png,0,1,7,0,19
327
+ 1_1290.png,41,2,4,0,0
328
+ 1_1291.png,19,22,5,0,0
329
+ 1_1292.png,0,42,14,0,0
330
+ 1_1293.png,0,24,14,0,0
331
+ 1_1294.png,8,20,3,0,0
332
+ 1_1295.png,0,7,11,0,29
333
+ 1_1296.png,0,22,16,0,33
334
+ 1_1297.png,81,1,0,0,0
335
+ 1_1298.png,44,0,0,20,0
336
+ 1_1299.png,46,4,12,1,0
337
+ 1_13.png,12,3,1,0,0
338
+ 1_130.png,0,1,2,0,36
339
+ 1_1300.png,0,0,0,0,10
340
+ 1_1301.png,0,3,9,0,27
341
+ 1_1302.png,0,49,61,0,0
342
+ 1_1303.png,0,1,6,0,0
343
+ 1_1304.png,0,62,70,0,0
344
+ 1_1305.png,0,6,26,0,0
345
+ 1_1306.png,78,1,0,0,0
346
+ 1_1307.png,56,1,20,0,0
347
+ 1_1308.png,46,4,28,0,0
348
+ 1_1309.png,0,2,53,0,0
349
+ 1_131.png,0,1,5,0,36
350
+ 1_1310.png,0,4,38,0,0
351
+ 1_1311.png,0,7,24,0,0
352
+ 1_1312.png,24,0,0,1,0
353
+ 1_1313.png,30,2,7,0,0
354
+ 1_1314.png,44,0,0,0,0
355
+ 1_1315.png,43,0,0,0,0
356
+ 1_1316.png,26,0,0,1,0
357
+ 1_1317.png,43,1,0,0,0
358
+ 1_1318.png,18,1,3,0,0
359
+ 1_1319.png,12,11,35,0,0
360
+ 1_132.png,0,1,1,0,45
361
+ 1_1320.png,0,7,18,0,0
362
+ 1_1321.png,0,6,14,0,0
363
+ 1_1322.png,0,10,18,0,0
364
+ 1_1323.png,0,8,35,0,0
365
+ 1_1324.png,0,1,6,0,0
366
+ 1_1325.png,0,0,12,0,0
367
+ 1_1326.png,103,0,0,0,0
368
+ 1_1327.png,47,0,0,0,0
369
+ 1_1328.png,12,0,0,0,0
370
+ 1_1329.png,0,8,11,0,0
371
+ 1_133.png,8,3,3,0,0
372
+ 1_1330.png,0,3,25,0,0
373
+ 1_1331.png,0,2,34,0,0
374
+ 1_1332.png,26,0,4,0,0
375
+ 1_1333.png,32,0,1,0,0
376
+ 1_1334.png,25,1,0,0,0
377
+ 1_1335.png,37,0,0,0,0
378
+ 1_1336.png,25,0,4,0,0
379
+ 1_1337.png,0,3,16,0,0
380
+ 1_1338.png,0,1,9,0,0
381
+ 1_1339.png,0,0,14,0,0
382
+ 1_134.png,12,6,2,0,0
383
+ 1_1340.png,0,1,0,0,0
384
+ 1_1341.png,0,0,2,0,0
385
+ 1_1342.png,8,0,0,0,0
386
+ 1_1343.png,25,0,0,0,0
387
+ 1_1344.png,6,0,3,0,0
388
+ 1_1345.png,12,0,2,0,0
389
+ 1_1346.png,20,0,0,0,0
390
+ 1_1347.png,31,0,0,0,0
391
+ 1_1348.png,33,0,0,0,0
392
+ 1_1349.png,17,0,0,0,0
393
+ 1_135.png,0,0,11,0,17
394
+ 1_1350.png,23,0,0,0,0
395
+ 1_1351.png,15,0,3,0,0
396
+ 1_1352.png,0,0,0,0,8
397
+ 1_1353.png,0,0,0,0,6
398
+ 1_1354.png,0,0,2,0,8
399
+ 1_1355.png,0,0,1,0,1
400
+ 1_1356.png,0,0,5,0,0
401
+ 1_1357.png,0,0,6,0,0
402
+ 1_1358.png,0,0,3,0,0
403
+ 1_1359.png,0,0,15,0,0
404
+ 1_136.png,0,2,5,0,1
405
+ 1_1360.png,0,0,5,0,0
406
+ 1_1361.png,0,0,4,0,0
407
+ 1_1362.png,0,0,7,0,0
408
+ 1_1363.png,0,0,7,0,0
409
+ 1_1364.png,11,0,0,0,0
410
+ 1_1365.png,6,0,1,0,0
411
+ 1_1366.png,4,0,10,0,0
412
+ 1_1367.png,10,0,2,0,0
413
+ 1_1368.png,9,0,0,0,0
414
+ 1_1369.png,0,0,2,0,0
415
+ 1_137.png,0,0,4,0,6
416
+ 1_1370.png,19,0,5,0,0
417
+ 1_1371.png,15,2,0,0,0
418
+ 1_1372.png,17,3,1,0,0
419
+ 1_1373.png,21,0,5,0,0
420
+ 1_1374.png,13,0,0,0,0
421
+ 1_1375.png,10,1,1,0,0
422
+ 1_1376.png,16,0,2,0,0
423
+ 1_1377.png,10,0,0,0,0
424
+ 1_1378.png,9,3,3,0,0
425
+ 1_1379.png,10,1,1,0,0
426
+ 1_138.png,0,1,9,0,1
427
+ 1_1380.png,11,0,1,0,0
428
+ 1_1381.png,11,0,6,0,0
429
+ 1_1382.png,14,2,1,0,0
430
+ 1_1383.png,14,0,4,0,0
431
+ 1_1384.png,11,0,7,0,0
432
+ 1_1385.png,9,0,10,0,0
433
+ 1_1386.png,14,2,9,0,0
434
+ 1_1387.png,16,1,10,0,0
435
+ 1_1388.png,6,0,1,0,0
436
+ 1_1389.png,0,4,1,0,0
437
+ 1_139.png,0,2,11,0,12
438
+ 1_1390.png,0,2,0,0,0
439
+ 1_1391.png,0,5,9,0,0
440
+ 1_1392.png,0,5,4,0,0
441
+ 1_1393.png,4,0,10,0,0
442
+ 1_1394.png,5,1,10,0,0
443
+ 1_1395.png,4,0,6,0,0
444
+ 1_1396.png,0,0,1,0,0
445
+ 1_1397.png,8,0,0,0,0
446
+ 1_1398.png,5,0,5,0,0
447
+ 1_1399.png,0,0,2,0,0
448
+ 1_14.png,10,0,0,0,0
449
+ 1_140.png,0,0,4,0,0
450
+ 1_1400.png,0,0,5,0,0
451
+ 1_1401.png,0,0,8,0,0
452
+ 1_1402.png,0,0,1,0,0
453
+ 1_1403.png,0,0,0,0,0
454
+ 1_1404.png,0,0,0,0,0
455
+ 1_1405.png,0,7,18,0,0
456
+ 1_1406.png,0,6,21,0,11
457
+ 1_1407.png,0,12,10,0,11
458
+ 1_1408.png,0,1,16,0,12
459
+ 1_1409.png,0,18,16,0,11
460
+ 1_141.png,2,0,5,0,0
461
+ 1_1410.png,0,7,13,0,0
462
+ 1_1411.png,0,9,18,0,0
463
+ 1_1412.png,0,4,17,0,0
464
+ 1_1413.png,1,2,12,0,0
465
+ 1_1414.png,1,1,7,0,0
466
+ 1_1415.png,2,5,10,0,0
467
+ 1_1416.png,7,2,6,0,0
468
+ 1_1417.png,3,0,16,0,0
469
+ 1_1418.png,0,1,23,0,0
470
+ 1_1419.png,0,0,6,0,0
471
+ 1_142.png,4,0,3,0,0
472
+ 1_1420.png,0,0,10,0,0
473
+ 1_1421.png,0,3,4,0,8
474
+ 1_1422.png,0,3,1,0,13
475
+ 1_1423.png,0,1,5,0,11
476
+ 1_1424.png,0,0,11,0,0
477
+ 1_1425.png,0,5,5,0,0
478
+ 1_1426.png,0,2,17,0,0
479
+ 1_1427.png,0,15,15,0,0
480
+ 1_1428.png,0,5,22,0,0
481
+ 1_1429.png,0,5,18,0,0
482
+ 1_143.png,2,1,1,0,0
483
+ 1_1430.png,15,0,9,0,0
484
+ 1_1431.png,16,2,14,0,0
485
+ 1_1432.png,21,1,15,0,0
486
+ 1_1433.png,22,1,8,0,0
487
+ 1_1434.png,0,0,0,0,0
488
+ 1_1435.png,0,0,0,0,0
489
+ 1_1436.png,0,0,0,0,0
490
+ 1_1437.png,18,9,15,0,0
491
+ 1_1438.png,20,6,9,0,0
492
+ 1_1439.png,26,1,0,0,0
493
+ 1_144.png,2,0,1,0,0
494
+ 1_1440.png,0,0,0,0,0
495
+ 1_1441.png,0,0,3,0,0
496
+ 1_1442.png,0,0,2,0,0
497
+ 1_1443.png,0,2,4,0,0
498
+ 1_1444.png,0,0,2,0,0
499
+ 1_1445.png,7,9,5,0,0
500
+ 1_1446.png,7,30,13,0,0
501
+ 1_1447.png,5,1,11,0,0
502
+ 1_1448.png,1,36,15,0,0
503
+ 1_1449.png,0,2,16,0,0
504
+ 1_145.png,2,2,3,0,0
505
+ 1_1450.png,0,2,7,0,11
506
+ 1_1451.png,0,6,10,0,0
507
+ 1_1452.png,0,2,8,0,12
508
+ 1_1453.png,8,4,10,0,0
509
+ 1_1454.png,19,1,4,0,0
510
+ 1_1455.png,15,0,10,0,0
511
+ 1_1456.png,22,2,2,0,0
512
+ 1_1457.png,20,0,3,0,0
513
+ 1_1458.png,7,1,23,0,0
514
+ 1_1459.png,20,1,2,0,0
515
+ 1_146.png,2,1,0,0,0
516
+ 1_1460.png,0,0,19,0,0
517
+ 1_1461.png,0,4,31,0,0
518
+ 1_1462.png,9,5,23,0,0
519
+ 1_1463.png,0,3,28,0,0
520
+ 1_1464.png,0,3,23,0,0
521
+ 1_1465.png,0,8,22,0,0
522
+ 1_1466.png,0,3,6,0,0
523
+ 1_1467.png,0,1,17,0,0
524
+ 1_1468.png,0,8,19,0,0
525
+ 1_1469.png,0,4,12,0,0
526
+ 1_147.png,0,0,7,0,35
527
+ 1_1470.png,0,0,0,0,0
528
+ 1_1471.png,0,0,0,0,0
529
+ 1_1472.png,0,0,0,0,0
530
+ 1_1473.png,0,0,0,0,0
531
+ 1_1474.png,0,0,0,0,0
532
+ 1_1475.png,0,0,0,0,0
533
+ 1_1476.png,0,0,0,0,0
534
+ 1_1477.png,0,0,0,0,0
535
+ 1_1478.png,0,0,0,0,0
536
+ 1_1479.png,0,2,12,0,0
537
+ 1_148.png,0,0,12,0,2
538
+ 1_1480.png,0,0,23,0,0
539
+ 1_1481.png,0,0,22,0,0
540
+ 1_1482.png,0,1,21,0,0
541
+ 1_1483.png,0,1,20,0,0
542
+ 1_1484.png,0,0,15,0,0
543
+ 1_1485.png,0,2,14,0,0
544
+ 1_1486.png,0,1,27,0,0
545
+ 1_1487.png,0,2,20,0,0
546
+ 1_1488.png,23,0,0,0,0
547
+ 1_1489.png,7,2,14,0,0
548
+ 1_149.png,0,0,9,0,4
549
+ 1_1490.png,1,2,29,0,0
550
+ 1_1491.png,0,6,29,0,0
551
+ 1_1492.png,0,4,38,0,0
552
+ 1_1493.png,30,0,0,0,0
553
+ 1_1494.png,10,3,29,0,0
554
+ 1_1495.png,34,1,0,0,0
555
+ 1_1496.png,30,0,0,0,0
556
+ 1_1497.png,26,0,2,0,0
557
+ 1_1498.png,17,0,10,0,0
558
+ 1_1499.png,30,0,0,0,0
559
+ 1_15.png,11,0,1,0,0
560
+ 1_150.png,0,4,7,0,18
561
+ 1_1500.png,23,0,8,0,0
562
+ 1_1501.png,16,5,17,0,0
563
+ 1_1502.png,27,1,4,0,0
564
+ 1_1503.png,28,0,0,0,0
565
+ 1_1504.png,0,0,3,0,0
566
+ 1_1505.png,0,0,2,0,0
567
+ 1_1506.png,0,0,7,0,0
568
+ 1_1507.png,0,9,18,0,0
569
+ 1_1508.png,11,4,4,0,0
570
+ 1_1509.png,14,2,1,0,0
571
+ 1_151.png,0,0,3,0,34
572
+ 1_1510.png,14,3,3,0,0
573
+ 1_1511.png,10,1,11,0,0
574
+ 1_1512.png,23,2,4,0,0
575
+ 1_1513.png,13,3,4,0,0
576
+ 1_1514.png,1,0,8,0,0
577
+ 1_1515.png,36,0,6,0,0
578
+ 1_1516.png,44,1,1,0,0
579
+ 1_1517.png,8,7,8,0,0
580
+ 1_1518.png,25,0,0,0,0
581
+ 1_1519.png,0,1,2,0,0
582
+ 1_152.png,0,0,2,0,50
583
+ 1_1520.png,0,1,0,0,0
584
+ 1_1521.png,0,0,1,0,0
585
+ 1_1522.png,0,1,0,0,0
586
+ 1_1523.png,0,2,3,0,0
587
+ 1_1524.png,0,0,0,0,0
588
+ 1_1525.png,0,1,3,0,0
589
+ 1_1526.png,0,0,0,0,0
590
+ 1_1527.png,15,1,0,0,0
591
+ 1_1528.png,14,0,1,0,0
592
+ 1_1529.png,21,0,0,0,0
593
+ 1_153.png,0,2,1,0,66
594
+ 1_1530.png,14,0,0,0,0
595
+ 1_1531.png,21,0,0,0,0
596
+ 1_1532.png,0,2,8,0,0
597
+ 1_1533.png,0,0,6,0,0
598
+ 1_1534.png,0,0,10,0,0
599
+ 1_1535.png,0,0,5,0,0
600
+ 1_1536.png,0,1,6,0,0
601
+ 1_1537.png,0,0,7,0,0
602
+ 1_1538.png,0,0,0,0,0
603
+ 1_1539.png,0,0,0,0,0
604
+ 1_154.png,0,3,3,0,0
605
+ 1_1540.png,0,0,0,0,0
606
+ 1_1541.png,0,0,0,0,0
607
+ 1_1542.png,0,0,0,0,0
608
+ 1_1543.png,0,0,0,0,0
609
+ 1_1544.png,0,0,0,0,0
610
+ 1_1545.png,0,0,0,0,0
611
+ 1_1546.png,0,0,0,0,0
612
+ 1_1547.png,82,0,1,0,0
613
+ 1_1548.png,80,0,4,0,0
614
+ 1_1549.png,88,3,3,0,0
615
+ 1_155.png,0,3,6,6,0
616
+ 1_1550.png,84,2,6,0,0
617
+ 1_1551.png,52,0,12,0,0
618
+ 1_1552.png,89,0,5,0,0
619
+ 1_1553.png,85,2,7,0,0
620
+ 1_1554.png,26,0,4,0,0
621
+ 1_1555.png,13,0,11,0,0
622
+ 1_1556.png,12,0,11,0,0
623
+ 1_1557.png,24,0,2,0,0
624
+ 1_1558.png,0,0,0,0,0
625
+ 1_1559.png,0,0,0,0,0
626
+ 1_156.png,1,1,3,0,0
627
+ 1_1560.png,0,0,0,0,0
628
+ 1_1561.png,24,4,0,0,0
629
+ 1_1562.png,18,0,0,0,0
630
+ 1_1563.png,32,0,0,0,0
631
+ 1_1564.png,0,0,6,0,0
632
+ 1_1565.png,0,2,12,0,0
633
+ 1_1566.png,2,3,10,0,0
634
+ 1_1567.png,9,4,14,0,0
635
+ 1_1568.png,25,1,1,0,0
636
+ 1_1569.png,25,9,0,0,0
637
+ 1_157.png,1,1,1,0,0
638
+ 1_1570.png,0,33,35,0,0
639
+ 1_1571.png,6,29,11,0,0
640
+ 1_1572.png,35,1,0,0,0
641
+ 1_1573.png,36,3,0,0,0
642
+ 1_1574.png,33,0,0,0,0
643
+ 1_1575.png,0,0,0,0,9
644
+ 1_1576.png,0,0,3,0,19
645
+ 1_1577.png,0,2,7,0,14
646
+ 1_1578.png,0,6,12,0,9
647
+ 1_1579.png,0,0,0,0,13
648
+ 1_158.png,0,0,8,0,0
649
+ 1_1580.png,0,6,7,0,14
650
+ 1_1581.png,0,3,4,0,8
651
+ 1_1582.png,0,0,0,0,6
652
+ 1_1583.png,0,3,7,0,6
653
+ 1_1584.png,0,2,5,0,10
654
+ 1_1585.png,0,6,6,0,9
655
+ 1_1586.png,0,5,10,0,10
656
+ 1_1587.png,0,10,10,0,10
657
+ 1_1588.png,0,6,9,0,11
658
+ 1_1589.png,1,1,3,0,20
659
+ 1_159.png,0,1,15,0,14
660
+ 1_1590.png,17,14,5,0,0
661
+ 1_1591.png,0,12,4,0,6
662
+ 1_1592.png,0,1,0,0,6
663
+ 1_1593.png,0,4,4,0,12
664
+ 1_1594.png,0,9,5,0,11
665
+ 1_1595.png,0,0,1,0,8
666
+ 1_1596.png,0,0,0,0,1
667
+ 1_1597.png,5,8,4,0,3
668
+ 1_1598.png,0,3,1,0,13
669
+ 1_1599.png,0,1,5,0,15
670
+ 1_16.png,3,3,4,0,0
671
+ 1_160.png,0,0,4,0,56
672
+ 1_1600.png,0,0,0,0,0
673
+ 1_1601.png,0,0,0,0,0
674
+ 1_1602.png,0,11,13,0,1
675
+ 1_1603.png,0,2,0,0,7
676
+ 1_1604.png,0,3,5,2,13
677
+ 1_1605.png,0,5,2,1,0
678
+ 1_1606.png,0,1,0,1,4
679
+ 1_1607.png,0,0,0,0,0
680
+ 1_1608.png,3,2,4,1,0
681
+ 1_1609.png,0,1,7,0,8
682
+ 1_161.png,0,5,17,0,23
683
+ 1_1610.png,0,1,6,0,8
684
+ 1_1611.png,0,0,10,0,4
685
+ 1_1612.png,0,0,5,0,1
686
+ 1_1613.png,0,0,4,0,7
687
+ 1_1614.png,0,0,0,0,8
688
+ 1_1615.png,0,1,6,0,7
689
+ 1_1616.png,0,0,3,0,10
690
+ 1_1617.png,0,0,1,0,7
691
+ 1_1618.png,0,0,9,0,4
692
+ 1_1619.png,0,0,10,0,5
693
+ 1_162.png,0,1,17,0,5
694
+ 1_1620.png,0,0,2,0,0
695
+ 1_1621.png,0,0,3,0,0
696
+ 1_1622.png,2,0,0,0,0
697
+ 1_1623.png,9,0,8,0,0
698
+ 1_1624.png,0,0,0,0,1
699
+ 1_1625.png,0,6,1,0,6
700
+ 1_1626.png,0,11,5,0,1
701
+ 1_1627.png,0,2,3,0,13
702
+ 1_1628.png,0,8,6,0,2
703
+ 1_1629.png,0,3,11,0,6
704
+ 1_163.png,0,0,16,0,18
705
+ 1_1630.png,0,4,10,0,10
706
+ 1_1631.png,0,0,0,0,6
707
+ 1_1632.png,0,9,9,0,3
708
+ 1_1633.png,0,1,2,0,15
709
+ 1_1634.png,0,0,1,0,14
710
+ 1_1635.png,0,0,1,0,27
711
+ 1_1636.png,0,1,2,0,16
712
+ 1_1637.png,0,7,16,0,4
713
+ 1_1638.png,0,1,0,0,13
714
+ 1_1639.png,0,0,1,0,21
715
+ 1_164.png,0,1,9,0,13
716
+ 1_1640.png,0,2,6,0,5
717
+ 1_1641.png,0,1,5,0,7
718
+ 1_1642.png,0,0,9,0,0
719
+ 1_1643.png,0,0,8,0,0
720
+ 1_1644.png,0,0,0,0,33
721
+ 1_1645.png,0,6,19,0,17
722
+ 1_1646.png,0,2,3,0,14
723
+ 1_1647.png,0,1,8,0,12
724
+ 1_1648.png,0,6,22,0,1
725
+ 1_1649.png,0,0,0,0,13
726
+ 1_165.png,0,3,10,0,12
727
+ 1_1650.png,0,0,0,0,4
728
+ 1_1651.png,0,0,0,0,0
729
+ 1_1652.png,0,0,0,0,13
730
+ 1_1653.png,0,0,0,0,0
731
+ 1_1654.png,0,1,4,0,17
732
+ 1_1655.png,0,3,24,0,3
733
+ 1_1656.png,0,10,25,0,0
734
+ 1_1657.png,0,0,0,0,0
735
+ 1_1658.png,0,0,0,0,3
736
+ 1_1659.png,1,2,6,0,21
737
+ 1_166.png,0,0,0,0,52
738
+ 1_1660.png,0,0,0,0,0
739
+ 1_1661.png,0,0,0,0,0
740
+ 1_1662.png,0,0,0,0,0
741
+ 1_1663.png,0,1,3,0,11
742
+ 1_1664.png,0,7,4,0,9
743
+ 1_1665.png,0,0,0,0,0
744
+ 1_1666.png,0,12,3,0,4
745
+ 1_1667.png,0,8,0,0,17
746
+ 1_1668.png,0,6,3,0,11
747
+ 1_1669.png,0,0,0,0,16
748
+ 1_167.png,0,0,0,0,53
749
+ 1_1670.png,0,6,2,0,11
750
+ 1_1671.png,0,9,4,0,17
751
+ 1_1672.png,0,0,0,0,7
752
+ 1_1673.png,0,16,5,0,6
753
+ 1_1674.png,0,0,0,0,14
754
+ 1_1675.png,0,7,5,0,11
755
+ 1_1676.png,0,10,1,0,9
756
+ 1_1677.png,0,6,0,0,11
757
+ 1_1678.png,0,0,0,0,2
758
+ 1_1679.png,0,0,0,0,9
759
+ 1_168.png,0,0,3,0,40
760
+ 1_1680.png,0,8,2,0,9
761
+ 1_1681.png,0,7,6,0,3
762
+ 1_1682.png,0,16,3,0,13
763
+ 1_1683.png,0,4,6,0,4
764
+ 1_1684.png,0,0,0,0,0
765
+ 1_1685.png,0,0,0,0,0
766
+ 1_1686.png,0,0,0,0,0
767
+ 1_1687.png,0,0,0,0,0
768
+ 1_1688.png,0,0,0,0,0
769
+ 1_1689.png,0,0,0,0,0
770
+ 1_169.png,0,2,10,0,20
771
+ 1_1690.png,0,0,0,0,0
772
+ 1_1691.png,0,0,0,0,0
773
+ 1_1692.png,0,0,0,0,0
774
+ 1_1693.png,0,0,0,0,0
775
+ 1_1694.png,0,0,0,0,0
776
+ 1_1695.png,0,0,0,0,0
777
+ 1_1696.png,0,0,0,0,0
778
+ 1_1697.png,0,0,0,0,0
779
+ 1_1698.png,0,0,0,0,0
780
+ 1_1699.png,0,0,0,0,0
781
+ 1_17.png,13,2,0,0,0
782
+ 1_170.png,0,0,7,0,12
783
+ 1_1700.png,0,0,0,0,0
784
+ 1_1701.png,0,0,0,0,0
785
+ 1_1702.png,0,0,0,0,0
786
+ 1_1703.png,0,20,1,0,9
787
+ 1_1704.png,0,30,0,0,2
788
+ 1_1705.png,0,29,0,0,1
789
+ 1_1706.png,0,10,1,0,11
790
+ 1_1707.png,0,1,0,0,7
791
+ 1_1708.png,0,1,0,0,19
792
+ 1_1709.png,0,0,0,0,10
793
+ 1_171.png,0,0,5,0,0
794
+ 1_1710.png,0,1,0,0,23
795
+ 1_1711.png,0,25,0,0,16
796
+ 1_1712.png,0,3,0,0,25
797
+ 1_1713.png,0,9,1,0,14
798
+ 1_1714.png,0,18,0,0,12
799
+ 1_1715.png,0,1,0,0,2
800
+ 1_1716.png,0,13,1,0,10
801
+ 1_1717.png,0,20,0,0,9
802
+ 1_1718.png,0,9,2,0,14
803
+ 1_1719.png,0,7,7,0,4
804
+ 1_172.png,0,1,1,0,0
805
+ 1_1720.png,0,6,0,0,11
806
+ 1_1721.png,0,11,0,0,14
807
+ 1_1722.png,0,5,6,0,3
808
+ 1_1723.png,0,5,1,0,22
809
+ 1_1724.png,0,19,2,0,9
810
+ 1_1725.png,0,6,6,0,2
811
+ 1_1726.png,0,1,0,0,15
812
+ 1_1727.png,0,8,3,0,16
813
+ 1_1728.png,0,19,0,0,1
814
+ 1_1729.png,0,5,1,0,20
815
+ 1_173.png,33,0,1,0,0
816
+ 1_1730.png,0,0,0,0,10
817
+ 1_1731.png,0,14,10,0,0
818
+ 1_1732.png,0,0,2,0,17
819
+ 1_1733.png,0,2,9,0,7
820
+ 1_1734.png,0,0,0,0,1
821
+ 1_1735.png,0,15,6,0,0
822
+ 1_1736.png,0,4,11,0,5
823
+ 1_1737.png,0,3,10,0,1
824
+ 1_1738.png,0,0,0,0,0
825
+ 1_1739.png,0,8,4,0,8
826
+ 1_174.png,11,1,0,0,0
827
+ 1_1740.png,0,0,1,0,14
828
+ 1_1741.png,0,0,0,0,0
829
+ 1_1742.png,0,0,0,0,0
830
+ 1_1743.png,0,0,0,0,0
831
+ 1_1744.png,0,10,5,0,2
832
+ 1_1745.png,0,15,8,0,0
833
+ 1_1746.png,0,0,0,0,0
834
+ 1_1747.png,0,13,6,0,0
835
+ 1_1748.png,0,2,5,0,4
836
+ 1_1749.png,0,0,0,0,0
837
+ 1_175.png,0,3,5,0,20
838
+ 1_1750.png,0,0,0,0,11
839
+ 1_1751.png,0,0,2,0,13
840
+ 1_1752.png,0,0,0,0,4
841
+ 1_1753.png,0,0,10,0,0
842
+ 1_1754.png,0,2,20,0,0
843
+ 1_1755.png,0,3,32,0,0
844
+ 1_1756.png,0,0,19,0,0
845
+ 1_1757.png,0,0,13,0,0
846
+ 1_1758.png,0,0,18,0,0
847
+ 1_1759.png,0,0,13,0,0
848
+ 1_176.png,0,0,6,0,0
849
+ 1_1760.png,0,0,21,0,0
850
+ 1_1761.png,0,0,17,0,0
851
+ 1_1762.png,0,1,19,0,0
852
+ 1_1763.png,0,4,15,0,0
853
+ 1_1764.png,0,0,6,0,0
854
+ 1_1765.png,0,0,10,0,0
855
+ 1_1766.png,0,0,9,0,0
856
+ 1_1767.png,0,0,20,0,0
857
+ 1_1768.png,0,0,10,0,0
858
+ 1_1769.png,0,0,11,0,0
859
+ 1_177.png,0,4,3,0,30
860
+ 1_1770.png,0,0,11,0,0
861
+ 1_1771.png,0,0,6,0,0
862
+ 1_1772.png,0,0,10,0,0
863
+ 1_1773.png,0,0,11,0,0
864
+ 1_1774.png,0,0,19,0,0
865
+ 1_1775.png,0,1,7,0,0
866
+ 1_1776.png,0,1,11,0,0
867
+ 1_1777.png,0,0,2,0,7
868
+ 1_1778.png,0,0,6,0,6
869
+ 1_1779.png,0,1,11,0,11
870
+ 1_178.png,0,4,19,0,36
871
+ 1_1780.png,0,3,0,0,1
872
+ 1_1781.png,0,0,6,0,10
873
+ 1_1782.png,0,0,21,0,10
874
+ 1_1783.png,0,1,9,0,11
875
+ 1_1784.png,0,0,1,0,14
876
+ 1_1785.png,0,1,10,0,7
877
+ 1_1786.png,0,0,21,0,8
878
+ 1_1787.png,0,0,4,0,9
879
+ 1_1788.png,0,0,0,0,17
880
+ 1_1789.png,0,0,7,0,11
881
+ 1_179.png,0,0,2,0,39
882
+ 1_1790.png,0,0,6,0,11
883
+ 1_1791.png,0,0,5,0,15
884
+ 1_1792.png,0,0,23,0,4
885
+ 1_1793.png,0,1,7,0,15
886
+ 1_1794.png,0,0,0,0,9
887
+ 1_1795.png,0,0,10,0,21
888
+ 1_1796.png,0,0,2,0,7
889
+ 1_1797.png,0,0,10,0,10
890
+ 1_1798.png,0,0,10,0,7
891
+ 1_1799.png,0,0,5,0,24
892
+ 1_18.png,11,0,2,0,0
893
+ 1_180.png,0,0,6,0,5
894
+ 1_1800.png,0,0,6,0,0
895
+ 1_1801.png,0,0,10,0,0
896
+ 1_1802.png,0,0,6,0,1
897
+ 1_1803.png,0,0,9,0,5
898
+ 1_1804.png,0,1,11,0,4
899
+ 1_1805.png,0,1,9,0,2
900
+ 1_1806.png,27,0,0,0,0
901
+ 1_1807.png,22,0,0,0,0
902
+ 1_1808.png,11,0,0,6,0
903
+ 1_1809.png,23,1,0,0,0
904
+ 1_181.png,0,1,16,0,17
905
+ 1_1810.png,28,0,0,0,0
906
+ 1_1811.png,2,4,11,0,0
907
+ 1_1812.png,13,4,4,0,0
908
+ 1_1813.png,28,2,2,0,0
909
+ 1_1814.png,27,0,0,0,0
910
+ 1_1815.png,42,0,1,0,0
911
+ 1_1816.png,6,0,20,0,0
912
+ 1_1817.png,0,1,0,18,0
913
+ 1_1818.png,20,1,2,0,0
914
+ 1_1819.png,13,0,3,0,0
915
+ 1_182.png,0,2,16,0,12
916
+ 1_1820.png,17,1,3,0,0
917
+ 1_1821.png,11,0,0,23,0
918
+ 1_1822.png,30,1,0,0,0
919
+ 1_1823.png,36,2,5,0,0
920
+ 1_1824.png,0,7,12,0,0
921
+ 1_1825.png,6,5,7,0,0
922
+ 1_1826.png,12,2,6,0,0
923
+ 1_1827.png,16,3,12,0,0
924
+ 1_1828.png,24,1,14,0,0
925
+ 1_1829.png,3,1,15,0,0
926
+ 1_183.png,0,3,15,0,16
927
+ 1_1830.png,0,2,12,0,0
928
+ 1_1831.png,0,2,18,0,0
929
+ 1_1832.png,0,2,21,0,0
930
+ 1_1833.png,0,1,11,0,0
931
+ 1_1834.png,0,2,20,0,0
932
+ 1_1835.png,19,2,4,0,0
933
+ 1_1836.png,0,6,18,0,0
934
+ 1_1837.png,5,6,11,0,0
935
+ 1_1838.png,5,4,11,0,0
936
+ 1_1839.png,0,4,20,1,0
937
+ 1_184.png,0,0,2,0,7
938
+ 1_1840.png,9,8,12,0,0
939
+ 1_1841.png,0,4,17,0,0
940
+ 1_1842.png,0,0,18,0,0
941
+ 1_1843.png,0,0,21,0,0
942
+ 1_1844.png,0,0,10,0,0
943
+ 1_1845.png,0,0,0,0,0
944
+ 1_1846.png,0,0,0,0,0
945
+ 1_1847.png,0,0,0,0,0
946
+ 1_1848.png,0,0,0,0,0
947
+ 1_1849.png,0,0,0,0,0
948
+ 1_185.png,0,0,11,0,9
949
+ 1_1850.png,0,0,0,0,0
950
+ 1_1851.png,0,0,0,0,0
951
+ 1_1852.png,0,0,0,0,0
952
+ 1_1853.png,0,0,0,0,0
953
+ 1_1854.png,14,0,6,0,0
954
+ 1_1855.png,15,1,1,0,0
955
+ 1_1856.png,9,1,4,0,0
956
+ 1_1857.png,15,0,1,0,0
957
+ 1_1858.png,0,0,19,0,0
958
+ 1_1859.png,0,0,11,0,0
959
+ 1_186.png,0,0,1,0,23
960
+ 1_1860.png,0,0,13,0,0
961
+ 1_1861.png,0,0,13,0,0
962
+ 1_1862.png,2,0,26,0,0
963
+ 1_1863.png,0,1,32,0,0
964
+ 1_1864.png,19,1,0,0,0
965
+ 1_1865.png,0,3,11,0,0
966
+ 1_1866.png,13,1,3,0,0
967
+ 1_1867.png,0,6,19,0,0
968
+ 1_1868.png,0,12,27,0,0
969
+ 1_1869.png,0,13,20,0,0
970
+ 1_187.png,0,0,2,0,26
971
+ 1_1870.png,14,4,16,0,0
972
+ 1_1871.png,0,19,21,0,0
973
+ 1_1872.png,0,17,20,0,0
974
+ 1_1873.png,9,8,9,0,0
975
+ 1_1874.png,12,1,9,0,0
976
+ 1_1875.png,0,17,10,0,0
977
+ 1_1876.png,0,0,0,0,0
978
+ 1_1877.png,0,38,13,0,0
979
+ 1_1878.png,2,37,5,0,0
980
+ 1_1879.png,3,13,2,0,0
981
+ 1_188.png,0,1,13,0,11
982
+ 1_1880.png,2,3,0,0,0
983
+ 1_1881.png,21,32,2,0,0
984
+ 1_1882.png,14,27,1,0,0
985
+ 1_1883.png,19,0,0,0,0
986
+ 1_1884.png,15,0,0,0,0
987
+ 1_1885.png,14,0,0,0,0
988
+ 1_1886.png,20,0,1,0,0
989
+ 1_1887.png,15,0,2,0,0
990
+ 1_1888.png,8,0,2,0,0
991
+ 1_1889.png,0,0,0,0,0
992
+ 1_189.png,0,0,3,0,17
993
+ 1_1890.png,8,0,6,0,0
994
+ 1_1891.png,2,0,4,0,0
995
+ 1_1892.png,9,0,2,0,0
996
+ 1_1893.png,10,0,0,0,0
997
+ 1_1894.png,0,85,0,0,0
998
+ 1_1895.png,0,88,11,0,0
999
+ 1_1896.png,6,57,3,0,0
1000
+ 1_1897.png,10,1,8,0,0
1001
+ 1_1898.png,18,1,2,0,0
1002
+ 1_1899.png,13,0,5,0,0
1003
+ 1_19.png,9,1,1,0,0
1004
+ 1_190.png,0,0,9,0,21
1005
+ 1_1900.png,17,6,0,0,0
1006
+ 1_1901.png,0,2,4,0,0
1007
+ 1_1902.png,0,4,11,0,0
1008
+ 1_1903.png,0,1,7,0,0
1009
+ 1_1904.png,0,1,13,0,0
1010
+ 1_1905.png,0,3,5,0,0
1011
+ 1_1906.png,0,1,5,0,0
1012
+ 1_1907.png,0,3,12,0,0
1013
+ 1_1908.png,16,2,11,0,0
1014
+ 1_1909.png,1,1,11,0,0
1015
+ 1_191.png,0,2,5,0,19
1016
+ 1_1910.png,30,0,5,0,0
1017
+ 1_1911.png,0,1,11,0,0
1018
+ 1_1912.png,0,2,9,0,0
1019
+ 1_1913.png,0,4,9,0,0
1020
+ 1_1914.png,0,7,15,0,0
1021
+ 1_1915.png,0,1,14,0,0
1022
+ 1_1916.png,0,6,24,0,0
1023
+ 1_1917.png,0,7,0,0,0
1024
+ 1_1918.png,0,4,8,0,0
1025
+ 1_1919.png,0,1,4,0,0
1026
+ 1_192.png,0,0,1,0,39
1027
+ 1_1920.png,0,0,0,0,8
1028
+ 1_1921.png,0,0,0,0,0
1029
+ 1_1922.png,0,0,0,0,12
1030
+ 1_1923.png,0,0,0,0,1
1031
+ 1_1924.png,0,0,0,0,2
1032
+ 1_1925.png,0,0,0,0,1
1033
+ 1_1926.png,0,0,0,0,7
1034
+ 1_1927.png,0,0,0,0,11
1035
+ 1_1928.png,0,0,0,0,0
1036
+ 1_1929.png,0,0,0,0,0
1037
+ 1_193.png,0,3,4,0,33
1038
+ 1_1930.png,0,0,0,0,0
1039
+ 1_1931.png,0,0,0,0,0
1040
+ 1_1932.png,0,0,0,0,0
1041
+ 1_1933.png,0,0,0,0,0
1042
+ 1_1934.png,0,0,5,0,0
1043
+ 1_1935.png,0,0,4,0,0
1044
+ 1_1936.png,0,0,0,0,0
1045
+ 1_1937.png,0,0,4,0,0
1046
+ 1_1938.png,0,0,3,0,0
1047
+ 1_1939.png,0,0,1,0,0
1048
+ 1_194.png,0,1,14,0,3
1049
+ 1_1940.png,0,0,1,0,0
1050
+ 1_1941.png,0,0,20,0,0
1051
+ 1_1942.png,0,0,17,0,0
1052
+ 1_1943.png,0,0,11,0,0
1053
+ 1_1944.png,0,1,21,0,0
1054
+ 1_1945.png,0,0,20,0,0
1055
+ 1_1946.png,0,0,15,0,0
1056
+ 1_1947.png,0,0,29,0,0
1057
+ 1_1948.png,0,0,35,0,0
1058
+ 1_1949.png,0,0,21,0,0
1059
+ 1_195.png,0,2,12,0,24
1060
+ 1_1950.png,0,0,18,0,0
1061
+ 1_1951.png,0,0,30,0,0
1062
+ 1_1952.png,0,0,32,0,0
1063
+ 1_1953.png,0,0,1,0,3
1064
+ 1_1954.png,0,1,2,0,0
1065
+ 1_1955.png,0,0,0,0,3
1066
+ 1_1956.png,0,0,2,0,0
1067
+ 1_1957.png,0,1,5,0,0
1068
+ 1_1958.png,0,1,5,0,2
1069
+ 1_1959.png,0,0,4,0,2
1070
+ 1_196.png,0,1,1,0,12
1071
+ 1_1960.png,0,0,3,0,0
1072
+ 1_1961.png,0,0,7,0,3
1073
+ 1_1962.png,30,1,5,0,0
1074
+ 1_1963.png,25,1,13,0,0
1075
+ 1_1964.png,20,1,13,0,0
1076
+ 1_1965.png,0,0,6,0,0
1077
+ 1_1966.png,0,1,12,0,0
1078
+ 1_1967.png,0,1,11,0,0
1079
+ 1_1968.png,0,1,11,0,0
1080
+ 1_1969.png,45,0,0,0,0
1081
+ 1_197.png,0,1,9,0,20
1082
+ 1_1970.png,27,0,0,0,0
1083
+ 1_1971.png,33,0,3,0,0
1084
+ 1_1972.png,9,0,13,0,0
1085
+ 1_1973.png,7,0,15,0,0
1086
+ 1_1974.png,20,0,3,0,0
1087
+ 1_1975.png,23,0,2,0,0
1088
+ 1_1976.png,22,0,0,0,0
1089
+ 1_1977.png,20,7,3,0,0
1090
+ 1_1978.png,0,23,10,0,0
1091
+ 1_1979.png,5,37,11,0,0
1092
+ 1_198.png,0,1,1,0,41
1093
+ 1_1980.png,5,20,8,0,0
1094
+ 1_1981.png,1,16,10,0,0
1095
+ 1_1982.png,18,0,0,0,0
1096
+ 1_1983.png,21,2,0,0,0
1097
+ 1_1984.png,40,3,2,0,0
1098
+ 1_1985.png,17,0,11,0,0
1099
+ 1_1986.png,33,2,0,0,0
1100
+ 1_1987.png,23,3,0,0,0
1101
+ 1_1988.png,19,1,6,0,0
1102
+ 1_1989.png,15,1,9,0,0
1103
+ 1_199.png,0,0,6,0,17
1104
+ 1_1990.png,17,0,6,0,0
1105
+ 1_1991.png,52,2,0,0,0
1106
+ 1_1992.png,4,10,0,5,0
1107
+ 1_1993.png,13,10,0,6,0
1108
+ 1_1994.png,1,25,0,9,0
1109
+ 1_1995.png,33,3,0,3,0
1110
+ 1_1996.png,0,8,0,8,0
1111
+ 1_1997.png,5,12,0,3,0
1112
+ 1_1998.png,0,0,0,0,0
1113
+ 1_1999.png,0,0,0,0,0
1114
+ 1_2.png,6,0,3,0,0
1115
+ 1_20.png,2,0,3,0,0
1116
+ 1_200.png,0,1,7,0,47
1117
+ 1_2000.png,0,0,0,0,0
1118
+ 1_2001.png,0,0,1,0,0
1119
+ 1_2002.png,0,2,1,0,0
1120
+ 1_2003.png,0,0,0,0,0
1121
+ 1_2004.png,0,0,0,0,0
1122
+ 1_2005.png,0,0,0,0,0
1123
+ 1_2006.png,0,0,1,0,0
1124
+ 1_2007.png,27,0,8,0,0
1125
+ 1_2008.png,25,2,10,0,0
1126
+ 1_2009.png,2,16,28,0,0
1127
+ 1_201.png,0,0,7,0,22
1128
+ 1_2010.png,8,4,17,0,0
1129
+ 1_2011.png,14,7,23,0,0
1130
+ 1_2012.png,20,1,12,0,0
1131
+ 1_2013.png,38,1,0,0,0
1132
+ 1_2014.png,15,0,7,0,0
1133
+ 1_2015.png,9,0,4,0,0
1134
+ 1_2016.png,18,0,2,0,0
1135
+ 1_2017.png,20,0,1,0,0
1136
+ 1_2018.png,18,0,4,0,0
1137
+ 1_2019.png,0,0,0,0,0
1138
+ 1_202.png,0,0,1,0,23
1139
+ 1_2020.png,0,1,0,0,0
1140
+ 1_2021.png,0,0,0,0,0
1141
+ 1_2022.png,0,5,0,0,0
1142
+ 1_2023.png,0,1,0,0,0
1143
+ 1_2024.png,0,3,0,0,0
1144
+ 1_2025.png,0,0,13,0,0
1145
+ 1_2026.png,0,0,4,0,0
1146
+ 1_2027.png,0,0,7,0,0
1147
+ 1_2028.png,0,0,11,0,0
1148
+ 1_2029.png,0,0,13,0,0
1149
+ 1_203.png,0,0,4,0,7
1150
+ 1_2030.png,19,7,8,0,0
1151
+ 1_2031.png,30,3,4,0,0
1152
+ 1_2032.png,18,2,7,0,0
1153
+ 1_2033.png,24,2,6,0,0
1154
+ 1_2034.png,23,0,6,0,0
1155
+ 1_2035.png,24,2,2,0,0
1156
+ 1_2036.png,0,0,2,0,2
1157
+ 1_2037.png,0,0,2,0,5
1158
+ 1_2038.png,0,0,9,0,13
1159
+ 1_2039.png,0,0,6,0,5
1160
+ 1_204.png,0,0,3,0,33
1161
+ 1_2040.png,0,0,2,0,7
1162
+ 1_2041.png,0,2,1,0,10
1163
+ 1_2042.png,10,1,17,0,0
1164
+ 1_2043.png,9,2,4,0,0
1165
+ 1_2044.png,10,2,1,0,0
1166
+ 1_2045.png,6,0,3,0,0
1167
+ 1_2046.png,11,0,2,0,0
1168
+ 1_2047.png,11,0,2,0,0
1169
+ 1_2048.png,9,0,1,0,0
1170
+ 1_2049.png,9,0,1,0,0
1171
+ 1_205.png,0,2,16,0,17
1172
+ 1_2050.png,9,0,0,0,0
1173
+ 1_2051.png,0,1,8,0,0
1174
+ 1_2052.png,0,3,5,0,0
1175
+ 1_2053.png,0,3,10,0,0
1176
+ 1_2054.png,0,3,13,0,0
1177
+ 1_2055.png,0,1,8,0,0
1178
+ 1_2056.png,0,3,5,0,0
1179
+ 1_2057.png,0,1,6,0,0
1180
+ 1_2058.png,0,2,9,0,0
1181
+ 1_2059.png,0,2,11,0,0
1182
+ 1_206.png,0,0,1,0,68
1183
+ 1_2060.png,1,6,20,0,0
1184
+ 1_2061.png,14,0,0,0,0
1185
+ 1_2062.png,0,1,23,0,0
1186
+ 1_2063.png,6,8,10,0,0
1187
+ 1_2064.png,0,2,2,0,61
1188
+ 1_2065.png,0,3,17,0,50
1189
+ 1_2066.png,0,5,9,0,39
1190
+ 1_2067.png,3,2,8,0,28
1191
+ 1_2068.png,0,1,14,0,23
1192
+ 1_2069.png,0,1,14,0,20
1193
+ 1_207.png,0,1,5,0,17
1194
+ 1_2070.png,0,1,23,0,2
1195
+ 1_2071.png,0,1,0,0,12
1196
+ 1_2072.png,0,0,2,0,10
1197
+ 1_2073.png,33,0,4,0,0
1198
+ 1_2074.png,30,1,6,0,0
1199
+ 1_2075.png,18,1,3,0,0
1200
+ 1_2076.png,4,7,15,0,0
1201
+ 1_2077.png,0,9,18,0,0
1202
+ 1_2078.png,31,1,0,0,0
1203
+ 1_2079.png,26,3,0,0,0
1204
+ 1_208.png,0,0,4,0,25
1205
+ 1_2080.png,0,0,12,0,0
1206
+ 1_2081.png,0,0,10,0,0
1207
+ 1_2082.png,0,1,14,0,0
1208
+ 1_2083.png,17,0,0,0,0
1209
+ 1_2084.png,11,9,4,0,1
1210
+ 1_2085.png,25,3,0,0,0
1211
+ 1_2086.png,0,0,12,0,0
1212
+ 1_2087.png,13,0,7,0,0
1213
+ 1_2088.png,0,2,15,0,0
1214
+ 1_2089.png,5,0,8,0,0
1215
+ 1_209.png,0,0,6,0,34
1216
+ 1_2090.png,0,0,0,0,7
1217
+ 1_2091.png,0,3,1,0,18
1218
+ 1_2092.png,0,0,1,0,17
1219
+ 1_2093.png,0,1,0,0,12
1220
+ 1_2094.png,0,1,2,0,12
1221
+ 1_2095.png,0,2,0,0,19
1222
+ 1_2096.png,0,1,1,0,14
1223
+ 1_2097.png,0,0,1,0,13
1224
+ 1_2098.png,0,2,1,0,15
1225
+ 1_2099.png,0,1,11,0,0
1226
+ 1_21.png,10,0,1,0,0
1227
+ 1_210.png,0,2,6,0,15
1228
+ 1_2100.png,11,0,4,0,0
1229
+ 1_2101.png,5,0,13,0,0
1230
+ 1_2102.png,18,1,3,0,0
1231
+ 1_2103.png,27,1,6,0,0
1232
+ 1_2104.png,29,0,0,0,0
1233
+ 1_2105.png,9,3,10,0,0
1234
+ 1_2106.png,18,0,0,0,0
1235
+ 1_2107.png,17,1,1,0,0
1236
+ 1_2108.png,7,0,0,0,0
1237
+ 1_2109.png,16,0,1,0,0
1238
+ 1_211.png,0,0,9,0,31
1239
+ 1_2110.png,21,1,1,0,0
1240
+ 1_2111.png,23,3,1,0,0
1241
+ 1_2112.png,20,1,1,0,0
1242
+ 1_2113.png,4,2,6,0,0
1243
+ 1_2114.png,18,2,1,0,0
1244
+ 1_2115.png,20,3,0,0,0
1245
+ 1_2116.png,1,3,0,0,0
1246
+ 1_2117.png,0,2,6,0,0
1247
+ 1_2118.png,0,2,7,0,0
1248
+ 1_2119.png,0,5,12,0,0
1249
+ 1_212.png,0,0,8,0,0
1250
+ 1_2120.png,0,0,11,0,0
1251
+ 1_2121.png,0,1,9,0,0
1252
+ 1_2122.png,0,3,40,0,0
1253
+ 1_2123.png,0,3,38,0,0
1254
+ 1_2124.png,0,1,46,0,0
1255
+ 1_2125.png,0,1,15,0,27
1256
+ 1_2126.png,0,0,0,0,26
1257
+ 1_2127.png,0,2,24,0,19
1258
+ 1_2128.png,0,1,11,0,18
1259
+ 1_2129.png,0,1,5,0,25
1260
+ 1_213.png,0,3,8,0,12
1261
+ 1_2130.png,0,3,33,0,0
1262
+ 1_2131.png,24,0,3,0,0
1263
+ 1_2132.png,17,0,1,0,0
1264
+ 1_2133.png,21,0,0,0,0
1265
+ 1_2134.png,4,0,1,0,0
1266
+ 1_2135.png,23,0,1,0,0
1267
+ 1_2136.png,18,0,1,0,0
1268
+ 1_2137.png,1,3,9,0,0
1269
+ 1_2138.png,8,2,10,0,0
1270
+ 1_2139.png,0,1,5,0,0
1271
+ 1_214.png,0,5,4,0,14
1272
+ 1_2140.png,5,2,10,0,0
1273
+ 1_2141.png,12,5,15,0,0
1274
+ 1_2142.png,25,2,5,0,0
1275
+ 1_2143.png,14,0,0,0,0
1276
+ 1_2144.png,21,0,0,0,0
1277
+ 1_2145.png,21,0,0,0,0
1278
+ 1_2146.png,19,1,0,0,0
1279
+ 1_2147.png,19,0,0,0,0
1280
+ 1_2148.png,24,0,0,0,0
1281
+ 1_2149.png,34,3,1,0,0
1282
+ 1_215.png,0,0,0,0,32
1283
+ 1_2150.png,34,0,0,0,0
1284
+ 1_2151.png,38,0,0,0,0
1285
+ 1_2152.png,0,0,4,0,8
1286
+ 1_2153.png,0,4,13,0,6
1287
+ 1_2154.png,0,1,6,0,10
1288
+ 1_2155.png,0,1,4,0,10
1289
+ 1_2156.png,0,0,13,0,11
1290
+ 1_2157.png,0,2,10,0,7
1291
+ 1_2158.png,0,0,10,0,10
1292
+ 1_2159.png,0,0,8,0,8
1293
+ 1_216.png,0,0,10,0,4
1294
+ 1_2160.png,0,2,10,0,4
1295
+ 1_2161.png,1,0,2,0,0
1296
+ 1_2162.png,7,0,0,0,0
1297
+ 1_2163.png,1,2,15,0,0
1298
+ 1_2164.png,0,3,24,0,0
1299
+ 1_2165.png,0,3,28,0,0
1300
+ 1_2166.png,15,5,15,0,0
1301
+ 1_2167.png,5,3,12,0,0
1302
+ 1_2168.png,10,1,16,0,0
1303
+ 1_2169.png,19,4,11,0,0
1304
+ 1_217.png,0,0,6,0,32
1305
+ 1_2170.png,14,2,21,0,0
1306
+ 1_2171.png,2,2,19,0,0
1307
+ 1_2172.png,28,0,6,0,0
1308
+ 1_2173.png,0,1,9,0,0
1309
+ 1_2174.png,18,0,11,0,0
1310
+ 1_2175.png,0,0,4,0,0
1311
+ 1_2176.png,0,3,25,0,0
1312
+ 1_2177.png,0,4,16,0,0
1313
+ 1_2178.png,0,1,24,0,0
1314
+ 1_2179.png,0,0,13,0,0
1315
+ 1_218.png,0,3,1,0,47
1316
+ 1_2180.png,0,0,15,0,0
1317
+ 1_2181.png,0,0,26,0,0
1318
+ 1_2182.png,0,10,30,0,0
1319
+ 1_2183.png,0,2,17,0,0
1320
+ 1_2184.png,0,6,19,0,0
1321
+ 1_2185.png,0,0,17,0,0
1322
+ 1_2186.png,0,7,13,0,0
1323
+ 1_2187.png,0,7,21,0,0
1324
+ 1_2188.png,0,18,15,0,0
1325
+ 1_2189.png,0,19,18,0,3
1326
+ 1_219.png,0,4,2,0,31
1327
+ 1_2190.png,0,0,0,0,30
1328
+ 1_2191.png,0,0,7,0,0
1329
+ 1_2192.png,0,0,8,0,0
1330
+ 1_2193.png,0,0,6,0,0
1331
+ 1_2194.png,0,3,14,0,0
1332
+ 1_2195.png,0,1,10,0,0
1333
+ 1_2196.png,0,4,15,0,0
1334
+ 1_2197.png,0,2,13,0,0
1335
+ 1_2198.png,0,0,10,0,0
1336
+ 1_2199.png,0,3,10,0,0
1337
+ 1_22.png,0,3,10,0,0
1338
+ 1_220.png,0,2,12,0,19
1339
+ 1_2200.png,0,0,11,0,0
1340
+ 1_2201.png,0,0,10,0,0
1341
+ 1_2202.png,0,2,9,0,0
1342
+ 1_2203.png,0,0,9,0,0
1343
+ 1_2204.png,0,1,13,0,0
1344
+ 1_2205.png,0,1,14,0,0
1345
+ 1_2206.png,0,8,7,0,0
1346
+ 1_2207.png,0,7,12,0,0
1347
+ 1_2208.png,0,8,11,0,0
1348
+ 1_2209.png,0,8,15,0,0
1349
+ 1_221.png,0,3,1,0,42
1350
+ 1_2210.png,7,4,19,0,0
1351
+ 1_2211.png,9,1,26,0,0
1352
+ 1_2212.png,24,1,1,0,0
1353
+ 1_2213.png,0,0,6,0,19
1354
+ 1_2214.png,0,0,3,0,28
1355
+ 1_2215.png,0,0,0,0,18
1356
+ 1_2216.png,0,0,14,0,16
1357
+ 1_2217.png,0,0,12,0,26
1358
+ 1_2218.png,0,0,0,0,4
1359
+ 1_2219.png,8,0,7,0,0
1360
+ 1_222.png,0,0,13,0,19
1361
+ 1_2220.png,0,0,25,0,0
1362
+ 1_2221.png,1,0,30,0,0
1363
+ 1_2222.png,0,0,22,0,0
1364
+ 1_2223.png,0,3,25,0,0
1365
+ 1_2224.png,0,0,8,0,0
1366
+ 1_2225.png,0,0,12,0,0
1367
+ 1_2226.png,0,1,8,0,0
1368
+ 1_2227.png,0,0,14,0,0
1369
+ 1_2228.png,0,3,5,0,0
1370
+ 1_2229.png,38,0,0,0,0
1371
+ 1_223.png,17,0,3,0,0
1372
+ 1_2230.png,36,0,5,0,0
1373
+ 1_2231.png,36,0,1,0,0
1374
+ 1_2232.png,16,0,9,0,0
1375
+ 1_2233.png,0,0,7,0,0
1376
+ 1_2234.png,0,0,5,0,0
1377
+ 1_2235.png,0,0,4,0,0
1378
+ 1_2236.png,0,0,2,0,0
1379
+ 1_2237.png,26,0,2,0,0
1380
+ 1_2238.png,9,0,15,0,0
1381
+ 1_2239.png,17,0,9,0,0
1382
+ 1_224.png,0,2,0,0,0
1383
+ 1_2240.png,25,0,7,0,0
1384
+ 1_2241.png,1,0,25,0,0
1385
+ 1_2242.png,16,0,5,0,0
1386
+ 1_2243.png,12,0,0,0,0
1387
+ 1_2244.png,22,0,0,0,0
1388
+ 1_2245.png,24,63,0,0,0
1389
+ 1_2246.png,48,30,0,0,0
1390
+ 1_2247.png,9,22,0,0,0
1391
+ 1_2248.png,17,23,0,0,0
1392
+ 1_2249.png,30,36,0,0,0
1393
+ 1_225.png,8,0,7,0,0
1394
+ 1_2250.png,19,36,0,0,0
1395
+ 1_2251.png,0,2,15,0,0
1396
+ 1_2252.png,0,4,15,0,0
1397
+ 1_2253.png,0,10,15,0,0
1398
+ 1_2254.png,0,22,36,0,0
1399
+ 1_2255.png,0,4,8,0,0
1400
+ 1_2256.png,0,6,9,0,0
1401
+ 1_2257.png,0,1,9,0,0
1402
+ 1_2258.png,52,2,4,0,0
1403
+ 1_2259.png,79,2,4,0,0
1404
+ 1_226.png,0,6,8,0,28
1405
+ 1_2260.png,64,0,2,0,0
1406
+ 1_2261.png,66,2,1,0,0
1407
+ 1_2262.png,65,3,4,0,0
1408
+ 1_2263.png,52,0,2,0,0
1409
+ 1_2264.png,60,3,10,0,0
1410
+ 1_2265.png,59,0,5,0,0
1411
+ 1_2266.png,0,1,6,0,3
1412
+ 1_2267.png,0,0,2,0,0
1413
+ 1_2268.png,0,0,0,0,0
1414
+ 1_2269.png,0,0,0,0,0
1415
+ 1_227.png,0,3,3,0,30
1416
+ 1_2270.png,0,0,4,0,0
1417
+ 1_2271.png,0,0,2,0,33
1418
+ 1_2272.png,0,67,27,0,0
1419
+ 1_2273.png,0,79,34,0,0
1420
+ 1_2274.png,0,50,26,0,0
1421
+ 1_2275.png,0,49,34,0,0
1422
+ 1_2276.png,0,82,27,0,0
1423
+ 1_2277.png,0,56,28,0,0
1424
+ 1_2278.png,8,0,0,0,0
1425
+ 1_2279.png,19,1,0,0,0
1426
+ 1_228.png,0,7,13,0,24
1427
+ 1_2280.png,21,1,0,0,0
1428
+ 1_2281.png,16,1,0,0,0
1429
+ 1_2282.png,11,1,0,0,0
1430
+ 1_2283.png,19,1,0,0,0
1431
+ 1_2284.png,10,3,0,0,0
1432
+ 1_2285.png,5,2,0,0,0
1433
+ 1_2286.png,34,2,0,0,0
1434
+ 1_2287.png,34,0,0,0,0
1435
+ 1_2288.png,30,0,2,0,0
1436
+ 1_2289.png,24,0,2,0,0
1437
+ 1_229.png,0,10,10,0,10
1438
+ 1_2290.png,20,0,13,0,0
1439
+ 1_2291.png,27,0,4,0,0
1440
+ 1_2292.png,28,0,5,0,0
1441
+ 1_2293.png,29,0,2,0,0
1442
+ 1_2294.png,40,0,0,0,0
1443
+ 1_2295.png,12,4,0,0,0
1444
+ 1_2296.png,8,5,7,0,0
1445
+ 1_2297.png,12,2,0,0,0
1446
+ 1_2298.png,12,3,6,0,0
1447
+ 1_2299.png,17,2,3,0,0
1448
+ 1_23.png,1,5,8,0,0
1449
+ 1_230.png,0,3,13,0,24
1450
+ 1_2300.png,9,4,10,0,0
1451
+ 1_2301.png,12,0,0,0,0
1452
+ 1_2302.png,12,0,5,0,0
1453
+ 1_2303.png,30,4,2,0,0
1454
+ 1_2304.png,20,7,0,0,0
1455
+ 1_2305.png,32,4,1,0,0
1456
+ 1_2306.png,19,4,1,0,0
1457
+ 1_2307.png,20,8,2,0,0
1458
+ 1_2308.png,0,0,8,0,0
1459
+ 1_2309.png,0,0,6,0,0
1460
+ 1_231.png,0,5,15,0,0
1461
+ 1_2310.png,0,0,0,0,0
1462
+ 1_2311.png,0,0,1,0,0
1463
+ 1_2312.png,0,0,1,0,0
1464
+ 1_2313.png,0,0,4,0,0
1465
+ 1_2314.png,0,0,16,0,0
1466
+ 1_2315.png,0,0,0,0,0
1467
+ 1_2316.png,0,0,16,0,0
1468
+ 1_2317.png,0,0,16,0,0
1469
+ 1_2318.png,0,0,18,0,0
1470
+ 1_2319.png,0,0,28,0,0
1471
+ 1_232.png,0,2,1,0,26
1472
+ 1_2320.png,19,3,8,2,0
1473
+ 1_2321.png,24,1,9,13,0
1474
+ 1_2322.png,15,6,16,0,0
1475
+ 1_2323.png,13,2,8,0,0
1476
+ 1_2324.png,14,11,7,1,0
1477
+ 1_2325.png,21,11,6,1,0
1478
+ 1_2326.png,17,11,1,2,0
1479
+ 1_2327.png,24,5,2,1,0
1480
+ 1_2328.png,27,0,0,3,0
1481
+ 1_2329.png,34,1,0,0,0
1482
+ 1_233.png,0,3,5,0,41
1483
+ 1_2330.png,24,4,10,1,0
1484
+ 1_2331.png,29,7,2,0,0
1485
+ 1_2332.png,28,8,2,0,0
1486
+ 1_2333.png,19,19,13,0,0
1487
+ 1_2334.png,12,6,6,4,0
1488
+ 1_2335.png,30,2,3,0,0
1489
+ 1_2336.png,29,9,3,1,0
1490
+ 1_2337.png,15,1,9,0,0
1491
+ 1_2338.png,7,0,13,0,0
1492
+ 1_2339.png,36,1,1,0,0
1493
+ 1_234.png,0,2,11,0,18
1494
+ 1_2340.png,34,1,1,5,0
1495
+ 1_2341.png,27,0,2,1,0
1496
+ 1_2342.png,12,3,11,0,0
1497
+ 1_2343.png,33,1,2,0,0
1498
+ 1_2344.png,25,1,1,0,0
1499
+ 1_2345.png,19,1,4,0,0
1500
+ 1_2346.png,16,5,4,0,0
1501
+ 1_2347.png,0,0,11,0,0
1502
+ 1_2348.png,1,0,9,0,0
1503
+ 1_2349.png,0,0,13,0,0
1504
+ 1_235.png,0,0,9,0,0
1505
+ 1_2350.png,0,0,18,0,0
1506
+ 1_2351.png,0,0,11,0,0
1507
+ 1_2352.png,3,24,24,1,0
1508
+ 1_2353.png,11,9,15,1,0
1509
+ 1_2354.png,5,57,6,4,0
1510
+ 1_2355.png,0,1,3,0,0
1511
+ 1_2356.png,0,3,1,0,0
1512
+ 1_2357.png,0,0,11,0,0
1513
+ 1_2358.png,3,0,3,0,0
1514
+ 1_2359.png,0,1,10,0,0
1515
+ 1_236.png,0,3,2,0,44
1516
+ 1_2360.png,7,2,11,0,0
1517
+ 1_2361.png,10,0,1,0,0
1518
+ 1_2362.png,2,2,15,0,0
1519
+ 1_2363.png,6,5,4,0,0
1520
+ 1_2364.png,10,0,3,0,0
1521
+ 1_2365.png,0,4,5,0,0
1522
+ 1_2366.png,1,1,18,0,0
1523
+ 1_2367.png,15,3,0,0,0
1524
+ 1_2368.png,17,0,0,0,0
1525
+ 1_2369.png,26,0,0,0,0
1526
+ 1_237.png,0,0,7,0,34
1527
+ 1_2370.png,20,3,4,0,0
1528
+ 1_2371.png,20,3,6,0,0
1529
+ 1_2372.png,27,0,2,0,0
1530
+ 1_2373.png,7,54,12,0,0
1531
+ 1_2374.png,6,58,8,0,0
1532
+ 1_2375.png,15,40,3,0,0
1533
+ 1_2376.png,11,69,9,0,0
1534
+ 1_2377.png,9,27,3,0,0
1535
+ 1_2378.png,24,6,0,0,0
1536
+ 1_2379.png,19,4,1,0,0
1537
+ 1_238.png,0,5,6,0,33
1538
+ 1_2380.png,22,2,3,0,0
1539
+ 1_2381.png,16,4,10,0,0
1540
+ 1_2382.png,0,0,0,0,11
1541
+ 1_2383.png,0,0,3,0,17
1542
+ 1_2384.png,0,3,2,0,18
1543
+ 1_2385.png,0,0,0,0,8
1544
+ 1_2386.png,0,6,3,0,13
1545
+ 1_2387.png,0,5,2,0,15
1546
+ 1_2388.png,0,0,1,0,16
1547
+ 1_2389.png,0,1,3,0,23
1548
+ 1_239.png,0,17,2,0,21
1549
+ 1_2390.png,0,1,1,0,24
1550
+ 1_2391.png,0,0,3,0,17
1551
+ 1_2392.png,0,0,0,0,3
1552
+ 1_2393.png,0,1,0,0,18
1553
+ 1_2394.png,0,0,1,0,7
1554
+ 1_2395.png,0,0,1,0,0
1555
+ 1_2396.png,0,0,0,0,4
1556
+ 1_2397.png,0,0,0,0,0
1557
+ 1_2398.png,0,0,4,0,18
1558
+ 1_2399.png,0,0,11,0,12
1559
+ 1_24.png,5,0,3,0,0
1560
+ 1_240.png,0,3,14,0,31
1561
+ 1_2400.png,0,1,5,0,29
1562
+ 1_2401.png,0,1,5,0,27
1563
+ 1_2402.png,0,0,1,0,22
1564
+ 1_2403.png,0,0,1,0,35
1565
+ 1_2404.png,0,0,4,0,32
1566
+ 1_2405.png,0,2,8,0,18
1567
+ 1_2406.png,0,6,9,0,18
1568
+ 1_2407.png,0,2,6,0,30
1569
+ 1_2408.png,0,22,11,0,7
1570
+ 1_2409.png,30,0,4,0,0
1571
+ 1_241.png,0,11,6,0,27
1572
+ 1_2410.png,37,0,2,0,0
1573
+ 1_2411.png,5,0,3,0,10
1574
+ 1_2412.png,16,1,10,0,1
1575
+ 1_2413.png,24,0,5,0,0
1576
+ 1_2414.png,21,0,6,0,2
1577
+ 1_2415.png,10,8,11,0,0
1578
+ 1_2416.png,4,12,9,0,0
1579
+ 1_2417.png,14,9,8,0,0
1580
+ 1_2418.png,3,19,11,0,0
1581
+ 1_2419.png,4,13,11,0,0
1582
+ 1_242.png,0,6,5,0,34
1583
+ 1_2420.png,8,12,8,0,0
1584
+ 1_2421.png,19,0,7,0,0
1585
+ 1_2422.png,12,4,11,0,0
1586
+ 1_2423.png,0,3,9,0,20
1587
+ 1_2424.png,0,1,4,0,15
1588
+ 1_2425.png,0,3,2,0,36
1589
+ 1_2426.png,0,2,6,0,19
1590
+ 1_2427.png,0,1,6,0,15
1591
+ 1_2428.png,0,0,6,0,22
1592
+ 1_2429.png,48,3,2,0,0
1593
+ 1_243.png,0,4,4,0,39
1594
+ 1_2430.png,27,1,0,0,0
1595
+ 1_2431.png,30,0,0,0,0
1596
+ 1_2432.png,0,22,31,0,32
1597
+ 1_2433.png,0,25,24,0,41
1598
+ 1_2434.png,0,9,16,0,14
1599
+ 1_2435.png,0,1,2,0,0
1600
+ 1_2436.png,0,0,2,0,0
1601
+ 1_2437.png,0,0,16,0,0
1602
+ 1_2438.png,0,0,1,0,0
1603
+ 1_2439.png,0,1,5,0,0
1604
+ 1_244.png,0,1,3,0,24
1605
+ 1_2440.png,0,2,7,0,0
1606
+ 1_2441.png,65,0,12,0,0
1607
+ 1_2442.png,66,1,1,0,0
1608
+ 1_2443.png,86,0,1,0,0
1609
+ 1_2444.png,36,0,0,0,0
1610
+ 1_2445.png,31,0,0,0,0
1611
+ 1_2446.png,54,1,1,0,0
1612
+ 1_2447.png,5,3,21,0,0
1613
+ 1_2448.png,32,1,34,0,0
1614
+ 1_2449.png,0,2,10,0,0
1615
+ 1_245.png,0,0,6,0,21
1616
+ 1_2450.png,0,0,0,0,0
1617
+ 1_2451.png,0,8,18,0,0
1618
+ 1_2452.png,0,3,6,0,0
1619
+ 1_2453.png,0,6,34,0,0
1620
+ 1_2454.png,0,1,11,0,0
1621
+ 1_2455.png,58,7,1,0,0
1622
+ 1_2456.png,101,3,5,0,0
1623
+ 1_2457.png,47,0,0,0,0
1624
+ 1_2458.png,55,2,2,0,0
1625
+ 1_2459.png,0,120,34,0,0
1626
+ 1_246.png,0,0,11,0,22
1627
+ 1_2460.png,0,154,10,0,0
1628
+ 1_2461.png,42,0,17,0,0
1629
+ 1_2462.png,16,0,41,0,0
1630
+ 1_2463.png,0,0,0,1,0
1631
+ 1_2464.png,39,0,1,14,0
1632
+ 1_2465.png,49,0,48,0,0
1633
+ 1_2466.png,2,0,0,3,0
1634
+ 1_2467.png,47,0,4,10,0
1635
+ 1_2468.png,37,2,0,0,0
1636
+ 1_2469.png,80,8,32,0,0
1637
+ 1_247.png,10,0,6,0,0
1638
+ 1_2470.png,40,2,8,0,0
1639
+ 1_2471.png,0,0,0,0,28
1640
+ 1_2472.png,0,0,2,0,25
1641
+ 1_2473.png,0,0,3,0,31
1642
+ 1_2474.png,0,0,22,0,0
1643
+ 1_2475.png,0,0,24,0,0
1644
+ 1_2476.png,0,1,13,0,0
1645
+ 1_2477.png,0,0,22,0,0
1646
+ 1_2478.png,0,0,13,0,0
1647
+ 1_2479.png,0,21,43,0,0
1648
+ 1_248.png,21,0,0,0,0
1649
+ 1_2480.png,40,1,13,0,0
1650
+ 1_2481.png,0,5,15,0,0
1651
+ 1_2482.png,0,11,51,0,0
1652
+ 1_2483.png,0,14,33,0,0
1653
+ 1_2484.png,0,17,59,0,0
1654
+ 1_2485.png,0,15,76,0,0
1655
+ 1_2486.png,0,43,74,0,0
1656
+ 1_2487.png,0,78,89,0,0
1657
+ 1_2488.png,0,0,66,0,0
1658
+ 1_2489.png,53,2,7,0,0
1659
+ 1_249.png,18,0,1,0,0
1660
+ 1_2490.png,45,5,4,0,0
1661
+ 1_2491.png,26,12,3,0,0
1662
+ 1_2492.png,0,9,2,0,0
1663
+ 1_2493.png,0,5,5,0,0
1664
+ 1_2494.png,0,0,0,0,0
1665
+ 1_2495.png,0,0,1,0,0
1666
+ 1_2496.png,0,0,2,0,0
1667
+ 1_2497.png,0,0,8,0,0
1668
+ 1_2498.png,0,0,2,0,0
1669
+ 1_2499.png,0,0,23,0,0
1670
+ 1_25.png,10,0,0,0,0
1671
+ 1_250.png,13,0,0,0,0
1672
+ 1_2500.png,0,0,12,0,0
1673
+ 1_2501.png,0,16,0,0,0
1674
+ 1_2502.png,0,1,0,0,0
1675
+ 1_2503.png,0,0,4,0,0
1676
+ 1_2504.png,0,3,25,0,0
1677
+ 1_2505.png,0,1,19,0,0
1678
+ 1_2506.png,0,4,30,0,0
1679
+ 1_2507.png,58,1,2,0,0
1680
+ 1_2508.png,0,0,0,0,0
1681
+ 1_2509.png,0,0,14,0,0
1682
+ 1_251.png,23,0,0,0,0
1683
+ 1_2510.png,0,0,2,0,0
1684
+ 1_2511.png,0,0,0,0,0
1685
+ 1_2512.png,0,0,1,0,0
1686
+ 1_2513.png,0,12,14,0,24
1687
+ 1_2514.png,0,32,39,0,23
1688
+ 1_2515.png,0,15,22,0,27
1689
+ 1_2516.png,21,0,24,0,0
1690
+ 1_2517.png,42,9,0,0,0
1691
+ 1_2518.png,62,0,0,23,0
1692
+ 1_2519.png,23,46,35,0,0
1693
+ 1_252.png,23,0,2,0,0
1694
+ 1_2520.png,0,8,22,0,0
1695
+ 1_2521.png,0,16,40,0,0
1696
+ 1_2522.png,0,16,33,0,0
1697
+ 1_253.png,8,4,6,0,0
1698
+ 1_254.png,0,27,11,0,0
1699
+ 1_255.png,14,0,0,0,0
1700
+ 1_256.png,24,0,0,0,0
1701
+ 1_257.png,25,0,0,0,0
1702
+ 1_258.png,22,0,1,0,0
1703
+ 1_259.png,19,0,1,0,0
1704
+ 1_26.png,7,0,6,0,0
1705
+ 1_260.png,11,0,6,0,0
1706
+ 1_261.png,22,0,5,0,0
1707
+ 1_262.png,0,0,14,0,10
1708
+ 1_263.png,0,1,11,0,25
1709
+ 1_264.png,0,2,8,0,26
1710
+ 1_265.png,0,11,15,0,7
1711
+ 1_266.png,19,1,4,0,0
1712
+ 1_267.png,25,0,2,0,0
1713
+ 1_268.png,21,0,4,0,0
1714
+ 1_269.png,20,1,1,0,0
1715
+ 1_27.png,0,0,7,0,0
1716
+ 1_270.png,1,32,10,0,0
1717
+ 1_271.png,0,18,18,0,0
1718
+ 1_272.png,6,9,10,0,0
1719
+ 1_273.png,6,16,12,0,0
1720
+ 1_274.png,8,15,7,0,0
1721
+ 1_275.png,9,3,8,0,0
1722
+ 1_276.png,0,27,23,0,0
1723
+ 1_277.png,0,39,6,0,0
1724
+ 1_278.png,14,25,10,0,0
1725
+ 1_279.png,12,2,14,0,0
1726
+ 1_28.png,2,0,6,0,0
1727
+ 1_280.png,23,2,4,0,0
1728
+ 1_281.png,0,5,11,0,0
1729
+ 1_282.png,0,4,7,0,0
1730
+ 1_283.png,0,1,14,0,0
1731
+ 1_284.png,7,0,6,0,0
1732
+ 1_285.png,1,0,11,0,0
1733
+ 1_286.png,0,2,5,0,24
1734
+ 1_287.png,0,0,3,0,20
1735
+ 1_288.png,0,0,7,0,30
1736
+ 1_289.png,0,0,5,0,32
1737
+ 1_29.png,0,0,7,0,0
1738
+ 1_290.png,0,2,10,0,23
1739
+ 1_291.png,0,3,2,0,15
1740
+ 1_292.png,0,4,0,0,5
1741
+ 1_293.png,0,1,2,0,13
1742
+ 1_294.png,0,3,1,0,30
1743
+ 1_295.png,8,1,3,0,0
1744
+ 1_296.png,21,1,0,0,0
1745
+ 1_297.png,14,2,5,0,0
1746
+ 1_298.png,9,1,2,0,1
1747
+ 1_299.png,4,5,5,0,0
1748
+ 1_3.png,8,1,1,0,0
1749
+ 1_30.png,11,0,2,0,0
1750
+ 1_300.png,2,0,4,0,0
1751
+ 1_301.png,18,0,2,0,0
1752
+ 1_302.png,21,3,2,0,0
1753
+ 1_303.png,18,0,2,0,0
1754
+ 1_304.png,22,6,0,0,0
1755
+ 1_305.png,6,6,10,0,0
1756
+ 1_306.png,23,0,1,0,0
1757
+ 1_307.png,22,3,5,0,0
1758
+ 1_308.png,22,0,1,0,0
1759
+ 1_309.png,15,0,2,0,0
1760
+ 1_31.png,0,1,8,0,12
1761
+ 1_310.png,37,0,1,0,0
1762
+ 1_311.png,22,0,0,0,0
1763
+ 1_312.png,23,0,1,0,0
1764
+ 1_313.png,25,1,1,0,0
1765
+ 1_314.png,20,3,3,0,0
1766
+ 1_315.png,8,8,10,0,0
1767
+ 1_316.png,8,12,20,0,0
1768
+ 1_317.png,8,0,0,0,0
1769
+ 1_318.png,5,0,0,0,0
1770
+ 1_319.png,5,0,0,0,0
1771
+ 1_32.png,0,3,7,0,7
1772
+ 1_320.png,15,0,0,0,0
1773
+ 1_321.png,12,0,1,0,0
1774
+ 1_322.png,4,0,0,0,0
1775
+ 1_323.png,9,0,0,0,0
1776
+ 1_324.png,0,5,8,0,0
1777
+ 1_325.png,0,6,8,0,0
1778
+ 1_326.png,0,4,7,0,0
1779
+ 1_327.png,0,3,11,0,0
1780
+ 1_328.png,0,5,12,0,0
1781
+ 1_329.png,7,3,2,0,0
1782
+ 1_33.png,0,1,4,0,19
1783
+ 1_330.png,6,1,3,0,0
1784
+ 1_331.png,6,1,5,0,0
1785
+ 1_332.png,4,1,2,0,0
1786
+ 1_333.png,28,0,0,0,0
1787
+ 1_334.png,31,0,0,0,0
1788
+ 1_335.png,23,0,0,0,0
1789
+ 1_336.png,28,0,0,0,0
1790
+ 1_337.png,36,0,0,0,0
1791
+ 1_338.png,33,0,0,0,0
1792
+ 1_339.png,0,1,1,0,0
1793
+ 1_34.png,0,0,5,0,31
1794
+ 1_340.png,24,1,2,0,0
1795
+ 1_341.png,2,1,3,0,0
1796
+ 1_342.png,7,0,2,0,0
1797
+ 1_343.png,3,0,6,0,0
1798
+ 1_344.png,4,5,8,0,0
1799
+ 1_345.png,11,0,2,0,0
1800
+ 1_346.png,1,3,6,0,0
1801
+ 1_347.png,1,1,10,0,0
1802
+ 1_348.png,10,0,2,0,0
1803
+ 1_349.png,5,1,7,0,0
1804
+ 1_35.png,0,2,6,0,27
1805
+ 1_350.png,5,2,8,0,0
1806
+ 1_351.png,13,0,2,0,0
1807
+ 1_352.png,12,0,6,0,0
1808
+ 1_353.png,9,0,8,0,0
1809
+ 1_354.png,5,1,5,0,0
1810
+ 1_355.png,14,0,1,0,0
1811
+ 1_356.png,3,0,8,0,0
1812
+ 1_357.png,15,0,1,0,0
1813
+ 1_358.png,9,3,2,0,0
1814
+ 1_359.png,8,0,2,0,0
1815
+ 1_36.png,0,0,1,0,8
1816
+ 1_360.png,20,0,0,0,0
1817
+ 1_361.png,13,0,2,0,0
1818
+ 1_362.png,9,1,5,0,0
1819
+ 1_363.png,8,0,6,0,0
1820
+ 1_364.png,7,7,7,0,0
1821
+ 1_365.png,4,2,4,0,0
1822
+ 1_366.png,12,0,1,0,0
1823
+ 1_367.png,16,0,1,0,0
1824
+ 1_368.png,19,1,5,0,0
1825
+ 1_369.png,3,17,24,0,0
1826
+ 1_37.png,0,0,2,0,11
1827
+ 1_370.png,21,1,9,0,0
1828
+ 1_371.png,7,17,21,0,0
1829
+ 1_372.png,31,1,6,0,0
1830
+ 1_373.png,4,2,4,0,0
1831
+ 1_374.png,0,16,7,0,0
1832
+ 1_375.png,6,10,5,0,0
1833
+ 1_376.png,2,22,8,0,0
1834
+ 1_377.png,13,2,1,0,0
1835
+ 1_378.png,9,0,2,0,0
1836
+ 1_379.png,27,2,0,0,0
1837
+ 1_38.png,0,3,2,0,18
1838
+ 1_380.png,8,0,1,0,0
1839
+ 1_381.png,17,1,0,0,0
1840
+ 1_382.png,0,3,12,0,0
1841
+ 1_383.png,0,0,5,0,0
1842
+ 1_384.png,0,11,8,0,0
1843
+ 1_385.png,14,0,1,0,0
1844
+ 1_386.png,0,12,6,0,0
1845
+ 1_387.png,14,0,0,0,0
1846
+ 1_388.png,7,6,3,0,0
1847
+ 1_389.png,6,0,3,0,0
1848
+ 1_39.png,0,1,1,0,10
1849
+ 1_390.png,0,2,2,0,0
1850
+ 1_391.png,12,1,1,0,0
1851
+ 1_392.png,2,1,2,0,0
1852
+ 1_393.png,11,0,3,0,0
1853
+ 1_394.png,14,1,2,0,0
1854
+ 1_395.png,11,1,3,0,0
1855
+ 1_396.png,3,6,2,0,0
1856
+ 1_397.png,10,5,4,0,0
1857
+ 1_398.png,5,1,5,0,0
1858
+ 1_399.png,14,0,2,0,0
1859
+ 1_4.png,5,0,2,0,0
1860
+ 1_40.png,0,0,2,0,18
1861
+ 1_400.png,9,2,16,0,0
1862
+ 1_401.png,21,4,2,0,0
1863
+ 1_402.png,17,0,6,0,0
1864
+ 1_403.png,20,2,4,0,0
1865
+ 1_404.png,16,0,4,0,0
1866
+ 1_405.png,8,1,6,0,0
1867
+ 1_406.png,12,1,4,0,0
1868
+ 1_407.png,12,0,1,0,0
1869
+ 1_408.png,20,1,4,0,0
1870
+ 1_409.png,7,7,7,0,0
1871
+ 1_41.png,1,6,14,0,0
1872
+ 1_410.png,14,3,5,0,0
1873
+ 1_411.png,7,1,2,0,0
1874
+ 1_412.png,13,0,3,0,0
1875
+ 1_413.png,18,3,4,0,0
1876
+ 1_414.png,21,2,1,0,0
1877
+ 1_415.png,0,1,2,0,0
1878
+ 1_416.png,31,1,2,0,0
1879
+ 1_417.png,18,0,1,0,0
1880
+ 1_418.png,0,0,8,0,0
1881
+ 1_419.png,2,4,9,0,0
1882
+ 1_42.png,29,0,0,0,0
1883
+ 1_420.png,2,2,7,0,0
1884
+ 1_421.png,3,2,14,0,0
1885
+ 1_422.png,0,1,10,0,0
1886
+ 1_423.png,4,3,8,0,0
1887
+ 1_424.png,0,0,6,0,0
1888
+ 1_425.png,6,1,13,0,0
1889
+ 1_426.png,4,0,12,0,0
1890
+ 1_427.png,1,1,11,0,0
1891
+ 1_428.png,15,0,2,0,0
1892
+ 1_429.png,1,3,11,0,0
1893
+ 1_43.png,0,1,7,0,9
1894
+ 1_430.png,3,0,11,0,0
1895
+ 1_431.png,1,7,16,0,0
1896
+ 1_432.png,3,4,4,0,0
1897
+ 1_433.png,3,0,6,0,0
1898
+ 1_434.png,6,1,4,0,0
1899
+ 1_435.png,0,4,5,0,0
1900
+ 1_436.png,0,2,5,0,0
1901
+ 1_437.png,0,2,5,0,0
1902
+ 1_438.png,2,2,4,0,0
1903
+ 1_439.png,0,1,7,0,0
1904
+ 1_44.png,0,0,2,0,35
1905
+ 1_440.png,11,3,3,0,0
1906
+ 1_441.png,11,4,1,0,0
1907
+ 1_442.png,6,3,3,0,0
1908
+ 1_443.png,6,0,3,0,0
1909
+ 1_444.png,5,1,4,0,0
1910
+ 1_445.png,0,2,7,0,0
1911
+ 1_446.png,2,2,4,0,0
1912
+ 1_447.png,3,20,6,0,0
1913
+ 1_448.png,6,2,2,0,0
1914
+ 1_449.png,11,2,5,0,0
1915
+ 1_45.png,0,0,5,0,30
1916
+ 1_450.png,2,1,5,0,0
1917
+ 1_451.png,2,0,7,0,0
1918
+ 1_452.png,0,2,13,0,0
1919
+ 1_453.png,10,0,8,0,0
1920
+ 1_454.png,18,2,0,0,0
1921
+ 1_455.png,4,2,5,0,0
1922
+ 1_456.png,4,2,9,0,0
1923
+ 1_457.png,2,3,7,0,0
1924
+ 1_458.png,14,0,7,0,0
1925
+ 1_459.png,14,0,2,0,0
1926
+ 1_46.png,0,1,4,0,17
1927
+ 1_460.png,10,2,8,0,0
1928
+ 1_461.png,24,1,1,0,0
1929
+ 1_462.png,7,2,6,0,0
1930
+ 1_463.png,9,1,4,0,0
1931
+ 1_464.png,7,4,5,0,0
1932
+ 1_465.png,16,0,2,0,0
1933
+ 1_466.png,5,1,5,0,0
1934
+ 1_467.png,4,2,8,0,0
1935
+ 1_468.png,0,10,7,0,0
1936
+ 1_469.png,3,3,4,0,0
1937
+ 1_47.png,0,1,6,0,15
1938
+ 1_470.png,10,0,1,0,0
1939
+ 1_471.png,1,4,3,0,0
1940
+ 1_472.png,3,2,0,0,0
1941
+ 1_473.png,3,0,4,0,0
1942
+ 1_474.png,5,0,0,0,0
1943
+ 1_475.png,7,0,0,0,0
1944
+ 1_476.png,36,0,0,0,0
1945
+ 1_477.png,18,3,6,0,0
1946
+ 1_478.png,11,1,8,0,0
1947
+ 1_479.png,13,0,10,0,0
1948
+ 1_48.png,0,1,4,0,29
1949
+ 1_480.png,16,1,8,0,0
1950
+ 1_481.png,15,0,10,0,0
1951
+ 1_482.png,5,0,10,0,0
1952
+ 1_483.png,22,0,2,0,0
1953
+ 1_484.png,22,3,3,0,0
1954
+ 1_485.png,19,0,1,0,0
1955
+ 1_486.png,0,11,8,0,0
1956
+ 1_487.png,6,3,8,0,0
1957
+ 1_488.png,8,5,3,0,0
1958
+ 1_489.png,23,5,0,0,0
1959
+ 1_49.png,0,1,6,0,24
1960
+ 1_490.png,28,2,0,0,0
1961
+ 1_491.png,29,0,0,0,0
1962
+ 1_492.png,31,0,0,0,0
1963
+ 1_493.png,29,0,0,0,0
1964
+ 1_494.png,0,4,18,0,0
1965
+ 1_495.png,14,5,1,0,0
1966
+ 1_496.png,28,0,2,0,0
1967
+ 1_497.png,0,11,9,0,0
1968
+ 1_498.png,0,24,12,0,0
1969
+ 1_499.png,0,24,6,0,0
1970
+ 1_5.png,4,0,2,0,0
1971
+ 1_50.png,0,2,8,0,10
1972
+ 1_500.png,11,4,10,0,0
1973
+ 1_501.png,21,3,4,0,0
1974
+ 1_502.png,30,2,1,0,0
1975
+ 1_503.png,28,3,4,0,0
1976
+ 1_504.png,24,1,7,0,0
1977
+ 1_505.png,11,5,8,0,0
1978
+ 1_506.png,19,2,4,0,0
1979
+ 1_507.png,21,6,8,0,0
1980
+ 1_508.png,1,12,8,0,0
1981
+ 1_509.png,14,2,5,0,0
1982
+ 1_51.png,0,0,13,0,4
1983
+ 1_510.png,16,5,4,0,0
1984
+ 1_511.png,11,2,13,0,0
1985
+ 1_512.png,19,0,0,0,0
1986
+ 1_513.png,11,11,6,0,0
1987
+ 1_514.png,16,6,3,0,0
1988
+ 1_515.png,2,2,6,0,0
1989
+ 1_516.png,0,7,9,0,0
1990
+ 1_517.png,1,2,5,0,0
1991
+ 1_518.png,2,2,8,0,0
1992
+ 1_519.png,2,2,8,0,0
1993
+ 1_52.png,0,2,2,0,30
1994
+ 1_520.png,1,2,4,0,0
1995
+ 1_521.png,0,13,6,0,0
1996
+ 1_522.png,0,2,8,0,0
1997
+ 1_523.png,0,2,4,0,14
1998
+ 1_524.png,0,0,0,0,25
1999
+ 1_525.png,0,2,0,0,37
2000
+ 1_526.png,0,2,7,0,5
2001
+ 1_527.png,0,0,4,0,8
2002
+ 1_528.png,0,1,1,0,24
2003
+ 1_529.png,0,3,5,0,2
2004
+ 1_53.png,0,0,4,0,22
2005
+ 1_530.png,0,4,6,0,2
2006
+ 1_531.png,0,0,0,0,26
2007
+ 1_532.png,0,2,2,0,24
2008
+ 1_533.png,14,0,0,0,0
2009
+ 1_534.png,22,0,0,0,0
2010
+ 1_535.png,17,0,2,0,0
2011
+ 1_536.png,19,1,0,0,0
2012
+ 1_537.png,14,1,0,0,0
2013
+ 1_538.png,0,2,2,0,0
2014
+ 1_539.png,10,0,0,0,0
2015
+ 1_54.png,0,1,0,0,29
2016
+ 1_540.png,20,0,3,0,0
2017
+ 1_541.png,33,1,0,0,0
2018
+ 1_542.png,0,0,2,0,0
2019
+ 1_543.png,44,0,0,0,0
2020
+ 1_544.png,0,0,0,0,0
2021
+ 1_545.png,0,0,2,0,0
2022
+ 1_546.png,11,0,0,0,0
2023
+ 1_547.png,29,0,0,0,0
2024
+ 1_548.png,0,0,1,0,0
2025
+ 1_549.png,13,0,0,0,0
2026
+ 1_55.png,0,0,2,0,22
2027
+ 1_550.png,1,3,4,0,0
2028
+ 1_551.png,89,0,0,0,0
2029
+ 1_552.png,25,1,11,0,0
2030
+ 1_553.png,26,0,4,0,0
2031
+ 1_554.png,38,0,3,0,0
2032
+ 1_555.png,47,0,4,0,0
2033
+ 1_556.png,2,0,3,0,0
2034
+ 1_557.png,31,0,0,0,0
2035
+ 1_558.png,25,0,0,0,0
2036
+ 1_559.png,69,1,0,0,0
2037
+ 1_56.png,0,0,0,0,23
2038
+ 1_560.png,12,0,3,0,0
2039
+ 1_561.png,9,0,6,0,0
2040
+ 1_562.png,0,0,6,0,0
2041
+ 1_563.png,14,0,3,0,0
2042
+ 1_564.png,12,0,1,0,0
2043
+ 1_565.png,10,3,2,0,0
2044
+ 1_566.png,0,6,8,0,0
2045
+ 1_567.png,1,10,9,0,0
2046
+ 1_568.png,5,10,4,0,0
2047
+ 1_569.png,0,0,6,0,36
2048
+ 1_57.png,0,0,10,0,1
2049
+ 1_570.png,0,3,0,0,41
2050
+ 1_571.png,0,3,2,0,56
2051
+ 1_572.png,0,3,3,0,34
2052
+ 1_573.png,0,1,9,0,6
2053
+ 1_574.png,0,1,7,0,47
2054
+ 1_575.png,3,7,5,0,0
2055
+ 1_576.png,2,4,6,0,0
2056
+ 1_577.png,2,2,2,0,0
2057
+ 1_578.png,3,2,6,0,0
2058
+ 1_579.png,0,1,3,0,0
2059
+ 1_58.png,0,0,3,0,31
2060
+ 1_580.png,1,1,0,0,0
2061
+ 1_581.png,11,6,2,0,0
2062
+ 1_582.png,1,1,4,0,0
2063
+ 1_583.png,20,4,0,0,0
2064
+ 1_584.png,21,5,0,0,0
2065
+ 1_585.png,2,2,7,0,0
2066
+ 1_586.png,14,3,0,0,0
2067
+ 1_587.png,0,2,6,0,0
2068
+ 1_588.png,2,15,8,0,0
2069
+ 1_589.png,0,12,1,0,0
2070
+ 1_59.png,0,0,7,0,43
2071
+ 1_590.png,0,31,1,0,0
2072
+ 1_591.png,0,30,9,0,0
2073
+ 1_592.png,12,8,4,0,0
2074
+ 1_593.png,0,19,12,0,0
2075
+ 1_594.png,11,1,4,0,0
2076
+ 1_595.png,0,29,4,0,0
2077
+ 1_596.png,0,14,6,0,0
2078
+ 1_597.png,0,4,5,0,0
2079
+ 1_598.png,2,4,5,0,0
2080
+ 1_599.png,3,5,8,0,0
2081
+ 1_6.png,8,0,1,0,0
2082
+ 1_60.png,5,0,5,0,0
2083
+ 1_600.png,3,2,4,0,0
2084
+ 1_601.png,18,1,1,0,0
2085
+ 1_602.png,0,7,4,0,0
2086
+ 1_603.png,7,4,8,0,0
2087
+ 1_604.png,0,12,2,0,0
2088
+ 1_605.png,10,2,4,0,0
2089
+ 1_606.png,10,4,4,0,0
2090
+ 1_607.png,8,3,7,0,0
2091
+ 1_608.png,15,4,2,0,0
2092
+ 1_609.png,6,3,9,0,0
2093
+ 1_61.png,0,0,2,0,0
2094
+ 1_610.png,1,17,9,0,0
2095
+ 1_611.png,0,9,12,0,0
2096
+ 1_612.png,20,0,2,0,0
2097
+ 1_613.png,32,0,0,0,0
2098
+ 1_614.png,25,0,0,0,0
2099
+ 1_615.png,27,0,0,0,0
2100
+ 1_616.png,19,0,4,0,0
2101
+ 1_617.png,27,0,0,0,0
2102
+ 1_618.png,0,0,2,0,35
2103
+ 1_619.png,0,1,4,0,28
2104
+ 1_62.png,5,0,3,0,0
2105
+ 1_620.png,0,3,5,0,28
2106
+ 1_621.png,0,2,1,0,47
2107
+ 1_622.png,0,0,5,0,32
2108
+ 1_623.png,0,0,14,0,8
2109
+ 1_624.png,0,2,5,0,36
2110
+ 1_625.png,0,0,4,0,55
2111
+ 1_626.png,16,0,0,0,0
2112
+ 1_627.png,17,0,0,0,0
2113
+ 1_628.png,13,3,1,0,0
2114
+ 1_629.png,13,5,0,0,0
2115
+ 1_63.png,2,0,1,0,0
2116
+ 1_630.png,15,7,2,0,0
2117
+ 1_631.png,0,41,6,0,0
2118
+ 1_632.png,5,14,9,0,0
2119
+ 1_633.png,2,30,2,0,0
2120
+ 1_634.png,2,9,6,0,0
2121
+ 1_635.png,0,30,6,0,0
2122
+ 1_636.png,0,14,13,0,0
2123
+ 1_637.png,1,18,5,0,0
2124
+ 1_638.png,0,30,10,0,0
2125
+ 1_639.png,3,6,10,0,0
2126
+ 1_64.png,10,0,1,0,0
2127
+ 1_640.png,0,19,10,0,0
2128
+ 1_641.png,1,13,15,0,0
2129
+ 1_642.png,2,7,11,0,0
2130
+ 1_643.png,0,11,10,0,0
2131
+ 1_644.png,4,0,4,0,0
+ 1_645.png,4,0,1,0,0
+ 1_646.png,6,0,0,0,0
+ 1_647.png,5,0,3,0,0
+ 1_648.png,2,0,4,0,0
+ 1_649.png,7,0,3,0,0
⋮
+ 1_996.png,0,0,26,0,0
+ 1_997.png,0,0,10,0,0
+ 1_998.png,0,0,14,0,0
+ 1_999.png,0,0,17,0,0
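These per-fold files are plain metadata tables: cell_count.csv holds one row per patch with five numeric columns, which presumably correspond to PanNuke's five nucleus classes (Neoplastic, Inflammatory, Connective, Dead, Epithelial) -- an assumption, since the header row is not visible in this excerpt -- and types.csv (added below) maps each patch to its tissue of origin with columns img,type. A minimal sketch of how the two tables could be loaded and joined; this is not repository code, and the path and the use of the first cell_count.csv column as the image key are illustrative assumptions:

    # Minimal sketch (not repository code): load and join the per-fold
    # PanNuke metadata CSVs. The cell_count.csv header is an assumption;
    # types.csv has columns img,type as shown in the diff below.
    from pathlib import Path

    import pandas as pd

    fold_dir = Path("docs/datasets/PanNuke/fold1")  # repo-relative, per the file list

    cell_counts = pd.read_csv(fold_dir / "cell_count.csv")
    tissue_types = pd.read_csv(fold_dir / "types.csv")  # columns: img, type

    # Join per-patch nucleus counts with the tissue-of-origin label,
    # keying on the image-name column (assumed to be the first column
    # of cell_count.csv; it is named "img" in types.csv).
    merged = cell_counts.merge(
        tissue_types,
        left_on=cell_counts.columns[0],
        right_on="img",
        how="inner",
    )

    print(merged.groupby("type").size())  # patches per tissue type

Joining on the image name is the natural design here, since both tables are keyed by the patch filename within a fold.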
docs/datasets/PanNuke/fold1/types.csv ADDED
@@ -0,0 +1,2524 @@
+ img,type
+ 1_0.png,Breast
⋮
+ 1_676.png,Breast
+ 1_677.png,Colon
⋮
+ 1_770.png,Colon
+ 1_771.png,Lung
⋮
+ 1_779.png,Lung
+ 1_780.png,Breast
⋮
+ 1_793.png,Breast
+ 1_794.png,Colon
⋮
+ 1_797.png,Colon
+ 1_798.png,Kidney
⋮
+ 1_812.png,Kidney
+ 1_813.png,Prostate
+ 1_814.png,Prostate
+ 1_815.png,Bladder
⋮
+ 1_819.png,Bladder
+ 1_820.png,Breast
⋮
+ 1_823.png,Breast
+ 1_824.png,Bladder
⋮
+ 1_828.png,Bladder
+ 1_829.png,Prostate
+ 1_830.png,Prostate
+ 1_831.png,Prostate
+ 1_832.png,Kidney
⋮
+ 1_844.png,Kidney
+ 1_845.png,Stomach
⋮
+ 1_848.png,Stomach
+ 1_849.png,Colon
⋮
+ 1_854.png,Colon
+ 1_855.png,Stomach
⋮
+ 1_859.png,Stomach
+ 1_860.png,Ovarian
⋮
+ 1_864.png,Ovarian
+ 1_865.png,Esophagus
⋮
+ 1_892.png,Esophagus
+ 1_893.png,Pancreatic
+ 1_894.png,Pancreatic
+ 1_895.png,Lung
⋮
+ 1_929.png,Lung
+ 1_930.png,Uterus
+ 1_931.png,Uterus
+ 1_932.png,Uterus
+ 1_933.png,Thyroid
⋮
+ 1_951.png,Thyroid
+ 1_952.png,Skin
⋮
+ 1_967.png,Skin
+ 1_968.png,Cervix
⋮
+ 1_971.png,Cervix
+ 1_972.png,Thyroid
⋮
+ 1_989.png,Thyroid
+ 1_990.png,Esophagus
⋮
+ 1_1022.png,Esophagus
+ 1_1023.png,Cervix
⋮
+ 1_1030.png,Cervix
+ 1_1031.png,Adrenal_gland
⋮
+ 1_1074.png,Adrenal_gland
+ 1_1075.png,Esophagus
⋮
+ 1_1107.png,Esophagus
+ 1_1108.png,Adrenal_gland
⋮
+ 1_1123.png,Adrenal_gland
+ 1_1124.png,Pancreatic
⋮
+ 1_1149.png,Pancreatic
+ 1_1150.png,Adrenal_gland
⋮
+ 1_1170.png,Adrenal_gland
+ 1_1171.png,Cervix
⋮
+ 1_1193.png,Cervix
+ 1_1194.png,Bile-duct
⋮
+ 1_1212.png,Bile-duct
+ 1_1213.png,Testis
⋮
+ 1_1264.png,Testis
+ 1_1265.png,Bile-duct
⋮
+ 1_1294.png,Bile-duct
+ 1_1295.png,Colon
⋮
+ 1_1331.png,Colon
+ 1_1332.png,Adrenal_gland
⋮
+ 1_1398.png,Adrenal_gland
+ 1_1399.png,Bile-duct
⋮
+ 1_1452.png,Bile-duct
+ 1_1453.png,Bladder
⋮
+ 1_1503.png,Bladder
+ 1_1504.png,Breast
⋮
+ 1_1557.png,Breast
+ 1_1558.png,Cervix
⋮
+ 1_1576.png,Cervix
+ 1_1577.png,Colon
⋮
+ 1_1810.png,Colon
+ 1_1811.png,Esophagus
⋮
+ 1_1844.png,Esophagus
+ 1_1845.png,HeadNeck
⋮
+ 1_1939.png,HeadNeck
1942
+ 1_1940.png,HeadNeck
1943
+ 1_1941.png,HeadNeck
1944
+ 1_1942.png,HeadNeck
1945
+ 1_1943.png,HeadNeck
1946
+ 1_1944.png,HeadNeck
1947
+ 1_1945.png,HeadNeck
1948
+ 1_1946.png,HeadNeck
1949
+ 1_1947.png,HeadNeck
1950
+ 1_1948.png,HeadNeck
1951
+ 1_1949.png,HeadNeck
1952
+ 1_1950.png,HeadNeck
1953
+ 1_1951.png,HeadNeck
1954
+ 1_1952.png,HeadNeck
1955
+ 1_1953.png,HeadNeck
1956
+ 1_1954.png,HeadNeck
1957
+ 1_1955.png,HeadNeck
1958
+ 1_1956.png,HeadNeck
1959
+ 1_1957.png,HeadNeck
1960
+ 1_1958.png,HeadNeck
1961
+ 1_1959.png,HeadNeck
1962
+ 1_1960.png,HeadNeck
1963
+ 1_1961.png,HeadNeck
1964
+ 1_1962.png,HeadNeck
1965
+ 1_1963.png,HeadNeck
1966
+ 1_1964.png,HeadNeck
1967
+ 1_1965.png,HeadNeck
1968
+ 1_1966.png,HeadNeck
1969
+ 1_1967.png,HeadNeck
1970
+ 1_1968.png,HeadNeck
1971
+ 1_1969.png,HeadNeck
1972
+ 1_1970.png,HeadNeck
1973
+ 1_1971.png,HeadNeck
1974
+ 1_1972.png,HeadNeck
1975
+ 1_1973.png,HeadNeck
1976
+ 1_1974.png,HeadNeck
1977
+ 1_1975.png,HeadNeck
1978
+ 1_1976.png,HeadNeck
1979
+ 1_1977.png,HeadNeck
1980
+ 1_1978.png,HeadNeck
1981
+ 1_1979.png,HeadNeck
1982
+ 1_1980.png,HeadNeck
1983
+ 1_1981.png,HeadNeck
1984
+ 1_1982.png,HeadNeck
1985
+ 1_1983.png,HeadNeck
1986
+ 1_1984.png,HeadNeck
1987
+ 1_1985.png,HeadNeck
1988
+ 1_1986.png,HeadNeck
1989
+ 1_1987.png,HeadNeck
1990
+ 1_1988.png,HeadNeck
1991
+ 1_1989.png,HeadNeck
1992
+ 1_1990.png,HeadNeck
1993
+ 1_1991.png,HeadNeck
1994
+ 1_1992.png,HeadNeck
1995
+ 1_1993.png,HeadNeck
1996
+ 1_1994.png,HeadNeck
1997
+ 1_1995.png,HeadNeck
1998
+ 1_1996.png,HeadNeck
1999
+ 1_1997.png,HeadNeck
2000
+ 1_1998.png,HeadNeck
2001
+ 1_1999.png,HeadNeck
2002
+ 1_2000.png,HeadNeck
2003
+ 1_2001.png,HeadNeck
2004
+ 1_2002.png,HeadNeck
2005
+ 1_2003.png,HeadNeck
2006
+ 1_2004.png,HeadNeck
2007
+ 1_2005.png,HeadNeck
2008
+ 1_2006.png,HeadNeck
2009
+ 1_2007.png,HeadNeck
2010
+ 1_2008.png,HeadNeck
2011
+ 1_2009.png,HeadNeck
2012
+ 1_2010.png,HeadNeck
2013
+ 1_2011.png,HeadNeck
2014
+ 1_2012.png,HeadNeck
2015
+ 1_2013.png,HeadNeck
2016
+ 1_2014.png,Kidney
2017
+ 1_2015.png,Kidney
2018
+ 1_2016.png,Kidney
2019
+ 1_2017.png,Kidney
2020
+ 1_2018.png,Kidney
2021
+ 1_2019.png,Kidney
2022
+ 1_2020.png,Kidney
2023
+ 1_2021.png,Kidney
2024
+ 1_2022.png,Kidney
2025
+ 1_2023.png,Kidney
2026
+ 1_2024.png,Kidney
2027
+ 1_2025.png,Kidney
2028
+ 1_2026.png,Kidney
2029
+ 1_2027.png,Kidney
2030
+ 1_2028.png,Kidney
2031
+ 1_2029.png,Kidney
2032
+ 1_2030.png,Kidney
2033
+ 1_2031.png,Kidney
2034
+ 1_2032.png,Kidney
2035
+ 1_2033.png,Kidney
2036
+ 1_2034.png,Kidney
2037
+ 1_2035.png,Kidney
2038
+ 1_2036.png,Kidney
2039
+ 1_2037.png,Kidney
2040
+ 1_2038.png,Kidney
2041
+ 1_2039.png,Kidney
2042
+ 1_2040.png,Kidney
2043
+ 1_2041.png,Kidney
2044
+ 1_2042.png,Liver
2045
+ 1_2043.png,Liver
2046
+ 1_2044.png,Liver
2047
+ 1_2045.png,Liver
2048
+ 1_2046.png,Liver
2049
+ 1_2047.png,Liver
2050
+ 1_2048.png,Liver
2051
+ 1_2049.png,Liver
2052
+ 1_2050.png,Liver
2053
+ 1_2051.png,Liver
2054
+ 1_2052.png,Liver
2055
+ 1_2053.png,Liver
2056
+ 1_2054.png,Liver
2057
+ 1_2055.png,Liver
2058
+ 1_2056.png,Liver
2059
+ 1_2057.png,Liver
2060
+ 1_2058.png,Liver
2061
+ 1_2059.png,Liver
2062
+ 1_2060.png,Liver
2063
+ 1_2061.png,Liver
2064
+ 1_2062.png,Liver
2065
+ 1_2063.png,Liver
2066
+ 1_2064.png,Liver
2067
+ 1_2065.png,Liver
2068
+ 1_2066.png,Liver
2069
+ 1_2067.png,Liver
2070
+ 1_2068.png,Liver
2071
+ 1_2069.png,Liver
2072
+ 1_2070.png,Liver
2073
+ 1_2071.png,Liver
2074
+ 1_2072.png,Liver
2075
+ 1_2073.png,Liver
2076
+ 1_2074.png,Liver
2077
+ 1_2075.png,Liver
2078
+ 1_2076.png,Liver
2079
+ 1_2077.png,Liver
2080
+ 1_2078.png,Liver
2081
+ 1_2079.png,Liver
2082
+ 1_2080.png,Liver
2083
+ 1_2081.png,Liver
2084
+ 1_2082.png,Liver
2085
+ 1_2083.png,Liver
2086
+ 1_2084.png,Liver
2087
+ 1_2085.png,Liver
2088
+ 1_2086.png,Liver
2089
+ 1_2087.png,Liver
2090
+ 1_2088.png,Liver
2091
+ 1_2089.png,Liver
2092
+ 1_2090.png,Liver
2093
+ 1_2091.png,Liver
2094
+ 1_2092.png,Liver
2095
+ 1_2093.png,Liver
2096
+ 1_2094.png,Liver
2097
+ 1_2095.png,Liver
2098
+ 1_2096.png,Liver
2099
+ 1_2097.png,Liver
2100
+ 1_2098.png,Liver
2101
+ 1_2099.png,Liver
2102
+ 1_2100.png,Liver
2103
+ 1_2101.png,Liver
2104
+ 1_2102.png,Liver
2105
+ 1_2103.png,Liver
2106
+ 1_2104.png,Liver
2107
+ 1_2105.png,Liver
2108
+ 1_2106.png,Liver
2109
+ 1_2107.png,Liver
2110
+ 1_2108.png,Lung
2111
+ 1_2109.png,Lung
2112
+ 1_2110.png,Lung
2113
+ 1_2111.png,Lung
2114
+ 1_2112.png,Lung
2115
+ 1_2113.png,Lung
2116
+ 1_2114.png,Lung
2117
+ 1_2115.png,Lung
2118
+ 1_2116.png,Lung
2119
+ 1_2117.png,Ovarian
2120
+ 1_2118.png,Ovarian
2121
+ 1_2119.png,Ovarian
2122
+ 1_2120.png,Ovarian
2123
+ 1_2121.png,Ovarian
2124
+ 1_2122.png,Ovarian
2125
+ 1_2123.png,Ovarian
2126
+ 1_2124.png,Ovarian
2127
+ 1_2125.png,Ovarian
2128
+ 1_2126.png,Ovarian
2129
+ 1_2127.png,Ovarian
2130
+ 1_2128.png,Ovarian
2131
+ 1_2129.png,Ovarian
2132
+ 1_2130.png,Ovarian
2133
+ 1_2131.png,Ovarian
2134
+ 1_2132.png,Ovarian
2135
+ 1_2133.png,Ovarian
2136
+ 1_2134.png,Ovarian
2137
+ 1_2135.png,Ovarian
2138
+ 1_2136.png,Ovarian
2139
+ 1_2137.png,Ovarian
2140
+ 1_2138.png,Ovarian
2141
+ 1_2139.png,Ovarian
2142
+ 1_2140.png,Ovarian
2143
+ 1_2141.png,Ovarian
2144
+ 1_2142.png,Ovarian
2145
+ 1_2143.png,Ovarian
2146
+ 1_2144.png,Ovarian
2147
+ 1_2145.png,Ovarian
2148
+ 1_2146.png,Ovarian
2149
+ 1_2147.png,Ovarian
2150
+ 1_2148.png,Ovarian
2151
+ 1_2149.png,Ovarian
2152
+ 1_2150.png,Ovarian
2153
+ 1_2151.png,Ovarian
2154
+ 1_2152.png,Pancreatic
2155
+ 1_2153.png,Pancreatic
2156
+ 1_2154.png,Pancreatic
2157
+ 1_2155.png,Pancreatic
2158
+ 1_2156.png,Pancreatic
2159
+ 1_2157.png,Pancreatic
2160
+ 1_2158.png,Pancreatic
2161
+ 1_2159.png,Pancreatic
2162
+ 1_2160.png,Pancreatic
2163
+ 1_2161.png,Pancreatic
2164
+ 1_2162.png,Pancreatic
2165
+ 1_2163.png,Pancreatic
2166
+ 1_2164.png,Pancreatic
2167
+ 1_2165.png,Pancreatic
2168
+ 1_2166.png,Pancreatic
2169
+ 1_2167.png,Pancreatic
2170
+ 1_2168.png,Pancreatic
2171
+ 1_2169.png,Pancreatic
2172
+ 1_2170.png,Pancreatic
2173
+ 1_2171.png,Pancreatic
2174
+ 1_2172.png,Pancreatic
2175
+ 1_2173.png,Pancreatic
2176
+ 1_2174.png,Pancreatic
2177
+ 1_2175.png,Pancreatic
2178
+ 1_2176.png,Pancreatic
2179
+ 1_2177.png,Pancreatic
2180
+ 1_2178.png,Pancreatic
2181
+ 1_2179.png,Pancreatic
2182
+ 1_2180.png,Pancreatic
2183
+ 1_2181.png,Pancreatic
2184
+ 1_2182.png,Pancreatic
2185
+ 1_2183.png,Pancreatic
2186
+ 1_2184.png,Pancreatic
2187
+ 1_2185.png,Pancreatic
2188
+ 1_2186.png,Pancreatic
2189
+ 1_2187.png,Pancreatic
2190
+ 1_2188.png,Pancreatic
2191
+ 1_2189.png,Pancreatic
2192
+ 1_2190.png,Pancreatic
2193
+ 1_2191.png,Pancreatic
2194
+ 1_2192.png,Pancreatic
2195
+ 1_2193.png,Pancreatic
2196
+ 1_2194.png,Pancreatic
2197
+ 1_2195.png,Pancreatic
2198
+ 1_2196.png,Pancreatic
2199
+ 1_2197.png,Pancreatic
2200
+ 1_2198.png,Pancreatic
2201
+ 1_2199.png,Pancreatic
2202
+ 1_2200.png,Pancreatic
2203
+ 1_2201.png,Pancreatic
2204
+ 1_2202.png,Pancreatic
2205
+ 1_2203.png,Pancreatic
2206
+ 1_2204.png,Pancreatic
2207
+ 1_2205.png,Pancreatic
2208
+ 1_2206.png,Pancreatic
2209
+ 1_2207.png,Pancreatic
2210
+ 1_2208.png,Pancreatic
2211
+ 1_2209.png,Pancreatic
2212
+ 1_2210.png,Pancreatic
2213
+ 1_2211.png,Pancreatic
2214
+ 1_2212.png,Pancreatic
2215
+ 1_2213.png,Prostate
2216
+ 1_2214.png,Prostate
2217
+ 1_2215.png,Prostate
2218
+ 1_2216.png,Prostate
2219
+ 1_2217.png,Prostate
2220
+ 1_2218.png,Prostate
2221
+ 1_2219.png,Prostate
2222
+ 1_2220.png,Prostate
2223
+ 1_2221.png,Prostate
2224
+ 1_2222.png,Prostate
2225
+ 1_2223.png,Prostate
2226
+ 1_2224.png,Prostate
2227
+ 1_2225.png,Prostate
2228
+ 1_2226.png,Prostate
2229
+ 1_2227.png,Prostate
2230
+ 1_2228.png,Prostate
2231
+ 1_2229.png,Prostate
2232
+ 1_2230.png,Prostate
2233
+ 1_2231.png,Prostate
2234
+ 1_2232.png,Prostate
2235
+ 1_2233.png,Prostate
2236
+ 1_2234.png,Prostate
2237
+ 1_2235.png,Prostate
2238
+ 1_2236.png,Prostate
2239
+ 1_2237.png,Prostate
2240
+ 1_2238.png,Prostate
2241
+ 1_2239.png,Prostate
2242
+ 1_2240.png,Prostate
2243
+ 1_2241.png,Prostate
2244
+ 1_2242.png,Prostate
2245
+ 1_2243.png,Prostate
2246
+ 1_2244.png,Prostate
2247
+ 1_2245.png,Skin
2248
+ 1_2246.png,Skin
2249
+ 1_2247.png,Skin
2250
+ 1_2248.png,Skin
2251
+ 1_2249.png,Skin
2252
+ 1_2250.png,Skin
2253
+ 1_2251.png,Skin
2254
+ 1_2252.png,Skin
2255
+ 1_2253.png,Skin
2256
+ 1_2254.png,Skin
2257
+ 1_2255.png,Skin
2258
+ 1_2256.png,Skin
2259
+ 1_2257.png,Skin
2260
+ 1_2258.png,Skin
2261
+ 1_2259.png,Skin
2262
+ 1_2260.png,Skin
2263
+ 1_2261.png,Skin
2264
+ 1_2262.png,Skin
2265
+ 1_2263.png,Skin
2266
+ 1_2264.png,Skin
2267
+ 1_2265.png,Skin
2268
+ 1_2266.png,Skin
2269
+ 1_2267.png,Skin
2270
+ 1_2268.png,Skin
2271
+ 1_2269.png,Skin
2272
+ 1_2270.png,Skin
2273
+ 1_2271.png,Skin
2274
+ 1_2272.png,Skin
2275
+ 1_2273.png,Skin
2276
+ 1_2274.png,Skin
2277
+ 1_2275.png,Skin
2278
+ 1_2276.png,Skin
2279
+ 1_2277.png,Skin
2280
+ 1_2278.png,Skin
2281
+ 1_2279.png,Skin
2282
+ 1_2280.png,Skin
2283
+ 1_2281.png,Skin
2284
+ 1_2282.png,Skin
2285
+ 1_2283.png,Skin
2286
+ 1_2284.png,Skin
2287
+ 1_2285.png,Skin
2288
+ 1_2286.png,Skin
2289
+ 1_2287.png,Skin
2290
+ 1_2288.png,Skin
2291
+ 1_2289.png,Skin
2292
+ 1_2290.png,Skin
2293
+ 1_2291.png,Skin
2294
+ 1_2292.png,Skin
2295
+ 1_2293.png,Skin
2296
+ 1_2294.png,Skin
2297
+ 1_2295.png,Skin
2298
+ 1_2296.png,Skin
2299
+ 1_2297.png,Skin
2300
+ 1_2298.png,Skin
2301
+ 1_2299.png,Skin
2302
+ 1_2300.png,Skin
2303
+ 1_2301.png,Skin
2304
+ 1_2302.png,Skin
2305
+ 1_2303.png,Skin
2306
+ 1_2304.png,Skin
2307
+ 1_2305.png,Skin
2308
+ 1_2306.png,Skin
2309
+ 1_2307.png,Skin
2310
+ 1_2308.png,Skin
2311
+ 1_2309.png,Skin
2312
+ 1_2310.png,Skin
2313
+ 1_2311.png,Skin
2314
+ 1_2312.png,Skin
2315
+ 1_2313.png,Skin
2316
+ 1_2314.png,Skin
2317
+ 1_2315.png,Skin
2318
+ 1_2316.png,Stomach
2319
+ 1_2317.png,Stomach
2320
+ 1_2318.png,Stomach
2321
+ 1_2319.png,Stomach
2322
+ 1_2320.png,Stomach
2323
+ 1_2321.png,Stomach
2324
+ 1_2322.png,Stomach
2325
+ 1_2323.png,Stomach
2326
+ 1_2324.png,Stomach
2327
+ 1_2325.png,Stomach
2328
+ 1_2326.png,Stomach
2329
+ 1_2327.png,Stomach
2330
+ 1_2328.png,Stomach
2331
+ 1_2329.png,Stomach
2332
+ 1_2330.png,Stomach
2333
+ 1_2331.png,Stomach
2334
+ 1_2332.png,Stomach
2335
+ 1_2333.png,Stomach
2336
+ 1_2334.png,Stomach
2337
+ 1_2335.png,Stomach
2338
+ 1_2336.png,Stomach
2339
+ 1_2337.png,Stomach
2340
+ 1_2338.png,Stomach
2341
+ 1_2339.png,Stomach
2342
+ 1_2340.png,Stomach
2343
+ 1_2341.png,Stomach
2344
+ 1_2342.png,Stomach
2345
+ 1_2343.png,Stomach
2346
+ 1_2344.png,Stomach
2347
+ 1_2345.png,Stomach
2348
+ 1_2346.png,Stomach
2349
+ 1_2347.png,Stomach
2350
+ 1_2348.png,Stomach
2351
+ 1_2349.png,Stomach
2352
+ 1_2350.png,Stomach
2353
+ 1_2351.png,Stomach
2354
+ 1_2352.png,Stomach
2355
+ 1_2353.png,Stomach
2356
+ 1_2354.png,Stomach
2357
+ 1_2355.png,Testis
2358
+ 1_2356.png,Testis
2359
+ 1_2357.png,Testis
2360
+ 1_2358.png,Testis
2361
+ 1_2359.png,Testis
2362
+ 1_2360.png,Testis
2363
+ 1_2361.png,Testis
2364
+ 1_2362.png,Testis
2365
+ 1_2363.png,Testis
2366
+ 1_2364.png,Testis
2367
+ 1_2365.png,Testis
2368
+ 1_2366.png,Testis
2369
+ 1_2367.png,Testis
2370
+ 1_2368.png,Testis
2371
+ 1_2369.png,Testis
2372
+ 1_2370.png,Testis
2373
+ 1_2371.png,Testis
2374
+ 1_2372.png,Testis
2375
+ 1_2373.png,Testis
2376
+ 1_2374.png,Testis
2377
+ 1_2375.png,Testis
2378
+ 1_2376.png,Testis
2379
+ 1_2377.png,Testis
2380
+ 1_2378.png,Testis
2381
+ 1_2379.png,Testis
2382
+ 1_2380.png,Testis
2383
+ 1_2381.png,Testis
2384
+ 1_2382.png,Thyroid
2385
+ 1_2383.png,Thyroid
2386
+ 1_2384.png,Thyroid
2387
+ 1_2385.png,Thyroid
2388
+ 1_2386.png,Thyroid
2389
+ 1_2387.png,Thyroid
2390
+ 1_2388.png,Thyroid
2391
+ 1_2389.png,Thyroid
2392
+ 1_2390.png,Thyroid
2393
+ 1_2391.png,Thyroid
2394
+ 1_2392.png,Thyroid
2395
+ 1_2393.png,Thyroid
2396
+ 1_2394.png,Thyroid
2397
+ 1_2395.png,Thyroid
2398
+ 1_2396.png,Thyroid
2399
+ 1_2397.png,Thyroid
2400
+ 1_2398.png,Thyroid
2401
+ 1_2399.png,Thyroid
2402
+ 1_2400.png,Thyroid
2403
+ 1_2401.png,Thyroid
2404
+ 1_2402.png,Thyroid
2405
+ 1_2403.png,Thyroid
2406
+ 1_2404.png,Thyroid
2407
+ 1_2405.png,Thyroid
2408
+ 1_2406.png,Thyroid
2409
+ 1_2407.png,Thyroid
2410
+ 1_2408.png,Thyroid
2411
+ 1_2409.png,Thyroid
2412
+ 1_2410.png,Thyroid
2413
+ 1_2411.png,Thyroid
2414
+ 1_2412.png,Thyroid
2415
+ 1_2413.png,Thyroid
2416
+ 1_2414.png,Thyroid
2417
+ 1_2415.png,Thyroid
2418
+ 1_2416.png,Thyroid
2419
+ 1_2417.png,Thyroid
2420
+ 1_2418.png,Thyroid
2421
+ 1_2419.png,Thyroid
2422
+ 1_2420.png,Thyroid
2423
+ 1_2421.png,Thyroid
2424
+ 1_2422.png,Thyroid
2425
+ 1_2423.png,Thyroid
2426
+ 1_2424.png,Thyroid
2427
+ 1_2425.png,Thyroid
2428
+ 1_2426.png,Thyroid
2429
+ 1_2427.png,Thyroid
2430
+ 1_2428.png,Thyroid
2431
+ 1_2429.png,Uterus
2432
+ 1_2430.png,Uterus
2433
+ 1_2431.png,Uterus
2434
+ 1_2432.png,Colon
2435
+ 1_2433.png,Colon
2436
+ 1_2434.png,Colon
2437
+ 1_2435.png,Colon
2438
+ 1_2436.png,Colon
2439
+ 1_2437.png,Colon
2440
+ 1_2438.png,Colon
2441
+ 1_2439.png,Colon
2442
+ 1_2440.png,Colon
2443
+ 1_2441.png,Colon
2444
+ 1_2442.png,Colon
2445
+ 1_2443.png,Colon
2446
+ 1_2444.png,Colon
2447
+ 1_2445.png,Colon
2448
+ 1_2446.png,Colon
2449
+ 1_2447.png,Colon
2450
+ 1_2448.png,Colon
2451
+ 1_2449.png,Colon
2452
+ 1_2450.png,Colon
2453
+ 1_2451.png,Colon
2454
+ 1_2452.png,Colon
2455
+ 1_2453.png,Colon
2456
+ 1_2454.png,Colon
2457
+ 1_2455.png,Colon
2458
+ 1_2456.png,Colon
2459
+ 1_2457.png,Colon
2460
+ 1_2458.png,Colon
2461
+ 1_2459.png,Colon
2462
+ 1_2460.png,Colon
2463
+ 1_2461.png,Colon
2464
+ 1_2462.png,Colon
2465
+ 1_2463.png,Colon
2466
+ 1_2464.png,Colon
2467
+ 1_2465.png,Colon
2468
+ 1_2466.png,Colon
2469
+ 1_2467.png,Colon
2470
+ 1_2468.png,Colon
2471
+ 1_2469.png,Colon
2472
+ 1_2470.png,Colon
2473
+ 1_2471.png,Colon
2474
+ 1_2472.png,Colon
2475
+ 1_2473.png,Colon
2476
+ 1_2474.png,Colon
2477
+ 1_2475.png,Colon
2478
+ 1_2476.png,Colon
2479
+ 1_2477.png,Colon
2480
+ 1_2478.png,Colon
2481
+ 1_2479.png,Colon
2482
+ 1_2480.png,Colon
2483
+ 1_2481.png,Colon
2484
+ 1_2482.png,Colon
2485
+ 1_2483.png,Colon
2486
+ 1_2484.png,Colon
2487
+ 1_2485.png,Colon
2488
+ 1_2486.png,Colon
2489
+ 1_2487.png,Colon
2490
+ 1_2488.png,Colon
2491
+ 1_2489.png,Colon
2492
+ 1_2490.png,Colon
2493
+ 1_2491.png,Colon
2494
+ 1_2492.png,Colon
2495
+ 1_2493.png,Colon
2496
+ 1_2494.png,Colon
2497
+ 1_2495.png,Colon
2498
+ 1_2496.png,Colon
2499
+ 1_2497.png,Colon
2500
+ 1_2498.png,Colon
2501
+ 1_2499.png,Colon
2502
+ 1_2500.png,Colon
2503
+ 1_2501.png,Colon
2504
+ 1_2502.png,Colon
2505
+ 1_2503.png,Colon
2506
+ 1_2504.png,Colon
2507
+ 1_2505.png,Colon
2508
+ 1_2506.png,Colon
2509
+ 1_2507.png,Colon
2510
+ 1_2508.png,Colon
2511
+ 1_2509.png,Colon
2512
+ 1_2510.png,Colon
2513
+ 1_2511.png,Colon
2514
+ 1_2512.png,Colon
2515
+ 1_2513.png,Colon
2516
+ 1_2514.png,Colon
2517
+ 1_2515.png,Colon
2518
+ 1_2516.png,Colon
2519
+ 1_2517.png,Colon
2520
+ 1_2518.png,Colon
2521
+ 1_2519.png,Colon
2522
+ 1_2520.png,Colon
2523
+ 1_2521.png,Colon
2524
+ 1_2522.png,Colon