
Reference for ultralytics/models/yolo/segment/val.py

Note

This file is available at https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/yolo/segment/val.py. If you spot a problem, please help fix it by contributing a Pull Request 🛠️. Thank you 🙏!


ultralytics.models.yolo.segment.val.SegmentationValidator

SegmentationValidator(
    dataloader=None, save_dir=None, args=None, _callbacks=None
)

Bases: DetectionValidator

A class extending the DetectionValidator class for validation based on a segmentation model.

This validator handles the evaluation of segmentation models, processing both bounding box and mask predictions to compute metrics such as mAP for both detection and segmentation tasks.

Attributes:

| Name | Type | Description |
| --- | --- | --- |
| plot_masks | list | List to store masks for plotting. |
| process | callable | Function to process masks based on save_json and save_txt flags. |
| args | namespace | Arguments for the validator. |
| metrics | SegmentMetrics | Metrics calculator for segmentation tasks. |
| stats | dict | Dictionary to store statistics during validation. |

Examples:

>>> from ultralytics.models.yolo.segment import SegmentationValidator
>>> args = dict(model="yolo11n-seg.pt", data="coco8-seg.yaml")
>>> validator = SegmentationValidator(args=args)
>>> validator()
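
The validator is normally created and run for you when a segmentation model is validated through the high-level API. A minimal sketch of that route, assuming a local yolo11n-seg.pt checkpoint and the bundled coco8-seg.yaml dataset (metric attribute names follow SegmentMetrics):

>>> from ultralytics import YOLO
>>> model = YOLO("yolo11n-seg.pt")
>>> metrics = model.val(data="coco8-seg.yaml")  # runs SegmentationValidator internally
>>> metrics.box.map, metrics.seg.map  # box and mask mAP50-95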

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| dataloader | DataLoader | Dataloader to use for validation. | None |
| save_dir | Path | Directory to save results. | None |
| args | namespace | Arguments for the validator. | None |
| _callbacks | list | List of callback functions. | None |
Source code in ultralytics/models/yolo/segment/val.py
def __init__(self, dataloader=None, save_dir=None, args=None, _callbacks=None) -> None:
    """
    Initialize SegmentationValidator and set task to 'segment', metrics to SegmentMetrics.

    Args:
        dataloader (torch.utils.data.DataLoader, optional): Dataloader to use for validation.
        save_dir (Path, optional): Directory to save results.
        args (namespace, optional): Arguments for the validator.
        _callbacks (list, optional): List of callback functions.
    """
    super().__init__(dataloader, save_dir, args, _callbacks)
    self.process = None
    self.args.task = "segment"
    self.metrics = SegmentMetrics()

eval_json

eval_json(stats: Dict[str, Any]) -> Dict[str, Any]

Return COCO-style instance segmentation evaluation metrics.

Source code in ultralytics/models/yolo/segment/val.py
def eval_json(self, stats: Dict[str, Any]) -> Dict[str, Any]:
    """Return COCO-style instance segmentation evaluation metrics."""
    pred_json = self.save_dir / "predictions.json"  # predictions
    anno_json = (
        self.data["path"]
        / "annotations"
        / ("instances_val2017.json" if self.is_coco else f"lvis_v1_{self.args.split}.json")
    )  # annotations
    return super().coco_evaluate(stats, pred_json, anno_json, ["bbox", "segm"], suffix=["Box", "Mask"])
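
eval_json runs at the end of validation when save_json is enabled; the COCO/LVIS bbox and segm evaluation only applies when the official COCO or LVIS splits and their annotation JSONs are available, otherwise the stats pass through unchanged. A minimal sketch:

>>> from ultralytics.models.yolo.segment import SegmentationValidator
>>> args = dict(model="yolo11n-seg.pt", data="coco8-seg.yaml", save_json=True)
>>> validator = SegmentationValidator(args=args)
>>> stats = validator()  # writes predictions.json, then eval_json() is called on the collected stats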

get_desc

get_desc() -> str

Return a formatted description of evaluation metrics.

Source code in ultralytics/models/yolo/segment/val.py
def get_desc(self) -> str:
    """Return a formatted description of evaluation metrics."""
    return ("%22s" + "%11s" * 10) % (
        "Class",
        "Images",
        "Instances",
        "Box(P",
        "R",
        "mAP50",
        "mAP50-95)",
        "Mask(P",
        "R",
        "mAP50",
        "mAP50-95)",
    )
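
The returned string is the console header printed above the per-class results table. A quick way to see it, reusing the args dict from the class example:

>>> from ultralytics.models.yolo.segment import SegmentationValidator
>>> validator = SegmentationValidator(args=dict(model="yolo11n-seg.pt", data="coco8-seg.yaml"))
>>> print(validator.get_desc())  # Class, Images, Instances, then Box(P, R, mAP50, mAP50-95) and Mask(P, R, mAP50, mAP50-95)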

init_metrics

init_metrics(model: Module) -> None

Initialize metrics and select mask processing function based on save_json flag.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| model | Module | Model to validate. | required |
Source code in ultralytics/models/yolo/segment/val.py
def init_metrics(self, model: torch.nn.Module) -> None:
    """
    Initialize metrics and select mask processing function based on save_json flag.

    Args:
        model (torch.nn.Module): Model to validate.
    """
    super().init_metrics(model)
    if self.args.save_json:
        check_requirements("faster-coco-eval>=1.6.7")
    # More accurate vs faster
    self.process = ops.process_mask_native if self.args.save_json or self.args.save_txt else ops.process_mask
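
Both processing functions live in ultralytics.utils.ops: process_mask crops masks at prototype resolution and is faster, while process_mask_native upsamples to the network input size first and is selected here whenever full-resolution masks are needed for save_json or save_txt. A minimal sketch of the native path with random tensors (shapes are illustrative, assuming a 640x640 input with 32 prototypes at 160x160):

>>> import torch
>>> from ultralytics.utils import ops
>>> proto = torch.rand(32, 160, 160)  # prototype masks for one image
>>> coeff = torch.rand(5, 32)  # mask coefficients for 5 detections
>>> boxes = torch.tensor([[10.0, 10.0, 200.0, 200.0]]).repeat(5, 1)  # xyxy boxes in input-image coordinates
>>> masks = ops.process_mask_native(proto, coeff, boxes, shape=(640, 640))  # -> (5, 640, 640) masks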

plot_predictions

plot_predictions(
    batch: Dict[str, Any], preds: List[Dict[str, Tensor]], ni: int
) -> None

Plot batch predictions with masks and bounding boxes.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| batch | Dict[str, Any] | Batch containing images and annotations. | required |
| preds | List[Dict[str, Tensor]] | List of predictions from the model. | required |
| ni | int | Batch index. | required |
Source code in ultralytics/models/yolo/segment/val.py
def plot_predictions(self, batch: Dict[str, Any], preds: List[Dict[str, torch.Tensor]], ni: int) -> None:
    """
    Plot batch predictions with masks and bounding boxes.

    Args:
        batch (Dict[str, Any]): Batch containing images and annotations.
        preds (List[Dict[str, torch.Tensor]]): List of predictions from the model.
        ni (int): Batch index.
    """
    for p in preds:
        masks = p["masks"]
        if masks.shape[0] > 50:
            LOGGER.warning("Limiting validation plots to first 50 items per image for speed...")
        p["masks"] = torch.as_tensor(masks[:50], dtype=torch.uint8).cpu()
    super().plot_predictions(batch, preds, ni, max_det=50)  # plot bboxes

postprocess

postprocess(preds: List[Tensor]) -> List[Dict[str, torch.Tensor]]

Post-process YOLO predictions and return output detections with proto.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| preds | List[Tensor] | Raw predictions from the model. | required |

Returns:

| Type | Description |
| --- | --- |
| List[Dict[str, Tensor]] | Processed detection predictions with masks. |

Source code in ultralytics/models/yolo/segment/val.py
def postprocess(self, preds: List[torch.Tensor]) -> List[Dict[str, torch.Tensor]]:
    """
    Post-process YOLO predictions and return output detections with proto.

    Args:
        preds (List[torch.Tensor]): Raw predictions from the model.

    Returns:
        List[Dict[str, torch.Tensor]]: Processed detection predictions with masks.
    """
    proto = preds[1][-1] if len(preds[1]) == 3 else preds[1]  # second output is len 3 if pt, but only 1 if exported
    preds = super().postprocess(preds[0])
    imgsz = [4 * x for x in proto.shape[2:]]  # get image size from proto
    for i, pred in enumerate(preds):
        coefficient = pred.pop("extra")
        pred["masks"] = (
            self.process(proto[i], coefficient, pred["bboxes"], shape=imgsz)
            if len(coefficient)
            else torch.zeros(
                (0, *(imgsz if self.process is ops.process_mask_native else proto.shape[2:])),
                dtype=torch.uint8,
                device=pred["bboxes"].device,
            )
        )
    return preds
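
During a normal run this is called inside the validation loop, so the sketch below only illustrates the structure involved; raw_preds is a placeholder for the model's raw output, whose second element is (per the source above) either a length-3 tuple ending in the prototype tensor (PyTorch model) or the prototype tensor itself (exported model):

>>> out = validator.postprocess(raw_preds)  # raw_preds is a placeholder, not a real variable here
>>> out[0].keys()  # per-image dict with at least 'bboxes', 'conf', 'cls', and 'masks' tensors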

pred_to_json

pred_to_json(predn: Dict[str, Tensor], pbatch: Dict[str, Any]) -> None

Save one JSON result for COCO evaluation.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| predn | Dict[str, Tensor] | Predictions containing bboxes, masks, confidence scores, and classes. | required |
| pbatch | Dict[str, Any] | Batch dictionary containing 'imgsz', 'ori_shape', 'ratio_pad', and 'im_file'. | required |

Examples:

>>> result = {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
Source code in ultralytics/models/yolo/segment/val.py
def pred_to_json(self, predn: Dict[str, torch.Tensor], pbatch: Dict[str, Any]) -> None:
    """
    Save one JSON result for COCO evaluation.

    Args:
        predn (Dict[str, torch.Tensor]): Predictions containing bboxes, masks, confidence scores, and classes.
        pbatch (Dict[str, Any]): Batch dictionary containing 'imgsz', 'ori_shape', 'ratio_pad', and 'im_file'.

    Examples:
         >>> result = {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
    """
    from faster_coco_eval.core.mask import encode  # noqa

    def single_encode(x):
        """Encode predicted masks as RLE and append results to jdict."""
        rle = encode(np.asarray(x[:, :, None], order="F", dtype="uint8"))[0]
        rle["counts"] = rle["counts"].decode("utf-8")
        return rle

    coco_masks = torch.as_tensor(predn["masks"], dtype=torch.uint8)
    coco_masks = ops.scale_image(
        coco_masks.permute(1, 2, 0).contiguous().cpu().numpy(),
        pbatch["ori_shape"],
        ratio_pad=pbatch["ratio_pad"],
    )
    pred_masks = np.transpose(coco_masks, (2, 0, 1))
    with ThreadPool(NUM_THREADS) as pool:
        rles = pool.map(single_encode, pred_masks)
    super().pred_to_json(predn, pbatch)
    for i, r in enumerate(rles):
        self.jdict[-len(rles) + i]["segmentation"] = r  # segmentation
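
Each appended record extends the detection-style entry shown in the Examples with a COCO run-length-encoded mask under the "segmentation" key. A hypothetical resulting entry (RLE counts elided):

>>> result = {
...     "image_id": 42,
...     "category_id": 18,
...     "bbox": [258.15, 41.29, 348.26, 243.78],
...     "score": 0.236,
...     "segmentation": {"size": [480, 640], "counts": "..."},  # size is [height, width]
... }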

preprocess

preprocess(batch: Dict[str, Any]) -> Dict[str, Any]

Preprocess batch of images for YOLO segmentation validation.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| batch | Dict[str, Any] | Batch containing images and annotations. | required |

Returns:

| Type | Description |
| --- | --- |
| Dict[str, Any] | Preprocessed batch. |

Source code in ultralytics/models/yolo/segment/val.py
def preprocess(self, batch: Dict[str, Any]) -> Dict[str, Any]:
    """
    Preprocess batch of images for YOLO segmentation validation.

    Args:
        batch (Dict[str, Any]): Batch containing images and annotations.

    Returns:
        (Dict[str, Any]): Preprocessed batch.
    """
    batch = super().preprocess(batch)
    batch["masks"] = batch["masks"].to(self.device).float()
    return batch

save_one_txt

save_one_txt(
    predn: Tensor, save_conf: bool, shape: Tuple[int, int], file: Path
) -> None

Save YOLO detections to a txt file in normalized coordinates in a specific format.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| predn | Tensor | Predictions in the format (x1, y1, x2, y2, conf, class). | required |
| save_conf | bool | Whether to save confidence scores. | required |
| shape | Tuple[int, int] | Shape of the original image. | required |
| file | Path | File path to save the detections. | required |
Source code in ultralytics/models/yolo/segment/val.py
def save_one_txt(self, predn: torch.Tensor, save_conf: bool, shape: Tuple[int, int], file: Path) -> None:
    """
    Save YOLO detections to a txt file in normalized coordinates in a specific format.

    Args:
        predn (torch.Tensor): Predictions in the format (x1, y1, x2, y2, conf, class).
        save_conf (bool): Whether to save confidence scores.
        shape (Tuple[int, int]): Shape of the original image.
        file (Path): File path to save the detections.
    """
    from ultralytics.engine.results import Results

    Results(
        np.zeros((shape[0], shape[1]), dtype=np.uint8),
        path=None,
        names=self.names,
        boxes=torch.cat([predn["bboxes"], predn["conf"].unsqueeze(-1), predn["cls"].unsqueeze(-1)], dim=1),
        masks=torch.as_tensor(predn["masks"], dtype=torch.uint8),
    ).save_txt(file, save_conf=save_conf)




