Add densepose converters
- densepose/converters/__init__.py +15 -0
- densepose/converters/base.py +93 -0
- densepose/converters/builtin.py +31 -0
- densepose/converters/chart_output_hflip.py +71 -0
- densepose/converters/chart_output_to_chart_result.py +188 -0
- densepose/converters/hflip.py +34 -0
- densepose/converters/segm_to_mask.py +150 -0
- densepose/converters/to_chart_result.py +70 -0
- densepose/converters/to_mask.py +49 -0
densepose/converters/__init__.py
ADDED
@@ -0,0 +1,15 @@
# Copyright (c) Facebook, Inc. and its affiliates.

from .hflip import HFlipConverter
from .to_mask import ToMaskConverter
from .to_chart_result import ToChartResultConverter, ToChartResultConverterWithConfidences
from .segm_to_mask import (
    predictor_output_with_fine_and_coarse_segm_to_mask,
    predictor_output_with_coarse_segm_to_mask,
    resample_fine_and_coarse_segm_to_bbox,
)
from .chart_output_to_chart_result import (
    densepose_chart_predictor_output_to_result,
    densepose_chart_predictor_output_to_result_with_confidences,
)
from .chart_output_hflip import densepose_chart_predictor_output_hflip
densepose/converters/base.py
ADDED
@@ -0,0 +1,93 @@
# Copyright (c) Facebook, Inc. and its affiliates.

from typing import Any, Tuple, Type
import torch


class BaseConverter:
    """
    Converter base class to be reused by various converters.
    Converter allows one to convert data from various source types to a particular
    destination type. Each source type needs to register its converter. The
    registration for each source type is valid for all descendants of that type.
    """

    @classmethod
    def register(cls, from_type: Type, converter: Any = None):
        """
        Registers a converter for the specified type.
        Can be used as a decorator (if converter is None), or called as a method.

        Args:
            from_type (type): type to register the converter for;
                all instances of this type will use the same converter
            converter (callable): converter to be registered for the given
                type; if None, this method is assumed to be a decorator for the converter
        """

        if converter is not None:
            cls._do_register(from_type, converter)

        def wrapper(converter: Any) -> Any:
            cls._do_register(from_type, converter)
            return converter

        return wrapper

    @classmethod
    def _do_register(cls, from_type: Type, converter: Any):
        cls.registry[from_type] = converter  # pyre-ignore[16]

    @classmethod
    def _lookup_converter(cls, from_type: Type) -> Any:
        """
        Perform recursive lookup for the given type
        to find registered converter. If a converter was found for some base
        class, it gets registered for this class to save on further lookups.

        Args:
            from_type: type for which to find a converter
        Return:
            callable or None - registered converter or None
                if no suitable entry was found in the registry
        """
        if from_type in cls.registry:  # pyre-ignore[16]
            return cls.registry[from_type]
        for base in from_type.__bases__:
            converter = cls._lookup_converter(base)
            if converter is not None:
                cls._do_register(from_type, converter)
                return converter
        return None

    @classmethod
    def convert(cls, instance: Any, *args, **kwargs):
        """
        Convert an instance to the destination type using some registered
        converter. Does recursive lookup for base classes, so there's no need
        for explicit registration for derived classes.

        Args:
            instance: source instance to convert to the destination type
        Return:
            An instance of the destination type obtained from the source instance
            Raises KeyError, if no suitable converter found
        """
        instance_type = type(instance)
        converter = cls._lookup_converter(instance_type)
        if converter is None:
            if cls.dst_type is None:  # pyre-ignore[16]
                output_type_str = "itself"
            else:
                output_type_str = cls.dst_type
            raise KeyError(f"Could not find converter from {instance_type} to {output_type_str}")
        return converter(instance, *args, **kwargs)


IntTupleBox = Tuple[int, int, int, int]


def make_int_box(box: torch.Tensor) -> IntTupleBox:
    int_box = [0, 0, 0, 0]
    int_box[0], int_box[1], int_box[2], int_box[3] = tuple(box.long().tolist())
    return int_box[0], int_box[1], int_box[2], int_box[3]
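
The registry mechanics above can be exercised standalone. A minimal sketch, assuming only the BaseConverter semantics shown above (the ToNameConverter/Animal/Cat names are hypothetical, not part of this commit):

from densepose.converters.base import BaseConverter

class ToNameConverter(BaseConverter):
    # each concrete converter carries its own registry and destination type
    registry = {}
    dst_type = str

class Animal:
    pass

class Cat(Animal):
    pass

# decorator form: register(from_type) with converter=None returns a wrapper
@ToNameConverter.register(Animal)
def animal_to_name(instance, *args, **kwargs):
    return type(instance).__name__

# Cat has no direct entry; _lookup_converter walks __bases__, finds the
# Animal entry, caches it for Cat, then dispatches
print(ToNameConverter.convert(Cat()))  # -> "Cat"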
densepose/converters/builtin.py
ADDED
@@ -0,0 +1,31 @@
# Copyright (c) Facebook, Inc. and its affiliates.

from ..structures import DensePoseChartPredictorOutput, DensePoseEmbeddingPredictorOutput
from . import (
    HFlipConverter,
    ToChartResultConverter,
    ToChartResultConverterWithConfidences,
    ToMaskConverter,
    densepose_chart_predictor_output_hflip,
    densepose_chart_predictor_output_to_result,
    densepose_chart_predictor_output_to_result_with_confidences,
    predictor_output_with_coarse_segm_to_mask,
    predictor_output_with_fine_and_coarse_segm_to_mask,
)

ToMaskConverter.register(
    DensePoseChartPredictorOutput, predictor_output_with_fine_and_coarse_segm_to_mask
)
ToMaskConverter.register(
    DensePoseEmbeddingPredictorOutput, predictor_output_with_coarse_segm_to_mask
)

ToChartResultConverter.register(
    DensePoseChartPredictorOutput, densepose_chart_predictor_output_to_result
)

ToChartResultConverterWithConfidences.register(
    DensePoseChartPredictorOutput, densepose_chart_predictor_output_to_result_with_confidences
)

HFlipConverter.register(DensePoseChartPredictorOutput, densepose_chart_predictor_output_hflip)
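
builtin.py exposes nothing to call directly; importing it is what populates each converter registry with the DensePose predictor output types. A small sanity-check sketch, assuming the densepose package is importable (not part of the commit):

import densepose.converters.builtin  # noqa: F401 - imported for side effects only

from densepose.converters import HFlipConverter, ToChartResultConverter, ToMaskConverter
from densepose.structures import DensePoseChartPredictorOutput

# after the import, the chart predictor output type resolves in every registry
for conv in (ToChartResultConverter, ToMaskConverter, HFlipConverter):
    assert DensePoseChartPredictorOutput in conv.registry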
densepose/converters/chart_output_hflip.py
ADDED
@@ -0,0 +1,71 @@
# Copyright (c) Facebook, Inc. and its affiliates.
from dataclasses import fields
import torch

from densepose.structures import DensePoseChartPredictorOutput, DensePoseTransformData


def densepose_chart_predictor_output_hflip(
    densepose_predictor_output: DensePoseChartPredictorOutput,
    transform_data: DensePoseTransformData,
) -> DensePoseChartPredictorOutput:
    """
    Change to take into account a horizontal flip.
    """
    if len(densepose_predictor_output) > 0:

        PredictorOutput = type(densepose_predictor_output)
        output_dict = {}

        for field in fields(densepose_predictor_output):
            field_value = getattr(densepose_predictor_output, field.name)
            # flip tensors
            if isinstance(field_value, torch.Tensor):
                setattr(densepose_predictor_output, field.name, torch.flip(field_value, [3]))

        densepose_predictor_output = _flip_iuv_semantics_tensor(
            densepose_predictor_output, transform_data
        )
        densepose_predictor_output = _flip_segm_semantics_tensor(
            densepose_predictor_output, transform_data
        )

        for field in fields(densepose_predictor_output):
            output_dict[field.name] = getattr(densepose_predictor_output, field.name)

        return PredictorOutput(**output_dict)
    else:
        return densepose_predictor_output


def _flip_iuv_semantics_tensor(
    densepose_predictor_output: DensePoseChartPredictorOutput,
    dp_transform_data: DensePoseTransformData,
) -> DensePoseChartPredictorOutput:
    point_label_symmetries = dp_transform_data.point_label_symmetries
    uv_symmetries = dp_transform_data.uv_symmetries

    N, C, H, W = densepose_predictor_output.u.shape
    u_loc = (densepose_predictor_output.u[:, 1:, :, :].clamp(0, 1) * 255).long()
    v_loc = (densepose_predictor_output.v[:, 1:, :, :].clamp(0, 1) * 255).long()
    Iindex = torch.arange(C - 1, device=densepose_predictor_output.u.device)[
        None, :, None, None
    ].expand(N, C - 1, H, W)
    densepose_predictor_output.u[:, 1:, :, :] = uv_symmetries["U_transforms"][Iindex, v_loc, u_loc]
    densepose_predictor_output.v[:, 1:, :, :] = uv_symmetries["V_transforms"][Iindex, v_loc, u_loc]

    for el in ["fine_segm", "u", "v"]:
        densepose_predictor_output.__dict__[el] = densepose_predictor_output.__dict__[el][
            :, point_label_symmetries, :, :
        ]
    return densepose_predictor_output


def _flip_segm_semantics_tensor(
    densepose_predictor_output: DensePoseChartPredictorOutput, dp_transform_data
):
    if densepose_predictor_output.coarse_segm.shape[1] > 2:
        densepose_predictor_output.coarse_segm = densepose_predictor_output.coarse_segm[
            :, dp_transform_data.mask_label_symmetries, :, :
        ]
    return densepose_predictor_output
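
The uv_symmetries lookup in _flip_iuv_semantics_tensor is plain advanced indexing: for every pixel of every non-background part channel, it reads the mirrored U (or V) value from a per-part transfer table addressed by the quantized (v, u) location. A toy sketch of the same indexing pattern, with made-up table contents (the real tables come from DensePoseTransformData):

import torch

N, C, H, W = 1, 3, 4, 4                 # C - 1 = 2 part channels after dropping background
u = torch.rand(N, C, H, W)
v = torch.rand(N, C, H, W)
table = torch.rand(C - 1, 256, 256)     # toy stand-in for uv_symmetries["U_transforms"]

u_loc = (u[:, 1:].clamp(0, 1) * 255).long()   # quantize U to 256 table bins
v_loc = (v[:, 1:].clamp(0, 1) * 255).long()   # quantize V to 256 table bins
Iindex = torch.arange(C - 1)[None, :, None, None].expand(N, C - 1, H, W)

# one gather per pixel: table[part, v_bin, u_bin]
mirrored_u = table[Iindex, v_loc, u_loc]
assert mirrored_u.shape == (N, C - 1, H, W)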
densepose/converters/chart_output_to_chart_result.py
ADDED
@@ -0,0 +1,188 @@
# Copyright (c) Facebook, Inc. and its affiliates.

from typing import Dict
import torch
from torch.nn import functional as F

from detectron2.structures.boxes import Boxes, BoxMode

from ..structures import (
    DensePoseChartPredictorOutput,
    DensePoseChartResult,
    DensePoseChartResultWithConfidences,
)
from . import resample_fine_and_coarse_segm_to_bbox
from .base import IntTupleBox, make_int_box


def resample_uv_tensors_to_bbox(
    u: torch.Tensor,
    v: torch.Tensor,
    labels: torch.Tensor,
    box_xywh_abs: IntTupleBox,
) -> torch.Tensor:
    """
    Resamples U and V coordinate estimates for the given bounding box

    Args:
        u (tensor [1, C, H, W] of float): U coordinates
        v (tensor [1, C, H, W] of float): V coordinates
        labels (tensor [H, W] of long): labels obtained by resampling segmentation
            outputs for the given bounding box
        box_xywh_abs (tuple of 4 int): bounding box that corresponds to predictor outputs
    Return:
        Resampled U and V coordinates - a tensor [2, H, W] of float
    """
    x, y, w, h = box_xywh_abs
    w = max(int(w), 1)
    h = max(int(h), 1)
    u_bbox = F.interpolate(u, (h, w), mode="bilinear", align_corners=False)
    v_bbox = F.interpolate(v, (h, w), mode="bilinear", align_corners=False)
    uv = torch.zeros([2, h, w], dtype=torch.float32, device=u.device)
    for part_id in range(1, u_bbox.size(1)):
        uv[0][labels == part_id] = u_bbox[0, part_id][labels == part_id]
        uv[1][labels == part_id] = v_bbox[0, part_id][labels == part_id]
    return uv


def resample_uv_to_bbox(
    predictor_output: DensePoseChartPredictorOutput,
    labels: torch.Tensor,
    box_xywh_abs: IntTupleBox,
) -> torch.Tensor:
    """
    Resamples U and V coordinate estimates for the given bounding box

    Args:
        predictor_output (DensePoseChartPredictorOutput): DensePose predictor
            output to be resampled
        labels (tensor [H, W] of long): labels obtained by resampling segmentation
            outputs for the given bounding box
        box_xywh_abs (tuple of 4 int): bounding box that corresponds to predictor outputs
    Return:
        Resampled U and V coordinates - a tensor [2, H, W] of float
    """
    return resample_uv_tensors_to_bbox(
        predictor_output.u,
        predictor_output.v,
        labels,
        box_xywh_abs,
    )


def densepose_chart_predictor_output_to_result(
    predictor_output: DensePoseChartPredictorOutput, boxes: Boxes
) -> DensePoseChartResult:
    """
    Convert densepose chart predictor outputs to results

    Args:
        predictor_output (DensePoseChartPredictorOutput): DensePose predictor
            output to be converted to results, must contain only 1 output
        boxes (Boxes): bounding box that corresponds to the predictor output,
            must contain only 1 bounding box
    Return:
        DensePose chart-based result (DensePoseChartResult)
    """
    assert len(predictor_output) == 1 and len(boxes) == 1, (
        f"Predictor output to result conversion can operate only single outputs"
        f", got {len(predictor_output)} predictor outputs and {len(boxes)} boxes"
    )

    boxes_xyxy_abs = boxes.tensor.clone()
    boxes_xywh_abs = BoxMode.convert(boxes_xyxy_abs, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
    box_xywh = make_int_box(boxes_xywh_abs[0])

    labels = resample_fine_and_coarse_segm_to_bbox(predictor_output, box_xywh).squeeze(0)
    uv = resample_uv_to_bbox(predictor_output, labels, box_xywh)
    return DensePoseChartResult(labels=labels, uv=uv)


def resample_confidences_to_bbox(
    predictor_output: DensePoseChartPredictorOutput,
    labels: torch.Tensor,
    box_xywh_abs: IntTupleBox,
) -> Dict[str, torch.Tensor]:
    """
    Resamples confidences for the given bounding box

    Args:
        predictor_output (DensePoseChartPredictorOutput): DensePose predictor
            output to be resampled
        labels (tensor [H, W] of long): labels obtained by resampling segmentation
            outputs for the given bounding box
        box_xywh_abs (tuple of 4 int): bounding box that corresponds to predictor outputs
    Return:
        Resampled confidences - a dict of [H, W] tensors of float
    """

    x, y, w, h = box_xywh_abs
    w = max(int(w), 1)
    h = max(int(h), 1)

    confidence_names = [
        "sigma_1",
        "sigma_2",
        "kappa_u",
        "kappa_v",
        "fine_segm_confidence",
        "coarse_segm_confidence",
    ]
    confidence_results = {key: None for key in confidence_names}
    confidence_names = [
        key for key in confidence_names if getattr(predictor_output, key) is not None
    ]
    confidence_base = torch.zeros([h, w], dtype=torch.float32, device=predictor_output.u.device)

    # assign data from channels that correspond to the labels
    for key in confidence_names:
        resampled_confidence = F.interpolate(
            getattr(predictor_output, key),
            (h, w),
            mode="bilinear",
            align_corners=False,
        )
        result = confidence_base.clone()
        for part_id in range(1, predictor_output.u.size(1)):
            if resampled_confidence.size(1) != predictor_output.u.size(1):
                # confidence is not part-based, don't try to fill it part by part
                continue
            result[labels == part_id] = resampled_confidence[0, part_id][labels == part_id]

        if resampled_confidence.size(1) != predictor_output.u.size(1):
            # confidence is not part-based, fill the data with the first channel
            # (targeted for segmentation confidences that have only 1 channel)
            result = resampled_confidence[0, 0]

        confidence_results[key] = result

    return confidence_results  # pyre-ignore[7]


def densepose_chart_predictor_output_to_result_with_confidences(
    predictor_output: DensePoseChartPredictorOutput, boxes: Boxes
) -> DensePoseChartResultWithConfidences:
    """
    Convert densepose chart predictor outputs to results

    Args:
        predictor_output (DensePoseChartPredictorOutput): DensePose predictor
            output with confidences to be converted to results, must contain only 1 output
        boxes (Boxes): bounding box that corresponds to the predictor output,
            must contain only 1 bounding box
    Return:
        DensePose chart-based result with confidences (DensePoseChartResultWithConfidences)
    """
    assert len(predictor_output) == 1 and len(boxes) == 1, (
        f"Predictor output to result conversion can operate only single outputs"
        f", got {len(predictor_output)} predictor outputs and {len(boxes)} boxes"
    )

    boxes_xyxy_abs = boxes.tensor.clone()
    boxes_xywh_abs = BoxMode.convert(boxes_xyxy_abs, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
    box_xywh = make_int_box(boxes_xywh_abs[0])

    labels = resample_fine_and_coarse_segm_to_bbox(predictor_output, box_xywh).squeeze(0)
    uv = resample_uv_to_bbox(predictor_output, labels, box_xywh)
    confidences = resample_confidences_to_bbox(predictor_output, labels, box_xywh)
    return DensePoseChartResultWithConfidences(labels=labels, uv=uv, **confidences)
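
End to end, densepose_chart_predictor_output_to_result resamples the segmentation to the box and then fills U/V per part. A hedged usage sketch; it assumes DensePoseChartPredictorOutput accepts the coarse_segm/fine_segm/u/v tensors this module reads and supports len(), and uses toy scores (25 fine channels as in the standard chart-based setup):

import torch
from detectron2.structures import Boxes

from densepose.converters.chart_output_to_chart_result import (
    densepose_chart_predictor_output_to_result,
)
from densepose.structures import DensePoseChartPredictorOutput

output = DensePoseChartPredictorOutput(
    coarse_segm=torch.randn(1, 2, 16, 16),   # background/foreground scores
    fine_segm=torch.randn(1, 25, 16, 16),    # background + 24 part scores
    u=torch.rand(1, 25, 16, 16),
    v=torch.rand(1, 25, 16, 16),
)
boxes = Boxes(torch.tensor([[10.0, 10.0, 74.0, 138.0]]))  # XYXY_ABS; w=64, h=128

result = densepose_chart_predictor_output_to_result(output, boxes)
print(result.labels.shape)  # torch.Size([128, 64])
print(result.uv.shape)      # torch.Size([2, 128, 64])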
densepose/converters/hflip.py
ADDED
@@ -0,0 +1,34 @@
# Copyright (c) Facebook, Inc. and its affiliates.

from typing import Any

from .base import BaseConverter


class HFlipConverter(BaseConverter):
    """
    Converts various DensePose predictor outputs to DensePose results.
    Each DensePose predictor output type has to register its conversion strategy.
    """

    registry = {}
    dst_type = None

    @classmethod
    # pyre-fixme[14]: `convert` overrides method defined in `BaseConverter`
    #  inconsistently.
    def convert(cls, predictor_outputs: Any, transform_data: Any, *args, **kwargs):
        """
        Performs a horizontal flip on DensePose predictor outputs.
        Does recursive lookup for base classes, so there's no need
        for explicit registration for derived classes.

        Args:
            predictor_outputs: DensePose predictor output to be converted to BitMasks
            transform_data: Anything useful for the flip
        Return:
            An instance of the same type as predictor_outputs
        """
        return super(HFlipConverter, cls).convert(
            predictor_outputs, transform_data, *args, **kwargs
        )
densepose/converters/segm_to_mask.py
ADDED
@@ -0,0 +1,150 @@
# Copyright (c) Facebook, Inc. and its affiliates.

from typing import Any
import torch
from torch.nn import functional as F

from detectron2.structures import BitMasks, Boxes, BoxMode

from .base import IntTupleBox, make_int_box
from .to_mask import ImageSizeType


def resample_coarse_segm_tensor_to_bbox(coarse_segm: torch.Tensor, box_xywh_abs: IntTupleBox):
    """
    Resample coarse segmentation tensor to the given
    bounding box and derive labels for each pixel of the bounding box

    Args:
        coarse_segm: float tensor of shape [1, K, Hout, Wout]
        box_xywh_abs (tuple of 4 int): bounding box given by its upper-left
            corner coordinates, width (W) and height (H)
    Return:
        Labels for each pixel of the bounding box, a long tensor of size [1, H, W]
    """
    x, y, w, h = box_xywh_abs
    w = max(int(w), 1)
    h = max(int(h), 1)
    labels = F.interpolate(coarse_segm, (h, w), mode="bilinear", align_corners=False).argmax(dim=1)
    return labels


def resample_fine_and_coarse_segm_tensors_to_bbox(
    fine_segm: torch.Tensor, coarse_segm: torch.Tensor, box_xywh_abs: IntTupleBox
):
    """
    Resample fine and coarse segmentation tensors to the given
    bounding box and derive labels for each pixel of the bounding box

    Args:
        fine_segm: float tensor of shape [1, C, Hout, Wout]
        coarse_segm: float tensor of shape [1, K, Hout, Wout]
        box_xywh_abs (tuple of 4 int): bounding box given by its upper-left
            corner coordinates, width (W) and height (H)
    Return:
        Labels for each pixel of the bounding box, a long tensor of size [1, H, W]
    """
    x, y, w, h = box_xywh_abs
    w = max(int(w), 1)
    h = max(int(h), 1)
    # coarse segmentation
    coarse_segm_bbox = F.interpolate(
        coarse_segm,
        (h, w),
        mode="bilinear",
        align_corners=False,
    ).argmax(dim=1)
    # combined coarse and fine segmentation
    labels = (
        F.interpolate(fine_segm, (h, w), mode="bilinear", align_corners=False).argmax(dim=1)
        * (coarse_segm_bbox > 0).long()
    )
    return labels


def resample_fine_and_coarse_segm_to_bbox(predictor_output: Any, box_xywh_abs: IntTupleBox):
    """
    Resample fine and coarse segmentation outputs from a predictor to the given
    bounding box and derive labels for each pixel of the bounding box

    Args:
        predictor_output: DensePose predictor output that contains segmentation
            results to be resampled
        box_xywh_abs (tuple of 4 int): bounding box given by its upper-left
            corner coordinates, width (W) and height (H)
    Return:
        Labels for each pixel of the bounding box, a long tensor of size [1, H, W]
    """
    return resample_fine_and_coarse_segm_tensors_to_bbox(
        predictor_output.fine_segm,
        predictor_output.coarse_segm,
        box_xywh_abs,
    )


def predictor_output_with_coarse_segm_to_mask(
    predictor_output: Any, boxes: Boxes, image_size_hw: ImageSizeType
) -> BitMasks:
    """
    Convert predictor output with coarse segmentation to a mask.
    Assumes that predictor output has the following attributes:
     - coarse_segm (tensor of size [N, D, H, W]): coarse segmentation
         unnormalized scores for N instances; D is the number of coarse
         segmentation labels, H and W is the resolution of the estimate

    Args:
        predictor_output: DensePose predictor output to be converted to mask
        boxes (Boxes): bounding boxes that correspond to the DensePose
            predictor outputs
        image_size_hw (tuple [int, int]): image height Himg and width Wimg
    Return:
        BitMasks that contain a bool tensor of size [N, Himg, Wimg] with
        a mask of the size of the image for each instance
    """
    H, W = image_size_hw
    boxes_xyxy_abs = boxes.tensor.clone()
    boxes_xywh_abs = BoxMode.convert(boxes_xyxy_abs, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
    N = len(boxes_xywh_abs)
    masks = torch.zeros((N, H, W), dtype=torch.bool, device=boxes.tensor.device)
    for i in range(len(boxes_xywh_abs)):
        box_xywh = make_int_box(boxes_xywh_abs[i])
        box_mask = resample_coarse_segm_tensor_to_bbox(predictor_output[i].coarse_segm, box_xywh)
        x, y, w, h = box_xywh
        masks[i, y : y + h, x : x + w] = box_mask

    return BitMasks(masks)


def predictor_output_with_fine_and_coarse_segm_to_mask(
    predictor_output: Any, boxes: Boxes, image_size_hw: ImageSizeType
) -> BitMasks:
    """
    Convert predictor output with coarse and fine segmentation to a mask.
    Assumes that predictor output has the following attributes:
     - coarse_segm (tensor of size [N, D, H, W]): coarse segmentation
         unnormalized scores for N instances; D is the number of coarse
         segmentation labels, H and W is the resolution of the estimate
     - fine_segm (tensor of size [N, C, H, W]): fine segmentation
         unnormalized scores for N instances; C is the number of fine
         segmentation labels, H and W is the resolution of the estimate

    Args:
        predictor_output: DensePose predictor output to be converted to mask
        boxes (Boxes): bounding boxes that correspond to the DensePose
            predictor outputs
        image_size_hw (tuple [int, int]): image height Himg and width Wimg
    Return:
        BitMasks that contain a bool tensor of size [N, Himg, Wimg] with
        a mask of the size of the image for each instance
    """
    H, W = image_size_hw
    boxes_xyxy_abs = boxes.tensor.clone()
    boxes_xywh_abs = BoxMode.convert(boxes_xyxy_abs, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
    N = len(boxes_xywh_abs)
    masks = torch.zeros((N, H, W), dtype=torch.bool, device=boxes.tensor.device)
    for i in range(len(boxes_xywh_abs)):
        box_xywh = make_int_box(boxes_xywh_abs[i])
        labels_i = resample_fine_and_coarse_segm_to_bbox(predictor_output[i], box_xywh)
        x, y, w, h = box_xywh
        masks[i, y : y + h, x : x + w] = labels_i > 0
    return BitMasks(masks)
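
The mask path can also be exercised on raw tensors, bypassing the converter machinery. A short sketch with toy scores (not part of the commit):

import torch
from densepose.converters.segm_to_mask import resample_fine_and_coarse_segm_tensors_to_bbox

fine_segm = torch.randn(1, 25, 16, 16)    # background + 24 fine part scores
coarse_segm = torch.randn(1, 2, 16, 16)   # background/foreground scores
box_xywh = (10, 10, 64, 128)              # x, y, w, h in absolute pixels

labels = resample_fine_and_coarse_segm_tensors_to_bbox(fine_segm, coarse_segm, box_xywh)
print(labels.shape, labels.dtype)  # torch.Size([1, 128, 64]) torch.int64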
densepose/converters/to_chart_result.py
ADDED
@@ -0,0 +1,70 @@
# Copyright (c) Facebook, Inc. and its affiliates.

from typing import Any

from detectron2.structures import Boxes

from ..structures import DensePoseChartResult, DensePoseChartResultWithConfidences
from .base import BaseConverter


class ToChartResultConverter(BaseConverter):
    """
    Converts various DensePose predictor outputs to DensePose results.
    Each DensePose predictor output type has to register its conversion strategy.
    """

    registry = {}
    dst_type = DensePoseChartResult

    @classmethod
    # pyre-fixme[14]: `convert` overrides method defined in `BaseConverter`
    #  inconsistently.
    def convert(cls, predictor_outputs: Any, boxes: Boxes, *args, **kwargs) -> DensePoseChartResult:
        """
        Convert DensePose predictor outputs to DensePoseResult using some registered
        converter. Does recursive lookup for base classes, so there's no need
        for explicit registration for derived classes.

        Args:
            predictor_outputs: DensePose predictor output to be
                converted to chart-based results
            boxes (Boxes): bounding boxes that correspond to the DensePose
                predictor outputs
        Return:
            An instance of DensePoseChartResult. If no suitable converter was found, raises KeyError
        """
        return super(ToChartResultConverter, cls).convert(predictor_outputs, boxes, *args, **kwargs)


class ToChartResultConverterWithConfidences(BaseConverter):
    """
    Converts various DensePose predictor outputs to DensePose results with confidences.
    Each DensePose predictor output type has to register its conversion strategy.
    """

    registry = {}
    dst_type = DensePoseChartResultWithConfidences

    @classmethod
    # pyre-fixme[14]: `convert` overrides method defined in `BaseConverter`
    #  inconsistently.
    def convert(
        cls, predictor_outputs: Any, boxes: Boxes, *args, **kwargs
    ) -> DensePoseChartResultWithConfidences:
        """
        Convert DensePose predictor outputs to DensePoseResult with confidences
        using some registered converter. Does recursive lookup for base classes,
        so there's no need for explicit registration for derived classes.

        Args:
            predictor_outputs: DensePose predictor output with confidences
                to be converted to chart-based results
            boxes (Boxes): bounding boxes that correspond to the DensePose
                predictor outputs
        Return:
            An instance of DensePoseChartResultWithConfidences.
            If no suitable converter was found, raises KeyError
        """
        return super(ToChartResultConverterWithConfidences, cls).convert(
            predictor_outputs, boxes, *args, **kwargs
        )
densepose/converters/to_mask.py
ADDED
@@ -0,0 +1,49 @@
# Copyright (c) Facebook, Inc. and its affiliates.

from typing import Any, Tuple

from detectron2.structures import BitMasks, Boxes

from .base import BaseConverter

ImageSizeType = Tuple[int, int]


class ToMaskConverter(BaseConverter):
    """
    Converts various DensePose predictor outputs to masks
    in bit mask format (see `BitMasks`). Each DensePose predictor output type
    has to register its conversion strategy.
    """

    registry = {}
    dst_type = BitMasks

    @classmethod
    # pyre-fixme[14]: `convert` overrides method defined in `BaseConverter`
    #  inconsistently.
    def convert(
        cls,
        densepose_predictor_outputs: Any,
        boxes: Boxes,
        image_size_hw: ImageSizeType,
        *args,
        **kwargs
    ) -> BitMasks:
        """
        Convert DensePose predictor outputs to BitMasks using some registered
        converter. Does recursive lookup for base classes, so there's no need
        for explicit registration for derived classes.

        Args:
            densepose_predictor_outputs: DensePose predictor output to be
                converted to BitMasks
            boxes (Boxes): bounding boxes that correspond to the DensePose
                predictor outputs
            image_size_hw (tuple [int, int]): image height and width
        Return:
            An instance of `BitMasks`. If no suitable converter was found, raises KeyError
        """
        return super(ToMaskConverter, cls).convert(
            densepose_predictor_outputs, boxes, image_size_hw, *args, **kwargs
        )
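
Putting the pieces together, the registered chart conversion turns per-box predictor outputs into image-sized bit masks. A final hedged sketch; the DensePoseChartPredictorOutput constructor and its per-instance indexing (predictor_output[i], relied on by segm_to_mask.py) are assumed from the densepose structures, and the tensors are toy values:

import torch
from detectron2.structures import Boxes

import densepose.converters.builtin  # noqa: F401 - wires up the registrations
from densepose.converters import ToMaskConverter
from densepose.structures import DensePoseChartPredictorOutput

outputs = DensePoseChartPredictorOutput(
    coarse_segm=torch.randn(2, 2, 16, 16),
    fine_segm=torch.randn(2, 25, 16, 16),
    u=torch.rand(2, 25, 16, 16),
    v=torch.rand(2, 25, 16, 16),
)
boxes = Boxes(torch.tensor([[5.0, 5.0, 69.0, 133.0], [20.0, 30.0, 84.0, 94.0]]))

masks = ToMaskConverter.convert(outputs, boxes, (480, 640))  # (Himg, Wimg)
print(masks.tensor.shape)  # torch.Size([2, 480, 640])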