| code | docstring | func_name | language | repo | path | url | license |
|---|---|---|---|---|---|---|---|
def draw_clip_range(self, frames, preds, bboxes, draw_range):
"""Draw a range of frames with the same bboxes and predictions."""
# no predictions to be drawn
if bboxes is None or len(bboxes) == 0:
return frames
# draw frames in `draw_range`
left_frames = frames[:draw_range[0]]
right_frames = frames[draw_range[1] + 1:]
draw_frames = frames[draw_range[0]:draw_range[1] + 1]
# get labels(texts) and draw predictions
draw_frames = [
self.draw_one_image(frame, bboxes, preds) for frame in draw_frames
]
return list(left_frames) + draw_frames + list(right_frames)
|
Draw a range of frames with the same bboxes and predictions.
|
draw_clip_range
|
python
|
open-mmlab/mmaction2
|
demo/webcam_demo_spatiotemporal_det.py
|
https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py
|
Apache-2.0
|
def abbrev(name):
"""Get the abbreviation of label name:
'take (an object) from (a person)' -> 'take ... from ...'
"""
while name.find('(') != -1:
st, ed = name.find('('), name.find(')')
name = name[:st] + '...' + name[ed + 1:]
return name
|
Get the abbreviation of label name:
'take (an object) from (a person)' -> 'take ... from ...'
|
abbrev
|
python
|
open-mmlab/mmaction2
|
demo/webcam_demo_spatiotemporal_det.py
|
https://github.com/open-mmlab/mmaction2/blob/master/demo/webcam_demo_spatiotemporal_det.py
|
Apache-2.0
|
def parse_version_info(version_str: str):
"""Parse a version string into a tuple.
Args:
version_str (str): The version string.
Returns:
tuple[int or str]: The version info, e.g., "1.3.0" is parsed into
(1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1').
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
|
Parse a version string into a tuple.
Args:
version_str (str): The version string.
Returns:
tuple[int or str]: The version info, e.g., "1.3.0" is parsed into
(1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1').
|
parse_version_info
|
python
|
open-mmlab/mmaction2
|
mmaction/version.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/version.py
|
Apache-2.0
|
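A quick sanity check of `parse_version_info` against the two examples in its docstring (a minimal, self-contained sketch; the function body is copied verbatim from the row above):

```python
def parse_version_info(version_str: str):
    # Copied from mmaction/version.py above so the check is self-contained.
    version_info = []
    for x in version_str.split('.'):
        if x.isdigit():
            version_info.append(int(x))
        elif x.find('rc') != -1:
            patch_version = x.split('rc')
            version_info.append(int(patch_version[0]))
            version_info.append(f'rc{patch_version[1]}')
    return tuple(version_info)


assert parse_version_info('1.3.0') == (1, 3, 0)
assert parse_version_info('2.0.0rc1') == (2, 0, 0, 'rc1')
```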
def init_recognizer(config: Union[str, Path, mmengine.Config],
checkpoint: Optional[str] = None,
device: Union[str, torch.device] = 'cuda:0') -> nn.Module:
"""Initialize a recognizer from config file.
Args:
config (str or :obj:`Path` or :obj:`mmengine.Config`): Config file
path, :obj:`Path` or the config object.
checkpoint (str, optional): Checkpoint path/url. If set to None,
the model will not load any weights. Defaults to None.
device (str | torch.device): The desired device of returned
tensor. Defaults to ``'cuda:0'``.
Returns:
nn.Module: The constructed recognizer.
"""
if isinstance(config, (str, Path)):
config = mmengine.Config.fromfile(config)
elif not isinstance(config, mmengine.Config):
raise TypeError('config must be a filename or Config object, '
f'but got {type(config)}')
init_default_scope(config.get('default_scope', 'mmaction'))
if hasattr(config.model, 'backbone') and config.model.backbone.get(
'pretrained', None):
config.model.backbone.pretrained = None
model = MODELS.build(config.model)
if checkpoint is not None:
load_checkpoint(model, checkpoint, map_location='cpu')
model.cfg = config
model.to(device)
model.eval()
return model
|
Initialize a recognizer from config file.
Args:
config (str or :obj:`Path` or :obj:`mmengine.Config`): Config file
path, :obj:`Path` or the config object.
checkpoint (str, optional): Checkpoint path/url. If set to None,
the model will not load any weights. Defaults to None.
device (str | torch.device): The desired device of returned
tensor. Defaults to ``'cuda:0'``.
Returns:
nn.Module: The constructed recognizer.
|
init_recognizer
|
python
|
open-mmlab/mmaction2
|
mmaction/apis/inference.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inference.py
|
Apache-2.0
|
def inference_recognizer(model: nn.Module,
video: Union[str, dict],
test_pipeline: Optional[Compose] = None
) -> ActionDataSample:
"""Inference a video with the recognizer.
Args:
model (nn.Module): The loaded recognizer.
video (Union[str, dict]): The video file path or the results
dictionary (the input of pipeline).
test_pipeline (:obj:`Compose`, optional): The test pipeline.
If not specified, the test pipeline in the config will be
used. Defaults to None.
Returns:
:obj:`ActionDataSample`: The inference results. Specifically, the
predicted scores are saved at ``result.pred_score``.
"""
if test_pipeline is None:
cfg = model.cfg
init_default_scope(cfg.get('default_scope', 'mmaction'))
test_pipeline_cfg = cfg.test_pipeline
test_pipeline = Compose(test_pipeline_cfg)
input_flag = None
if isinstance(video, dict):
input_flag = 'dict'
elif isinstance(video, str) and osp.exists(video):
if video.endswith('.npy'):
input_flag = 'audio'
else:
input_flag = 'video'
else:
raise RuntimeError(f'The type of argument `video` is not supported: '
f'{type(video)}')
if input_flag == 'dict':
data = video
if input_flag == 'video':
data = dict(filename=video, label=-1, start_index=0, modality='RGB')
if input_flag == 'audio':
data = dict(
audio_path=video,
total_frames=len(np.load(video)),
start_index=0,
label=-1)
data = test_pipeline(data)
data = pseudo_collate([data])
# Forward the model
with torch.no_grad():
result = model.test_step(data)[0]
return result
|
Inference a video with the recognizer.
Args:
model (nn.Module): The loaded recognizer.
video (Union[str, dict]): The video file path or the results
dictionary (the input of pipeline).
test_pipeline (:obj:`Compose`, optional): The test pipeline.
If not specified, the test pipeline in the config will be
used. Defaults to None.
Returns:
:obj:`ActionDataSample`: The inference results. Specifically, the
predicted scores are saved at ``result.pred_score``.
|
inference_recognizer
|
python
|
open-mmlab/mmaction2
|
mmaction/apis/inference.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inference.py
|
Apache-2.0
|
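A minimal usage sketch combining `init_recognizer` and `inference_recognizer` from the rows above. The config, checkpoint, and video paths are placeholders (any MMAction2 recognition config/checkpoint pair would do), and the import path assumes both functions are exported from `mmaction.apis` as in the repository's demo scripts:

```python
import torch
from mmaction.apis import inference_recognizer, init_recognizer

# Placeholder paths: substitute a real MMAction2 config, checkpoint and video.
config_file = 'configs/recognition/tsn/my_tsn_config.py'
checkpoint_file = 'checkpoints/my_tsn_checkpoint.pth'
video_file = 'demo/demo.mp4'

device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
model = init_recognizer(config_file, checkpoint_file, device=device)
result = inference_recognizer(model, video_file)

# Per the docstring above, the predicted scores live at `result.pred_score`.
print(result.pred_score)
```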
def inference_skeleton(model: nn.Module,
pose_results: List[dict],
img_shape: Tuple[int],
test_pipeline: Optional[Compose] = None
) -> ActionDataSample:
"""Inference a pose results with the skeleton recognizer.
Args:
model (nn.Module): The loaded recognizer.
pose_results (List[dict]): The pose estimation results dictionary
(the results of `pose_inference`)
img_shape (Tuple[int]): The original image shape used for inference
skeleton recognizer.
test_pipeline (:obj:`Compose`, optional): The test pipeline.
If not specified, the test pipeline in the config will be
used. Defaults to None.
Returns:
:obj:`ActionDataSample`: The inference results. Specifically, the
predicted scores are saved at ``result.pred_score``.
"""
if test_pipeline is None:
cfg = model.cfg
init_default_scope(cfg.get('default_scope', 'mmaction'))
test_pipeline_cfg = cfg.test_pipeline
test_pipeline = Compose(test_pipeline_cfg)
h, w = img_shape
num_keypoint = pose_results[0]['keypoints'].shape[1]
num_frame = len(pose_results)
num_person = max([len(x['keypoints']) for x in pose_results])
fake_anno = dict(
frame_dict='',
label=-1,
img_shape=(h, w),
origin_shape=(h, w),
start_index=0,
modality='Pose',
total_frames=num_frame)
keypoint = np.zeros((num_frame, num_person, num_keypoint, 2),
dtype=np.float16)
keypoint_score = np.zeros((num_frame, num_person, num_keypoint),
dtype=np.float16)
for f_idx, frm_pose in enumerate(pose_results):
frm_num_persons = frm_pose['keypoints'].shape[0]
for p_idx in range(frm_num_persons):
keypoint[f_idx, p_idx] = frm_pose['keypoints'][p_idx]
keypoint_score[f_idx, p_idx] = frm_pose['keypoint_scores'][p_idx]
fake_anno['keypoint'] = keypoint.transpose((1, 0, 2, 3))
fake_anno['keypoint_score'] = keypoint_score.transpose((1, 0, 2))
return inference_recognizer(model, fake_anno, test_pipeline)
|
Inference pose results with the skeleton recognizer.
Args:
model (nn.Module): The loaded recognizer.
pose_results (List[dict]): The pose estimation results dictionary
(the results of `pose_inference`)
img_shape (Tuple[int]): The original image shape used for inference
skeleton recognizer.
test_pipeline (:obj:`Compose`, optional): The test pipeline.
If not specified, the test pipeline in the config will be
used. Defaults to None.
Returns:
:obj:`ActionDataSample`: The inference results. Specifically, the
predicted scores are saved at ``result.pred_score``.
|
inference_skeleton
|
python
|
open-mmlab/mmaction2
|
mmaction/apis/inference.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inference.py
|
Apache-2.0
|
def detection_inference(det_config: Union[str, Path, mmengine.Config,
nn.Module],
det_checkpoint: str,
frame_paths: List[str],
det_score_thr: float = 0.9,
det_cat_id: int = 0,
device: Union[str, torch.device] = 'cuda:0',
with_score: bool = False) -> tuple:
"""Detect human boxes given frame paths.
Args:
det_config (Union[str, :obj:`Path`, :obj:`mmengine.Config`,
:obj:`torch.nn.Module`]):
Det config file path or Detection model object. It can be
a :obj:`Path`, a config object, or a module object.
det_checkpoint: Checkpoint path/url.
frame_paths (List[str]): The paths of frames to do detection inference.
det_score_thr (float): The threshold of human detection score.
Defaults to 0.9.
det_cat_id (int): The category id for human detection. Defaults to 0.
device (Union[str, torch.device]): The desired device of returned
tensor. Defaults to ``'cuda:0'``.
with_score (bool): Whether to append detection score after box.
Defaults to False.
Returns:
List[np.ndarray]: List of detected human boxes.
List[:obj:`DetDataSample`]: List of data samples, generally used
to visualize data.
"""
try:
from mmdet.apis import inference_detector, init_detector
from mmdet.structures import DetDataSample
except (ImportError, ModuleNotFoundError):
raise ImportError('Failed to import `inference_detector` and '
'`init_detector` from `mmdet.apis`. These apis are '
'required in this inference api! ')
if isinstance(det_config, nn.Module):
model = det_config
else:
model = init_detector(
config=det_config, checkpoint=det_checkpoint, device=device)
results = []
data_samples = []
print('Performing Human Detection for each frame')
for frame_path in track_iter_progress(frame_paths):
det_data_sample: DetDataSample = inference_detector(model, frame_path)
pred_instance = det_data_sample.pred_instances.cpu().numpy()
bboxes = pred_instance.bboxes
scores = pred_instance.scores
# We only keep human detection bboxs with score larger
# than `det_score_thr` and category id equal to `det_cat_id`.
valid_idx = np.logical_and(pred_instance.labels == det_cat_id,
pred_instance.scores > det_score_thr)
bboxes = bboxes[valid_idx]
scores = scores[valid_idx]
if with_score:
bboxes = np.concatenate((bboxes, scores[:, None]), axis=-1)
results.append(bboxes)
data_samples.append(det_data_sample)
return results, data_samples
|
Detect human boxes given frame paths.
Args:
det_config (Union[str, :obj:`Path`, :obj:`mmengine.Config`,
:obj:`torch.nn.Module`]):
Det config file path or Detection model object. It can be
a :obj:`Path`, a config object, or a module object.
det_checkpoint: Checkpoint path/url.
frame_paths (List[str]): The paths of frames to do detection inference.
det_score_thr (float): The threshold of human detection score.
Defaults to 0.9.
det_cat_id (int): The category id for human detection. Defaults to 0.
device (Union[str, torch.device]): The desired device of returned
tensor. Defaults to ``'cuda:0'``.
with_score (bool): Whether to append detection score after box.
Defaults to False.
Returns:
List[np.ndarray]: List of detected human boxes.
List[:obj:`DetDataSample`]: List of data samples, generally used
to visualize data.
|
detection_inference
|
python
|
open-mmlab/mmaction2
|
mmaction/apis/inference.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inference.py
|
Apache-2.0
|
def pose_inference(pose_config: Union[str, Path, mmengine.Config, nn.Module],
pose_checkpoint: str,
frame_paths: List[str],
det_results: List[np.ndarray],
device: Union[str, torch.device] = 'cuda:0') -> tuple:
"""Perform Top-Down pose estimation.
Args:
pose_config (Union[str, :obj:`Path`, :obj:`mmengine.Config`,
:obj:`torch.nn.Module`]): Pose config file path or
pose model object. It can be a :obj:`Path`, a config object,
or a module object.
pose_checkpoint: Checkpoint path/url.
frame_paths (List[str]): The paths of frames to do pose inference.
det_results (List[np.ndarray]): List of detected human boxes.
device (Union[str, torch.device]): The desired device of returned
tensor. Defaults to ``'cuda:0'``.
Returns:
List[List[Dict[str, np.ndarray]]]: List of pose estimation results.
List[:obj:`PoseDataSample`]: List of data samples, generally used
to visualize data.
"""
try:
from mmpose.apis import inference_topdown, init_model
from mmpose.structures import PoseDataSample, merge_data_samples
except (ImportError, ModuleNotFoundError):
raise ImportError('Failed to import `inference_topdown` and '
'`init_model` from `mmpose.apis`. These apis '
'are required in this inference api! ')
if isinstance(pose_config, nn.Module):
model = pose_config
else:
model = init_model(pose_config, pose_checkpoint, device)
results = []
data_samples = []
print('Performing Human Pose Estimation for each frame')
for f, d in track_iter_progress(list(zip(frame_paths, det_results))):
pose_data_samples: List[PoseDataSample] \
= inference_topdown(model, f, d[..., :4], bbox_format='xyxy')
pose_data_sample = merge_data_samples(pose_data_samples)
pose_data_sample.dataset_meta = model.dataset_meta
# make fake pred_instances
if not hasattr(pose_data_sample, 'pred_instances'):
num_keypoints = model.dataset_meta['num_keypoints']
pred_instances_data = dict(
keypoints=np.empty(shape=(0, num_keypoints, 2)),
keypoints_scores=np.empty(shape=(0, 17), dtype=np.float32),
bboxes=np.empty(shape=(0, 4), dtype=np.float32),
bbox_scores=np.empty(shape=(0), dtype=np.float32))
pose_data_sample.pred_instances = InstanceData(
**pred_instances_data)
poses = pose_data_sample.pred_instances.to_dict()
results.append(poses)
data_samples.append(pose_data_sample)
return results, data_samples
|
Perform Top-Down pose estimation.
Args:
pose_config (Union[str, :obj:`Path`, :obj:`mmengine.Config`,
:obj:`torch.nn.Module`]): Pose config file path or
pose model object. It can be a :obj:`Path`, a config object,
or a module object.
pose_checkpoint: Checkpoint path/url.
frame_paths (List[str]): The paths of frames to do pose inference.
det_results (List[np.ndarray]): List of detected human boxes.
device (Union[str, torch.device]): The desired device of returned
tensor. Defaults to ``'cuda:0'``.
Returns:
List[List[Dict[str, np.ndarray]]]: List of pose estimation results.
List[:obj:`PoseDataSample`]: List of data samples, generally used
to visualize data.
|
pose_inference
|
python
|
open-mmlab/mmaction2
|
mmaction/apis/inference.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inference.py
|
Apache-2.0
|
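The three helpers above compose into the usual skeleton-based recognition pipeline: detect humans per frame, run top-down pose estimation on the detections, then classify the pose sequence. A hedged sketch, with every config/checkpoint/frame path a placeholder and the imports assumed to come from `mmaction.apis`:

```python
from mmaction.apis import (detection_inference, inference_skeleton,
                           init_recognizer, pose_inference)

# Placeholders: extracted RGB frames plus detector/pose/skeleton configs.
frame_paths = ['frames/img_00001.jpg', 'frames/img_00002.jpg']
img_shape = (480, 640)  # (height, width) of the original frames

det_results, _ = detection_inference('det_config.py', 'det_ckpt.pth',
                                     frame_paths, det_score_thr=0.9)
pose_results, _ = pose_inference('pose_config.py', 'pose_ckpt.pth',
                                 frame_paths, det_results)

skeleton_model = init_recognizer('skeleton_config.py', 'skeleton_ckpt.pth',
                                 device='cuda:0')
result = inference_skeleton(skeleton_model, pose_results, img_shape)
print(result.pred_score)
```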
def __call__(self,
inputs: InputsType,
return_datasamples: bool = False,
batch_size: int = 1,
return_vis: bool = False,
show: bool = False,
wait_time: int = 0,
draw_pred: bool = True,
vid_out_dir: str = '',
out_type: str = 'video',
print_result: bool = False,
pred_out_file: str = '',
target_resolution: Optional[Tuple[int]] = None,
**kwargs) -> dict:
"""Call the inferencer.
Args:
inputs (InputsType): Inputs for the inferencer.
return_datasamples (bool): Whether to return results as
:obj:`BaseDataElement`. Defaults to False.
batch_size (int): Inference batch size. Defaults to 1.
show (bool): Whether to display the visualization results in a
popup window. Defaults to False.
wait_time (float): The interval of show (s). Defaults to 0.
draw_pred (bool): Whether to draw predicted bounding boxes.
Defaults to True.
vid_out_dir (str): Output directory of visualization results.
If left as empty, no file will be saved. Defaults to ''.
out_type (str): Output type of visualization results.
Defaults to 'video'.
print_result (bool): Whether to print the inference result w/o
visualization to the console. Defaults to False.
pred_out_file: File to save the inference results w/o
visualization. If left as empty, no file will be saved.
Defaults to ''.
**kwargs: Other keyword arguments passed to :meth:`preprocess`,
:meth:`forward`, :meth:`visualize` and :meth:`postprocess`.
Each key in kwargs should be in the corresponding set of
``preprocess_kwargs``, ``forward_kwargs``, ``visualize_kwargs``
and ``postprocess_kwargs``.
Returns:
dict: Inference and visualization results.
"""
return super().__call__(
inputs,
return_datasamples,
batch_size,
return_vis=return_vis,
show=show,
wait_time=wait_time,
draw_pred=draw_pred,
vid_out_dir=vid_out_dir,
print_result=print_result,
pred_out_file=pred_out_file,
out_type=out_type,
target_resolution=target_resolution,
**kwargs)
|
Call the inferencer.
Args:
inputs (InputsType): Inputs for the inferencer.
return_datasamples (bool): Whether to return results as
:obj:`BaseDataElement`. Defaults to False.
batch_size (int): Inference batch size. Defaults to 1.
show (bool): Whether to display the visualization results in a
popup window. Defaults to False.
wait_time (float): The interval of show (s). Defaults to 0.
draw_pred (bool): Whether to draw predicted bounding boxes.
Defaults to True.
vid_out_dir (str): Output directory of visualization results.
If left as empty, no file will be saved. Defaults to ''.
out_type (str): Output type of visualization results.
Defaults to 'video'.
print_result (bool): Whether to print the inference result w/o
visualization to the console. Defaults to False.
pred_out_file: File to save the inference results w/o
visualization. If left as empty, no file will be saved.
Defaults to ''.
**kwargs: Other keyword arguments passed to :meth:`preprocess`,
:meth:`forward`, :meth:`visualize` and :meth:`postprocess`.
Each key in kwargs should be in the corresponding set of
``preprocess_kwargs``, ``forward_kwargs``, ``visualize_kwargs``
and ``postprocess_kwargs``.
Returns:
dict: Inference and visualization results.
|
__call__
|
python
|
open-mmlab/mmaction2
|
mmaction/apis/inferencers/actionrecog_inferencer.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inferencers/actionrecog_inferencer.py
|
Apache-2.0
|
def _inputs_to_list(self, inputs: InputsType) -> list:
"""Preprocess the inputs to a list. The main difference from mmengine
version is that we don't list a directory cause input could be a frame
folder.
Preprocess inputs to a list according to its type:
- list or tuple: return inputs
- str: return a list containing the string. The string
could be a path to file, a url or other types of string according
to the task.
Args:
inputs (InputsType): Inputs for the inferencer.
Returns:
list: List of input for the :meth:`preprocess`.
"""
if not isinstance(inputs, (list, tuple)):
inputs = [inputs]
return list(inputs)
|
Preprocess the inputs to a list. The main difference from the mmengine
version is that we don't list a directory, because the input could be a
frame folder.
Preprocess inputs to a list according to its type:
- list or tuple: return inputs
- str: return a list containing the string. The string
could be a path to file, a url or other types of string according
to the task.
Args:
inputs (InputsType): Inputs for the inferencer.
Returns:
list: List of input for the :meth:`preprocess`.
|
_inputs_to_list
|
python
|
open-mmlab/mmaction2
|
mmaction/apis/inferencers/actionrecog_inferencer.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inferencers/actionrecog_inferencer.py
|
Apache-2.0
|
def visualize(
self,
inputs: InputsType,
preds: PredType,
return_vis: bool = False,
show: bool = False,
wait_time: int = 0,
draw_pred: bool = True,
fps: int = 30,
out_type: str = 'video',
target_resolution: Optional[Tuple[int]] = None,
vid_out_dir: str = '',
) -> Union[List[np.ndarray], None]:
"""Visualize predictions.
Args:
inputs (List[Union[str, np.ndarray]]): Inputs for the inferencer.
preds (List[Dict]): Predictions of the model.
return_vis (bool): Whether to return the visualization result.
Defaults to False.
show (bool): Whether to display the image in a popup window.
Defaults to False.
wait_time (float): The interval of show (s). Defaults to 0.
draw_pred (bool): Whether to draw prediction labels.
Defaults to True.
fps (int): Frames per second for saving video. Defaults to 30.
out_type (str): Output format type, choose from 'img', 'gif',
'video'. Defaults to ``'video'``.
target_resolution (Tuple[int], optional): Set to
(desired_width, desired_height) to have resized frames. If
either dimension is None, the frames are resized by keeping
the existing aspect ratio. Defaults to None.
vid_out_dir (str): Output directory of visualization results.
If left as empty, no file will be saved. Defaults to ''.
Returns:
List[np.ndarray] or None: Returns visualization results only if
applicable.
"""
if self.visualizer is None or (not show and vid_out_dir == ''
and not return_vis):
return None
if getattr(self, 'visualizer') is None:
raise ValueError('Visualization needs the "visualizer" term'
'defined in the config, but got None.')
results = []
for single_input, pred in zip(inputs, preds):
if isinstance(single_input, str):
frames = single_input
video_name = osp.basename(single_input)
elif isinstance(single_input, np.ndarray):
frames = single_input.copy()
video_num = str(self.num_visualized_vids).zfill(8)
video_name = f'{video_num}.mp4'
else:
raise ValueError('Unsupported input type: '
f'{type(single_input)}')
out_path = osp.join(vid_out_dir, video_name) if vid_out_dir != '' \
else None
visualization = self.visualizer.add_datasample(
video_name,
frames,
pred,
show_frames=show,
wait_time=wait_time,
draw_gt=False,
draw_pred=draw_pred,
fps=fps,
out_type=out_type,
out_path=out_path,
target_resolution=target_resolution,
)
results.append(visualization)
self.num_visualized_vids += 1
return results
|
Visualize predictions.
Args:
inputs (List[Union[str, np.ndarray]]): Inputs for the inferencer.
preds (List[Dict]): Predictions of the model.
return_vis (bool): Whether to return the visualization result.
Defaults to False.
show (bool): Whether to display the image in a popup window.
Defaults to False.
wait_time (float): The interval of show (s). Defaults to 0.
draw_pred (bool): Whether to draw prediction labels.
Defaults to True.
fps (int): Frames per second for saving video. Defaults to 30.
out_type (str): Output format type, choose from 'img', 'gif',
'video'. Defaults to ``'video'``.
target_resolution (Tuple[int], optional): Set to
(desired_width, desired_height) to have resized frames. If
either dimension is None, the frames are resized by keeping
the existing aspect ratio. Defaults to None.
vid_out_dir (str): Output directory of visualization results.
If left as empty, no file will be saved. Defaults to ''.
Returns:
List[np.ndarray] or None: Returns visualization results only if
applicable.
|
visualize
|
python
|
open-mmlab/mmaction2
|
mmaction/apis/inferencers/actionrecog_inferencer.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inferencers/actionrecog_inferencer.py
|
Apache-2.0
|
def postprocess(
self,
preds: PredType,
visualization: Optional[List[np.ndarray]] = None,
return_datasample: bool = False,
print_result: bool = False,
pred_out_file: str = '',
) -> Union[ResType, Tuple[ResType, np.ndarray]]:
"""Process the predictions and visualization results from ``forward``
and ``visualize``.
This method should be responsible for the following tasks:
1. Convert datasamples into a json-serializable dict if needed.
2. Pack the predictions and visualization results and return them.
3. Dump or log the predictions.
Args:
preds (List[Dict]): Predictions of the model.
visualization (Optional[np.ndarray]): Visualized predictions.
return_datasample (bool): Whether to use Datasample to store
inference results. If False, dict will be used.
print_result (bool): Whether to print the inference result w/o
visualization to the console. Defaults to False.
pred_out_file: File to save the inference results w/o
visualization. If left as empty, no file will be saved.
Defaults to ''.
Returns:
dict: Inference and visualization results with key ``predictions``
and ``visualization``.
- ``visualization`` (Any): Returned by :meth:`visualize`.
- ``predictions`` (dict or DataSample): Returned by
:meth:`forward` and processed in :meth:`postprocess`.
If ``return_datasample=False``, it usually should be a
json-serializable dict containing only basic data elements such
as strings and numbers.
"""
result_dict = {}
results = preds
if not return_datasample:
results = []
for pred in preds:
result = self.pred2dict(pred)
results.append(result)
# Add video to the results after printing and dumping
result_dict['predictions'] = results
if print_result:
print(result_dict)
if pred_out_file != '':
mmengine.dump(result_dict, pred_out_file)
result_dict['visualization'] = visualization
return result_dict
|
Process the predictions and visualization results from ``forward``
and ``visualize``.
This method should be responsible for the following tasks:
1. Convert datasamples into a json-serializable dict if needed.
2. Pack the predictions and visualization results and return them.
3. Dump or log the predictions.
Args:
preds (List[Dict]): Predictions of the model.
visualization (Optional[np.ndarray]): Visualized predictions.
return_datasample (bool): Whether to use Datasample to store
inference results. If False, dict will be used.
print_result (bool): Whether to print the inference result w/o
visualization to the console. Defaults to False.
pred_out_file: File to save the inference results w/o
visualization. If left as empty, no file will be saved.
Defaults to ''.
Returns:
dict: Inference and visualization results with key ``predictions``
and ``visualization``.
- ``visualization`` (Any): Returned by :meth:`visualize`.
- ``predictions`` (dict or DataSample): Returned by
:meth:`forward` and processed in :meth:`postprocess`.
If ``return_datasample=False``, it usually should be a
json-serializable dict containing only basic data elements such
as strings and numbers.
|
postprocess
|
python
|
open-mmlab/mmaction2
|
mmaction/apis/inferencers/actionrecog_inferencer.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inferencers/actionrecog_inferencer.py
|
Apache-2.0
|
def pred2dict(self, data_sample: ActionDataSample) -> Dict:
"""Extract elements necessary to represent a prediction into a
dictionary. It's better to contain only basic data elements such as
strings and numbers in order to guarantee it's json-serializable.
Args:
data_sample (ActionDataSample): The data sample to be converted.
Returns:
dict: The output dictionary.
"""
result = {}
result['pred_labels'] = data_sample.pred_label.tolist()
result['pred_scores'] = data_sample.pred_score.tolist()
return result
|
Extract elements necessary to represent a prediction into a
dictionary. It's better to contain only basic data elements such as
strings and numbers in order to guarantee it's json-serializable.
Args:
data_sample (ActionDataSample): The data sample to be converted.
Returns:
dict: The output dictionary.
|
pred2dict
|
python
|
open-mmlab/mmaction2
|
mmaction/apis/inferencers/actionrecog_inferencer.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inferencers/actionrecog_inferencer.py
|
Apache-2.0
|
def forward(self, inputs: InputType, batch_size: int,
**forward_kwargs) -> PredType:
"""Forward the inputs to the model.
Args:
inputs (InputsType): The inputs to be forwarded.
batch_size (int): Batch size. Defaults to 1.
Returns:
Dict: The prediction results, possibly with the key "rec".
"""
result = {}
if self.mode == 'rec':
predictions = self.actionrecog_inferencer(
inputs,
return_datasamples=True,
batch_size=batch_size,
**forward_kwargs)['predictions']
result['rec'] = [[p] for p in predictions]
return result
|
Forward the inputs to the model.
Args:
inputs (InputsType): The inputs to be forwarded.
batch_size (int): Batch size. Defaults to 1.
Returns:
Dict: The prediction results, possibly with the key "rec".
|
forward
|
python
|
open-mmlab/mmaction2
|
mmaction/apis/inferencers/mmaction2_inferencer.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inferencers/mmaction2_inferencer.py
|
Apache-2.0
|
def visualize(self, inputs: InputsType, preds: PredType,
**kwargs) -> List[np.ndarray]:
"""Visualize predictions.
Args:
inputs (List[Union[str, np.ndarray]]): Inputs for the inferencer.
preds (List[Dict]): Predictions of the model.
show (bool): Whether to display the image in a popup window.
Defaults to False.
wait_time (float): The interval of show (s). Defaults to 0.
draw_pred (bool): Whether to draw predicted bounding boxes.
Defaults to True.
fps (int): Frames per second for saving video. Defaults to 30.
out_type (str): Output format type, choose from 'img', 'gif',
'video'. Defaults to ``'video'``.
target_resolution (Tuple[int], optional): Set to
(desired_width, desired_height) to have resized frames. If
either dimension is None, the frames are resized by keeping
the existing aspect ratio. Defaults to None.
vid_out_dir (str): Output directory of visualization results.
If left as empty, no file will be saved. Defaults to ''.
"""
if 'rec' in self.mode:
return self.actionrecog_inferencer.visualize(
inputs, preds['rec'][0], **kwargs)
|
Visualize predictions.
Args:
inputs (List[Union[str, np.ndarray]]): Inputs for the inferencer.
preds (List[Dict]): Predictions of the model.
show (bool): Whether to display the image in a popup window.
Defaults to False.
wait_time (float): The interval of show (s). Defaults to 0.
draw_pred (bool): Whether to draw predicted bounding boxes.
Defaults to True.
fps (int): Frames per second for saving video. Defaults to 30.
out_type (str): Output format type, choose from 'img', 'gif',
'video'. Defaults to ``'video'``.
target_resolution (Tuple[int], optional): Set to
(desired_width, desired_height) to have resized frames. If
either dimension is None, the frames are resized by keeping
the existing aspect ratio. Defaults to None.
vid_out_dir (str): Output directory of visualization results.
If left as empty, no file will be saved. Defaults to ''.
|
visualize
|
python
|
open-mmlab/mmaction2
|
mmaction/apis/inferencers/mmaction2_inferencer.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inferencers/mmaction2_inferencer.py
|
Apache-2.0
|
def __call__(
self,
inputs: InputsType,
batch_size: int = 1,
**kwargs,
) -> dict:
"""Call the inferencer.
Args:
inputs (InputsType): Inputs for the inferencer. It can be a path
to image / image directory, or an array, or a list of these.
return_datasamples (bool): Whether to return results as
:obj:`BaseDataElement`. Defaults to False.
batch_size (int): Batch size. Defaults to 1.
**kwargs: Key words arguments passed to :meth:`preprocess`,
:meth:`forward`, :meth:`visualize` and :meth:`postprocess`.
Each key in kwargs should be in the corresponding set of
``preprocess_kwargs``, ``forward_kwargs``, ``visualize_kwargs``
and ``postprocess_kwargs``.
Returns:
dict: Inference and visualization results.
"""
(
preprocess_kwargs,
forward_kwargs,
visualize_kwargs,
postprocess_kwargs,
) = self._dispatch_kwargs(**kwargs)
ori_inputs = self._inputs_to_list(inputs)
preds = self.forward(ori_inputs, batch_size, **forward_kwargs)
visualization = self.visualize(
ori_inputs, preds,
**visualize_kwargs) # type: ignore # noqa: E501
results = self.postprocess(preds, visualization, **postprocess_kwargs)
return results
|
Call the inferencer.
Args:
inputs (InputsType): Inputs for the inferencer. It can be a path
to image / image directory, or an array, or a list of these.
return_datasamples (bool): Whether to return results as
:obj:`BaseDataElement`. Defaults to False.
batch_size (int): Batch size. Defaults to 1.
**kwargs: Key words arguments passed to :meth:`preprocess`,
:meth:`forward`, :meth:`visualize` and :meth:`postprocess`.
Each key in kwargs should be in the corresponding set of
``preprocess_kwargs``, ``forward_kwargs``, ``visualize_kwargs``
and ``postprocess_kwargs``.
Returns:
dict: Inference and visualization results.
|
__call__
|
python
|
open-mmlab/mmaction2
|
mmaction/apis/inferencers/mmaction2_inferencer.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/apis/inferencers/mmaction2_inferencer.py
|
Apache-2.0
|
def load_data_list(self) -> List[Dict]:
"""Load annotation file to get audio information."""
check_file_exist(self.ann_file)
data_list = []
with open(self.ann_file, 'r') as fin:
for line in fin:
line_split = line.strip().split()
video_info = {}
idx = 0
filename = line_split[idx]
if self.data_prefix['audio'] is not None:
filename = osp.join(self.data_prefix['audio'], filename)
video_info['audio_path'] = filename
idx += 1
# idx for total_frames
video_info['total_frames'] = int(line_split[idx])
idx += 1
# idx for label
label = [int(x) for x in line_split[idx:]]
assert label, f'missing label in line: {line}'
if self.multi_class:
assert self.num_classes is not None
video_info['label'] = label
else:
assert len(label) == 1
video_info['label'] = label[0]
data_list.append(video_info)
return data_list
|
Load annotation file to get audio information.
|
load_data_list
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/audio_dataset.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/audio_dataset.py
|
Apache-2.0
|
def parse_img_record(self, img_records: List[dict]) -> tuple:
"""Merge image records of the same entity at the same time.
Args:
img_records (List[dict]): List of img_records (lines in AVA
annotations).
Returns:
Tuple(list): A tuple consisting of lists of bboxes, action labels and
entity_ids.
"""
bboxes, labels, entity_ids = [], [], []
while len(img_records) > 0:
img_record = img_records[0]
num_img_records = len(img_records)
selected_records = [
x for x in img_records
if np.array_equal(x['entity_box'], img_record['entity_box'])
]
num_selected_records = len(selected_records)
img_records = [
x for x in img_records if
not np.array_equal(x['entity_box'], img_record['entity_box'])
]
assert len(img_records) + num_selected_records == num_img_records
bboxes.append(img_record['entity_box'])
valid_labels = np.array([
selected_record['label']
for selected_record in selected_records
])
# The format can be directly used by BCELossWithLogits
if self.multilabel:
label = np.zeros(self.num_classes, dtype=np.float32)
label[valid_labels] = 1.
else:
label = valid_labels
labels.append(label)
entity_ids.append(img_record['entity_id'])
bboxes = np.stack(bboxes)
labels = np.stack(labels)
entity_ids = np.stack(entity_ids)
return bboxes, labels, entity_ids
|
Merge image records of the same entity at the same time.
Args:
img_records (List[dict]): List of img_records (lines in AVA
annotations).
Returns:
Tuple(list): A tuple consisting of lists of bboxes, action labels and
entity_ids.
|
parse_img_record
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/ava_dataset.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/ava_dataset.py
|
Apache-2.0
|
def load_data_list(self) -> List[Dict]:
"""Load annotation file to get skeleton information."""
assert self.ann_file.endswith('.pkl')
mmengine.exists(self.ann_file)
data_list = mmengine.load(self.ann_file)
if self.split is not None:
split, annos = data_list['split'], data_list['annotations']
identifier = 'filename' if 'filename' in annos[0] else 'frame_dir'
split = set(split[self.split])
data_list = [x for x in annos if x[identifier] in split]
# Sometimes we may need to load video from the file
if 'video' in self.data_prefix:
for item in data_list:
if 'filename' in item:
item['filename'] = osp.join(self.data_prefix['video'],
item['filename'])
if 'frame_dir' in item:
item['frame_dir'] = osp.join(self.data_prefix['video'],
item['frame_dir'])
return data_list
|
Load annotation file to get skeleton information.
|
load_data_list
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/pose_dataset.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/pose_dataset.py
|
Apache-2.0
|
def get_type(transform: Union[dict, Callable]) -> str:
"""get the type of the transform."""
if isinstance(transform, dict) and 'type' in transform:
return transform['type']
elif callable(transform):
return transform.__repr__().split('(')[0]
else:
raise TypeError
|
get the type of the transform.
|
get_type
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/repeat_aug_dataset.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/repeat_aug_dataset.py
|
Apache-2.0
|
def prepare_data(self, idx) -> List[dict]:
"""Get data processed by ``self.pipeline``.
Reduce the video loading and decompressing.
Args:
idx (int): The index of ``data_info``.
Returns:
List[dict]: A list of length num_repeats.
"""
transforms = self.pipeline.transforms
data_info = self.get_data_info(idx)
data_info = transforms[0](data_info) # DecordInit
frame_inds_list, frame_inds_length = [], [0]
fake_data_info = dict(
total_frames=data_info['total_frames'],
start_index=data_info['start_index'])
if not self.sample_once:
for repeat in range(self.num_repeats):
data_info_ = transforms[1](fake_data_info) # SampleFrames
frame_inds = data_info_['frame_inds']
frame_inds_list.append(frame_inds.reshape(-1))
frame_inds_length.append(frame_inds.size +
frame_inds_length[-1])
else:
data_info_ = transforms[1](fake_data_info) # SampleFrames
frame_inds = data_info_['frame_inds']
for repeat in range(self.num_repeats):
frame_inds_list.append(frame_inds.reshape(-1))
frame_inds_length.append(frame_inds.size +
frame_inds_length[-1])
for key in data_info_:
data_info[key] = data_info_[key]
data_info['frame_inds'] = np.concatenate(frame_inds_list)
data_info = transforms[2](data_info) # DecordDecode
imgs = data_info.pop('imgs')
data_info_list = []
for repeat in range(self.num_repeats):
data_info_ = deepcopy(data_info)
start = frame_inds_length[repeat]
end = frame_inds_length[repeat + 1]
data_info_['imgs'] = imgs[start:end]
for transform in transforms[3:]:
data_info_ = transform(data_info_)
data_info_list.append(data_info_)
del imgs
return data_info_list
|
Get data processed by ``self.pipeline``.
Reduce the video loading and decompressing.
Args:
idx (int): The index of ``data_info``.
Returns:
List[dict]: A list of length num_repeats.
|
prepare_data
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/repeat_aug_dataset.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/repeat_aug_dataset.py
|
Apache-2.0
|
def transform(self, results: Dict) -> Dict:
"""The transform function of :class:`PackActionInputs`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
"""
packed_results = dict()
if self.collect_keys is not None:
packed_results['inputs'] = dict()
for key in self.collect_keys:
packed_results['inputs'][key] = to_tensor(results[key])
else:
if 'imgs' in results:
imgs = results['imgs']
packed_results['inputs'] = to_tensor(imgs)
elif 'heatmap_imgs' in results:
heatmap_imgs = results['heatmap_imgs']
packed_results['inputs'] = to_tensor(heatmap_imgs)
elif 'keypoint' in results:
keypoint = results['keypoint']
packed_results['inputs'] = to_tensor(keypoint)
elif 'audios' in results:
audios = results['audios']
packed_results['inputs'] = to_tensor(audios)
elif 'text' in results:
text = results['text']
packed_results['inputs'] = to_tensor(text)
else:
raise ValueError(
'Cannot get `imgs`, `keypoint`, `heatmap_imgs`, '
'`audios` or `text` in the input dict of '
'`PackActionInputs`.')
data_sample = ActionDataSample()
if 'gt_bboxes' in results:
instance_data = InstanceData()
for key in self.mapping_table.keys():
instance_data[self.mapping_table[key]] = to_tensor(
results[key])
data_sample.gt_instances = instance_data
if 'proposals' in results:
data_sample.proposals = InstanceData(
bboxes=to_tensor(results['proposals']))
if 'label' in results:
data_sample.set_gt_label(results['label'])
# Set custom algorithm keys
for key in self.algorithm_keys:
if key in results:
data_sample.set_field(results[key], key)
# Set meta keys
img_meta = {k: results[k] for k in self.meta_keys if k in results}
data_sample.set_metainfo(img_meta)
packed_results['data_samples'] = data_sample
return packed_results
|
The transform function of :class:`PackActionInputs`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/formatting.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/formatting.py
|
Apache-2.0
|
def transform(self, results):
"""Method to pack the input data.
Args:
results (dict): Result dict from the data pipeline.
Returns:
dict:
- 'inputs' (obj:`torch.Tensor`): The forward data of models.
- 'data_samples' (obj:`ActionDataSample`): The annotation info of the
sample.
"""
packed_results = dict()
if 'raw_feature' in results:
raw_feature = results['raw_feature']
packed_results['inputs'] = to_tensor(raw_feature)
elif 'bsp_feature' in results:
packed_results['inputs'] = torch.tensor(0.)
else:
raise ValueError(
'Cannot get "raw_feature" or "bsp_feature" in the input '
'dict of `PackActionInputs`.')
data_sample = ActionDataSample()
for key in self.keys:
if key not in results:
continue
elif key == 'proposals':
instance_data = InstanceData()
instance_data[key] = to_tensor(results[key])
data_sample.proposals = instance_data
else:
if hasattr(data_sample, 'gt_instances'):
data_sample.gt_instances[key] = to_tensor(results[key])
else:
instance_data = InstanceData()
instance_data[key] = to_tensor(results[key])
data_sample.gt_instances = instance_data
img_meta = {k: results[k] for k in self.meta_keys if k in results}
data_sample.set_metainfo(img_meta)
packed_results['data_samples'] = data_sample
return packed_results
|
Method to pack the input data.
Args:
results (dict): Result dict from the data pipeline.
Returns:
dict:
- 'inputs' (obj:`torch.Tensor`): The forward data of models.
- 'data_samples' (obj:`ActionDataSample`): The annotation info of the
sample.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/formatting.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/formatting.py
|
Apache-2.0
|
def transform(self, results):
"""Performs the Transpose formatting.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
for key in self.keys:
results[key] = results[key].transpose(self.order)
return results
|
Performs the Transpose formatting.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/formatting.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/formatting.py
|
Apache-2.0
|
def transform(self, results: Dict) -> Dict:
"""The transform function of :class:`FormatGCNInput`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
"""
keypoint = results['keypoint']
if 'keypoint_score' in results:
keypoint = np.concatenate(
(keypoint, results['keypoint_score'][..., None]), axis=-1)
cur_num_person = keypoint.shape[0]
if cur_num_person < self.num_person:
pad_dim = self.num_person - cur_num_person
pad = np.zeros(
(pad_dim, ) + keypoint.shape[1:], dtype=keypoint.dtype)
keypoint = np.concatenate((keypoint, pad), axis=0)
if self.mode == 'loop' and cur_num_person == 1:
for i in range(1, self.num_person):
keypoint[i] = keypoint[0]
elif cur_num_person > self.num_person:
keypoint = keypoint[:self.num_person]
M, T, V, C = keypoint.shape
nc = results.get('num_clips', 1)
assert T % nc == 0
keypoint = keypoint.reshape(
(M, nc, T // nc, V, C)).transpose(1, 0, 2, 3, 4)
results['keypoint'] = np.ascontiguousarray(keypoint)
return results
|
The transform function of :class:`FormatGCNInput`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/formatting.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/formatting.py
|
Apache-2.0
|
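To make the padding and reshape at the end of `FormatGCNInput.transform` concrete, here is a standalone NumPy sketch of the same `(M, T, V, C) -> (num_clips, M, T // num_clips, V, C)` rearrangement with made-up sizes (it omits the `keypoint_score` concatenation and the 'loop' padding mode):

```python
import numpy as np

num_person, num_clips = 3, 2
keypoint = np.random.rand(1, 100, 17, 3)  # (M=1, T=100, V=17, C=3)

# Pad the person dimension up to `num_person`, as in the transform above.
pad = np.zeros((num_person - keypoint.shape[0],) + keypoint.shape[1:],
               dtype=keypoint.dtype)
keypoint = np.concatenate((keypoint, pad), axis=0)

# Split T into `num_clips` segments and move the clip axis to the front.
M, T, V, C = keypoint.shape
keypoint = keypoint.reshape((M, num_clips, T // num_clips, V, C))
keypoint = np.ascontiguousarray(keypoint.transpose(1, 0, 2, 3, 4))
print(keypoint.shape)  # (2, 3, 50, 17, 3) = (num_clips, M, T', V, C)
```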
def transform(self, results):
"""Convert the label dictionary to 3 tensors: "label", "mask" and
"category_mask".
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
if not self.hvu_initialized:
self.init_hvu_info(results['categories'], results['category_nums'])
onehot = torch.zeros(self.num_tags)
onehot_mask = torch.zeros(self.num_tags)
category_mask = torch.zeros(self.num_categories)
for category, tags in results['label'].items():
# skip if not training on this category
if category not in self.categories:
continue
category_mask[self.categories.index(category)] = 1.
start_idx = self.category2startidx[category]
category_num = self.category2num[category]
tags = [idx + start_idx for idx in tags]
onehot[tags] = 1.
onehot_mask[start_idx:category_num + start_idx] = 1.
results['label'] = onehot
results['mask'] = onehot_mask
results['category_mask'] = category_mask
return results
|
Convert the label dictionary to 3 tensors: "label", "mask" and
"category_mask".
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/loading.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
|
Apache-2.0
|
def _get_train_clips(self, num_frames: int,
ori_clip_len: float) -> np.array:
"""Get clip offsets in train mode.
It will calculate the average interval for selected frames,
and randomly shift them within offsets between [0, avg_interval].
If the total number of frames is smaller than clips num or origin
frames length, it will return all zero indices.
Args:
num_frames (int): Total number of frames in the video.
ori_clip_len (float): length of original sample clip.
Returns:
np.ndarray: Sampled frame indices in train mode.
"""
if self.keep_tail_frames:
avg_interval = (num_frames - ori_clip_len + 1) / float(
self.num_clips)
if num_frames > ori_clip_len - 1:
base_offsets = np.arange(self.num_clips) * avg_interval
clip_offsets = (base_offsets + np.random.uniform(
0, avg_interval, self.num_clips)).astype(np.int32)
else:
clip_offsets = np.zeros((self.num_clips, ), dtype=np.int32)
else:
avg_interval = (num_frames - ori_clip_len + 1) // self.num_clips
if avg_interval > 0:
base_offsets = np.arange(self.num_clips) * avg_interval
clip_offsets = base_offsets + np.random.randint(
avg_interval, size=self.num_clips)
elif num_frames > max(self.num_clips, ori_clip_len):
clip_offsets = np.sort(
np.random.randint(
num_frames - ori_clip_len + 1, size=self.num_clips))
elif avg_interval == 0:
ratio = (num_frames - ori_clip_len + 1.0) / self.num_clips
clip_offsets = np.around(np.arange(self.num_clips) * ratio)
else:
clip_offsets = np.zeros((self.num_clips, ), dtype=np.int32)
return clip_offsets
|
Get clip offsets in train mode.
It will calculate the average interval for selected frames,
and randomly shift them within offsets between [0, avg_interval].
If the total number of frames is smaller than clips num or origin
frames length, it will return all zero indices.
Args:
num_frames (int): Total number of frames in the video.
ori_clip_len (float): length of original sample clip.
Returns:
np.ndarray: Sampled frame indices in train mode.
|
_get_train_clips
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/loading.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
|
Apache-2.0
|
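As a worked example of the non-`keep_tail_frames` branch above: with `num_frames=60`, `ori_clip_len=32` and `num_clips=3`, the average interval is `(60 - 32 + 1) // 3 = 9`, so the base offsets are `[0, 9, 18]` and each clip start is jittered by a random integer in `[0, 9)`. A standalone sketch of just that branch (not the full `SampleFrames` transform):

```python
import numpy as np

def train_clip_offsets(num_frames, ori_clip_len, num_clips):
    """Sketch of the `avg_interval > 0` branch of `_get_train_clips`."""
    avg_interval = (num_frames - ori_clip_len + 1) // num_clips
    base_offsets = np.arange(num_clips) * avg_interval
    return base_offsets + np.random.randint(avg_interval, size=num_clips)

print(train_clip_offsets(60, 32, 3))  # e.g. [ 3 11 20]
```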
def _get_test_clips(self, num_frames: int,
ori_clip_len: float) -> np.array:
"""Get clip offsets in test mode.
If the total number of frames is
not enough, it will return all zero indices.
Args:
num_frames (int): Total number of frames in the video.
ori_clip_len (float): length of original sample clip.
Returns:
np.ndarray: Sampled frame indices in test mode.
"""
if self.clip_len == 1: # 2D recognizer
# assert self.frame_interval == 1
avg_interval = num_frames / float(self.num_clips)
base_offsets = np.arange(self.num_clips) * avg_interval
clip_offsets = base_offsets + avg_interval / 2.0
if self.twice_sample:
clip_offsets = np.concatenate([clip_offsets, base_offsets])
else: # 3D recognizer
max_offset = max(num_frames - ori_clip_len, 0)
if self.twice_sample:
num_clips = self.num_clips * 2
else:
num_clips = self.num_clips
if num_clips > 1:
num_segments = self.num_clips - 1
# align test sample strategy with `PySlowFast` repo
if self.target_fps is not None:
offset_between = np.floor(max_offset / float(num_segments))
clip_offsets = np.arange(num_clips) * offset_between
else:
offset_between = max_offset / float(num_segments)
clip_offsets = np.arange(num_clips) * offset_between
clip_offsets = np.round(clip_offsets)
else:
clip_offsets = np.array([max_offset // 2])
return clip_offsets
|
Get clip offsets in test mode.
If the total number of frames is
not enough, it will return all zero indices.
Args:
num_frames (int): Total number of frames in the video.
ori_clip_len (float): length of original sample clip.
Returns:
np.ndarray: Sampled frame indices in test mode.
|
_get_test_clips
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/loading.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
|
Apache-2.0
|
def _get_ori_clip_len(self, fps_scale_ratio: float) -> float:
"""calculate length of clip segment for different strategy.
Args:
fps_scale_ratio (float): Scale ratio to adjust fps.
"""
if self.target_fps is not None:
# align test sample strategy with `PySlowFast` repo
ori_clip_len = self.clip_len * self.frame_interval
ori_clip_len = np.maximum(1, ori_clip_len * fps_scale_ratio)
elif self.test_mode:
ori_clip_len = (self.clip_len - 1) * self.frame_interval + 1
else:
ori_clip_len = self.clip_len * self.frame_interval
return ori_clip_len
|
Calculate the length of a clip segment for different strategies.
Args:
fps_scale_ratio (float): Scale ratio to adjust fps.
|
_get_ori_clip_len
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/loading.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
|
Apache-2.0
|
def _get_sample_clips(self, num_frames: int) -> np.ndarray:
"""To sample an n-frame clip from the video. UniformSample basically
divides the video into n segments of equal length and randomly samples
one frame from each segment. When the duration of video frames is
shorter than the desired length of the target clip, this approach will
duplicate the sampled frame instead of looping the sample in "loop"
mode. In the test mode, when we need to sample multiple clips,
specifically 'n' clips, this method will further divide the segments
based on the number of clips to be sampled. The 'i-th' clip will
sample the frame located at the position 'i * len(segment) / n'
within the segment.
Args:
num_frames (int): Total number of frames in the video.
Returns:
seq (np.ndarray): The indices of the frames sampled from the video.
"""
seg_size = float(num_frames - 1) / self.clip_len
inds = []
if not self.test_mode:
for i in range(self.clip_len):
start = int(np.round(seg_size * i))
end = int(np.round(seg_size * (i + 1)))
inds.append(np.random.randint(start, end + 1))
else:
duration = seg_size / (self.num_clips + 1)
for k in range(self.num_clips):
for i in range(self.clip_len):
start = int(np.round(seg_size * i))
frame_index = start + int(duration * (k + 1))
inds.append(frame_index)
return np.array(inds)
|
To sample an n-frame clip from the video. UniformSample basically
divides the video into n segments of equal length and randomly samples
one frame from each segment. When the duration of video frames is
shorter than the desired length of the target clip, this approach will
duplicate the sampled frame instead of looping the sample in "loop"
mode. In the test mode, when we need to sample multiple clips,
specifically 'n' clips, this method will further divide the segments
based on the number of clips to be sampled. The 'i-th' clip will
sample the frame located at the position 'i * len(segment) / n'
within the segment.
Args:
num_frames (int): Total number of frames in the video.
Returns:
seq (np.ndarray): The indices of the frames sampled from the video.
|
_get_sample_clips
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/loading.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
|
Apache-2.0
|
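A worked example of the test-mode branch of `_get_sample_clips`: with `num_frames=30`, `clip_len=5` and `num_clips=1`, `seg_size = 29 / 5 = 5.8` and `duration = 2.9`, so the segment starts are `[0, 6, 12, 17, 23]` and the sampled indices are `[2, 8, 14, 19, 25]`. A standalone sketch of that branch:

```python
import numpy as np

def uniform_test_inds(num_frames, clip_len, num_clips):
    """Sketch of the test-mode branch of `_get_sample_clips` above."""
    seg_size = float(num_frames - 1) / clip_len
    duration = seg_size / (num_clips + 1)
    inds = []
    for k in range(num_clips):
        for i in range(clip_len):
            start = int(np.round(seg_size * i))
            inds.append(start + int(duration * (k + 1)))
    return np.array(inds)

print(uniform_test_inds(30, 5, 1))  # [ 2  8 14 19 25]
```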
def transform(self, results: Dict) -> Dict:
"""Perform the Uniform Sampling.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
"""
num_frames = results['total_frames']
inds = self._get_sample_clips(num_frames)
start_index = results['start_index']
inds = inds + start_index
results['frame_inds'] = inds.astype(np.int32)
results['clip_len'] = self.clip_len
results['frame_interval'] = None
results['num_clips'] = self.num_clips
return results
|
Perform the Uniform Sampling.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/loading.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
|
Apache-2.0
|
def _get_train_clips(self, num_frames: int) -> np.array:
"""Get clip offsets by dense sample strategy in train mode.
It will calculate a sample position and sample interval and set
start index 0 when sample_pos == 1 or randomly choose from
[0, sample_pos - 1]. Then it will shift the start index by each
base offset.
Args:
num_frames (int): Total number of frames in the video.
Returns:
np.ndarray: Sampled frame indices in train mode.
"""
sample_position = max(1, 1 + num_frames - self.sample_range)
interval = self.sample_range // self.num_clips
start_idx = 0 if sample_position == 1 else np.random.randint(
0, sample_position - 1)
base_offsets = np.arange(self.num_clips) * interval
clip_offsets = (base_offsets + start_idx) % num_frames
return clip_offsets
|
Get clip offsets by dense sample strategy in train mode.
It will calculate a sample position and sample interval and set
start index 0 when sample_pos == 1 or randomly choose from
[0, sample_pos - 1]. Then it will shift the start index by each
base offset.
Args:
num_frames (int): Total number of frames in the video.
Returns:
np.ndarray: Sampled frame indices in train mode.
|
_get_train_clips
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/loading.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
|
Apache-2.0
|
def _get_test_clips(self, num_frames: int) -> np.array:
"""Get clip offsets by dense sample strategy in test mode.
It will calculate a sample position and sample interval and evenly
sample several start indexes as start positions between
[0, sample_position-1]. Then it will shift each start index by the
base offsets.
Args:
num_frames (int): Total number of frames in the video.
Returns:
np.ndarray: Sampled frame indices in test mode.
"""
sample_position = max(1, 1 + num_frames - self.sample_range)
interval = self.sample_range // self.num_clips
start_list = np.linspace(
0, sample_position - 1, num=self.num_sample_positions, dtype=int)
base_offsets = np.arange(self.num_clips) * interval
clip_offsets = list()
for start_idx in start_list:
clip_offsets.extend((base_offsets + start_idx) % num_frames)
clip_offsets = np.array(clip_offsets)
return clip_offsets
|
Get clip offsets by dense sample strategy in test mode.
It will calculate a sample position and sample interval and evenly
sample several start indexes as start positions between
[0, sample_position-1]. Then it will shift each start index by the
base offsets.
Args:
num_frames (int): Total number of frames in the video.
Returns:
np.ndarray: Sampled frame indices in test mode.
|
_get_test_clips
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/loading.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
|
Apache-2.0
|
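A worked example of the dense test-mode sampling above: with `num_frames=100`, `sample_range=64`, `num_clips=8` and `num_sample_positions=3`, the sample position is `max(1, 1 + 100 - 64) = 37`, the interval is `64 // 8 = 8`, and the start positions are `linspace(0, 36, 3) = [0, 18, 36]`, so the result is three shifted copies of `[0, 8, ..., 56]`. A standalone sketch:

```python
import numpy as np

def dense_test_offsets(num_frames, sample_range, num_clips,
                       num_sample_positions):
    """Sketch of the dense test-mode clip sampling above."""
    sample_position = max(1, 1 + num_frames - sample_range)
    interval = sample_range // num_clips
    start_list = np.linspace(0, sample_position - 1,
                             num=num_sample_positions, dtype=int)
    base_offsets = np.arange(num_clips) * interval
    offsets = []
    for start_idx in start_list:
        offsets.extend((base_offsets + start_idx) % num_frames)
    return np.array(offsets)

print(dense_test_offsets(100, 64, 8, 3)[:8])  # [ 0  8 16 24 32 40 48 56]
```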
def transform(self, results):
"""Perform the PyAV initialization.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
try:
import av
except ImportError:
raise ImportError('Please run "conda install av -c conda-forge" '
'or "pip install av" to install PyAV first.')
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
file_obj = io.BytesIO(self.file_client.get(results['filename']))
container = av.open(file_obj)
results['video_reader'] = container
results['total_frames'] = container.streams.video[0].frames
return results
|
Perform the PyAV initialization.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/loading.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
|
Apache-2.0
|
def transform(self, results):
"""Perform the PyAV decoding.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
container = results['video_reader']
imgs = list()
if self.multi_thread:
container.streams.video[0].thread_type = 'AUTO'
if results['frame_inds'].ndim != 1:
results['frame_inds'] = np.squeeze(results['frame_inds'])
if self.mode == 'accurate':
            # set max index to enable early stopping
max_inds = max(results['frame_inds'])
i = 0
for frame in container.decode(video=0):
if i > max_inds + 1:
break
imgs.append(frame.to_rgb().to_ndarray())
i += 1
            # PyAV may decode fewer frames than the reported total,
            # so wrap the indices around to avoid an index error
results['imgs'] = [
imgs[i % len(imgs)] for i in results['frame_inds']
]
elif self.mode == 'efficient':
for frame in container.decode(video=0):
backup_frame = frame
break
stream = container.streams.video[0]
for idx in results['frame_inds']:
pts_scale = stream.average_rate * stream.time_base
frame_pts = int(idx / pts_scale)
container.seek(
frame_pts, any_frame=False, backward=True, stream=stream)
frame = self.frame_generator(container, stream)
if frame is not None:
imgs.append(frame)
backup_frame = frame
else:
imgs.append(backup_frame)
results['imgs'] = imgs
results['original_shape'] = imgs[0].shape[:2]
results['img_shape'] = imgs[0].shape[:2]
results['video_reader'] = None
del container
return results
|
Perform the PyAV decoding.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/loading.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
|
Apache-2.0
|
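A hedged usage sketch chaining the two PyAV transforms above by hand on one results dict; the class names `PyAVInit` and `PyAVDecode` and the video path are assumptions made for illustration, not values taken from these entries.

import numpy as np
from mmaction.datasets.transforms.loading import PyAVInit, PyAVDecode  # assumed class names

results = dict(filename='demo.mp4')                   # hypothetical local video
results = PyAVInit().transform(results)               # opens the container, sets total_frames
results['frame_inds'] = np.arange(0, results['total_frames'], 10)
results = PyAVDecode(mode='accurate').transform(results)   # decodes sequentially up to the max index
print(len(results['imgs']), results['img_shape'])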
def transform(self, results):
"""Perform the PIMS initialization.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
try:
import pims
except ImportError:
raise ImportError('Please run "conda install pims -c conda-forge" '
'or "pip install pims" to install pims first.')
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
file_obj = io.BytesIO(self.file_client.get(results['filename']))
if self.mode == 'accurate':
container = pims.PyAVReaderIndexed(file_obj)
else:
container = pims.PyAVReaderTimed(file_obj)
results['video_reader'] = container
results['total_frames'] = len(container)
return results
|
Perform the PIMS initialization.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/loading.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
|
Apache-2.0
|
def transform(self, results):
"""Perform the PIMS decoding.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
container = results['video_reader']
if results['frame_inds'].ndim != 1:
results['frame_inds'] = np.squeeze(results['frame_inds'])
frame_inds = results['frame_inds']
imgs = [container[idx] for idx in frame_inds]
results['video_reader'] = None
del container
results['imgs'] = imgs
results['original_shape'] = imgs[0].shape[:2]
results['img_shape'] = imgs[0].shape[:2]
return results
|
Perform the PIMS decoding.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/loading.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
|
Apache-2.0
|
def transform(self, results):
"""Perform the PyAV motion vector decoding.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
container = results['video_reader']
imgs = list()
if self.multi_thread:
container.streams.video[0].thread_type = 'AUTO'
if results['frame_inds'].ndim != 1:
results['frame_inds'] = np.squeeze(results['frame_inds'])
        # set max index to enable early stopping
max_idx = max(results['frame_inds'])
i = 0
stream = container.streams.video[0]
codec_context = stream.codec_context
codec_context.options = {'flags2': '+export_mvs'}
for packet in container.demux(stream):
for frame in packet.decode():
if i > max_idx + 1:
break
i += 1
height = frame.height
width = frame.width
mv = np.zeros((height, width, 2), dtype=np.int8)
vectors = frame.side_data.get('MOTION_VECTORS')
if frame.key_frame:
                    # Key frames don't have motion vectors
assert vectors is None
if vectors is not None and len(vectors) > 0:
mv = self._parse_vectors(mv, vectors.to_ndarray(), height,
width)
imgs.append(mv)
results['video_reader'] = None
del container
        # PyAV may decode fewer frames than the reported total,
        # so wrap the indices around to avoid an index error
results['motion_vectors'] = np.array(
[imgs[i % len(imgs)] for i in results['frame_inds']])
return results
|
Perform the PyAV motion vector decoding.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/loading.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
|
Apache-2.0
|
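The key detail in the motion-vector decoder above is switching on `+export_mvs` before decoding. A minimal standalone PyAV sketch of that flag (the file name is a placeholder):

import av

container = av.open('demo.mp4')                                  # hypothetical video file
stream = container.streams.video[0]
stream.codec_context.options = {'flags2': '+export_mvs'}         # same flag as in the transform above
for packet in container.demux(stream):
    for frame in packet.decode():
        vectors = frame.side_data.get('MOTION_VECTORS')
        # key frames carry no motion vectors, so `vectors` is None there
        print(frame.key_frame, None if vectors is None else vectors.to_ndarray().shape)
        break
    break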
def transform(self, results: Dict) -> Dict:
"""Perform the Decord initialization.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
"""
container = self._get_video_reader(results['filename'])
results['total_frames'] = len(container)
results['video_reader'] = container
results['avg_fps'] = container.get_avg_fps()
return results
|
Perform the Decord initialization.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/loading.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
|
Apache-2.0
|
def transform(self, results: Dict) -> Dict:
"""Perform the Decord decoding.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
"""
container = results['video_reader']
if results['frame_inds'].ndim != 1:
results['frame_inds'] = np.squeeze(results['frame_inds'])
frame_inds = results['frame_inds']
imgs = self._decord_load_frames(container, frame_inds)
results['video_reader'] = None
del container
results['imgs'] = imgs
results['original_shape'] = imgs[0].shape[:2]
results['img_shape'] = imgs[0].shape[:2]
# we resize the gt_bboxes and proposals to their real scale
if 'gt_bboxes' in results:
h, w = results['img_shape']
scale_factor = np.array([w, h, w, h])
gt_bboxes = results['gt_bboxes']
gt_bboxes = (gt_bboxes * scale_factor).astype(np.float32)
results['gt_bboxes'] = gt_bboxes
if 'proposals' in results and results['proposals'] is not None:
proposals = results['proposals']
proposals = (proposals * scale_factor).astype(np.float32)
results['proposals'] = proposals
return results
|
Perform the Decord decoding.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/loading.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
|
Apache-2.0
|
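A hedged sketch of a minimal Decord pipeline built from the two transforms above plus a frame sampler; `DecordInit`, `DecordDecode` and `SampleFrames` are class names assumed to live in this loading module, and the clip path is a placeholder.

from mmaction.datasets.transforms.loading import DecordInit, DecordDecode, SampleFrames  # assumed names

results = dict(filename='demo.mp4', start_index=0)     # hypothetical clip
results = DecordInit().transform(results)              # sets video_reader, total_frames, avg_fps
results = SampleFrames(clip_len=32, frame_interval=2, num_clips=1).transform(results)
results = DecordDecode().transform(results)            # fills results['imgs'] for the sampled indices
print(len(results['imgs']), results['img_shape'])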
def transform(self, results: dict) -> dict:
"""Perform the OpenCV initialization.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
if self.io_backend == 'disk':
new_path = results['filename']
else:
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
thread_id = get_thread_id()
# save the file of same thread at the same place
new_path = osp.join(self.tmp_folder, f'tmp_{thread_id}.mp4')
with open(new_path, 'wb') as f:
f.write(self.file_client.get(results['filename']))
container = mmcv.VideoReader(new_path)
results['new_path'] = new_path
results['video_reader'] = container
results['total_frames'] = len(container)
return results
|
Perform the OpenCV initialization.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/loading.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
|
Apache-2.0
|
def transform(self, results: dict) -> dict:
"""Perform the OpenCV decoding.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
container = results['video_reader']
imgs = list()
if results['frame_inds'].ndim != 1:
results['frame_inds'] = np.squeeze(results['frame_inds'])
for frame_ind in results['frame_inds']:
cur_frame = container[frame_ind]
# last frame may be None in OpenCV
while isinstance(cur_frame, type(None)):
frame_ind -= 1
cur_frame = container[frame_ind]
imgs.append(cur_frame)
results['video_reader'] = None
del container
imgs = np.array(imgs)
# The default channel order of OpenCV is BGR, thus we change it to RGB
imgs = imgs[:, :, :, ::-1]
results['imgs'] = list(imgs)
results['original_shape'] = imgs[0].shape[:2]
results['img_shape'] = imgs[0].shape[:2]
return results
|
Perform the OpenCV decoding.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/loading.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
|
Apache-2.0
|
def transform(self, results: dict) -> dict:
"""Perform the ``RawFrameDecode`` to pick frames given indices.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
mmcv.use_backend(self.decoding_backend)
directory = results['frame_dir']
filename_tmpl = results['filename_tmpl']
modality = results['modality']
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
imgs = list()
if results['frame_inds'].ndim != 1:
results['frame_inds'] = np.squeeze(results['frame_inds'])
offset = results.get('offset', 0)
cache = {}
for i, frame_idx in enumerate(results['frame_inds']):
# Avoid loading duplicated frames
if frame_idx in cache:
imgs.append(cp.deepcopy(imgs[cache[frame_idx]]))
continue
else:
cache[frame_idx] = i
frame_idx += offset
if modality == 'RGB':
filepath = osp.join(directory, filename_tmpl.format(frame_idx))
img_bytes = self.file_client.get(filepath)
# Get frame with channel order RGB directly.
cur_frame = mmcv.imfrombytes(img_bytes, channel_order='rgb')
imgs.append(cur_frame)
elif modality == 'Flow':
x_filepath = osp.join(directory,
filename_tmpl.format('x', frame_idx))
y_filepath = osp.join(directory,
filename_tmpl.format('y', frame_idx))
x_img_bytes = self.file_client.get(x_filepath)
x_frame = mmcv.imfrombytes(x_img_bytes, flag='grayscale')
y_img_bytes = self.file_client.get(y_filepath)
y_frame = mmcv.imfrombytes(y_img_bytes, flag='grayscale')
imgs.append(np.stack([x_frame, y_frame], axis=-1))
else:
raise NotImplementedError
results['imgs'] = imgs
results['original_shape'] = imgs[0].shape[:2]
results['img_shape'] = imgs[0].shape[:2]
# we resize the gt_bboxes and proposals to their real scale
if 'gt_bboxes' in results:
h, w = results['img_shape']
scale_factor = np.array([w, h, w, h])
gt_bboxes = results['gt_bboxes']
gt_bboxes = (gt_bboxes * scale_factor).astype(np.float32)
results['gt_bboxes'] = gt_bboxes
if 'proposals' in results and results['proposals'] is not None:
proposals = results['proposals']
proposals = (proposals * scale_factor).astype(np.float32)
results['proposals'] = proposals
return results
|
Perform the ``RawFrameDecode`` to pick frames given indices.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/loading.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
|
Apache-2.0
|
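The frame paths in the ``RawFrameDecode`` transform above are produced by `filename_tmpl.format(...)`; a tiny hypothetical illustration of the two modalities (the template strings and directory are examples, not values from the source):

import os.path as osp

frame_dir = 'data/rawframes/video_001'                        # hypothetical frame directory
# RGB: the template takes only the frame index
print(osp.join(frame_dir, 'img_{:05d}.jpg'.format(7)))        # .../img_00007.jpg
# Flow: the template takes the direction ('x' or 'y') and then the index
print(osp.join(frame_dir, '{}_{:05d}.jpg'.format('x', 7)))    # .../x_00007.jpg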
def transform(self, results):
"""Perform the ``ImageDecode`` to load image given the file path.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
mmcv.use_backend(self.decoding_backend)
filename = results['filename']
if self.file_client is None:
self.file_client = FileClient(self.io_backend, **self.kwargs)
imgs = list()
img_bytes = self.file_client.get(filename)
img = mmcv.imfrombytes(img_bytes, channel_order='rgb')
imgs.append(img)
results['imgs'] = imgs
results['original_shape'] = imgs[0].shape[:2]
results['img_shape'] = imgs[0].shape[:2]
return results
|
Perform the ``ImageDecode`` to load image given the file path.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/loading.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
|
Apache-2.0
|
def transform(self, results: Dict) -> Dict:
"""Perform the numpy loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
if osp.exists(results['audio_path']):
feature_map = np.load(results['audio_path'])
else:
# Generate a random dummy 10s input
# Some videos do not have audio stream
pad_func = getattr(self, f'_{self.pad_method}_pad')
feature_map = pad_func((640, 80))
results['length'] = feature_map.shape[0]
results['audios'] = feature_map
return results
|
Perform the numpy loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/loading.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
|
Apache-2.0
|
def transform(self, results):
"""Perform the building of pseudo clips.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
# the input should be one single image
assert len(results['imgs']) == 1
im = results['imgs'][0]
for _ in range(1, self.clip_len):
results['imgs'].append(np.copy(im))
results['clip_len'] = self.clip_len
results['num_clips'] = 1
return results
|
Perform the building of pseudo clips.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/loading.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
|
Apache-2.0
|
def transform(self, results: Dict) -> Dict:
"""Perform the ``AudioFeatureSelector`` to pick audio feature clips.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
audio = results['audios']
frame_inds = results['frame_inds']
num_clips = results['num_clips']
resampled_clips = list()
frame_inds = frame_inds.reshape(num_clips, -1)
for clip_idx in range(num_clips):
clip_frame_inds = frame_inds[clip_idx]
start_idx = max(
0,
int(
round((clip_frame_inds[0] + 1) / results['total_frames'] *
results['length'])))
end_idx = min(
results['length'],
int(
round((clip_frame_inds[-1] + 1) / results['total_frames'] *
results['length'])))
cropped_audio = audio[start_idx:end_idx, :]
if cropped_audio.shape[0] >= self.fixed_length:
truncated_audio = cropped_audio[:self.fixed_length, :]
else:
truncated_audio = np.pad(
cropped_audio,
((0, self.fixed_length - cropped_audio.shape[0]), (0, 0)),
mode='constant')
resampled_clips.append(truncated_audio)
results['audios'] = np.array(resampled_clips)
results['audios_shape'] = results['audios'].shape
return results
|
Perform the ``AudioFeatureSelector`` to pick audio feature clips.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/loading.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
|
Apache-2.0
|
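A standalone numeric check of the index mapping above: sampled frame indices are rescaled from the video timeline to the audio-feature timeline, and the crop is zero-padded up to `fixed_length` (all sizes hypothetical).

import numpy as np

total_frames, length, fixed_length = 300, 640, 128         # hypothetical video/audio sizes
audio = np.random.rand(length, 80).astype(np.float32)      # fake audio feature map
clip_frame_inds = np.arange(100, 132)                       # one sampled video clip

start_idx = max(0, int(round((clip_frame_inds[0] + 1) / total_frames * length)))
end_idx = min(length, int(round((clip_frame_inds[-1] + 1) / total_frames * length)))
cropped = audio[start_idx:end_idx]
if cropped.shape[0] < fixed_length:                         # pad the tail with zeros
    cropped = np.pad(cropped, ((0, fixed_length - cropped.shape[0]), (0, 0)))
print(start_idx, end_idx, cropped.shape)                    # 215 282 (128, 80)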
def transform(self, results):
"""Perform the LoadLocalizationFeature loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
data_path = results['feature_path']
raw_feature = np.loadtxt(
data_path, dtype=np.float32, delimiter=',', skiprows=1)
results['raw_feature'] = np.transpose(raw_feature, (1, 0))
return results
|
Perform the LoadLocalizationFeature loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/loading.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
|
Apache-2.0
|
def transform(self, results):
"""Perform the GenerateLocalizationLabels loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
video_frame = results['duration_frame']
video_second = results['duration_second']
feature_frame = results['feature_frame']
corrected_second = float(feature_frame) / video_frame * video_second
annotations = results['annotations']
gt_bbox = []
for annotation in annotations:
current_start = max(
min(1, annotation['segment'][0] / corrected_second), 0)
current_end = max(
min(1, annotation['segment'][1] / corrected_second), 0)
gt_bbox.append([current_start, current_end])
gt_bbox = np.array(gt_bbox)
results['gt_bbox'] = gt_bbox
return results
|
Perform the GenerateLocalizationLabels loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/loading.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
|
Apache-2.0
|
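A worked numeric check of the label normalisation above, with hypothetical metadata: segment boundaries in seconds are divided by the feature-corrected duration and clipped into [0, 1].

duration_frame, duration_second, feature_frame = 960, 32.0, 928       # hypothetical video meta
corrected_second = feature_frame / duration_frame * duration_second   # about 30.93 s
segment = [2.5, 18.0]                                                  # one annotated action, in seconds
start = max(min(1, segment[0] / corrected_second), 0)
end = max(min(1, segment[1] / corrected_second), 0)
print(round(start, 3), round(end, 3))                                  # roughly 0.081 0.582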
def transform(self, results):
"""Perform the LoadProposals loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
video_name = results['video_name']
proposal_path = osp.join(self.pgm_proposals_dir,
video_name + self.proposal_ext)
if self.proposal_ext == '.csv':
pgm_proposals = np.loadtxt(
proposal_path, dtype=np.float32, delimiter=',', skiprows=1)
pgm_proposals = np.array(pgm_proposals[:self.top_k])
tmin = pgm_proposals[:, 0]
tmax = pgm_proposals[:, 1]
tmin_score = pgm_proposals[:, 2]
tmax_score = pgm_proposals[:, 3]
reference_temporal_iou = pgm_proposals[:, 5]
feature_path = osp.join(self.pgm_features_dir,
video_name + self.feature_ext)
if self.feature_ext == '.npy':
bsp_feature = np.load(feature_path).astype(np.float32)
bsp_feature = bsp_feature[:self.top_k, :]
results['bsp_feature'] = bsp_feature
results['tmin'] = tmin
results['tmax'] = tmax
results['tmin_score'] = tmin_score
results['tmax_score'] = tmax_score
results['reference_temporal_iou'] = reference_temporal_iou
return results
|
Perform the LoadProposals loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/loading.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/loading.py
|
Apache-2.0
|
def transform(self, results: Dict) -> Dict:
"""Perform the pose decoding.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
required_keys = ['total_frames', 'frame_inds', 'keypoint']
for k in required_keys:
assert k in results
total_frames = results['total_frames']
frame_inds = results.pop('frame_inds')
keypoint = results['keypoint']
if 'anno_inds' in results:
frame_inds = frame_inds[results['anno_inds']]
keypoint = keypoint[results['anno_inds']]
assert np.all(np.diff(frame_inds) >= 0), \
            'frame_inds should be monotonically increasing'
def mapinds(inds):
uni = np.unique(inds)
map_ = {x: i for i, x in enumerate(uni)}
inds = [map_[x] for x in inds]
return np.array(inds, dtype=np.int16)
if self.squeeze:
frame_inds = mapinds(frame_inds)
total_frames = np.max(frame_inds) + 1
results['total_frames'] = total_frames
num_joints = keypoint.shape[1]
num_person = get_mode(frame_inds)[-1][0]
new_kp = np.zeros([num_person, total_frames, num_joints, 2],
dtype=np.float16)
new_kpscore = np.zeros([num_person, total_frames, num_joints],
dtype=np.float16)
nperson_per_frame = np.zeros([total_frames], dtype=np.int16)
for frame_ind, kp in zip(frame_inds, keypoint):
person_ind = nperson_per_frame[frame_ind]
new_kp[person_ind, frame_ind] = kp[:, :2]
new_kpscore[person_ind, frame_ind] = kp[:, 2]
nperson_per_frame[frame_ind] += 1
if num_person > self.max_person:
for i in range(total_frames):
nperson = nperson_per_frame[i]
val = new_kpscore[:nperson, i]
score_sum = val.sum(-1)
inds = sorted(range(nperson), key=lambda x: -score_sum[x])
new_kpscore[:nperson, i] = new_kpscore[inds, i]
new_kp[:nperson, i] = new_kp[inds, i]
num_person = self.max_person
results['num_person'] = num_person
results['keypoint'] = new_kp[:num_person]
results['keypoint_score'] = new_kpscore[:num_person]
return results
|
Perform the pose decoding.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/pose_transforms.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
|
Apache-2.0
|
def generate_a_heatmap(self, arr: np.ndarray, centers: np.ndarray,
max_values: np.ndarray) -> None:
"""Generate pseudo heatmap for one keypoint in one frame.
Args:
arr (np.ndarray): The array to store the generated heatmaps.
Shape: img_h * img_w.
centers (np.ndarray): The coordinates of corresponding keypoints
(of multiple persons). Shape: M * 2.
max_values (np.ndarray): The max values of each keypoint. Shape: M.
"""
sigma = self.sigma
img_h, img_w = arr.shape
for center, max_value in zip(centers, max_values):
if max_value < self.eps:
continue
mu_x, mu_y = center[0], center[1]
st_x = max(int(mu_x - 3 * sigma), 0)
ed_x = min(int(mu_x + 3 * sigma) + 1, img_w)
st_y = max(int(mu_y - 3 * sigma), 0)
ed_y = min(int(mu_y + 3 * sigma) + 1, img_h)
x = np.arange(st_x, ed_x, 1, np.float32)
y = np.arange(st_y, ed_y, 1, np.float32)
# if the keypoint not in the heatmap coordinate system
if not (len(x) and len(y)):
continue
y = y[:, None]
patch = np.exp(-((x - mu_x)**2 + (y - mu_y)**2) / 2 / sigma**2)
patch = patch * max_value
arr[st_y:ed_y, st_x:ed_x] = \
np.maximum(arr[st_y:ed_y, st_x:ed_x], patch)
|
Generate pseudo heatmap for one keypoint in one frame.
Args:
arr (np.ndarray): The array to store the generated heatmaps.
Shape: img_h * img_w.
centers (np.ndarray): The coordinates of corresponding keypoints
(of multiple persons). Shape: M * 2.
max_values (np.ndarray): The max values of each keypoint. Shape: M.
|
generate_a_heatmap
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/pose_transforms.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
|
Apache-2.0
|
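A minimal standalone version of the Gaussian above that drops the 3-sigma cropping for clarity; the heatmap size, sigma and keypoint are hypothetical.

import numpy as np

img_h, img_w, sigma = 64, 64, 2.4             # hypothetical heatmap size and sigma
center, max_value = np.array([20.0, 40.0]), 0.9

x = np.arange(img_w, dtype=np.float32)
y = np.arange(img_h, dtype=np.float32)[:, None]
patch = np.exp(-((x - center[0]) ** 2 + (y - center[1]) ** 2) / 2 / sigma ** 2) * max_value
print(patch.shape, patch.max())               # (64, 64), peak value 0.9 at (x=20, y=40)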
def generate_a_limb_heatmap(self, arr: np.ndarray, starts: np.ndarray,
ends: np.ndarray, start_values: np.ndarray,
end_values: np.ndarray) -> None:
"""Generate pseudo heatmap for one limb in one frame.
Args:
arr (np.ndarray): The array to store the generated heatmaps.
Shape: img_h * img_w.
starts (np.ndarray): The coordinates of one keypoint in the
corresponding limbs. Shape: M * 2.
ends (np.ndarray): The coordinates of the other keypoint in the
corresponding limbs. Shape: M * 2.
start_values (np.ndarray): The max values of one keypoint in the
corresponding limbs. Shape: M.
end_values (np.ndarray): The max values of the other keypoint
in the corresponding limbs. Shape: M.
"""
sigma = self.sigma
img_h, img_w = arr.shape
for start, end, start_value, end_value in zip(starts, ends,
start_values,
end_values):
value_coeff = min(start_value, end_value)
if value_coeff < self.eps:
continue
min_x, max_x = min(start[0], end[0]), max(start[0], end[0])
min_y, max_y = min(start[1], end[1]), max(start[1], end[1])
min_x = max(int(min_x - 3 * sigma), 0)
max_x = min(int(max_x + 3 * sigma) + 1, img_w)
min_y = max(int(min_y - 3 * sigma), 0)
max_y = min(int(max_y + 3 * sigma) + 1, img_h)
x = np.arange(min_x, max_x, 1, np.float32)
y = np.arange(min_y, max_y, 1, np.float32)
if not (len(x) and len(y)):
continue
y = y[:, None]
x_0 = np.zeros_like(x)
y_0 = np.zeros_like(y)
# distance to start keypoints
d2_start = ((x - start[0])**2 + (y - start[1])**2)
# distance to end keypoints
d2_end = ((x - end[0])**2 + (y - end[1])**2)
# the distance between start and end keypoints.
d2_ab = ((start[0] - end[0])**2 + (start[1] - end[1])**2)
if d2_ab < 1:
self.generate_a_heatmap(arr, start[None], start_value[None])
continue
coeff = (d2_start - d2_end + d2_ab) / 2. / d2_ab
a_dominate = coeff <= 0
b_dominate = coeff >= 1
seg_dominate = 1 - a_dominate - b_dominate
position = np.stack([x + y_0, y + x_0], axis=-1)
projection = start + np.stack([coeff, coeff], axis=-1) * (
end - start)
d2_line = position - projection
d2_line = d2_line[:, :, 0]**2 + d2_line[:, :, 1]**2
d2_seg = (
a_dominate * d2_start + b_dominate * d2_end +
seg_dominate * d2_line)
patch = np.exp(-d2_seg / 2. / sigma**2)
patch = patch * value_coeff
arr[min_y:max_y, min_x:max_x] = \
np.maximum(arr[min_y:max_y, min_x:max_x], patch)
|
Generate pseudo heatmap for one limb in one frame.
Args:
arr (np.ndarray): The array to store the generated heatmaps.
Shape: img_h * img_w.
starts (np.ndarray): The coordinates of one keypoint in the
corresponding limbs. Shape: M * 2.
ends (np.ndarray): The coordinates of the other keypoint in the
corresponding limbs. Shape: M * 2.
start_values (np.ndarray): The max values of one keypoint in the
corresponding limbs. Shape: M.
end_values (np.ndarray): The max values of the other keypoint
in the corresponding limbs. Shape: M.
|
generate_a_limb_heatmap
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/pose_transforms.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
|
Apache-2.0
|
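The limb heatmap value at a pixel is a Gaussian of its squared distance to the keypoint segment; a scalar check of the projection logic above, with hypothetical points:

import numpy as np

start, end, p = np.array([10., 10.]), np.array([30., 10.]), np.array([20., 14.])
d2_start = ((p - start) ** 2).sum()                 # squared distance to the start keypoint
d2_end = ((p - end) ** 2).sum()                     # squared distance to the end keypoint
d2_ab = ((start - end) ** 2).sum()                  # squared limb length
coeff = (d2_start - d2_end + d2_ab) / 2. / d2_ab    # 0.5: the projection falls inside the segment
proj = start + coeff * (end - start)
d2_seg = ((p - proj) ** 2).sum()                    # 16.0: squared distance used in the Gaussian
print(coeff, d2_seg)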
def generate_heatmap(self, arr: np.ndarray, kps: np.ndarray,
max_values: np.ndarray) -> None:
"""Generate pseudo heatmap for all keypoints and limbs in one frame (if
needed).
Args:
arr (np.ndarray): The array to store the generated heatmaps.
Shape: V * img_h * img_w.
kps (np.ndarray): The coordinates of keypoints in this frame.
Shape: M * V * 2.
max_values (np.ndarray): The confidence score of each keypoint.
Shape: M * V.
"""
if self.with_kp:
num_kp = kps.shape[1]
for i in range(num_kp):
self.generate_a_heatmap(arr[i], kps[:, i], max_values[:, i])
if self.with_limb:
for i, limb in enumerate(self.skeletons):
start_idx, end_idx = limb
starts = kps[:, start_idx]
ends = kps[:, end_idx]
start_values = max_values[:, start_idx]
end_values = max_values[:, end_idx]
self.generate_a_limb_heatmap(arr[i], starts, ends,
start_values, end_values)
|
Generate pseudo heatmap for all keypoints and limbs in one frame (if
needed).
Args:
arr (np.ndarray): The array to store the generated heatmaps.
Shape: V * img_h * img_w.
kps (np.ndarray): The coordinates of keypoints in this frame.
Shape: M * V * 2.
max_values (np.ndarray): The confidence score of each keypoint.
Shape: M * V.
|
generate_heatmap
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/pose_transforms.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
|
Apache-2.0
|
def gen_an_aug(self, results: Dict) -> np.ndarray:
"""Generate pseudo heatmaps for all frames.
Args:
results (dict): The dictionary that contains all info of a sample.
Returns:
np.ndarray: The generated pseudo heatmaps.
"""
all_kps = results['keypoint'].astype(np.float32)
kp_shape = all_kps.shape
if 'keypoint_score' in results:
all_kpscores = results['keypoint_score']
else:
all_kpscores = np.ones(kp_shape[:-1], dtype=np.float32)
img_h, img_w = results['img_shape']
# scale img_h, img_w and kps
img_h = int(img_h * self.scaling + 0.5)
img_w = int(img_w * self.scaling + 0.5)
all_kps[..., :2] *= self.scaling
num_frame = kp_shape[1]
num_c = 0
if self.with_kp:
num_c += all_kps.shape[2]
if self.with_limb:
num_c += len(self.skeletons)
ret = np.zeros([num_frame, num_c, img_h, img_w], dtype=np.float32)
for i in range(num_frame):
# M, V, C
kps = all_kps[:, i]
# M, C
kpscores = all_kpscores[:, i] if self.use_score else \
np.ones_like(all_kpscores[:, i])
self.generate_heatmap(ret[i], kps, kpscores)
return ret
|
Generate pseudo heatmaps for all frames.
Args:
results (dict): The dictionary that contains all info of a sample.
Returns:
np.ndarray: The generated pseudo heatmaps.
|
gen_an_aug
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/pose_transforms.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
|
Apache-2.0
|
def transform(self, results: Dict) -> Dict:
"""Generate pseudo heatmaps based on joint coordinates and confidence.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
heatmap = self.gen_an_aug(results)
key = 'heatmap_imgs' if 'imgs' in results else 'imgs'
if self.double:
indices = np.arange(heatmap.shape[1], dtype=np.int64)
left, right = (self.left_kp, self.right_kp) if self.with_kp else (
self.left_limb, self.right_limb)
for l, r in zip(left, right): # noqa: E741
indices[l] = r
indices[r] = l
heatmap_flip = heatmap[..., ::-1][:, indices]
heatmap = np.concatenate([heatmap, heatmap_flip])
results[key] = heatmap
return results
|
Generate pseudo heatmaps based on joint coordinates and confidence.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/pose_transforms.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
|
Apache-2.0
|
def transform(self, results: Dict) -> Dict:
"""Convert the coordinates of keypoints to make it more compact.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
img_shape = results['img_shape']
h, w = img_shape
kp = results['keypoint']
# Make NaN zero
kp[np.isnan(kp)] = 0.
kp_x = kp[..., 0]
kp_y = kp[..., 1]
min_x = np.min(kp_x[kp_x != 0], initial=np.Inf)
min_y = np.min(kp_y[kp_y != 0], initial=np.Inf)
max_x = np.max(kp_x[kp_x != 0], initial=-np.Inf)
max_y = np.max(kp_y[kp_y != 0], initial=-np.Inf)
# The compact area is too small
if max_x - min_x < self.threshold or max_y - min_y < self.threshold:
return results
center = ((max_x + min_x) / 2, (max_y + min_y) / 2)
half_width = (max_x - min_x) / 2 * (1 + self.padding)
half_height = (max_y - min_y) / 2 * (1 + self.padding)
if self.hw_ratio is not None:
half_height = max(self.hw_ratio[0] * half_width, half_height)
half_width = max(1 / self.hw_ratio[1] * half_height, half_width)
min_x, max_x = center[0] - half_width, center[0] + half_width
min_y, max_y = center[1] - half_height, center[1] + half_height
# hot update
if not self.allow_imgpad:
min_x, min_y = int(max(0, min_x)), int(max(0, min_y))
max_x, max_y = int(min(w, max_x)), int(min(h, max_y))
else:
min_x, min_y = int(min_x), int(min_y)
max_x, max_y = int(max_x), int(max_y)
kp_x[kp_x != 0] -= min_x
kp_y[kp_y != 0] -= min_y
new_shape = (max_y - min_y, max_x - min_x)
results['img_shape'] = new_shape
# the order is x, y, w, h (in [0, 1]), a tuple
crop_quadruple = results.get('crop_quadruple', (0., 0., 1., 1.))
new_crop_quadruple = (min_x / w, min_y / h, (max_x - min_x) / w,
(max_y - min_y) / h)
crop_quadruple = _combine_quadruple(crop_quadruple, new_crop_quadruple)
results['crop_quadruple'] = crop_quadruple
return results
|
Convert the coordinates of keypoints to make it more compact.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/pose_transforms.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
|
Apache-2.0
|
def angle_between(self, v1: np.ndarray, v2: np.ndarray) -> float:
"""Returns the angle in radians between vectors 'v1' and 'v2'."""
if np.abs(v1).sum() < 1e-6 or np.abs(v2).sum() < 1e-6:
return 0
v1_u = self.unit_vector(v1)
v2_u = self.unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
|
Returns the angle in radians between vectors 'v1' and 'v2'.
|
angle_between
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/pose_transforms.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
|
Apache-2.0
|
def rotation_matrix(self, axis: np.ndarray, theta: float) -> np.ndarray:
"""Returns the rotation matrix associated with counterclockwise
rotation about the given axis by theta radians."""
if np.abs(axis).sum() < 1e-6 or np.abs(theta) < 1e-6:
return np.eye(3)
axis = np.asarray(axis)
axis = axis / np.sqrt(np.dot(axis, axis))
a = np.cos(theta / 2.0)
b, c, d = -axis * np.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
[2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
[2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])
|
Returns the rotation matrix associated with counterclockwise
rotation about the given axis by theta radians.
|
rotation_matrix
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/pose_transforms.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
|
Apache-2.0
|
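A quick sanity check of the Rodrigues-style matrix above, run outside the class; the host class name `PreNormalize3D` is an assumption based on this file, and the axis and angle are arbitrary test values. Rotating the z-axis by 90 degrees about x should give the negative y-axis.

import numpy as np
from mmaction.datasets.transforms.pose_transforms import PreNormalize3D  # assumed host class

rot = PreNormalize3D().rotation_matrix(np.array([1., 0., 0.]), np.pi / 2)
print(np.round(rot @ np.array([0., 0., 1.]), 6))   # [ 0. -1.  0.]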
def transform(self, results: Dict) -> Dict:
"""The transform function of :class:`PreNormalize3D`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
"""
skeleton = results['keypoint']
total_frames = results.get('total_frames', skeleton.shape[1])
M, T, V, C = skeleton.shape
assert T == total_frames
if skeleton.sum() == 0:
return results
index0 = [
i for i in range(T) if not np.all(np.isclose(skeleton[0, i], 0))
]
assert M in [1, 2]
if M == 2:
index1 = [
i for i in range(T)
if not np.all(np.isclose(skeleton[1, i], 0))
]
if len(index0) < len(index1):
skeleton = skeleton[:, np.array(index1)]
skeleton = skeleton[[1, 0]]
else:
skeleton = skeleton[:, np.array(index0)]
else:
skeleton = skeleton[:, np.array(index0)]
T_new = skeleton.shape[1]
if self.align_center:
if skeleton.shape[2] == 25:
main_body_center = skeleton[0, 0, 1].copy()
else:
main_body_center = skeleton[0, 0, -1].copy()
mask = ((skeleton != 0).sum(-1) > 0)[..., None]
skeleton = (skeleton - main_body_center) * mask
if self.align_spine:
joint_bottom = skeleton[0, 0, self.zaxis[0]]
joint_top = skeleton[0, 0, self.zaxis[1]]
axis = np.cross(joint_top - joint_bottom, [0, 0, 1])
angle = self.angle_between(joint_top - joint_bottom, [0, 0, 1])
matrix_z = self.rotation_matrix(axis, angle)
skeleton = np.einsum('abcd,kd->abck', skeleton, matrix_z)
if self.align_shoulder:
joint_rshoulder = skeleton[0, 0, self.xaxis[0]]
joint_lshoulder = skeleton[0, 0, self.xaxis[1]]
axis = np.cross(joint_rshoulder - joint_lshoulder, [1, 0, 0])
angle = self.angle_between(joint_rshoulder - joint_lshoulder,
[1, 0, 0])
matrix_x = self.rotation_matrix(axis, angle)
skeleton = np.einsum('abcd,kd->abck', skeleton, matrix_x)
results['keypoint'] = skeleton
results['total_frames'] = T_new
results['body_center'] = main_body_center
return results
|
The transform function of :class:`PreNormalize3D`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/pose_transforms.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
|
Apache-2.0
|
def transform(self, results: Dict) -> Dict:
"""The transform function of :class:`PreNormalize2D`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
"""
h, w = results.get('img_shape', self.img_shape)
results['keypoint'][..., 0] = \
(results['keypoint'][..., 0] - (w / 2)) / (w / 2)
results['keypoint'][..., 1] = \
(results['keypoint'][..., 1] - (h / 2)) / (h / 2)
return results
|
The transform function of :class:`PreNormalize2D`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/pose_transforms.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
|
Apache-2.0
|
def transform(self, results: Dict) -> Dict:
"""The transform function of :class:`JointToBone`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
"""
keypoint = results['keypoint']
M, T, V, C = keypoint.shape
bone = np.zeros((M, T, V, C), dtype=np.float32)
assert C in [2, 3]
for v1, v2 in self.pairs:
bone[..., v1, :] = keypoint[..., v1, :] - keypoint[..., v2, :]
if C == 3 and self.dataset in ['openpose', 'coco']:
score = (keypoint[..., v1, 2] + keypoint[..., v2, 2]) / 2
bone[..., v1, 2] = score
results[self.target] = bone
return results
|
The transform function of :class:`JointToBone`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/pose_transforms.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
|
Apache-2.0
|
def transform(self, results: Dict) -> Dict:
"""The transform function of :class:`ToMotion`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
"""
data = results[self.source]
M, T, V, C = data.shape
motion = np.zeros_like(data)
assert C in [2, 3]
motion[:, :T - 1] = np.diff(data, axis=1)
if C == 3 and self.dataset in ['openpose', 'coco']:
score = (data[:, :T - 1, :, 2] + data[:, 1:, :, 2]) / 2
motion[:, :T - 1, :, 2] = score
results[self.target] = motion
return results
|
The transform function of :class:`ToMotion`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/pose_transforms.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
|
Apache-2.0
|
def transform(self, results: Dict) -> Dict:
"""The transform function of :class:`MergeSkeFeat`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
"""
feats = []
for name in self.feat_list:
feats.append(results.pop(name))
feats = np.concatenate(feats, axis=self.axis)
results[self.target] = feats
return results
|
The transform function of :class:`MergeSkeFeat`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/pose_transforms.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
|
Apache-2.0
|
def transform(self, results: Dict) -> Dict:
"""The transform function of :class:`GenSkeFeat`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
"""
if 'keypoint_score' in results and 'keypoint' in results:
assert self.dataset != 'nturgb+d'
assert results['keypoint'].shape[
-1] == 2, 'Only 2D keypoints have keypoint_score. '
keypoint = results.pop('keypoint')
keypoint_score = results.pop('keypoint_score')
results['keypoint'] = np.concatenate(
[keypoint, keypoint_score[..., None]], -1)
return self.ops(results)
|
The transform function of :class:`GenSkeFeat`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/pose_transforms.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
|
Apache-2.0
|
def _get_train_clips(self, num_frames: int, clip_len: int) -> np.ndarray:
"""Uniformly sample indices for training clips.
Args:
num_frames (int): The number of frames.
clip_len (int): The length of the clip.
Returns:
np.ndarray: The sampled indices for training clips.
"""
all_inds = []
for clip_idx in range(self.num_clips):
if num_frames < clip_len:
start = np.random.randint(0, num_frames)
inds = np.arange(start, start + clip_len)
elif clip_len <= num_frames < 2 * clip_len:
basic = np.arange(clip_len)
inds = np.random.choice(
clip_len + 1, num_frames - clip_len, replace=False)
offset = np.zeros(clip_len + 1, dtype=np.int32)
offset[inds] = 1
offset = np.cumsum(offset)
inds = basic + offset[:-1]
else:
bids = np.array(
[i * num_frames // clip_len for i in range(clip_len + 1)])
bsize = np.diff(bids)
bst = bids[:clip_len]
offset = np.random.randint(bsize)
inds = bst + offset
all_inds.append(inds)
return np.concatenate(all_inds)
|
Uniformly sample indices for training clips.
Args:
num_frames (int): The number of frames.
clip_len (int): The length of the clip.
Returns:
np.ndarray: The sampled indices for training clips.
|
_get_train_clips
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/pose_transforms.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
|
Apache-2.0
|
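A small numeric trace of the last branch above (hypothetical sizes): when `num_frames >= 2 * clip_len`, the frames are split into `clip_len` bins and one index is drawn per bin.

import numpy as np

num_frames, clip_len = 50, 10                                                # hypothetical sizes
bids = np.array([i * num_frames // clip_len for i in range(clip_len + 1)])   # bin borders [0, 5, ..., 50]
bsize = np.diff(bids)                                                        # bin sizes, here all 5
offset = np.random.randint(bsize)                                            # one random offset per bin
inds = bids[:clip_len] + offset
print(inds)                                                                  # 10 increasing indices, one per bin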
def _get_test_clips(self, num_frames: int, clip_len: int) -> np.ndarray:
"""Uniformly sample indices for testing clips.
Args:
num_frames (int): The number of frames.
clip_len (int): The length of the clip.
Returns:
np.ndarray: The sampled indices for testing clips.
"""
np.random.seed(self.seed)
all_inds = []
for i in range(self.num_clips):
if num_frames < clip_len:
start_ind = i if num_frames < self.num_clips \
else i * num_frames // self.num_clips
inds = np.arange(start_ind, start_ind + clip_len)
elif clip_len <= num_frames < clip_len * 2:
basic = np.arange(clip_len)
inds = np.random.choice(
clip_len + 1, num_frames - clip_len, replace=False)
offset = np.zeros(clip_len + 1, dtype=np.int64)
offset[inds] = 1
offset = np.cumsum(offset)
inds = basic + offset[:-1]
else:
bids = np.array(
[i * num_frames // clip_len for i in range(clip_len + 1)])
bsize = np.diff(bids)
bst = bids[:clip_len]
offset = np.random.randint(bsize)
inds = bst + offset
all_inds.append(inds)
return np.concatenate(all_inds)
|
Uniformly sample indices for testing clips.
Args:
num_frames (int): The number of frames.
clip_len (int): The length of the clip.
Returns:
np.ndarray: The sampled indices for testing clips.
|
_get_test_clips
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/pose_transforms.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
|
Apache-2.0
|
def transform(self, results: Dict) -> Dict:
"""The transform function of :class:`UniformSampleFrames`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
"""
num_frames = results['total_frames']
if self.test_mode:
inds = self._get_test_clips(num_frames, self.clip_len)
else:
inds = self._get_train_clips(num_frames, self.clip_len)
inds = np.mod(inds, num_frames)
start_index = results.get('start_index', 0)
inds = inds + start_index
if 'keypoint' in results:
kp = results['keypoint']
assert num_frames == kp.shape[1]
num_person = kp.shape[0]
num_persons = [num_person] * num_frames
for i in range(num_frames):
j = num_person - 1
while j >= 0 and np.all(np.abs(kp[j, i]) < 1e-5):
j -= 1
num_persons[i] = j + 1
transitional = [False] * num_frames
for i in range(1, num_frames - 1):
if num_persons[i] != num_persons[i - 1]:
transitional[i] = transitional[i - 1] = True
if num_persons[i] != num_persons[i + 1]:
transitional[i] = transitional[i + 1] = True
inds_int = inds.astype(np.int64)
coeff = np.array([transitional[i] for i in inds_int])
inds = (coeff * inds_int + (1 - coeff) * inds).astype(np.float32)
results['frame_inds'] = inds.astype(np.int32)
results['clip_len'] = self.clip_len
results['frame_interval'] = None
results['num_clips'] = self.num_clips
return results
|
The transform function of :class:`UniformSampleFrames`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/pose_transforms.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
|
Apache-2.0
|
def transform(self, results: Dict) -> Dict:
"""The transform function of :class:`PadTo`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
"""
total_frames = results['total_frames']
assert total_frames <= self.length
start_index = results.get('start_index', 0)
inds = np.arange(start_index, start_index + self.length)
inds = np.mod(inds, total_frames)
keypoint = results['keypoint'][:, inds].copy()
if self.mode == 'zero':
keypoint[:, total_frames:] = 0
results['keypoint'] = keypoint
results['total_frames'] = self.length
return results
|
The transform function of :class:`PadTo`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/pose_transforms.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
|
Apache-2.0
|
def _load_kpscore(kpscore: np.ndarray,
frame_inds: np.ndarray) -> np.ndarray:
"""Load keypoint scores according to sampled indexes."""
return kpscore[:, frame_inds].astype(np.float32)
|
Load keypoint scores according to sampled indexes.
|
_load_kpscore
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/pose_transforms.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
|
Apache-2.0
|
def transform(self, results: Dict) -> Dict:
"""The transform function of :class:`PoseDecode`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
"""
if 'total_frames' not in results:
results['total_frames'] = results['keypoint'].shape[1]
if 'frame_inds' not in results:
results['frame_inds'] = np.arange(results['total_frames'])
if results['frame_inds'].ndim != 1:
results['frame_inds'] = np.squeeze(results['frame_inds'])
offset = results.get('offset', 0)
frame_inds = results['frame_inds'] + offset
if 'keypoint_score' in results:
results['keypoint_score'] = self._load_kpscore(
results['keypoint_score'], frame_inds)
results['keypoint'] = self._load_kp(results['keypoint'], frame_inds)
return results
|
The transform function of :class:`PoseDecode`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/pose_transforms.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
|
Apache-2.0
|
def transform(self, results: Dict) -> Dict:
"""The transform function of :class:`MMUniformSampleFrames`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
"""
num_frames = results['total_frames']
modalities = []
for modality, clip_len in self.clip_len.items():
if self.test_mode:
inds = self._get_test_clips(num_frames, clip_len)
else:
inds = self._get_train_clips(num_frames, clip_len)
inds = np.mod(inds, num_frames)
results[f'{modality}_inds'] = inds.astype(np.int32)
modalities.append(modality)
results['clip_len'] = self.clip_len
results['frame_interval'] = None
results['num_clips'] = self.num_clips
if not isinstance(results['modality'], list):
# should override
results['modality'] = modalities
return results
|
The transform function of :class:`MMUniformSampleFrames`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/pose_transforms.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
|
Apache-2.0
|
def transform(self, results: Dict) -> Dict:
"""The transform function of :class:`MMDecode`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
"""
for mod in results['modality']:
if results[f'{mod}_inds'].ndim != 1:
results[f'{mod}_inds'] = np.squeeze(results[f'{mod}_inds'])
frame_inds = results[f'{mod}_inds']
if mod == 'RGB':
if 'filename' not in results:
results['filename'] = results['frame_dir'] + '.mp4'
video_reader = self._get_video_reader(results['filename'])
imgs = self._decord_load_frames(video_reader, frame_inds)
del video_reader
results['imgs'] = imgs
elif mod == 'Pose':
assert 'keypoint' in results
if 'keypoint_score' not in results:
keypoint_score = [
np.ones(keypoint.shape[:-1], dtype=np.float32)
for keypoint in results['keypoint']
]
results['keypoint_score'] = np.stack(keypoint_score)
results['keypoint'] = self._load_kp(results['keypoint'],
frame_inds)
results['keypoint_score'] = self._load_kpscore(
results['keypoint_score'], frame_inds)
else:
raise NotImplementedError(
f'MMDecode: Modality {mod} not supported')
# We need to scale human keypoints to the new image size
if 'imgs' in results and 'keypoint' in results:
real_img_shape = results['imgs'][0].shape[:2]
if real_img_shape != results['img_shape']:
oh, ow = results['img_shape']
nh, nw = real_img_shape
assert results['keypoint'].shape[-1] in [2, 3]
results['keypoint'][..., 0] *= (nw / ow)
results['keypoint'][..., 1] *= (nh / oh)
results['img_shape'] = real_img_shape
results['original_shape'] = real_img_shape
return results
|
The transform function of :class:`MMDecode`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/pose_transforms.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
|
Apache-2.0
|
def _get_box(self, keypoint: np.ndarray, img_shape: Tuple[int]) -> Tuple:
"""Calculate the bounding box surrounding all joints in the frames."""
h, w = img_shape
kp_x = keypoint[..., 0]
kp_y = keypoint[..., 1]
min_x = np.min(kp_x[kp_x != 0], initial=np.Inf)
min_y = np.min(kp_y[kp_y != 0], initial=np.Inf)
max_x = np.max(kp_x[kp_x != 0], initial=-np.Inf)
max_y = np.max(kp_y[kp_y != 0], initial=-np.Inf)
# The compact area is too small
if max_x - min_x < self.threshold or max_y - min_y < self.threshold:
return 0, 0, w, h
center = ((max_x + min_x) / 2, (max_y + min_y) / 2)
half_width = (max_x - min_x) / 2 * (1 + self.padding)
half_height = (max_y - min_y) / 2 * (1 + self.padding)
if self.hw_ratio is not None:
half_height = max(self.hw_ratio[0] * half_width, half_height)
half_width = max(1 / self.hw_ratio[1] * half_height, half_width)
min_x, max_x = center[0] - half_width, center[0] + half_width
min_y, max_y = center[1] - half_height, center[1] + half_height
# hot update
if not self.allow_imgpad:
min_x, min_y = int(max(0, min_x)), int(max(0, min_y))
max_x, max_y = int(min(w, max_x)), int(min(h, max_y))
else:
min_x, min_y = int(min_x), int(min_y)
max_x, max_y = int(max_x), int(max_y)
return min_x, min_y, max_x, max_y
|
Calculate the bounding box surrounding all joints in the frames.
|
_get_box
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/pose_transforms.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
|
Apache-2.0
|
def _compact_images(self, imgs: List[np.ndarray], img_shape: Tuple[int],
box: Tuple[int]) -> List:
"""Crop the images acoordding the bounding box."""
h, w = img_shape
min_x, min_y, max_x, max_y = box
pad_l, pad_u, pad_r, pad_d = 0, 0, 0, 0
if min_x < 0:
pad_l = -min_x
min_x, max_x = 0, max_x + pad_l
w += pad_l
if min_y < 0:
pad_u = -min_y
min_y, max_y = 0, max_y + pad_u
h += pad_u
if max_x > w:
pad_r = max_x - w
w = max_x
if max_y > h:
pad_d = max_y - h
h = max_y
if pad_l > 0 or pad_r > 0 or pad_u > 0 or pad_d > 0:
imgs = [
np.pad(img, ((pad_u, pad_d), (pad_l, pad_r), (0, 0)))
for img in imgs
]
imgs = [img[min_y:max_y, min_x:max_x] for img in imgs]
return imgs
|
Crop the images according to the bounding box.
|
_compact_images
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/pose_transforms.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
|
Apache-2.0
|
def transform(self, results: Dict) -> Dict:
"""The transform function of :class:`MMCompact`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
"""
img_shape = results['img_shape']
kp = results['keypoint']
# Make NaN zero
kp[np.isnan(kp)] = 0.
min_x, min_y, max_x, max_y = self._get_box(kp, img_shape)
kp_x, kp_y = kp[..., 0], kp[..., 1]
kp_x[kp_x != 0] -= min_x
kp_y[kp_y != 0] -= min_y
new_shape = (max_y - min_y, max_x - min_x)
results['img_shape'] = new_shape
results['imgs'] = self._compact_images(results['imgs'], img_shape,
(min_x, min_y, max_x, max_y))
return results
|
The transform function of :class:`MMCompact`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/pose_transforms.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/pose_transforms.py
|
Apache-2.0
|
def transform(self, results):
"""Fuse lazy operations.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
if 'lazy' not in results:
raise ValueError('No lazy operation detected')
lazyop = results['lazy']
imgs = results['imgs']
# crop
left, top, right, bottom = lazyop['crop_bbox'].round().astype(int)
imgs = [img[top:bottom, left:right] for img in imgs]
# resize
img_h, img_w = results['img_shape']
if lazyop['interpolation'] is None:
interpolation = 'bilinear'
else:
interpolation = lazyop['interpolation']
imgs = [
mmcv.imresize(img, (img_w, img_h), interpolation=interpolation)
for img in imgs
]
# flip
if lazyop['flip']:
for img in imgs:
mmcv.imflip_(img, lazyop['flip_direction'])
results['imgs'] = imgs
del results['lazy']
return results
|
Fuse lazy operations.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/processing.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py
|
Apache-2.0
|
def transform(self, results):
"""Perform ColorJitter.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
imgs = results['imgs']
num_clips, clip_len = 1, len(imgs)
new_imgs = []
for i in range(num_clips):
b = np.random.uniform(
low=self.brightness[0], high=self.brightness[1])
c = np.random.uniform(low=self.contrast[0], high=self.contrast[1])
s = np.random.uniform(
low=self.saturation[0], high=self.saturation[1])
h = np.random.uniform(low=self.hue[0], high=self.hue[1])
start, end = i * clip_len, (i + 1) * clip_len
for img in imgs[start:end]:
img = img.astype(np.float32)
for fn_id in self.fn_idx:
if fn_id == 0 and b != 1:
img *= b
if fn_id == 1 and c != 1:
img = self.adjust_contrast(img, c)
if fn_id == 2 and s != 1:
img = self.adjust_saturation(img, s)
if fn_id == 3 and h != 0:
img = self.adjust_hue(img, h)
img = np.clip(img, 0, 255).astype(np.uint8)
new_imgs.append(img)
results['imgs'] = new_imgs
return results
|
Perform ColorJitter.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/processing.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/processing.py
|
Apache-2.0
|
def transform(self, results: Dict) -> Dict:
"""The transform function of :class:`CLIPTokenize`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
"""
try:
import clip
except ImportError:
raise ImportError('Please run `pip install '
'git+https://github.com/openai/CLIP.git` '
'to install clip first. ')
text = results['text']
text_tokenized = clip.tokenize(text)[0]
results['text'] = text_tokenized
return results
|
The transform function of :class:`CLIPTokenize`.
Args:
results (dict): The result dict.
Returns:
dict: The result dict.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/text_transforms.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/text_transforms.py
|
Apache-2.0
|
def transform(self, results):
"""Perform Torchvision augmentations.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
assert 'imgs' in results
imgs = [x.transpose(2, 0, 1) for x in results['imgs']]
imgs = to_tensor(np.stack(imgs))
imgs = self.trans(imgs).data.numpy()
imgs[imgs > 255] = 255
imgs[imgs < 0] = 0
imgs = imgs.astype(np.uint8)
imgs = [x.transpose(1, 2, 0) for x in imgs]
results['imgs'] = imgs
return results
|
Perform Torchvision augmentations.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/wrappers.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/wrappers.py
|
Apache-2.0
|
def transform(self, results):
"""Perform PytorchVideoTrans augmentations.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
assert 'imgs' in results
assert 'gt_bboxes' not in results,\
f'PytorchVideo {self.op} doesn\'t support bboxes yet.'
assert 'proposals' not in results,\
f'PytorchVideo {self.op} doesn\'t support bboxes yet.'
if self.op in ('AugMix', 'RandAugment'):
# list[ndarray(h, w, 3)] -> torch.tensor(t, c, h, w)
imgs = [x.transpose(2, 0, 1) for x in results['imgs']]
imgs = to_tensor(np.stack(imgs))
else:
# list[ndarray(h, w, 3)] -> torch.tensor(c, t, h, w)
# uint8 -> float32
imgs = to_tensor((np.stack(results['imgs']).transpose(3, 0, 1, 2) /
255.).astype(np.float32))
imgs = self.trans(imgs).data.numpy()
if self.op in ('AugMix', 'RandAugment'):
imgs[imgs > 255] = 255
imgs[imgs < 0] = 0
imgs = imgs.astype(np.uint8)
# torch.tensor(t, c, h, w) -> list[ndarray(h, w, 3)]
imgs = [x.transpose(1, 2, 0) for x in imgs]
else:
# float32 -> uint8
imgs = imgs * 255
imgs[imgs > 255] = 255
imgs[imgs < 0] = 0
imgs = imgs.astype(np.uint8)
# torch.tensor(c, t, h, w) -> list[ndarray(h, w, 3)]
imgs = [x for x in imgs.transpose(1, 2, 3, 0)]
results['imgs'] = imgs
return results
|
Perform PytorchVideoTrans augmentations.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/wrappers.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/wrappers.py
|
Apache-2.0
|
def default_transforms():
"""Default transforms for imgaug.
Implement RandAugment by imgaug.
Please visit `https://arxiv.org/abs/1909.13719` for more information.
    Augmenters and hyperparameters are borrowed from the following repo:
    https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py # noqa
    The ``SolarizeAdd`` augmenter is omitted since imgaug doesn't support it.
Returns:
dict: The constructed RandAugment transforms.
"""
# RandAugment hyper params
num_augmenters = 2
cur_magnitude, max_magnitude = 9, 10
cur_level = 1.0 * cur_magnitude / max_magnitude
return [
dict(
type='SomeOf',
n=num_augmenters,
children=[
dict(
type='ShearX',
shear=17.19 * cur_level * random.choice([-1, 1])),
dict(
type='ShearY',
shear=17.19 * cur_level * random.choice([-1, 1])),
dict(
type='TranslateX',
percent=.2 * cur_level * random.choice([-1, 1])),
dict(
type='TranslateY',
percent=.2 * cur_level * random.choice([-1, 1])),
dict(
type='Rotate',
rotate=30 * cur_level * random.choice([-1, 1])),
dict(type='Posterize', nb_bits=max(1, int(4 * cur_level))),
dict(type='Solarize', threshold=256 * cur_level),
dict(type='EnhanceColor', factor=1.8 * cur_level + .1),
dict(type='EnhanceContrast', factor=1.8 * cur_level + .1),
dict(
type='EnhanceBrightness', factor=1.8 * cur_level + .1),
dict(type='EnhanceSharpness', factor=1.8 * cur_level + .1),
dict(type='Autocontrast', cutoff=0),
dict(type='Equalize'),
dict(type='Invert', p=1.),
dict(
type='Cutout',
nb_iterations=1,
size=0.2 * cur_level,
squared=True)
])
]
|
Default transforms for imgaug.
Implement RandAugment by imgaug.
Please visit `https://arxiv.org/abs/1909.13719` for more information.
Augmenters and hyperparameters are borrowed from the following repo:
https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py # noqa
The ``SolarizeAdd`` augmenter is omitted since imgaug doesn't support it.
Returns:
dict: The constructed RandAugment transforms.
|
default_transforms
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/wrappers.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/wrappers.py
|
Apache-2.0
|
def imgaug_builder(self, cfg):
"""Import a module from imgaug.
It follows the logic of :func:`build_from_cfg`. Use a dict object to
create an iaa.Augmenter object.
Args:
cfg (dict): Config dict. It should at least contain the key "type".
Returns:
obj:`iaa.Augmenter`: The constructed imgaug augmenter.
"""
import imgaug.augmenters as iaa
assert isinstance(cfg, dict) and 'type' in cfg
args = cfg.copy()
obj_type = args.pop('type')
if mmengine.is_str(obj_type):
obj_cls = getattr(iaa, obj_type) if hasattr(iaa, obj_type) \
else getattr(iaa.pillike, obj_type)
elif issubclass(obj_type, iaa.Augmenter):
obj_cls = obj_type
else:
raise TypeError(
f'type must be a str or valid type, but got {type(obj_type)}')
for aug_list_key in ['children', 'then_list', 'else_list']:
if aug_list_key in args:
args[aug_list_key] = [
self.imgaug_builder(child) for child in args[aug_list_key]
]
return obj_cls(**args)
|
Import a module from imgaug.
It follows the logic of :func:`build_from_cfg`. Use a dict object to
create an iaa.Augmenter object.
Args:
cfg (dict): Config dict. It should at least contain the key "type".
Returns:
obj:`iaa.Augmenter`: The constructed imgaug augmenter.
|
imgaug_builder
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/wrappers.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/wrappers.py
|
Apache-2.0
|
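The builder above resolves the ``type`` key against ``imgaug.augmenters`` (falling back to ``imgaug.augmenters.pillike``) and recurses into ``children``/``then_list``/``else_list``. A minimal sketch of that lookup for a single config dict, assuming imgaug is installed; the ``Rotate`` config is only an example.

import imgaug.augmenters as iaa

cfg = dict(type='Rotate', rotate=15)
args = cfg.copy()
obj_type = args.pop('type')
# same lookup rule as imgaug_builder: try iaa first, then iaa.pillike
obj_cls = getattr(iaa, obj_type) if hasattr(iaa, obj_type) \
    else getattr(iaa.pillike, obj_type)
aug = obj_cls(**args)  # equivalent to iaa.Rotate(rotate=15)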
def transform(self, results):
"""Perform Imgaug augmentations.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
    assert results['modality'] == 'RGB', 'Imgaug only supports RGB images.'
in_type = results['imgs'][0].dtype
cur_aug = self.aug.to_deterministic()
results['imgs'] = [
cur_aug.augment_image(frame) for frame in results['imgs']
]
img_h, img_w, _ = results['imgs'][0].shape
out_type = results['imgs'][0].dtype
    assert in_type == out_type, \
        ('Imgaug input dtype and output dtype are not the same. '
         f'Convert from {in_type} to {out_type}')
if 'gt_bboxes' in results:
from imgaug.augmentables import bbs
bbox_list = [
bbs.BoundingBox(
x1=bbox[0], y1=bbox[1], x2=bbox[2], y2=bbox[3])
for bbox in results['gt_bboxes']
]
bboxes = bbs.BoundingBoxesOnImage(
bbox_list, shape=results['img_shape'])
bbox_aug, *_ = cur_aug.augment_bounding_boxes([bboxes])
results['gt_bboxes'] = [[
max(bbox.x1, 0),
max(bbox.y1, 0),
min(bbox.x2, img_w),
min(bbox.y2, img_h)
] for bbox in bbox_aug.items]
if 'proposals' in results:
bbox_list = [
bbs.BoundingBox(
x1=bbox[0], y1=bbox[1], x2=bbox[2], y2=bbox[3])
for bbox in results['proposals']
]
bboxes = bbs.BoundingBoxesOnImage(
bbox_list, shape=results['img_shape'])
bbox_aug, *_ = cur_aug.augment_bounding_boxes([bboxes])
results['proposals'] = [[
max(bbox.x1, 0),
max(bbox.y1, 0),
min(bbox.x2, img_w),
min(bbox.y2, img_h)
] for bbox in bbox_aug.items]
results['img_shape'] = (img_h, img_w)
return results
|
Perform Imgaug augmentations.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
|
transform
|
python
|
open-mmlab/mmaction2
|
mmaction/datasets/transforms/wrappers.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/datasets/transforms/wrappers.py
|
Apache-2.0
|
def _draw_samples(self,
batch_idx: int,
data_batch: dict,
data_samples: Sequence[ActionDataSample],
step: int = 0) -> None:
"""Visualize every ``self.interval`` samples from a data batch.
Args:
batch_idx (int): The index of the current batch in the val loop.
data_batch (dict): Data from dataloader.
        data_samples (Sequence[:obj:`ActionDataSample`]): Outputs from model.
step (int): Global step value to record. Defaults to 0.
"""
if self.enable is False:
return
batch_size = len(data_samples)
videos = data_batch['inputs']
start_idx = batch_size * batch_idx
end_idx = start_idx + batch_size
# The first index divisible by the interval, after the start index
first_sample_id = math.ceil(start_idx / self.interval) * self.interval
for sample_id in range(first_sample_id, end_idx, self.interval):
video = videos[sample_id - start_idx]
# move channel to the last
video = video.permute(1, 2, 3, 0).numpy().astype('uint8')
data_sample = data_samples[sample_id - start_idx]
if 'filename' in data_sample:
# osp.basename works on different platforms even file clients.
sample_name = osp.basename(data_sample.get('filename'))
elif 'frame_dir' in data_sample:
sample_name = osp.basename(data_sample.get('frame_dir'))
else:
sample_name = str(sample_id)
draw_args = self.draw_args
if self.out_dir is not None:
draw_args['out_path'] = self.file_client.join_path(
self.out_dir, f'{sample_name}_{step}')
self._visualizer.add_datasample(
sample_name,
video=video,
data_sample=data_sample,
step=step,
            **draw_args,
)
|
Visualize every ``self.interval`` samples from a data batch.
Args:
batch_idx (int): The index of the current batch in the val loop.
data_batch (dict): Data from dataloader.
    data_samples (Sequence[:obj:`ActionDataSample`]): Outputs from model.
step (int): Global step value to record. Defaults to 0.
|
_draw_samples
|
python
|
open-mmlab/mmaction2
|
mmaction/engine/hooks/visualization_hook.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/engine/hooks/visualization_hook.py
|
Apache-2.0
|
def after_val_iter(self, runner: Runner, batch_idx: int, data_batch: dict,
outputs: Sequence[ActionDataSample]) -> None:
"""Visualize every ``self.interval`` samples during validation.
Args:
runner (:obj:`Runner`): The runner of the validation process.
batch_idx (int): The index of the current batch in the val loop.
data_batch (dict): Data from dataloader.
outputs (Sequence[:obj:`ActionDataSample`]): Outputs from model.
"""
if isinstance(runner.train_loop, EpochBasedTrainLoop):
step = runner.epoch
else:
step = runner.iter
self._draw_samples(batch_idx, data_batch, outputs, step=step)
|
Visualize every ``self.interval`` samples during validation.
Args:
runner (:obj:`Runner`): The runner of the validation process.
batch_idx (int): The index of the current batch in the val loop.
data_batch (dict): Data from dataloader.
outputs (Sequence[:obj:`ActionDataSample`]): Outputs from model.
|
after_val_iter
|
python
|
open-mmlab/mmaction2
|
mmaction/engine/hooks/visualization_hook.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/engine/hooks/visualization_hook.py
|
Apache-2.0
|
def conv_branch_init(conv: nn.Module, branches: int) -> None:
"""Perform initialization for a conv branch.
Args:
conv (nn.Module): The conv module of a branch.
branches (int): The number of branches.
"""
weight = conv.weight
n = weight.size(0)
k1 = weight.size(1)
k2 = weight.size(2)
nn.init.normal_(weight, 0, math.sqrt(2. / (n * k1 * k2 * branches)))
nn.init.constant_(conv.bias, 0)
|
Perform initialization for a conv branch.
Args:
conv (nn.Module): The conv module of a branch.
branches (int): The number of branches.
|
conv_branch_init
|
python
|
open-mmlab/mmaction2
|
mmaction/engine/model/weight_init.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/engine/model/weight_init.py
|
Apache-2.0
|
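As a small usage sketch for ``conv_branch_init``, the snippet below initializes the convolutions of a hypothetical three-branch block so that the summed output keeps a reasonable variance; the import path follows the file listed in the row and the layer sizes are illustrative.

import torch.nn as nn
from mmaction.engine.model.weight_init import conv_branch_init

branches = [
    nn.Conv2d(64, 64, kernel_size=3, padding=1, bias=True) for _ in range(3)
]
for conv in branches:
    # the init scales the weight std down by the number of parallel branches
    conv_branch_init(conv, branches=len(branches))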
def add_params(self,
params: List[dict],
module: nn.Module,
prefix: str = 'base',
**kwargs) -> None:
"""Add all parameters of module to the params list.
The parameters of the given module will be added to the list of param
groups, with specific rules defined by paramwise_cfg.
Args:
params (list[dict]): A list of param groups, it will be modified
in place.
module (nn.Module): The module to be added.
prefix (str): The prefix of the module. Defaults to ``'base'``.
"""
for name, param in module.named_parameters(recurse=False):
param_group = {'params': [param]}
if not param.requires_grad:
params.append(param_group)
continue
param_group['lr'] = self.base_lr
if self.base_wd is not None:
param_group['weight_decay'] = self.base_wd
processing_keys = [
key for key in self.paramwise_cfg if key in f'{prefix}.{name}'
]
if processing_keys:
param_group['lr'] *= \
reduce(mul, [self.paramwise_cfg[key].get('lr_mult', 1.)
for key in processing_keys])
if self.base_wd is not None:
param_group['weight_decay'] *= \
reduce(mul, [self.paramwise_cfg[key].
get('decay_mult', 1.)
for key in processing_keys])
params.append(param_group)
for key, value in param_group.items():
if key == 'params':
continue
full_name = f'{prefix}.{name}' if prefix else name
print_log(
f'paramwise_options -- '
f'{full_name}: {key} = {round(value, 8)}',
logger='current')
for child_name, child_mod in module.named_children():
child_prefix = f'{prefix}.{child_name}' if prefix else child_name
self.add_params(params, child_mod, prefix=child_prefix)
|
Add all parameters of module to the params list.
The parameters of the given module will be added to the list of param
groups, with specific rules defined by paramwise_cfg.
Args:
params (list[dict]): A list of param groups, it will be modified
in place.
module (nn.Module): The module to be added.
prefix (str): The prefix of the module. Defaults to ``'base'``.
|
add_params
|
python
|
open-mmlab/mmaction2
|
mmaction/engine/optimizers/swin_optim_wrapper_constructor.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/engine/optimizers/swin_optim_wrapper_constructor.py
|
Apache-2.0
|
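The constructor above matches every key of ``paramwise_cfg`` as a substring of the full parameter name (``'{prefix}.{name}'``) and multiplies the learning rate and weight decay by the corresponding ``lr_mult``/``decay_mult``. A hedged config sketch of how it is typically wired up; the constructor registry name and the concrete keys are assumptions modelled on common Swin configs rather than taken from this row.

optim_wrapper = dict(
    optimizer=dict(type='AdamW', lr=1e-3, weight_decay=0.05),
    constructor='SwinOptimWrapperConstructor',  # assumed registry name
    paramwise_cfg=dict(
        # every parameter whose full name contains one of these substrings
        # gets its lr / weight decay scaled by the given multipliers
        absolute_pos_embed=dict(decay_mult=0.),
        norm=dict(decay_mult=0.),
        backbone=dict(lr_mult=0.1)))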
def add_params(self, params, model, **kwargs):
"""Add parameters and their corresponding lr and wd to the params.
Args:
params (list): The list to be modified, containing all parameter
groups and their corresponding lr and wd configurations.
model (nn.Module): The model to be trained with the optimizer.
"""
# use fc_lr5 to determine whether to specify higher multi-factor
# for fc layer weights and bias.
fc_lr5 = self.paramwise_cfg['fc_lr5']
first_conv_weight = []
first_conv_bias = []
normal_weight = []
normal_bias = []
lr5_weight = []
lr10_bias = []
bn = []
conv_cnt = 0
for m in model.modules():
if isinstance(m, _ConvNd):
m_params = list(m.parameters())
conv_cnt += 1
if conv_cnt == 1:
first_conv_weight.append(m_params[0])
if len(m_params) == 2:
first_conv_bias.append(m_params[1])
else:
normal_weight.append(m_params[0])
if len(m_params) == 2:
normal_bias.append(m_params[1])
elif isinstance(m, torch.nn.Linear):
m_params = list(m.parameters())
normal_weight.append(m_params[0])
if len(m_params) == 2:
normal_bias.append(m_params[1])
elif isinstance(m,
(_BatchNorm, SyncBatchNorm_, torch.nn.GroupNorm)):
for param in list(m.parameters()):
if param.requires_grad:
bn.append(param)
elif len(m._modules) == 0:
if len(list(m.parameters())) > 0:
raise ValueError(f'New atomic module type: {type(m)}. '
'Need to give it a learning policy')
# pop the cls_head fc layer params
last_fc_weight = normal_weight.pop()
last_fc_bias = normal_bias.pop()
if fc_lr5:
lr5_weight.append(last_fc_weight)
lr10_bias.append(last_fc_bias)
else:
normal_weight.append(last_fc_weight)
normal_bias.append(last_fc_bias)
params.append({
'params': first_conv_weight,
'lr': self.base_lr,
'weight_decay': self.base_wd
})
params.append({
'params': first_conv_bias,
'lr': self.base_lr * 2,
'weight_decay': 0
})
params.append({
'params': normal_weight,
'lr': self.base_lr,
'weight_decay': self.base_wd
})
params.append({
'params': normal_bias,
'lr': self.base_lr * 2,
'weight_decay': 0
})
params.append({'params': bn, 'lr': self.base_lr, 'weight_decay': 0})
params.append({
'params': lr5_weight,
'lr': self.base_lr * 5,
'weight_decay': self.base_wd
})
params.append({
'params': lr10_bias,
'lr': self.base_lr * 10,
'weight_decay': 0
})
|
Add parameters and their corresponding lr and wd to the params.
Args:
params (list): The list to be modified, containing all parameter
groups and their corresponding lr and wd configurations.
model (nn.Module): The model to be trained with the optimizer.
|
add_params
|
python
|
open-mmlab/mmaction2
|
mmaction/engine/optimizers/tsm_optim_wrapper_constructor.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/engine/optimizers/tsm_optim_wrapper_constructor.py
|
Apache-2.0
|
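The only switch this constructor reads from ``paramwise_cfg`` is ``fc_lr5``, which routes the final fc weight and bias into the 5x-lr and 10x-lr groups built above. A hedged config sketch; the constructor registry name is assumed from the file name in this row.

optim_wrapper = dict(
    optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=1e-4),
    constructor='TSMOptimWrapperConstructor',  # assumed registry name
    # fc_lr5=True: last fc weight uses 5x lr, its bias 10x lr with no decay
    paramwise_cfg=dict(fc_lr5=True))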
def confusion_matrix(y_pred, y_real, normalize=None):
"""Compute confusion matrix.
Args:
y_pred (list[int] | np.ndarray[int]): Prediction labels.
y_real (list[int] | np.ndarray[int]): Ground truth labels.
normalize (str | None): Normalizes confusion matrix over the true
(rows), predicted (columns) conditions or all the population.
If None, confusion matrix will not be normalized. Options are
"true", "pred", "all", None. Default: None.
Returns:
np.ndarray: Confusion matrix.
"""
if normalize not in ['true', 'pred', 'all', None]:
raise ValueError("normalize must be one of {'true', 'pred', "
"'all', None}")
if isinstance(y_pred, list):
y_pred = np.array(y_pred)
if y_pred.dtype == np.int32:
y_pred = y_pred.astype(np.int64)
if not isinstance(y_pred, np.ndarray):
raise TypeError(
f'y_pred must be list or np.ndarray, but got {type(y_pred)}')
if not y_pred.dtype == np.int64:
raise TypeError(
f'y_pred dtype must be np.int64, but got {y_pred.dtype}')
if isinstance(y_real, list):
y_real = np.array(y_real)
if y_real.dtype == np.int32:
y_real = y_real.astype(np.int64)
if not isinstance(y_real, np.ndarray):
raise TypeError(
f'y_real must be list or np.ndarray, but got {type(y_real)}')
if not y_real.dtype == np.int64:
raise TypeError(
f'y_real dtype must be np.int64, but got {y_real.dtype}')
label_set = np.unique(np.concatenate((y_pred, y_real)))
num_labels = len(label_set)
max_label = label_set[-1]
label_map = np.zeros(max_label + 1, dtype=np.int64)
for i, label in enumerate(label_set):
label_map[label] = i
y_pred_mapped = label_map[y_pred]
y_real_mapped = label_map[y_real]
confusion_mat = np.bincount(
num_labels * y_real_mapped + y_pred_mapped,
minlength=num_labels**2).reshape(num_labels, num_labels)
with np.errstate(all='ignore'):
if normalize == 'true':
confusion_mat = (
confusion_mat / confusion_mat.sum(axis=1, keepdims=True))
elif normalize == 'pred':
confusion_mat = (
confusion_mat / confusion_mat.sum(axis=0, keepdims=True))
elif normalize == 'all':
confusion_mat = (confusion_mat / confusion_mat.sum())
confusion_mat = np.nan_to_num(confusion_mat)
return confusion_mat
|
Compute confusion matrix.
Args:
y_pred (list[int] | np.ndarray[int]): Prediction labels.
y_real (list[int] | np.ndarray[int]): Ground truth labels.
normalize (str | None): Normalizes confusion matrix over the true
(rows), predicted (columns) conditions or all the population.
If None, confusion matrix will not be normalized. Options are
"true", "pred", "all", None. Default: None.
Returns:
np.ndarray: Confusion matrix.
|
confusion_matrix
|
python
|
open-mmlab/mmaction2
|
mmaction/evaluation/functional/accuracy.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/accuracy.py
|
Apache-2.0
|
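A quick numeric check of ``confusion_matrix``, where rows index the ground truth labels and columns the predictions; the import follows the path listed in the row.

from mmaction.evaluation.functional.accuracy import confusion_matrix

y_pred = [0, 1, 1, 2]
y_real = [0, 1, 2, 2]
print(confusion_matrix(y_pred, y_real))
# [[1 0 0]
#  [0 1 0]
#  [0 1 1]]
# with normalize='true', each row is divided by its ground truth count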
def mean_class_accuracy(scores, labels):
"""Calculate mean class accuracy.
Args:
scores (list[np.ndarray]): Prediction scores for each class.
labels (list[int]): Ground truth labels.
Returns:
np.ndarray: Mean class accuracy.
"""
pred = np.argmax(scores, axis=1)
cf_mat = confusion_matrix(pred, labels).astype(float)
cls_cnt = cf_mat.sum(axis=1)
cls_hit = np.diag(cf_mat)
mean_class_acc = np.mean(
[hit / cnt if cnt else 0.0 for cnt, hit in zip(cls_cnt, cls_hit)])
return mean_class_acc
|
Calculate mean class accuracy.
Args:
scores (list[np.ndarray]): Prediction scores for each class.
labels (list[int]): Ground truth labels.
Returns:
np.ndarray: Mean class accuracy.
|
mean_class_accuracy
|
python
|
open-mmlab/mmaction2
|
mmaction/evaluation/functional/accuracy.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/accuracy.py
|
Apache-2.0
|
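A small sanity check for ``mean_class_accuracy``: the metric averages per-class recall, so a minority class counts as much as a majority class. The import path follows the row.

import numpy as np
from mmaction.evaluation.functional.accuracy import mean_class_accuracy

scores = [np.array([0.9, 0.1]), np.array([0.3, 0.7]), np.array([0.6, 0.4])]
labels = [0, 1, 1]
# class 0: 1/1 correct, class 1: 1/2 correct -> mean of (1.0, 0.5)
print(mean_class_accuracy(scores, labels))  # 0.75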
def top_k_classes(scores, labels, k=10, mode='accurate'):
"""Calculate the most K accurate (inaccurate) classes.
Given the prediction scores, ground truth label and top-k value,
compute the top K accurate (inaccurate) classes.
Args:
scores (list[np.ndarray]): Prediction scores for each class.
labels (list[int] | np.ndarray): Ground truth labels.
k (int): Top-k values. Default: 10.
mode (str): Comparison mode for Top-k. Options are 'accurate'
and 'inaccurate'. Default: 'accurate'.
Return:
list: List of sorted (from high accuracy to low accuracy for
'accurate' mode, and from low accuracy to high accuracy for
inaccurate mode) top K classes in format of (label_id,
acc_ratio).
"""
assert mode in ['accurate', 'inaccurate']
pred = np.argmax(scores, axis=1)
cf_mat = confusion_matrix(pred, labels).astype(float)
cls_cnt = cf_mat.sum(axis=1)
cls_hit = np.diag(cf_mat)
hit_ratio = np.array(
[hit / cnt if cnt else 0.0 for cnt, hit in zip(cls_cnt, cls_hit)])
if mode == 'accurate':
max_index = np.argsort(hit_ratio)[-k:][::-1]
max_value = hit_ratio[max_index]
results = list(zip(max_index, max_value))
else:
min_index = np.argsort(hit_ratio)[:k]
min_value = hit_ratio[min_index]
results = list(zip(min_index, min_value))
return results
|
Calculate the most K accurate (inaccurate) classes.
Given the prediction scores, ground truth label and top-k value,
compute the top K accurate (inaccurate) classes.
Args:
scores (list[np.ndarray]): Prediction scores for each class.
labels (list[int] | np.ndarray): Ground truth labels.
k (int): Top-k values. Default: 10.
mode (str): Comparison mode for Top-k. Options are 'accurate'
and 'inaccurate'. Default: 'accurate'.
Return:
list: List of sorted (from high accuracy to low accuracy for
'accurate' mode, and from low accuracy to high accuracy for
inaccurate mode) top K classes in format of (label_id,
acc_ratio).
|
top_k_classes
|
python
|
open-mmlab/mmaction2
|
mmaction/evaluation/functional/accuracy.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/accuracy.py
|
Apache-2.0
|
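A usage sketch for ``top_k_classes``, listing the classes a model struggles with most; the scores are toy numbers and the import follows the row.

import numpy as np
from mmaction.evaluation.functional.accuracy import top_k_classes

scores = [np.array([0.8, 0.1, 0.1]),
          np.array([0.2, 0.7, 0.1]),
          np.array([0.3, 0.5, 0.2]),
          np.array([0.1, 0.2, 0.7])]
labels = [0, 1, 2, 2]
worst = top_k_classes(scores, labels, k=2, mode='inaccurate')
# the first entry is (2, 0.5): class 2 is recognized only half of the time
print(worst)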
def top_k_accuracy(scores, labels, topk=(1, )):
"""Calculate top k accuracy score.
Args:
scores (list[np.ndarray]): Prediction scores for each class.
labels (list[int]): Ground truth labels.
topk (tuple[int]): K value for top_k_accuracy. Default: (1, ).
Returns:
list[float]: Top k accuracy score for each k.
"""
res = []
labels = np.array(labels)[:, np.newaxis]
for k in topk:
max_k_preds = np.argsort(scores, axis=1)[:, -k:][:, ::-1]
match_array = np.logical_or.reduce(max_k_preds == labels, axis=1)
topk_acc_score = match_array.sum() / match_array.shape[0]
res.append(topk_acc_score)
return res
|
Calculate top k accuracy score.
Args:
scores (list[np.ndarray]): Prediction scores for each class.
labels (list[int]): Ground truth labels.
topk (tuple[int]): K value for top_k_accuracy. Default: (1, ).
Returns:
list[float]: Top k accuracy score for each k.
|
top_k_accuracy
|
python
|
open-mmlab/mmaction2
|
mmaction/evaluation/functional/accuracy.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/accuracy.py
|
Apache-2.0
|
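A quick check of ``top_k_accuracy`` with two samples and three classes; the first sample is only recovered within the top-2 predictions. The import path follows the row.

import numpy as np
from mmaction.evaluation.functional.accuracy import top_k_accuracy

scores = [np.array([0.2, 0.5, 0.3]), np.array([0.6, 0.3, 0.1])]
labels = [2, 0]
print(top_k_accuracy(scores, labels, topk=(1, 2)))  # [0.5, 1.0]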
def mean_average_precision(scores, labels):
"""Mean average precision for multi-label recognition.
Args:
scores (list[np.ndarray]): Prediction scores of different classes for
each sample.
labels (list[np.ndarray]): Ground truth many-hot vector for each
sample.
Returns:
np.float64: The mean average precision.
"""
results = []
scores = np.stack(scores).T
labels = np.stack(labels).T
for score, label in zip(scores, labels):
precision, recall, _ = binary_precision_recall_curve(score, label)
ap = -np.sum(np.diff(recall) * np.array(precision)[:-1])
results.append(ap)
results = [x for x in results if not np.isnan(x)]
if results == []:
return np.nan
return np.mean(results)
|
Mean average precision for multi-label recognition.
Args:
scores (list[np.ndarray]): Prediction scores of different classes for
each sample.
labels (list[np.ndarray]): Ground truth many-hot vector for each
sample.
Returns:
np.float64: The mean average precision.
|
mean_average_precision
|
python
|
open-mmlab/mmaction2
|
mmaction/evaluation/functional/accuracy.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/accuracy.py
|
Apache-2.0
|
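A minimal multi-label example for ``mean_average_precision``: every positive class is ranked above every negative class, so each per-class AP is 1.0. The import path follows the row.

import numpy as np
from mmaction.evaluation.functional.accuracy import mean_average_precision

# two samples, three classes, many-hot ground truth vectors
scores = [np.array([0.9, 0.2, 0.1]), np.array([0.3, 0.8, 0.6])]
labels = [np.array([1, 0, 0]), np.array([0, 1, 1])]
print(mean_average_precision(scores, labels))  # 1.0 for this perfect ranking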
def binary_precision_recall_curve(y_score, y_true):
"""Calculate the binary precision recall curve at step thresholds.
Args:
y_score (np.ndarray): Prediction scores for each class.
Shape should be (num_classes, ).
y_true (np.ndarray): Ground truth many-hot vector.
Shape should be (num_classes, ).
Returns:
precision (np.ndarray): The precision of different thresholds.
recall (np.ndarray): The recall of different thresholds.
thresholds (np.ndarray): Different thresholds at which precision and
recall are tested.
"""
assert isinstance(y_score, np.ndarray)
assert isinstance(y_true, np.ndarray)
assert y_score.shape == y_true.shape
# make y_true a boolean vector
y_true = (y_true == 1)
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind='mergesort')[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
# There may be ties in values, therefore find the `distinct_value_inds`
distinct_value_inds = np.where(np.diff(y_score))[0]
threshold_inds = np.r_[distinct_value_inds, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = np.cumsum(y_true)[threshold_inds]
fps = 1 + threshold_inds - tps
thresholds = y_score[threshold_inds]
precision = tps / (tps + fps)
precision[np.isnan(precision)] = 0
recall = tps / tps[-1]
# stop when full recall attained
# and reverse the outputs so recall is decreasing
last_ind = tps.searchsorted(tps[-1])
sl = slice(last_ind, None, -1)
return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
|
Calculate the binary precision recall curve at step thresholds.
Args:
y_score (np.ndarray): Prediction scores for each class.
Shape should be (num_classes, ).
y_true (np.ndarray): Ground truth many-hot vector.
Shape should be (num_classes, ).
Returns:
precision (np.ndarray): The precision of different thresholds.
recall (np.ndarray): The recall of different thresholds.
thresholds (np.ndarray): Different thresholds at which precision and
recall are tested.
|
binary_precision_recall_curve
|
python
|
open-mmlab/mmaction2
|
mmaction/evaluation/functional/accuracy.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/accuracy.py
|
Apache-2.0
|
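A small worked example of ``binary_precision_recall_curve``; the outputs are reversed so that recall is decreasing, with a trailing (precision=1, recall=0) point appended. The import path follows the row.

import numpy as np
from mmaction.evaluation.functional.accuracy import \
    binary_precision_recall_curve

y_score = np.array([0.9, 0.6, 0.3, 0.1])
y_true = np.array([1, 0, 1, 0])
precision, recall, thresholds = binary_precision_recall_curve(y_score, y_true)
print(precision)   # approx [0.667 0.5   1.    1.   ]
print(recall)      # [1.  0.5 0.5 0. ]
print(thresholds)  # [0.3 0.6 0.9]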
def pairwise_temporal_iou(candidate_segments,
target_segments,
calculate_overlap_self=False):
"""Compute intersection over union between segments.
Args:
candidate_segments (np.ndarray): 1-dim/2-dim array in format
``[init, end]/[m x 2:=[init, end]]``.
target_segments (np.ndarray): 2-dim array in format
``[n x 2:=[init, end]]``.
calculate_overlap_self (bool): Whether to calculate overlap_self
            (intersection / candidate_length) or not. Default: False.
Returns:
t_iou (np.ndarray): 1-dim array [n] /
2-dim array [n x m] with IoU ratio.
t_overlap_self (np.ndarray, optional): 1-dim array [n] /
2-dim array [n x m] with overlap_self, returns when
calculate_overlap_self is True.
"""
candidate_segments_ndim = candidate_segments.ndim
if target_segments.ndim != 2 or candidate_segments_ndim not in [1, 2]:
raise ValueError('Dimension of arguments is incorrect')
if candidate_segments_ndim == 1:
candidate_segments = candidate_segments[np.newaxis, :]
n, m = target_segments.shape[0], candidate_segments.shape[0]
t_iou = np.empty((n, m), dtype=np.float32)
if calculate_overlap_self:
t_overlap_self = np.empty((n, m), dtype=np.float32)
for i in range(m):
candidate_segment = candidate_segments[i, :]
tt1 = np.maximum(candidate_segment[0], target_segments[:, 0])
tt2 = np.minimum(candidate_segment[1], target_segments[:, 1])
# Intersection including Non-negative overlap score.
segments_intersection = (tt2 - tt1).clip(0)
# Segment union.
segments_union = ((target_segments[:, 1] - target_segments[:, 0]) +
(candidate_segment[1] - candidate_segment[0]) -
segments_intersection)
# Compute overlap as the ratio of the intersection
# over union of two segments.
t_iou[:, i] = (segments_intersection.astype(float) / segments_union)
if calculate_overlap_self:
candidate_length = candidate_segment[1] - candidate_segment[0]
t_overlap_self[:, i] = (
segments_intersection.astype(float) / candidate_length)
if candidate_segments_ndim == 1:
t_iou = np.squeeze(t_iou, axis=1)
if calculate_overlap_self:
if candidate_segments_ndim == 1:
t_overlap_self = np.squeeze(t_overlap_self, axis=1)
return t_iou, t_overlap_self
return t_iou
|
Compute intersection over union between segments.
Args:
candidate_segments (np.ndarray): 1-dim/2-dim array in format
``[init, end]/[m x 2:=[init, end]]``.
target_segments (np.ndarray): 2-dim array in format
``[n x 2:=[init, end]]``.
calculate_overlap_self (bool): Whether to calculate overlap_self
        (intersection / candidate_length) or not. Default: False.
Returns:
t_iou (np.ndarray): 1-dim array [n] /
2-dim array [n x m] with IoU ratio.
t_overlap_self (np.ndarray, optional): 1-dim array [n] /
2-dim array [n x m] with overlap_self, returns when
calculate_overlap_self is True.
|
pairwise_temporal_iou
|
python
|
open-mmlab/mmaction2
|
mmaction/evaluation/functional/accuracy.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/accuracy.py
|
Apache-2.0
|
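A quick check of ``pairwise_temporal_iou`` with one candidate segment against two ground truth segments; since the candidate is 1-dim, a 1-dim IoU array comes back. The import path follows the row.

import numpy as np
from mmaction.evaluation.functional.accuracy import pairwise_temporal_iou

candidate = np.array([0., 4.])             # one [start, end] segment
targets = np.array([[2., 4.], [5., 7.]])   # n x 2 ground truth segments
print(pairwise_temporal_iou(candidate, targets))  # [0.5 0. ]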
def average_recall_at_avg_proposals(ground_truth,
proposals,
total_num_proposals,
max_avg_proposals=None,
temporal_iou_thresholds=np.linspace(
0.5, 0.95, 10)):
"""Computes the average recall given an average number (percentile) of
proposals per video.
Args:
ground_truth (dict): Dict containing the ground truth instances.
proposals (dict): Dict containing the proposal instances.
total_num_proposals (int): Total number of proposals in the
proposal dict.
max_avg_proposals (int | None): Max number of proposals for one video.
Default: None.
temporal_iou_thresholds (np.ndarray): 1D array with temporal_iou
thresholds. Default: ``np.linspace(0.5, 0.95, 10)``.
Returns:
tuple([np.ndarray, np.ndarray, np.ndarray, float]):
(recall, average_recall, proposals_per_video, auc)
In recall, ``recall[i,j]`` is recall at i-th temporal_iou threshold
at the j-th average number (percentile) of average number of
proposals per video. The average_recall is recall averaged
over a list of temporal_iou threshold (1D array). This is
equivalent to ``recall.mean(axis=0)``. The ``proposals_per_video``
is the average number of proposals per video. The auc is the area
under ``AR@AN`` curve.
"""
total_num_videos = len(ground_truth)
if not max_avg_proposals:
max_avg_proposals = float(total_num_proposals) / total_num_videos
ratio = (max_avg_proposals * float(total_num_videos) / total_num_proposals)
# For each video, compute temporal_iou scores among the retrieved proposals
score_list = []
total_num_retrieved_proposals = 0
for video_id in ground_truth:
# Get proposals for this video.
proposals_video_id = proposals[video_id]
this_video_proposals = proposals_video_id[:, :2]
# Sort proposals by score.
sort_idx = proposals_video_id[:, 2].argsort()[::-1]
this_video_proposals = this_video_proposals[sort_idx, :].astype(
np.float32)
# Get ground-truth instances associated to this video.
ground_truth_video_id = ground_truth[video_id]
this_video_ground_truth = ground_truth_video_id[:, :2].astype(
np.float32)
if this_video_proposals.shape[0] == 0:
n = this_video_ground_truth.shape[0]
score_list.append(np.zeros((n, 1)))
continue
if this_video_proposals.ndim != 2:
this_video_proposals = np.expand_dims(this_video_proposals, axis=0)
if this_video_ground_truth.ndim != 2:
this_video_ground_truth = np.expand_dims(
this_video_ground_truth, axis=0)
num_retrieved_proposals = np.minimum(
int(this_video_proposals.shape[0] * ratio),
this_video_proposals.shape[0])
total_num_retrieved_proposals += num_retrieved_proposals
this_video_proposals = this_video_proposals[:
num_retrieved_proposals, :]
# Compute temporal_iou scores.
t_iou = pairwise_temporal_iou(this_video_proposals,
this_video_ground_truth)
score_list.append(t_iou)
# Given that the length of the videos is really varied, we
# compute the number of proposals in terms of a ratio of the total
# proposals retrieved, i.e. average recall at a percentage of proposals
# retrieved per video.
# Computes average recall.
pcn_list = np.arange(1, 101) / 100.0 * (
max_avg_proposals * float(total_num_videos) /
total_num_retrieved_proposals)
matches = np.empty((total_num_videos, pcn_list.shape[0]))
positives = np.empty(total_num_videos)
recall = np.empty((temporal_iou_thresholds.shape[0], pcn_list.shape[0]))
# Iterates over each temporal_iou threshold.
for ridx, temporal_iou in enumerate(temporal_iou_thresholds):
# Inspect positives retrieved per video at different
# number of proposals (percentage of the total retrieved).
for i, score in enumerate(score_list):
# Total positives per video.
positives[i] = score.shape[0]
# Find proposals that satisfies minimum temporal_iou threshold.
true_positives_temporal_iou = score >= temporal_iou
# Get number of proposals as a percentage of total retrieved.
pcn_proposals = np.minimum(
(score.shape[1] * pcn_list).astype(np.int32), score.shape[1])
for j, num_retrieved_proposals in enumerate(pcn_proposals):
# Compute the number of matches
# for each percentage of the proposals
matches[i, j] = np.count_nonzero(
(true_positives_temporal_iou[:, :num_retrieved_proposals]
).sum(axis=1))
# Computes recall given the set of matches per video.
recall[ridx, :] = matches.sum(axis=0) / positives.sum()
# Recall is averaged.
avg_recall = recall.mean(axis=0)
# Get the average number of proposals per video.
proposals_per_video = pcn_list * (
float(total_num_retrieved_proposals) / total_num_videos)
# Get AUC
area_under_curve = np.trapz(avg_recall, proposals_per_video)
auc = 100. * float(area_under_curve) / proposals_per_video[-1]
return recall, avg_recall, proposals_per_video, auc
|
Computes the average recall given an average number (percentile) of
proposals per video.
Args:
ground_truth (dict): Dict containing the ground truth instances.
proposals (dict): Dict containing the proposal instances.
total_num_proposals (int): Total number of proposals in the
proposal dict.
max_avg_proposals (int | None): Max number of proposals for one video.
Default: None.
temporal_iou_thresholds (np.ndarray): 1D array with temporal_iou
thresholds. Default: ``np.linspace(0.5, 0.95, 10)``.
Returns:
tuple([np.ndarray, np.ndarray, np.ndarray, float]):
(recall, average_recall, proposals_per_video, auc)
In recall, ``recall[i,j]`` is recall at i-th temporal_iou threshold
at the j-th average number (percentile) of average number of
proposals per video. The average_recall is recall averaged
over a list of temporal_iou threshold (1D array). This is
equivalent to ``recall.mean(axis=0)``. The ``proposals_per_video``
is the average number of proposals per video. The auc is the area
under ``AR@AN`` curve.
|
average_recall_at_avg_proposals
|
python
|
open-mmlab/mmaction2
|
mmaction/evaluation/functional/accuracy.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/accuracy.py
|
Apache-2.0
|
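A toy end-to-end call of ``average_recall_at_avg_proposals`` with two videos whose proposals are stored as ``[t-start, t-end, score]`` rows; all numbers are made up and only the output shapes are checked here. The import path follows the row.

import numpy as np
from mmaction.evaluation.functional.accuracy import \
    average_recall_at_avg_proposals

ground_truth = {
    'v_0': np.array([[0., 10.], [20., 30.]]),
    'v_1': np.array([[5., 15.]]),
}
proposals = {
    'v_0': np.array([[0., 11., 0.9], [19., 31., 0.8], [40., 50., 0.2]]),
    'v_1': np.array([[4., 16., 0.7], [30., 40., 0.1]]),
}
total_num_proposals = sum(len(p) for p in proposals.values())
recall, avg_recall, proposals_per_video, auc = \
    average_recall_at_avg_proposals(ground_truth, proposals,
                                    total_num_proposals)
print(recall.shape, avg_recall.shape)  # (10, 100) (100,)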
def get_weighted_score(score_list, coeff_list):
"""Get weighted score with given scores and coefficients.
Given n predictions by different classifier: [score_1, score_2, ...,
score_n] (score_list) and their coefficients: [coeff_1, coeff_2, ...,
coeff_n] (coeff_list), return weighted score: weighted_score =
score_1 * coeff_1 + score_2 * coeff_2 + ... + score_n * coeff_n
Args:
score_list (list[list[np.ndarray]]): List of list of scores, with shape
n(number of predictions) X num_samples X num_classes
coeff_list (list[float]): List of coefficients, with shape n.
Returns:
list[np.ndarray]: List of weighted scores.
"""
assert len(score_list) == len(coeff_list)
num_samples = len(score_list[0])
for i in range(1, len(score_list)):
assert len(score_list[i]) == num_samples
scores = np.array(score_list) # (num_coeff, num_samples, num_classes)
coeff = np.array(coeff_list) # (num_coeff, )
weighted_scores = list(np.dot(scores.T, coeff).T)
return weighted_scores
|
Get weighted score with given scores and coefficients.
Given n predictions by different classifier: [score_1, score_2, ...,
score_n] (score_list) and their coefficients: [coeff_1, coeff_2, ...,
coeff_n] (coeff_list), return weighted score: weighted_score =
score_1 * coeff_1 + score_2 * coeff_2 + ... + score_n * coeff_n
Args:
score_list (list[list[np.ndarray]]): List of list of scores, with shape
n(number of predictions) X num_samples X num_classes
coeff_list (list[float]): List of coefficients, with shape n.
Returns:
list[np.ndarray]: List of weighted scores.
|
get_weighted_score
|
python
|
open-mmlab/mmaction2
|
mmaction/evaluation/functional/accuracy.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/accuracy.py
|
Apache-2.0
|
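A typical use of ``get_weighted_score`` is late fusion of two streams (for instance RGB and flow); the coefficients below are illustrative. The import path follows the row.

import numpy as np
from mmaction.evaluation.functional.accuracy import get_weighted_score

rgb_scores = [np.array([0.7, 0.3]), np.array([0.2, 0.8])]
flow_scores = [np.array([0.6, 0.4]), np.array([0.4, 0.6])]
fused = get_weighted_score([rgb_scores, flow_scores], [1.0, 0.5])
print(fused)  # [array([1. , 0.5]), array([0.4, 1.1])]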
def average_precision_at_temporal_iou(ground_truth,
prediction,
temporal_iou_thresholds=(np.linspace(
0.5, 0.95, 10))):
"""Compute average precision (in detection task) between ground truth and
    predicted data frames. If multiple predictions match the same ground truth
    segment, only the one with highest score is matched as true positive. This
code is greatly inspired by Pascal VOC devkit.
Args:
ground_truth (dict): Dict containing the ground truth instances.
Key: 'video_id'
Value (np.ndarray): 1D array of 't-start' and 't-end'.
prediction (np.ndarray): 2D array containing the information of
proposal instances, including 'video_id', 'class_id', 't-start',
't-end' and 'score'.
temporal_iou_thresholds (np.ndarray): 1D array with temporal_iou
thresholds. Default: ``np.linspace(0.5, 0.95, 10)``.
Returns:
np.ndarray: 1D array of average precision score.
"""
ap = np.zeros(len(temporal_iou_thresholds), dtype=np.float32)
if len(prediction) < 1:
return ap
num_gts = 0.
lock_gt = dict()
for key in ground_truth:
lock_gt[key] = np.ones(
(len(temporal_iou_thresholds), len(ground_truth[key]))) * -1
num_gts += len(ground_truth[key])
# Sort predictions by decreasing score order.
prediction = np.array(prediction)
scores = prediction[:, 4].astype(float)
sort_idx = np.argsort(scores)[::-1]
prediction = prediction[sort_idx]
# Initialize true positive and false positive vectors.
tp = np.zeros((len(temporal_iou_thresholds), len(prediction)),
dtype=np.int32)
fp = np.zeros((len(temporal_iou_thresholds), len(prediction)),
dtype=np.int32)
    # Assign true positives to matching ground truth instances.
for idx, this_pred in enumerate(prediction):
# Check if there is at least one ground truth in the video.
if this_pred[0] in ground_truth:
this_gt = np.array(ground_truth[this_pred[0]], dtype=float)
else:
fp[:, idx] = 1
continue
t_iou = pairwise_temporal_iou(this_pred[2:4].astype(float), this_gt)
# We would like to retrieve the predictions with highest t_iou score.
t_iou_sorted_idx = t_iou.argsort()[::-1]
for t_idx, t_iou_threshold in enumerate(temporal_iou_thresholds):
for jdx in t_iou_sorted_idx:
if t_iou[jdx] < t_iou_threshold:
fp[t_idx, idx] = 1
break
if lock_gt[this_pred[0]][t_idx, jdx] >= 0:
continue
# Assign as true positive after the filters above.
tp[t_idx, idx] = 1
lock_gt[this_pred[0]][t_idx, jdx] = idx
break
if fp[t_idx, idx] == 0 and tp[t_idx, idx] == 0:
fp[t_idx, idx] = 1
tp_cumsum = np.cumsum(tp, axis=1).astype(np.float32)
fp_cumsum = np.cumsum(fp, axis=1).astype(np.float32)
recall_cumsum = tp_cumsum / num_gts
precision_cumsum = tp_cumsum / (tp_cumsum + fp_cumsum)
for t_idx in range(len(temporal_iou_thresholds)):
ap[t_idx] = interpolated_precision_recall(precision_cumsum[t_idx, :],
recall_cumsum[t_idx, :])
return ap
|
Compute average precision (in detection task) between ground truth and
predicted data frames. If multiple predictions match the same ground truth
segment, only the one with highest score is matched as true positive. This
code is greatly inspired by Pascal VOC devkit.
Args:
ground_truth (dict): Dict containing the ground truth instances.
Key: 'video_id'
Value (np.ndarray): 1D array of 't-start' and 't-end'.
prediction (np.ndarray): 2D array containing the information of
proposal instances, including 'video_id', 'class_id', 't-start',
't-end' and 'score'.
temporal_iou_thresholds (np.ndarray): 1D array with temporal_iou
thresholds. Default: ``np.linspace(0.5, 0.95, 10)``.
Returns:
np.ndarray: 1D array of average precision score.
|
average_precision_at_temporal_iou
|
python
|
open-mmlab/mmaction2
|
mmaction/evaluation/functional/accuracy.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/accuracy.py
|
Apache-2.0
|
def det2csv(results, custom_classes):
"""Convert detection results to csv file."""
csv_results = []
for idx in range(len(results)):
video_id = results[idx]['video_id']
timestamp = results[idx]['timestamp']
result = results[idx]['outputs']
for label, _ in enumerate(result):
for bbox in result[label]:
bbox_ = tuple(bbox.tolist())
if custom_classes is not None:
actual_label = custom_classes[label + 1]
else:
actual_label = label + 1
csv_results.append((
video_id,
timestamp,
) + bbox_[:4] + (actual_label, ) + bbox_[4:])
return csv_results
|
Convert detection results to csv file.
|
det2csv
|
python
|
open-mmlab/mmaction2
|
mmaction/evaluation/functional/ava_utils.py
|
https://github.com/open-mmlab/mmaction2/blob/master/mmaction/evaluation/functional/ava_utils.py
|
Apache-2.0
|
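A minimal sketch of the result structure ``det2csv`` expects: one entry per keyframe, with ``outputs`` holding one ``[x1, y1, x2, y2, score]`` array per class index. The sample values are made up; the import follows the path in the row.

import numpy as np
from mmaction.evaluation.functional.ava_utils import det2csv

results = [dict(
    video_id='video_0001',
    timestamp=902,
    outputs=[
        np.array([[0.1, 0.2, 0.4, 0.8, 0.95]]),  # class index 0 -> label 1
        np.empty((0, 5)),                        # class index 1 -> no boxes
    ])]
print(det2csv(results, custom_classes=None))
# [('video_0001', 902, 0.1, 0.2, 0.4, 0.8, 1, 0.95)]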