diff --git a/pyskl/datasets/pipelines/augmentations.py b/pyskl/datasets/pipelines/augmentations.py
index 833422c8..6e714185 100644
--- a/pyskl/datasets/pipelines/augmentations.py
+++ b/pyskl/datasets/pipelines/augmentations.py
@@ -293,9 +293,9 @@ def get_crop_bbox(img_shape,
                 np.log(min_ar), np.log(max_ar), size=max_attempts))
         target_areas = np.random.uniform(*area_range, size=max_attempts) * area
         candidate_crop_w = np.round(np.sqrt(target_areas *
-                                            aspect_ratios)).astype(np.int32)
+                                            aspect_ratios)).astype(int)
         candidate_crop_h = np.round(np.sqrt(target_areas /
-                                            aspect_ratios)).astype(np.int32)
+                                            aspect_ratios)).astype(int)
 
         for i in range(max_attempts):
             crop_w = candidate_crop_w[i]
diff --git a/pyskl/datasets/pipelines/multi_modality.py b/pyskl/datasets/pipelines/multi_modality.py
index 5cac22e7..6f989641 100644
--- a/pyskl/datasets/pipelines/multi_modality.py
+++ b/pyskl/datasets/pipelines/multi_modality.py
@@ -67,7 +67,7 @@ def __call__(self, results):
             else:
                 inds = self._get_train_clips(num_frames, clip_len)
             inds = np.mod(inds, num_frames)
-            results[f'{modality}_inds'] = inds.astype(np.int)
+            results[f'{modality}_inds'] = inds.astype(int)
             modalities.append(modality)
         results['clip_len'] = self.clip_len
         results['frame_interval'] = None
diff --git a/pyskl/datasets/pipelines/sampling.py b/pyskl/datasets/pipelines/sampling.py
index 7c29665b..11fe601d 100644
--- a/pyskl/datasets/pipelines/sampling.py
+++ b/pyskl/datasets/pipelines/sampling.py
@@ -153,11 +153,11 @@ def __call__(self, results):
                     transitional[i] = transitional[i - 1] = True
                 if num_persons[i] != num_persons[i + 1]:
                     transitional[i] = transitional[i + 1] = True
-            inds_int = inds.astype(np.int)
+            inds_int = inds.astype(int)
             coeff = np.array([transitional[i] for i in inds_int])
             inds = (coeff * inds_int + (1 - coeff) * inds).astype(np.float32)
 
-        results['frame_inds'] = inds.astype(np.int)
+        results['frame_inds'] = inds.astype(int)
         results['clip_len'] = self.clip_len
         results['frame_interval'] = None
         results['num_clips'] = self.num_clips
@@ -354,9 +354,9 @@ def _get_train_clips(self, num_frames):
             if num_frames > ori_clip_len - 1:
                 base_offsets = np.arange(self.num_clips) * avg_interval
                 clip_offsets = (base_offsets + np.random.uniform(
-                    0, avg_interval, self.num_clips)).astype(np.int)
+                    0, avg_interval, self.num_clips)).astype(int)
             else:
-                clip_offsets = np.zeros((self.num_clips, ), dtype=np.int)
+                clip_offsets = np.zeros((self.num_clips, ), dtype=int)
         else:
             avg_interval = (num_frames - ori_clip_len +
                             1) // self.num_clips
@@ -372,7 +372,7 @@ def _get_train_clips(self, num_frames):
                 ratio = (num_frames - ori_clip_len + 1.0) / self.num_clips
                 clip_offsets = np.around(np.arange(self.num_clips) * ratio)
             else:
-                clip_offsets = np.zeros((self.num_clips, ), dtype=np.int)
+                clip_offsets = np.zeros((self.num_clips, ), dtype=int)
 
         return clip_offsets
 
@@ -394,11 +394,11 @@ def _get_test_clips(self, num_frames):
         avg_interval = (num_frames - ori_clip_len + 1) / float(self.num_clips)
         if num_frames > ori_clip_len - 1:
             base_offsets = np.arange(self.num_clips) * avg_interval
-            clip_offsets = (base_offsets + avg_interval / 2.0).astype(np.int)
+            clip_offsets = (base_offsets + avg_interval / 2.0).astype(int)
             if self.twice_sample:
                 clip_offsets = np.concatenate([clip_offsets, base_offsets])
         else:
-            clip_offsets = np.zeros((self.num_clips, ), dtype=np.int)
+            clip_offsets = np.zeros((self.num_clips, ), dtype=int)
         return clip_offsets
 
     def _sample_clips(self, num_frames, test_mode=False):
@@ -450,7 +450,7 @@ def __call__(self, results):
         start_index = results['start_index']
         frame_inds = np.concatenate(frame_inds) + start_index
-        results['frame_inds'] = frame_inds.astype(np.int)
+        results['frame_inds'] = frame_inds.astype(int)
         results['clip_len'] = self.clip_len
         results['frame_interval'] = self.frame_interval
         results['num_clips'] = self.num_clips
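
A note on the motivation behind these one-for-one swaps: `np.int` was never a distinct NumPy type, only an alias for the Python builtin `int`; it was deprecated in NumPy 1.20 and removed in NumPy 1.24, so every `astype(np.int)` and `dtype=np.int` above raises an `AttributeError` on current NumPy. The `np.int32` -> `int` changes in `augmentations.py` additionally widen the crop sizes to the platform default integer (typically `int64` on 64-bit systems), which is harmless for pixel dimensions. The sketch below illustrates the before/after behavior; the array values are made up for illustration and are not from the pipeline code.

```python
import numpy as np

inds = np.array([0.0, 1.7, 3.2])

# Old code: inds.astype(np.int). On NumPy >= 1.24 this raises
# AttributeError ("module 'numpy' has no attribute 'int'"), because
# the deprecated alias for the builtin int was removed.

# New code: the builtin int maps to NumPy's default integer type,
# typically int64 on 64-bit platforms.
frame_inds = inds.astype(int)
print(frame_inds, frame_inds.dtype)  # [0 1 3] int64

# The np.int32 casts in augmentations.py were an explicit 32-bit
# width; using int instead widens the result, which is safe here
# since crop widths/heights fit comfortably in either type.
crop_w = np.round(np.sqrt(np.array([1024.0]))).astype(int)
print(crop_w, crop_w.dtype)  # [32] int64
```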