black formatting (DeepLabCut#1605)
AlexEMG authored Dec 9, 2021
1 parent 1867e47 commit 5ac4c8c
Showing 53 changed files with 751 additions and 777 deletions.
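The hunks below are mechanical rewrites produced by the black code formatter; no behavior changes are intended. The commit does not record the black version or options used, so the snippet that follows is only a minimal sketch, assuming a default black installation, of how the most common pattern in this diff (a redundantly parenthesized assignment collapsed back onto one line) can be reproduced with black's Python API.

# Editor's sketch, not part of the commit; assumes `black` is installed and that
# its defaults match the (unrecorded) settings used for commit 5ac4c8c.
import black

old = "rectify_scale = (\n    alpha\n)\n"
new = black.format_str(old, mode=black.FileMode())
print(new)  # black drops the redundant parentheses: "rectify_scale = alpha"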
4 changes: 1 addition & 3 deletions deeplabcut/create_project/add.py
@@ -79,9 +79,7 @@ def add_new_videos(config, videos, copy_videos=False, coords=None):
os.symlink(src, dst)

if copy_videos:
videos = (
destinations
) # in this case the *new* location should be added to the config file
videos = destinations # in this case the *new* location should be added to the config file
# adds the video list to the config.yaml file
for idx, video in enumerate(videos):
try:
15 changes: 7 additions & 8 deletions deeplabcut/create_project/new.py
@@ -158,18 +158,19 @@ def create_new_project(
except OSError:
try:
import subprocess

subprocess.check_call("mklink %s %s" % (dst, src), shell=True)
except OSError:
print("Symlink creation impossible (exFat architecture?): "
"cutting/pasting the video instead.")
print(
"Symlink creation impossible (exFat architecture?): "
"cutting/pasting the video instead."
)
shutil.move(os.fspath(src), os.fspath(dst))
print("{} moved to {}".format(src, dst))
videos = destinations

if copy_videos == True:
videos = (
destinations
) # in this case the *new* location should be added to the config file
videos = destinations # in this case the *new* location should be added to the config file

# adds the video list to the config.yaml file
video_sets = {}
@@ -240,9 +241,7 @@ def create_new_project(
cfg_file["y2"] = 624
cfg_file[
"batch_size"
] = (
8
) # batch size during inference (video - analysis); see https://www.biorxiv.org/content/early/2018/10/30/457242
] = 8 # batch size during inference (video - analysis); see https://www.biorxiv.org/content/early/2018/10/30/457242
cfg_file["corner2move2"] = (50, 50)
cfg_file["move2corner"] = True
cfg_file["skeleton_color"] = "black"
4 changes: 3 additions & 1 deletion deeplabcut/generate_training_dataset/frame_extraction.py
@@ -393,7 +393,9 @@ def extract_frames(
elif any(has_failed):
print("Although most frames were extracted, some were invalid.")
else:
print("Frames were successfully extracted, for the videos listed in the config.yaml file.")
print(
"Frames were successfully extracted, for the videos listed in the config.yaml file."
)
print(
"\nYou can now label the frames using the function 'label_frames' "
"(Note, you should label frames extracted from diverse videos (and many videos; we do not recommend training on single videos!))."
(Next changed file; its name was not captured on this page.)
@@ -31,10 +31,7 @@


def format_multianimal_training_data(
df,
train_inds,
project_path,
n_decimals=2,
df, train_inds, project_path, n_decimals=2,
):
train_data = []
nrows = df.shape[0]
@@ -45,9 +42,7 @@ def format_multianimal_training_data(
mask_single = individuals.str.contains("single")
n_animals = n_individuals - 1 if np.any(mask_single) else n_individuals
array = np.full(
(nrows, n_individuals, n_bodyparts, 3),
fill_value=np.nan,
dtype=np.float32
(nrows, n_individuals, n_bodyparts, 3), fill_value=np.nan, dtype=np.float32
)
array[..., 0] = np.arange(n_bodyparts)
temp = df.to_numpy()
@@ -56,9 +51,7 @@ def format_multianimal_training_data(
array[:, :n_animals, :n_multibodyparts, 1:] = temp_multi
if n_animals != n_individuals: # There is a unique individual
n_uniquebodyparts = n_bodyparts - n_multibodyparts
temp_single = np.reshape(
temp[:, mask_single], (nrows, 1, n_uniquebodyparts, 2)
)
temp_single = np.reshape(temp[:, mask_single], (nrows, 1, n_uniquebodyparts, 2))
array[:, -1:, -n_uniquebodyparts:, 1:] = temp_single
array = np.round(array, decimals=n_decimals)
for i in tqdm(train_inds):
@@ -177,8 +170,10 @@ def create_multianimaltraining_dataset(
raise ValueError("Crop size must be a tuple of two integers (width, height).")

if crop_sampling not in ("uniform", "keypoints", "density", "hybrid"):
raise ValueError(f"Invalid sampling {crop_sampling}. Must be "
f"either 'uniform', 'keypoints', 'density', or 'hybrid.")
raise ValueError(
f"Invalid sampling {crop_sampling}. Must be "
f"either 'uniform', 'keypoints', 'density', or 'hybrid."
)

# Loading metadata from config file:
cfg = auxiliaryfunctions.read_config(config)
@@ -204,7 +199,7 @@ def create_multianimaltraining_dataset(
### dlcr101_ms5/dlcr152_ms5: backbone resnet101/152 + multi-fusion & multi-stage module
if all(net in net_type for net in ("dlcr", "_ms5")):
num_layers = re.findall("dlcr([0-9]*)", net_type)[0]
if num_layers == '':
if num_layers == "":
num_layers = 50
net_type = "resnet_{}".format(num_layers)
multi_stage = True
@@ -219,19 +214,16 @@ def create_multianimaltraining_dataset(
if paf_graph is None: # Automatically form a complete PAF graph
partaffinityfield_graph = [
list(edge) for edge in combinations(range(len(multianimalbodyparts)), 2)
]
]
else:
# Ignore possible connections between 'multi' and 'unique' body parts;
# one can never be too careful...
to_ignore = auxfun_multianimal.filter_unwanted_paf_connections(
cfg, paf_graph
)
to_ignore = auxfun_multianimal.filter_unwanted_paf_connections(cfg, paf_graph)
partaffinityfield_graph = [
edge for i, edge in enumerate(paf_graph) if i not in to_ignore
]
auxfun_multianimal.validate_paf_graph(cfg, partaffinityfield_graph)


print("Utilizing the following graph:", partaffinityfield_graph)
# Disable the prediction of PAFs if the graph is empty
partaffinityfield_predict = bool(partaffinityfield_graph)
@@ -253,12 +245,8 @@ def create_multianimaltraining_dataset(
splits = []
for shuffle in Shuffles: # Creating shuffles starting from 1
for train_frac in cfg["TrainingFraction"]:
train_inds, test_inds = SplitTrials(
range(len(Data)), train_frac
)
splits.append(
(train_frac, shuffle, (train_inds, test_inds))
)
train_inds, test_inds = SplitTrials(range(len(Data)), train_frac)
splits.append((train_frac, shuffle, (train_inds, test_inds)))
else:
if len(trainIndices) != len(testIndices) != len(Shuffles):
raise ValueError(
@@ -280,9 +268,7 @@ def create_multianimaltraining_dataset(
train_inds = train_inds[train_inds != -1]
test_inds = np.asarray(test_inds)
test_inds = test_inds[test_inds != -1]
splits.append(
(trainFraction, Shuffles[shuffle], (train_inds, test_inds))
)
splits.append((trainFraction, Shuffles[shuffle], (train_inds, test_inds)))

for trainFraction, shuffle, (trainIndices, testIndices) in splits:
####################################################
@@ -297,10 +283,7 @@ def create_multianimaltraining_dataset(

# Make training file!
data = format_multianimal_training_data(
Data,
trainIndices,
cfg["project_path"],
numdigits,
Data, trainIndices, cfg["project_path"], numdigits,
)

if len(trainIndices) > 0:
@@ -356,10 +339,7 @@ def create_multianimaltraining_dataset(
)
path_test_config = str(
os.path.join(
cfg["project_path"],
Path(modelfoldername),
"test",
"pose_cfg.yaml",
cfg["project_path"], Path(modelfoldername), "test", "pose_cfg.yaml",
)
)
path_inference_config = str(
@@ -379,8 +359,7 @@ def create_multianimaltraining_dataset(
"num_joints": len(multianimalbodyparts)
+ len(uniquebodyparts), # cfg["uniquebodyparts"]),
"all_joints": [
[i]
for i in range(len(multianimalbodyparts) + len(uniquebodyparts))
[i] for i in range(len(multianimalbodyparts) + len(uniquebodyparts))
], # cfg["uniquebodyparts"]))],
"all_joints_names": jointnames,
"init_weights": model_path,
@@ -444,9 +423,7 @@ def create_multianimaltraining_dataset(
dlcparent_path, "inference_cfg.yaml"
)
items2change = {
"minimalnumberofconnections": int(
len(cfg["multianimalbodyparts"]) / 2
),
"minimalnumberofconnections": int(len(cfg["multianimalbodyparts"]) / 2),
"topktoretain": len(cfg["individuals"])
+ 1 * (len(cfg["uniquebodyparts"]) > 0),
"withid": cfg.get("identity", False),
@@ -463,10 +440,7 @@ def create_multianimaltraining_dataset(


def convert_cropped_to_standard_dataset(
config_path,
recreate_datasets=True,
delete_crops=True,
back_up=True,
config_path, recreate_datasets=True, delete_crops=True, back_up=True,
):
import pandas as pd
import pickle
@@ -478,8 +452,10 @@ def convert_cropped_to_standard_dataset(
videos_orig = cfg.pop("video_sets_original")
is_cropped = cfg.pop("croppedtraining")
if videos_orig is None or not is_cropped:
print("Labeled data do not appear to be cropped. "
"Project will remain unchanged...")
print(
"Labeled data do not appear to be cropped. "
"Project will remain unchanged..."
)
return

project_path = cfg["project_path"]
@@ -545,9 +521,7 @@ def strip_cropped_image_name(path):

# Search a pose_config.yaml file to parse missing information
pose_config_path = ""
for dirpath, _, filenames in os.walk(
os.path.join(project_path, "dlc-models")
):
for dirpath, _, filenames in os.walk(os.path.join(project_path, "dlc-models")):
for file in filenames:
if file.endswith("pose_cfg.yaml"):
pose_config_path = os.path.join(dirpath, file)
37 changes: 15 additions & 22 deletions deeplabcut/generate_training_dataset/trainingsetmanipulation.py
@@ -395,12 +395,12 @@ def _robust_path_split(path):
sep = "\\" if "\\" in path else "/"
splits = path.rsplit(sep, 1)
if len(splits) == 1:
parent = '.'
parent = "."
file = splits[0]
elif len(splits) == 2:
parent, file = splits
else:
raise('Unknown filepath split for path {}'.format(path))
raise ("Unknown filepath split for path {}".format(path))
filename, ext = os.path.splitext(file)
return parent, filename, ext

@@ -425,10 +425,7 @@ def merge_annotateddatasets(cfg, trainingsetfolder_full):
data = pd.read_hdf(file_path)
AnnotationData.append(data)
except FileNotFoundError:
print(
file_path,
" not found (perhaps not annotated)."
)
print(file_path, " not found (perhaps not annotated).")

if not len(AnnotationData):
print(
@@ -463,9 +460,7 @@ def merge_annotateddatasets(cfg, trainingsetfolder_full):


def SplitTrials(
trialindex,
trainFraction=0.8,
enforce_train_fraction=False,
trialindex, trainFraction=0.8, enforce_train_fraction=False,
):
""" Split a trial index into train and test sets. Also checks that the trainFraction is a two digit number between 0 an 1. The reason
is that the folders contain the trainfraction as int(100*trainFraction).
@@ -488,8 +483,8 @@ def SplitTrials(
train_fraction = round(trainFraction, 2)
train_size = index_len * train_fraction
shuffle = np.random.permutation(trialindex)
test_indices = shuffle[int(train_size):]
train_indices = shuffle[:int(train_size)]
test_indices = shuffle[int(train_size) :]
train_indices = shuffle[: int(train_size)]
if enforce_train_fraction and not train_size.is_integer():
train_indices, test_indices = pad_train_test_indices(
train_indices, test_indices, train_fraction,
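The shuffle[int(train_size) :] spacing introduced in the hunk above is black's slice rule (following PEP 8): when a slice bound is a complex expression such as a function call, the colon is treated like an operator and given surrounding whitespace, with the space dropped where a bound is omitted. A minimal sketch, again assuming default black settings:

# Sketch only, not part of the commit; mirrors the SplitTrials hunk above.
import black

print(black.format_str("test_indices = shuffle[int(train_size):]\n", mode=black.FileMode()))
# -> test_indices = shuffle[int(train_size) :]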
@@ -510,8 +505,7 @@ def pad_train_test_indices(train_inds, test_inds, train_fraction):
min_n_train = int(round(min_length_req * train_fraction))
min_n_test = min_length_req - min_n_train
mult = max(
math.ceil(n_train_inds / min_n_train),
math.ceil(n_test_inds / min_n_test),
math.ceil(n_train_inds / min_n_train), math.ceil(n_test_inds / min_n_test),
)
n_train = mult * min_n_train
n_test = mult * min_n_test
@@ -576,8 +570,7 @@ def mergeandsplit(config, trainindex=0, uniform=True):
Data = pd.read_hdf(fn + ".h5")
except FileNotFoundError:
Data = merge_annotateddatasets(
cfg,
Path(os.path.join(project_path, trainingsetfolder)),
cfg, Path(os.path.join(project_path, trainingsetfolder)),
)
if Data is None:
return [], []
@@ -766,13 +759,13 @@ def create_training_dataset(
auxiliaryfunctions.edit_config(config, {"default_augmenter": "imgaug"})
augmenter_type = "imgaug"
elif augmenter_type not in [
"default",
"scalecrop",
"imgaug",
"tensorpack",
"deterministic",
]:
raise ValueError("Invalid augmenter type:", augmenter_type)
"default",
"scalecrop",
"imgaug",
"tensorpack",
"deterministic",
]:
raise ValueError("Invalid augmenter type:", augmenter_type)

# Loading the encoder (if necessary downloading from TF)
dlcparent_path = auxiliaryfunctions.get_deeplabcut_path()
4 changes: 1 addition & 3 deletions deeplabcut/pose_estimation_3d/camera_calibration.py
@@ -233,9 +233,7 @@ def calibrate_cameras(config, cbrow=8, cbcol=6, calibrate=False, alpha=0.4):
)

# Stereo Rectification
rectify_scale = (
alpha
) # Free scaling parameter check this https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#fisheye-stereorectify
rectify_scale = alpha # Free scaling parameter check this https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#fisheye-stereorectify
R1, R2, P1, P2, Q, roi1, roi2 = cv2.stereoRectify(
cameraMatrix1,
distCoeffs1,
4 changes: 1 addition & 3 deletions deeplabcut/pose_estimation_3d/triangulation.py
@@ -203,9 +203,7 @@ def triangulate(
)
stereo_file = auxiliaryfunctions.read_pickle(path_stereo_file)
cam_pair = str(cam_names[0] + "-" + cam_names[1])
if_video_analyzed = (
False
) # variable to keep track if the video was already analyzed
if_video_analyzed = False # variable to keep track if the video was already analyzed
# Check for the camera matrix
for k in metadata_["stereo_matrix"].keys():
if np.all(
(The remaining changed files in this commit are not shown here.)
