diff --git a/README.md b/README.md
index 8d311d32..45b7f607 100644
--- a/README.md
+++ b/README.md
@@ -57,11 +57,15 @@
 cd
 git clone https://github.com/danbider/lightning-pose.git
 ```
-Then move into the newly-created repository folder, and install dependencies:
+Then move into the newly-created repository folder:
 
 ```console
 cd lightning-pose
-pip install -e .
 ```
+
+and install dependencies using whichever of the lines below best suits your needs:
+* `pip install -e .`: basic installation, covers most use cases (note the period!)
+* `pip install -e .[dev]`: basic install + dev tools
+* `pip install -e .[extra_models]`: basic install + tools for loading ResNet-50 SimCLR weights
+* `pip install -e .[dev,extra_models]`: install all available requirements
 
 This installation might take between 3-10 minutes, depending on your machine and internet connection.
diff --git a/docs/contributing.md b/docs/contributing.md
index 5ea71bb7..c19df551 100644
--- a/docs/contributing.md
+++ b/docs/contributing.md
@@ -6,9 +6,18 @@
 If you have found a bug or would like to request a minor change, please
 
 In order to contribute code to the repo, please follow the steps below.
 
-You will also need to install the following dev packages:
+When you first install the lightning-pose repo, instead of
 ```bash
-pip install flake8 isort
+pip install -e .
+```
+run
+```bash
+pip install -e .[dev]
+```
+
+Alternatively, if you have already installed the repo, install the following dev packages:
+```bash
+pip install black flake8 isort
 ```
 
 ### Create a pull request
diff --git a/lightning_pose/models/__init__.py b/lightning_pose/models/__init__.py
index 6a76d86f..9e130c3a 100644
--- a/lightning_pose/models/__init__.py
+++ b/lightning_pose/models/__init__.py
@@ -6,7 +6,7 @@
     "resnet50",
     "resnet101",
     "resnet152",
-    "resnet50_contrastive",
+    "resnet50_contrastive",  # needs extra install: pip install -e .[extra_models]
     "resnet50_animal_apose",
     "resnet50_animal_ap10k",
     "resnet50_human_jhmdb",
@@ -15,6 +15,6 @@
     "efficientnet_b0",
     "efficientnet_b1",
     "efficientnet_b2",
-    "vit_h_sam",
-    "vit_b_sam",
+    # "vit_h_sam",
+    # "vit_b_sam",
 ]
diff --git a/lightning_pose/models/backbones/torchvision.py b/lightning_pose/models/backbones/torchvision.py
index 6e3b0c3b..e40445e5 100644
--- a/lightning_pose/models/backbones/torchvision.py
+++ b/lightning_pose/models/backbones/torchvision.py
@@ -1,4 +1,5 @@
 from collections import OrderedDict
+from typing import Tuple
 
 import torch
 import torchvision.models as tvmodels
@@ -28,8 +29,14 @@ def build_backbone(
 
     if backbone_arch == "resnet50_contrastive":
         # load resnet50 pretrained using SimCLR on imagenet
-        from pl_bolts.models.self_supervised import SimCLR
-
+        try:
+            from pl_bolts.models.self_supervised import SimCLR
+        except ImportError:
+            raise ImportError(
+                "lightning-bolts package is not installed.\n"
+                "Run `pip install lightning-bolts` "
+                "to access the 'resnet50_contrastive' backbone"
+            )
         ckpt_url = "https://pl-bolts-weights.s3.us-east-2.amazonaws.com/simclr/bolts_simclr_imagenet/simclr_imagenet.ckpt"  # noqa: E501
         simclr = SimCLR.load_from_checkpoint(ckpt_url, strict=False)
         base = simclr.encoder
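The guarded import above surfaces a clear install hint at backbone-construction time rather than crashing when the package is imported. For reference, a minimal standalone sketch of the same optional-dependency pattern; `HAS_BOLTS` and `load_simclr_encoder` are illustrative names, not part of the repo:

```python
# Sketch of the optional-dependency guard used in build_backbone (illustrative names).
try:
    from pl_bolts.models.self_supervised import SimCLR  # provided by `lightning-bolts`
    HAS_BOLTS = True
except ImportError:
    HAS_BOLTS = False


def load_simclr_encoder(ckpt_url: str):
    """Hypothetical helper: load a SimCLR-pretrained ResNet-50 encoder."""
    if not HAS_BOLTS:
        raise ImportError(
            "lightning-bolts is not installed; run `pip install -e .[extra_models]` "
            "to use the 'resnet50_contrastive' backbone"
        )
    # strict=False mirrors the call in build_backbone above
    return SimCLR.load_from_checkpoint(ckpt_url, strict=False).encoder
```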
diff --git a/lightning_pose/models/heatmap_tracker.py b/lightning_pose/models/heatmap_tracker.py
index 1c7207b9..2d73b8c1 100644
--- a/lightning_pose/models/heatmap_tracker.py
+++ b/lightning_pose/models/heatmap_tracker.py
@@ -1,6 +1,6 @@
 """Models that produce heatmaps of keypoints from images."""
 
-from typing import Optional, Tuple, Union
+from typing import Dict, Optional, Tuple, Union
 
 import torch
 from kornia.filters import filter2d
@@ -9,6 +9,7 @@
 from omegaconf import DictConfig
 from torch import nn
 from torchtyping import TensorType
+from typeguard import typechecked
 from typing_extensions import Literal
 
 from lightning_pose.data.utils import (
@@ -349,6 +350,7 @@ def predict_step(
         return predicted_keypoints, confidence
 
 
+@typechecked
 class SemiSupervisedHeatmapTracker(SemiSupervisedTrackerMixin, HeatmapTracker):
     """Model produces heatmaps of keypoints from labeled/unlabeled images."""
 
@@ -411,7 +413,7 @@ def __init__(
         # self.register_buffer("total_unsupervised_importance", torch.tensor(1.0))
         self.total_unsupervised_importance = torch.tensor(1.0)
 
-    def get_loss_inputs_unlabeled(self, batch: UnlabeledBatchDict) -> dict:
+    def get_loss_inputs_unlabeled(self, batch: UnlabeledBatchDict) -> Dict:
         """Return predicted heatmaps and their softmaxes (estimated keypoints)."""
         # images -> heatmaps
         predicted_heatmaps = self.forward(batch["frames"])
diff --git a/lightning_pose/models/heatmap_tracker_mhcrnn.py b/lightning_pose/models/heatmap_tracker_mhcrnn.py
index 30e453d9..bf741983 100644
--- a/lightning_pose/models/heatmap_tracker_mhcrnn.py
+++ b/lightning_pose/models/heatmap_tracker_mhcrnn.py
@@ -1,6 +1,6 @@
 """Models that produce heatmaps of keypoints from images."""
 
-from typing import Optional, Tuple, Union
+from typing import Dict, Optional, Tuple, Union
 
 import torch
 from kornia.geometry.subpix import spatial_softmax2d
@@ -259,7 +259,7 @@ def __init__(
         # self.register_buffer("total_unsupervised_importance", torch.tensor(1.0))
         self.total_unsupervised_importance = torch.tensor(1.0)
 
-    def get_loss_inputs_unlabeled(self, batch: UnlabeledBatchDict) -> dict:
+    def get_loss_inputs_unlabeled(self, batch: UnlabeledBatchDict) -> Dict:
         """Return predicted heatmaps and their softmaxes (estimated keypoints)."""
         # images -> heatmaps
         pred_heatmaps_crnn, pred_heatmaps_sf = self.forward(batch["frames"])
diff --git a/lightning_pose/models/regression_tracker.py b/lightning_pose/models/regression_tracker.py
index 77f26579..1affd6c4 100644
--- a/lightning_pose/models/regression_tracker.py
+++ b/lightning_pose/models/regression_tracker.py
@@ -1,6 +1,6 @@
 """Models that produce (x, y) coordinates of keypoints from images."""
 
-from typing import Optional, Tuple, Union
+from typing import Dict, Optional, Tuple, Union
 
 import torch
 from omegaconf import DictConfig
@@ -204,7 +204,7 @@ def __init__(
         self.total_unsupervised_importance = torch.tensor(1.0)
         # self.register_buffer("total_unsupervised_importance", torch.tensor(1.0))
 
-    def get_loss_inputs_unlabeled(self, batch: UnlabeledBatchDict) -> dict:
+    def get_loss_inputs_unlabeled(self, batch: UnlabeledBatchDict) -> Dict:
         """Return predicted heatmaps and their softmaxes (estimated keypoints)."""
         predicted_keypoints = self.forward(batch["frames"])
         # undo augmentation if needed
diff --git a/lightning_pose/utils/io.py b/lightning_pose/utils/io.py
index 00c63573..cada7faa 100644
--- a/lightning_pose/utils/io.py
+++ b/lightning_pose/utils/io.py
@@ -190,7 +190,7 @@ def get_videos_in_dir(video_dir: str, return_mp4_only: bool = True) -> List[str]
 
 
 @typechecked
-def check_video_paths(video_paths: Union[List[str], str]) -> list:
+def check_video_paths(video_paths: Union[List[str], str]) -> List[str]:
     # get input data
     if isinstance(video_paths, list):
         # presumably a list of files
@@ -203,8 +203,7 @@
         filenames = get_videos_in_dir(video_paths)
     else:
         raise ValueError(
-            "`video_paths_list` must be a list of files, a single file, "
-            + "or a directory name"
+            "`video_paths_list` must be a list of files, a single file, or a directory name"
         )
     for filename in filenames:
         assert filename.endswith(".mp4"), "video files must be mp4 format!"
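The tightened annotation on `check_video_paths` tells callers they always get a `List[str]` back, whatever form the input takes. A usage sketch, with file paths invented for illustration:

```python
from lightning_pose.utils.io import check_video_paths

# all three accepted input forms normalize to a list of .mp4 paths
files = check_video_paths(["/data/a.mp4", "/data/b.mp4"])  # list -> passed through
files = check_video_paths("/data/videos/session0.mp4")     # single file -> one-element list
files = check_video_paths("/data/videos")                  # directory -> all .mp4s inside
```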
diff --git a/lightning_pose/utils/predictions.py b/lightning_pose/utils/predictions.py
index 6a04bfa6..233fb3b3 100644
--- a/lightning_pose/utils/predictions.py
+++ b/lightning_pose/utils/predictions.py
@@ -4,6 +4,7 @@
 import time
 from typing import List, Optional, Tuple, Union
 
+import cv2
 import lightning.pytorch as pl
 import matplotlib.pyplot as plt
 import numpy as np
@@ -11,7 +12,6 @@
 import torch
 from omegaconf import DictConfig, OmegaConf
 from pytorch_lightning import LightningModule
-from skimage.draw import disk
 from torchtyping import TensorType
 from tqdm import tqdm
 from typeguard import typechecked
@@ -690,7 +690,7 @@ def create_labeled_video(
     xs_arr,
     ys_arr,
     mask_array=None,
-    dotsize=5,
+    dotsize=4,
     colormap="cool",
     fps=None,
     filename="movie.mp4",
@@ -719,14 +719,10 @@ def create_labeled_video(
     colors = make_cmap(n_keypoints, cmap=colormap)
 
     nx, ny = clip.size
-    duration = int(clip.duration - clip.start)
+    dur = int(clip.duration - clip.start)
     fps_og = clip.fps
-    print(
-        "Duration of video [s]: {}, recorded with {} fps!".format(
-            np.round(duration, 2), np.round(fps_og, 2)
-        )
-    )
+    print(f"Duration of video [s]: {np.round(dur, 2)}, recorded at {np.round(fps_og, 2)} fps!")
 
     # add marker to each frame t, where t is in sec
     def add_marker(get_frame, t):
@@ -742,8 +738,13 @@ def add_marker(get_frame, t):
            if mask_array[index, bpindex]:
                xc = min(int(xs_arr[index, bpindex]), nx - 1)
                yc = min(int(ys_arr[index, bpindex]), ny - 1)
-                rr, cc = disk(center=(yc, xc), radius=dotsize, shape=(ny, nx))
-                frame[rr, cc, :] = colors[bpindex]
+                frame = cv2.circle(
+                    frame,
+                    center=(xc, yc),
+                    radius=dotsize,
+                    color=colors[bpindex].tolist(),
+                    thickness=-1,
+                )
        return frame
 
    clip_marked = clip.fl(add_marker)
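Swapping `skimage.draw.disk` for `cv2.circle` draws the same filled dot while dropping the scikit-image dependency. Two details worth noting: OpenCV takes the center as `(x, y)` whereas `disk` took `(row, col)`, i.e. `(y, x)`, and the `.tolist()` call converts the numpy color row into the plain Python sequence OpenCV accepts. A self-contained sketch of the new call:

```python
import cv2
import numpy as np

# blank frame in the same (height, width, 3) uint8 layout moviepy provides
frame = np.zeros((480, 640, 3), dtype=np.uint8)
xc, yc, dotsize = 320, 240, 4

# thickness=-1 draws a filled circle; center is (x, y) in pixel coordinates
frame = cv2.circle(frame, center=(xc, yc), radius=dotsize, color=(255, 0, 255), thickness=-1)
assert frame[yc, xc].tolist() == [255, 0, 255]  # note the (row, col) indexing here
```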
"scikit-image==0.20.0", - "scikit-learn==1.2.2", - "streamlit==1.22.0", - "torchtyping==0.1.4", - "torchvision==0.15.2", - "typeguard==3.0.2", - "typing==3.7.4.3", - "botocore==1.27.59", ] +# additional requirements +extras_require = { + "dev": { + "black", + "flake8", + "isort", + }, + "extra_models": { + "lightning-bolts", # resnet-50 trained on imagenet using simclr + }, +} + setup( name="lightning-pose", packages=find_packages(), version=VERSION, - description="Semi-supervised pose estimation using Lightning.", + description="Semi-supervised pose estimation using pytorch lightning", long_description=long_description, long_description_content_type="text/markdown", author="Dan Biderman and Matt Whiteway", - install_requires=install_requires, # load_requirements(PATH_ROOT), + install_requires=install_requires, + extras_require=extras_require, author_email="danbider@gmail.com", url="https://github.com/danbider/lightning-pose", keywords=["machine learning", "deep learning", "computer_vision"],