From d966f152d4f3f1639d73730da116d0193f950a85 Mon Sep 17 00:00:00 2001 From: ci-bot Date: Sat, 27 Jul 2024 16:14:50 +0000 Subject: [PATCH] Deployed ac9c00b to dev with MkDocs 1.5.3 and mike 2.0.0 --- dev/index.html | 2 +- dev/objects.inv | Bin 1355 -> 1300 bytes dev/reference/distances/index.html | 1395 +-------- dev/reference/index.html | 4699 +++++++++++++++++++++++++++- dev/search/search_index.json | 2 +- dev/sitemap.xml.gz | Bin 127 -> 127 bytes 6 files changed, 4652 insertions(+), 1446 deletions(-) diff --git a/dev/index.html b/dev/index.html index ac58415d..b8a09b0d 100644 --- a/dev/index.html +++ b/dev/index.html @@ -965,7 +965,7 @@

Adding tracking to different detectors:
  • YOLOv5: tracking object centroids or bounding boxes.
  • YOLOv4: tracking object centroids.
  • Detectron2: tracking object centroids.
  • - AlphaPose: tracking human keypoints (pose estimation) and inserting Norfair into a complex existing pipeline using.
  • + AlphaPose: tracking human keypoints (pose estimation) and inserting Norfair into a complex existing pipeline.
  • OpenPose: tracking human keypoints.
  • YOLOPv2: tracking with a model for traffic object detection, drivable road area segmentation, and lane line detection.
  • YOLO-NAS: tracking object centroids or bounding boxes.
  • diff --git a/dev/objects.inv b/dev/objects.inv index 58451a94c20ef4dc97955d30088f53fcba9355a1..bc8cf42a6905a56e6734576852a894f926f3e371 100644 GIT binary patch delta 1194 (binary data omitted) delta 1249 (binary data omitted)
  • - - - Distance - - - - - -
  • - -
  • - - - ScalarDistance - - - - - -
  • - -
  • - - - VectorizedDistance - - - - - -
  • - -
  • - - - ScipyDistance - - -
  • @@ -944,1237 +782,6 @@

    Distances - - - -

    - Distance - - -#

    - - -
    -

    - Bases: ABC

    - - -

    Abstract class representing a distance.

    -

    Subclasses must implement the method get_distances

    - -
    - Source code in norfair/distances.py, lines 14-42
    class Distance(ABC):
    -    """
    -    Abstract class representing a distance.
    -
    -    Subclasses must implement the method `get_distances`
    -    """
    -
    -    @abstractmethod
    -    def get_distances(
    -        self,
    -        objects: Sequence["TrackedObject"],
    -        candidates: Optional[Union[List["Detection"], List["TrackedObject"]]],
    -    ) -> np.ndarray:
    -        """
    -        Method that calculates the distances between new candidates and objects.
    -
    -        Parameters
    -        ----------
    -        objects : Sequence[TrackedObject]
    -            Sequence of [TrackedObject][norfair.tracker.TrackedObject] to be compared with potential [Detection][norfair.tracker.Detection] or [TrackedObject][norfair.tracker.TrackedObject]
    -            candidates.
    -        candidates : Union[List[Detection], List[TrackedObject]], optional
    -            List of candidates ([Detection][norfair.tracker.Detection] or [TrackedObject][norfair.tracker.TrackedObject]) to be compared to [TrackedObject][norfair.tracker.TrackedObject].
    -
    -        Returns
    -        -------
    -        np.ndarray
    -            A matrix containing the distances between objects and candidates.
    -        """
    -
    -
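To make the contract above concrete, here is a minimal sketch (not part of norfair; the name ZeroDistance is ours) of a subclass that treats every candidate/object pair as a perfect match. It only illustrates the expected return shape: rows are candidates, columns are objects.
>>> import numpy as np
>>> from norfair.distances import Distance
>>> class ZeroDistance(Distance):
>>>     def get_distances(self, objects, candidates):
>>>         # rows are candidates, columns are objects, filled with a zero distance
>>>         return np.zeros((len(candidates), len(objects)), dtype=np.float32)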
    - - - -
    - - - - - - - - - - -
    - - - -

    - get_distances(objects, candidates) - - - abstractmethod - - -#

    - - -
    - -

    Method that calculates the distances between new candidates and objects.

    - - - -

    Parameters:

    - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionDefault
    objects - Sequence[TrackedObject] - -
    -

    Sequence of TrackedObject to be compared with potential Detection or TrackedObject -candidates.

    -
    -
    - required -
    candidates - Union[List[Detection], List[TrackedObject]] - -
    -

    List of candidates (Detection or TrackedObject) to be compared to TrackedObject.

    -
    -
    - required -
    - - - -

    Returns:

    - - - - - - - - - - - - - -
    TypeDescription
    - ndarray - -
    -

    A matrix containing the distances between objects and candidates.

    -
    -
    - -
    - Source code in norfair/distances.py, lines 21-42
    @abstractmethod
    -def get_distances(
    -    self,
    -    objects: Sequence["TrackedObject"],
    -    candidates: Optional[Union[List["Detection"], List["TrackedObject"]]],
    -) -> np.ndarray:
    -    """
    -    Method that calculates the distances between new candidates and objects.
    -
    -    Parameters
    -    ----------
    -    objects : Sequence[TrackedObject]
    -        Sequence of [TrackedObject][norfair.tracker.TrackedObject] to be compared with potential [Detection][norfair.tracker.Detection] or [TrackedObject][norfair.tracker.TrackedObject]
    -        candidates.
    -    candidates : Union[List[Detection], List[TrackedObject]], optional
    -        List of candidates ([Detection][norfair.tracker.Detection] or [TrackedObject][norfair.tracker.TrackedObject]) to be compared to [TrackedObject][norfair.tracker.TrackedObject].
    -
    -    Returns
    -    -------
    -    np.ndarray
    -        A matrix containing the distances between objects and candidates.
    -    """
    -
    -
    -
    - -
    - - - -
    - -
    - - - - -
    - - - -

    - ScalarDistance - - -#

    - - -
    -

    - Bases: Distance

    - - -

    ScalarDistance class represents a distance that is calculated pointwise.

    - - - -

    Parameters:

    - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionDefault
    distance_function - Union[Callable[[Detection, TrackedObject], float], Callable[[TrackedObject, TrackedObject], float]] - -
    -

    Distance function used to determine the pointwise distance between new candidates and objects. -This function should take 2 input arguments, the first being a Union[Detection, TrackedObject], -and the second TrackedObject. It has to return a float with the distance it calculates.

    -
    -
    - required -
    - -
    - Source code in norfair/distances.py, lines 45-102
    class ScalarDistance(Distance):
    -    """
    -    ScalarDistance class represents a distance that is calculated pointwise.
    -
    -    Parameters
    -    ----------
    -    distance_function : Union[Callable[["Detection", "TrackedObject"], float], Callable[["TrackedObject", "TrackedObject"], float]]
    -        Distance function used to determine the pointwise distance between new candidates and objects.
    -        This function should take 2 input arguments, the first being a `Union[Detection, TrackedObject]`,
    -        and the second [TrackedObject][norfair.tracker.TrackedObject]. It has to return a `float` with the distance it calculates.
    -    """
    -
    -    def __init__(
    -        self,
    -        distance_function: Union[
    -            Callable[["Detection", "TrackedObject"], float],
    -            Callable[["TrackedObject", "TrackedObject"], float],
    -        ],
    -    ):
    -        self.distance_function = distance_function
    -
    -    def get_distances(
    -        self,
    -        objects: Sequence["TrackedObject"],
    -        candidates: Optional[Union[List["Detection"], List["TrackedObject"]]],
    -    ) -> np.ndarray:
    -        """
    -        Method that calculates the distances between new candidates and objects.
    -
    -        Parameters
    -        ----------
    -        objects : Sequence[TrackedObject]
    -            Sequence of [TrackedObject][norfair.tracker.TrackedObject] to be compared with potential [Detection][norfair.tracker.Detection] or [TrackedObject][norfair.tracker.TrackedObject]
    -            candidates.
    -        candidates : Union[List[Detection], List[TrackedObject]], optional
    -            List of candidates ([Detection][norfair.tracker.Detection] or [TrackedObject][norfair.tracker.TrackedObject]) to be compared to [TrackedObject][norfair.tracker.TrackedObject].
    -
    -        Returns
    -        -------
    -        np.ndarray
    -            A matrix containing the distances between objects and candidates.
    -        """
    -        distance_matrix = np.full(
    -            (len(candidates), len(objects)),
    -            fill_value=np.inf,
    -            dtype=np.float32,
    -        )
    -        if not objects or not candidates:
    -            return distance_matrix
    -        for c, candidate in enumerate(candidates):
    -            for o, obj in enumerate(objects):
    -                if candidate.label != obj.label:
    -                    if (candidate.label is None) or (obj.label is None):
    -                        print("\nThere are detections with and without label!")
    -                    continue
    -                distance = self.distance_function(candidate, obj)
    -                distance_matrix[c, o] = distance
    -        return distance_matrix
    -
    -
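For example, a pointwise centroid distance can be wrapped as follows. This is only a sketch: centroid_distance is a name we made up, and it assumes the candidates are Detections (with .points) compared against TrackedObjects (with .estimate).
>>> import numpy as np
>>> from norfair.distances import ScalarDistance
>>> def centroid_distance(detection, tracked_object):
>>>     # distance between the mean point of the detection and of the object's estimate
>>>     return np.linalg.norm(detection.points.mean(axis=0) - tracked_object.estimate.mean(axis=0))
>>> distance = ScalarDistance(centroid_distance)
>>> # distance.get_distances(objects, candidates) returns a (n_candidates, n_objects) matrix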
    - - - -
    - - - - - - - - - - -
    - - - -

    - get_distances(objects, candidates) - -#

    - - -
    - -

    Method that calculates the distances between new candidates and objects.

    - - - -

    Parameters:

    - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionDefault
    objects - Sequence[TrackedObject] - -
    -

    Sequence of TrackedObject to be compared with potential Detection or TrackedObject -candidates.

    -
    -
    - required -
    candidates - Union[List[Detection], List[TrackedObject]] - -
    -

    List of candidates (Detection or TrackedObject) to be compared to TrackedObject.

    -
    -
    - required -
    - - - -

    Returns:

    - - - - - - - - - - - - - -
    TypeDescription
    - ndarray - -
    -

    A matrix containing the distances between objects and candidates.

    -
    -
    - -
    - Source code in norfair/distances.py, lines 66-102
    def get_distances(
    -    self,
    -    objects: Sequence["TrackedObject"],
    -    candidates: Optional[Union[List["Detection"], List["TrackedObject"]]],
    -) -> np.ndarray:
    -    """
    -    Method that calculates the distances between new candidates and objects.
    -
    -    Parameters
    -    ----------
    -    objects : Sequence[TrackedObject]
    -        Sequence of [TrackedObject][norfair.tracker.TrackedObject] to be compared with potential [Detection][norfair.tracker.Detection] or [TrackedObject][norfair.tracker.TrackedObject]
    -        candidates.
    -    candidates : Union[List[Detection], List[TrackedObject]], optional
    -        List of candidates ([Detection][norfair.tracker.Detection] or [TrackedObject][norfair.tracker.TrackedObject]) to be compared to [TrackedObject][norfair.tracker.TrackedObject].
    -
    -    Returns
    -    -------
    -    np.ndarray
    -        A matrix containing the distances between objects and candidates.
    -    """
    -    distance_matrix = np.full(
    -        (len(candidates), len(objects)),
    -        fill_value=np.inf,
    -        dtype=np.float32,
    -    )
    -    if not objects or not candidates:
    -        return distance_matrix
    -    for c, candidate in enumerate(candidates):
    -        for o, obj in enumerate(objects):
    -            if candidate.label != obj.label:
    -                if (candidate.label is None) or (obj.label is None):
    -                    print("\nThere are detections with and without label!")
    -                continue
    -            distance = self.distance_function(candidate, obj)
    -            distance_matrix[c, o] = distance
    -    return distance_matrix
    -
    -
    -
    - -
    - - - -
    - -
    - - -
    - -
    - - - -

    - VectorizedDistance - - -#

    - - -
    -

    - Bases: Distance

    - - -

    VectorizedDistance class represents a distance that is calculated in a vectorized way. This means that instead of going through every pair and explicitly calculating its distance, VectorizedDistance compares entire vectors to each other in a single operation.

    - - - -

    Parameters:

    - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionDefault
    distance_function - Callable[[ndarray, ndarray], ndarray] - -
    -

    Distance function used to determine the distances between new candidates and objects. -This function should take 2 input arguments, the first being a np.ndarray and the second -np.ndarray. It has to return a np.ndarray with the distance matrix it calculates.

    -
    -
    - required -
    - -
    - Source code in norfair/distances.py, lines 105-207
    class VectorizedDistance(Distance):
    -    """
    -    VectorizedDistance class represents a distance that is calculated in a vectorized way. This means
    -    that instead of going through every pair and explicitly calculating its distance, VectorizedDistance
    -    uses the entire vectors to compare to each other in a single operation.
    -
    -    Parameters
    -    ----------
    -    distance_function : Callable[[np.ndarray, np.ndarray], np.ndarray]
    -        Distance function used to determine the distances between new candidates and objects.
    -        This function should take 2 input arguments, the first being a `np.ndarray` and the second
    -        `np.ndarray`. It has to return a `np.ndarray` with the distance matrix it calculates.
    -    """
    -
    -    def __init__(
    -        self,
    -        distance_function: Callable[[np.ndarray, np.ndarray], np.ndarray],
    -    ):
    -        self.distance_function = distance_function
    -
    -    def get_distances(
    -        self,
    -        objects: Sequence["TrackedObject"],
    -        candidates: Optional[Union[List["Detection"], List["TrackedObject"]]],
    -    ) -> np.ndarray:
    -        """
    -        Method that calculates the distances between new candidates and objects.
    -
    -        Parameters
    -        ----------
    -        objects : Sequence[TrackedObject]
    -            Sequence of [TrackedObject][norfair.tracker.TrackedObject] to be compared with potential [Detection][norfair.tracker.Detection] or [TrackedObject][norfair.tracker.TrackedObject]
    -            candidates.
    -        candidates : Union[List[Detection], List[TrackedObject]], optional
    -            List of candidates ([Detection][norfair.tracker.Detection] or [TrackedObject][norfair.tracker.TrackedObject]) to be compared to [TrackedObject][norfair.tracker.TrackedObject].
    -
    -        Returns
    -        -------
    -        np.ndarray
    -            A matrix containing the distances between objects and candidates.
    -        """
    -        distance_matrix = np.full(
    -            (len(candidates), len(objects)),
    -            fill_value=np.inf,
    -            dtype=np.float32,
    -        )
    -        if not objects or not candidates:
    -            return distance_matrix
    -
    -        object_labels = np.array([o.label for o in objects]).astype(str)
    -        candidate_labels = np.array([c.label for c in candidates]).astype(str)
    -
    -        # iterate over labels that are present both in objects and detections
    -        for label in np.intersect1d(
    -            np.unique(object_labels), np.unique(candidate_labels)
    -        ):
    -            # generate masks of the subset of object and detections for this label
    -            obj_mask = object_labels == label
    -            cand_mask = candidate_labels == label
    -
    -            stacked_objects = []
    -            for o in objects:
    -                if str(o.label) == label:
    -                    stacked_objects.append(o.estimate.ravel())
    -            stacked_objects = np.stack(stacked_objects)
    -
    -            stacked_candidates = []
    -            for c in candidates:
    -                if str(c.label) == label:
    -                    if "Detection" in str(type(c)):
    -                        stacked_candidates.append(c.points.ravel())
    -                    else:
    -                        stacked_candidates.append(c.estimate.ravel())
    -            stacked_candidates = np.stack(stacked_candidates)
    -
    -            # calculate the pairwise distances between objects and candidates with this label
    -            # and assign the result to the correct positions inside distance_matrix
    -            distance_matrix[np.ix_(cand_mask, obj_mask)] = self._compute_distance(
    -                stacked_candidates, stacked_objects
    -            )
    -
    -        return distance_matrix
    -
    -    def _compute_distance(
    -        self, stacked_candidates: np.ndarray, stacked_objects: np.ndarray
    -    ) -> np.ndarray:
    -        """
    -        Method that computes the pairwise distances between new candidates and objects.
    -        It is intended to use the entire vectors to compare to each other in a single operation.
    -
    -        Parameters
    -        ----------
    -        stacked_candidates : np.ndarray
    -            np.ndarray containing a stack of candidates to be compared with the stacked_objects.
    -        stacked_objects : np.ndarray
    -            np.ndarray containing a stack of objects to be compared with the stacked_objects.
    -
    -        Returns
    -        -------
    -        np.ndarray
    -            A matrix containing the distances between objects and candidates.
    -        """
    -        return self.distance_function(stacked_candidates, stacked_objects)
    -
    -
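As a rough sketch of such a vectorized callable (euclidean_vectorized is our name, not part of norfair): it receives the stacked, flattened candidate points and object points and returns the whole pairwise matrix in one broadcasted NumPy operation.
>>> import numpy as np
>>> from norfair.distances import VectorizedDistance
>>> def euclidean_vectorized(stacked_candidates, stacked_objects):
>>>     # stacked_candidates: (n_candidates, n_values), stacked_objects: (n_objects, n_values)
>>>     diff = stacked_candidates[:, None, :] - stacked_objects[None, :, :]
>>>     return np.linalg.norm(diff, axis=-1)  # shape (n_candidates, n_objects)
>>> distance = VectorizedDistance(euclidean_vectorized)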
    - - - -
    - - - - - - - - - - -
    - - - -

    - get_distances(objects, candidates) - -#

    - - -
    - -

    Method that calculates the distances between new candidates and objects.

    - - - -

    Parameters:

    - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionDefault
    objects - Sequence[TrackedObject] - -
    -

    Sequence of TrackedObject to be compared with potential Detection or TrackedObject -candidates.

    -
    -
    - required -
    candidates - Union[List[Detection], List[TrackedObject]] - -
    -

    List of candidates (Detection or TrackedObject) to be compared to TrackedObject.

    -
    -
    - required -
    - - - -

    Returns:

    - - - - - - - - - - - - - -
    TypeDescription
    - ndarray - -
    -

    A matrix containing the distances between objects and candidates.

    -
    -
    - -
    - Source code in norfair/distances.py, lines 125-186
    def get_distances(
    -    self,
    -    objects: Sequence["TrackedObject"],
    -    candidates: Optional[Union[List["Detection"], List["TrackedObject"]]],
    -) -> np.ndarray:
    -    """
    -    Method that calculates the distances between new candidates and objects.
    -
    -    Parameters
    -    ----------
    -    objects : Sequence[TrackedObject]
    -        Sequence of [TrackedObject][norfair.tracker.TrackedObject] to be compared with potential [Detection][norfair.tracker.Detection] or [TrackedObject][norfair.tracker.TrackedObject]
    -        candidates.
    -    candidates : Union[List[Detection], List[TrackedObject]], optional
    -        List of candidates ([Detection][norfair.tracker.Detection] or [TrackedObject][norfair.tracker.TrackedObject]) to be compared to [TrackedObject][norfair.tracker.TrackedObject].
    -
    -    Returns
    -    -------
    -    np.ndarray
    -        A matrix containing the distances between objects and candidates.
    -    """
    -    distance_matrix = np.full(
    -        (len(candidates), len(objects)),
    -        fill_value=np.inf,
    -        dtype=np.float32,
    -    )
    -    if not objects or not candidates:
    -        return distance_matrix
    -
    -    object_labels = np.array([o.label for o in objects]).astype(str)
    -    candidate_labels = np.array([c.label for c in candidates]).astype(str)
    -
    -    # iterate over labels that are present both in objects and detections
    -    for label in np.intersect1d(
    -        np.unique(object_labels), np.unique(candidate_labels)
    -    ):
    -        # generate masks of the subset of object and detections for this label
    -        obj_mask = object_labels == label
    -        cand_mask = candidate_labels == label
    -
    -        stacked_objects = []
    -        for o in objects:
    -            if str(o.label) == label:
    -                stacked_objects.append(o.estimate.ravel())
    -        stacked_objects = np.stack(stacked_objects)
    -
    -        stacked_candidates = []
    -        for c in candidates:
    -            if str(c.label) == label:
    -                if "Detection" in str(type(c)):
    -                    stacked_candidates.append(c.points.ravel())
    -                else:
    -                    stacked_candidates.append(c.estimate.ravel())
    -        stacked_candidates = np.stack(stacked_candidates)
    -
    -        # calculate the pairwise distances between objects and candidates with this label
    -        # and assign the result to the correct positions inside distance_matrix
    -        distance_matrix[np.ix_(cand_mask, obj_mask)] = self._compute_distance(
    -            stacked_candidates, stacked_objects
    -        )
    -
    -    return distance_matrix
    -
    -
    -
    - -
    - - - -
    - -
    - - -
    - -
    - - - -

    - ScipyDistance - - -#

    - - -
    -

    - Bases: VectorizedDistance

    - - -

    ScipyDistance class extends VectorizedDistance for the use of Scipy's vectorized distances.

    -

    This class uses scipy.spatial.distance.cdist to calculate distances between two np.ndarray.

    - - - -

    Parameters:

    - - - - - - - - - - - - - - - - - - - - - - - -
    NameTypeDescriptionDefault
    metric - str - -
    -

    Defines the specific Scipy metric to use to calculate the pairwise distances between -new candidates and objects.

    -
    -
    - 'euclidean' -
    Other keyword arguments are passed through to cdist.
    - -
    - See Also -

    scipy.spatial.distance.cdist

    -
    -
    - Source code in norfair/distances.py, lines 210-231
    class ScipyDistance(VectorizedDistance):
    -    """
    -    ScipyDistance class extends VectorizedDistance for the use of Scipy's vectorized distances.
    -
    -    This class uses `scipy.spatial.distance.cdist` to calculate distances between two `np.ndarray`.
    -
    -    Parameters
    -    ----------
    -    metric : str, optional
    -        Defines the specific Scipy metric to use to calculate the pairwise distances between
    -        new candidates and objects.
    -
    -    Other kwargs are passed through to cdist
    -
    -    See Also
    -    --------
    -    [`scipy.spatial.distance.cdist`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html)
    -    """
    -
    -    def __init__(self, metric: str = "euclidean", **kwargs):
    -        self.metric = metric
    -        super().__init__(distance_function=partial(cdist, metric=self.metric, **kwargs))
    -
    -
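Usage is a one-liner; any metric (and extra keyword arguments) accepted by scipy.spatial.distance.cdist can be passed. A small usage sketch, not an excerpt from the library:
>>> from norfair.distances import ScipyDistance
>>> euclidean = ScipyDistance()  # defaults to the "euclidean" metric
>>> cosine = ScipyDistance(metric="cosine")
>>> cityblock = ScipyDistance(metric="cityblock")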
    - - - -
    - - - - - - - - - - - -
    - -
    - - -
    -
    @@ -2866,7 +1473,7 @@

    - Distance + Distance
    diff --git a/dev/reference/index.html b/dev/reference/index.html index 9a90aa76..21f140b1 100644 --- a/dev/reference/index.html +++ b/dev/reference/index.html @@ -418,6 +418,204 @@ +

  • + +
  • + + + Color + + + +
  • + +
  • + + + Palette + + + + + +
  • + +
  • + + + Drawable + + + +
  • + +
  • + + + FixedCamera + + + + + +
  • + +
  • + + + AbsolutePaths + + + +
  • + +
  • + + + Paths + + + + + +
  • + +
  • + + + frobenius + + + +
  • + +
  • + + + mean_euclidean + + + +
  • + +
  • + + + mean_manhattan + + + +
  • + +
  • + + + iou + + + +
  • + +
  • + + + get_distance_by_name + + + +
  • + +
  • + + + create_keypoints_voting_distance + + + +
  • + +
  • + + + create_normalized_mean_euclidean_distance + + + +
  • + +
  • + + + draw_absolute_grid + + + +
  • + +
  • + + + draw_tracked_boxes + + + +
  • + +
  • + + + draw_tracked_objects + + +
  • @@ -618,59 +816,4460 @@ - +

    Reference#

    + + +
    + + + + +
    + +

    A customizable lightweight Python library for real-time multi-object tracking.

    + + + +

    Examples:

    +
    >>> from norfair import Detection, Tracker, Video, draw_tracked_objects
    +>>> detector = MyDetector()  # Set up a detector
    +>>> video = Video(input_path="video.mp4")
    +>>> tracker = Tracker(distance_function="euclidean", distance_threshold=50)
    +>>> for frame in video:
    +>>>    detections = detector(frame)
    +>>>    norfair_detections = [Detection(points) for points in detections]
    +>>>    tracked_objects = tracker.update(detections=norfair_detections)
    +>>>    draw_tracked_objects(frame, tracked_objects)
    +>>>    video.write(frame)
    +
    + + + +
    + + + + + + + + +
    + + + +

    + Color + + +#

    + + +
    + + +

    Contains predefined colors.

    +

    Colors are defined as a tuple of integers between 0 and 255, expressing the values in BGR. This is the format OpenCV uses.

    + +
    + Source code in norfair/drawing/color.py, lines 44-233
    class Color:
    +    """
    +    Contains predefined colors.
    +
    +    Colors are defined as a Tuple of integers between 0 and 255 expressing the values in BGR
    +    This is the format opencv uses.
    +    """
    +
    +    # from PIL.ImageColors.colormap
    +    aliceblue = hex_to_bgr("#f0f8ff")
    +    antiquewhite = hex_to_bgr("#faebd7")
    +    aqua = hex_to_bgr("#00ffff")
    +    aquamarine = hex_to_bgr("#7fffd4")
    +    azure = hex_to_bgr("#f0ffff")
    +    beige = hex_to_bgr("#f5f5dc")
    +    bisque = hex_to_bgr("#ffe4c4")
    +    black = hex_to_bgr("#000000")
    +    blanchedalmond = hex_to_bgr("#ffebcd")
    +    blue = hex_to_bgr("#0000ff")
    +    blueviolet = hex_to_bgr("#8a2be2")
    +    brown = hex_to_bgr("#a52a2a")
    +    burlywood = hex_to_bgr("#deb887")
    +    cadetblue = hex_to_bgr("#5f9ea0")
    +    chartreuse = hex_to_bgr("#7fff00")
    +    chocolate = hex_to_bgr("#d2691e")
    +    coral = hex_to_bgr("#ff7f50")
    +    cornflowerblue = hex_to_bgr("#6495ed")
    +    cornsilk = hex_to_bgr("#fff8dc")
    +    crimson = hex_to_bgr("#dc143c")
    +    cyan = hex_to_bgr("#00ffff")
    +    darkblue = hex_to_bgr("#00008b")
    +    darkcyan = hex_to_bgr("#008b8b")
    +    darkgoldenrod = hex_to_bgr("#b8860b")
    +    darkgray = hex_to_bgr("#a9a9a9")
    +    darkgrey = hex_to_bgr("#a9a9a9")
    +    darkgreen = hex_to_bgr("#006400")
    +    darkkhaki = hex_to_bgr("#bdb76b")
    +    darkmagenta = hex_to_bgr("#8b008b")
    +    darkolivegreen = hex_to_bgr("#556b2f")
    +    darkorange = hex_to_bgr("#ff8c00")
    +    darkorchid = hex_to_bgr("#9932cc")
    +    darkred = hex_to_bgr("#8b0000")
    +    darksalmon = hex_to_bgr("#e9967a")
    +    darkseagreen = hex_to_bgr("#8fbc8f")
    +    darkslateblue = hex_to_bgr("#483d8b")
    +    darkslategray = hex_to_bgr("#2f4f4f")
    +    darkslategrey = hex_to_bgr("#2f4f4f")
    +    darkturquoise = hex_to_bgr("#00ced1")
    +    darkviolet = hex_to_bgr("#9400d3")
    +    deeppink = hex_to_bgr("#ff1493")
    +    deepskyblue = hex_to_bgr("#00bfff")
    +    dimgray = hex_to_bgr("#696969")
    +    dimgrey = hex_to_bgr("#696969")
    +    dodgerblue = hex_to_bgr("#1e90ff")
    +    firebrick = hex_to_bgr("#b22222")
    +    floralwhite = hex_to_bgr("#fffaf0")
    +    forestgreen = hex_to_bgr("#228b22")
    +    fuchsia = hex_to_bgr("#ff00ff")
    +    gainsboro = hex_to_bgr("#dcdcdc")
    +    ghostwhite = hex_to_bgr("#f8f8ff")
    +    gold = hex_to_bgr("#ffd700")
    +    goldenrod = hex_to_bgr("#daa520")
    +    gray = hex_to_bgr("#808080")
    +    grey = hex_to_bgr("#808080")
    +    green = (0, 128, 0)
    +    greenyellow = hex_to_bgr("#adff2f")
    +    honeydew = hex_to_bgr("#f0fff0")
    +    hotpink = hex_to_bgr("#ff69b4")
    +    indianred = hex_to_bgr("#cd5c5c")
    +    indigo = hex_to_bgr("#4b0082")
    +    ivory = hex_to_bgr("#fffff0")
    +    khaki = hex_to_bgr("#f0e68c")
    +    lavender = hex_to_bgr("#e6e6fa")
    +    lavenderblush = hex_to_bgr("#fff0f5")
    +    lawngreen = hex_to_bgr("#7cfc00")
    +    lemonchiffon = hex_to_bgr("#fffacd")
    +    lightblue = hex_to_bgr("#add8e6")
    +    lightcoral = hex_to_bgr("#f08080")
    +    lightcyan = hex_to_bgr("#e0ffff")
    +    lightgoldenrodyellow = hex_to_bgr("#fafad2")
    +    lightgreen = hex_to_bgr("#90ee90")
    +    lightgray = hex_to_bgr("#d3d3d3")
    +    lightgrey = hex_to_bgr("#d3d3d3")
    +    lightpink = hex_to_bgr("#ffb6c1")
    +    lightsalmon = hex_to_bgr("#ffa07a")
    +    lightseagreen = hex_to_bgr("#20b2aa")
    +    lightskyblue = hex_to_bgr("#87cefa")
    +    lightslategray = hex_to_bgr("#778899")
    +    lightslategrey = hex_to_bgr("#778899")
    +    lightsteelblue = hex_to_bgr("#b0c4de")
    +    lightyellow = hex_to_bgr("#ffffe0")
    +    lime = hex_to_bgr("#00ff00")
    +    limegreen = hex_to_bgr("#32cd32")
    +    linen = hex_to_bgr("#faf0e6")
    +    magenta = hex_to_bgr("#ff00ff")
    +    maroon = hex_to_bgr("#800000")
    +    mediumaquamarine = hex_to_bgr("#66cdaa")
    +    mediumblue = hex_to_bgr("#0000cd")
    +    mediumorchid = hex_to_bgr("#ba55d3")
    +    mediumpurple = hex_to_bgr("#9370db")
    +    mediumseagreen = hex_to_bgr("#3cb371")
    +    mediumslateblue = hex_to_bgr("#7b68ee")
    +    mediumspringgreen = hex_to_bgr("#00fa9a")
    +    mediumturquoise = hex_to_bgr("#48d1cc")
    +    mediumvioletred = hex_to_bgr("#c71585")
    +    midnightblue = hex_to_bgr("#191970")
    +    mintcream = hex_to_bgr("#f5fffa")
    +    mistyrose = hex_to_bgr("#ffe4e1")
    +    moccasin = hex_to_bgr("#ffe4b5")
    +    navajowhite = hex_to_bgr("#ffdead")
    +    navy = hex_to_bgr("#000080")
    +    oldlace = hex_to_bgr("#fdf5e6")
    +    olive = hex_to_bgr("#808000")
    +    olivedrab = hex_to_bgr("#6b8e23")
    +    orange = hex_to_bgr("#ffa500")
    +    orangered = hex_to_bgr("#ff4500")
    +    orchid = hex_to_bgr("#da70d6")
    +    palegoldenrod = hex_to_bgr("#eee8aa")
    +    palegreen = hex_to_bgr("#98fb98")
    +    paleturquoise = hex_to_bgr("#afeeee")
    +    palevioletred = hex_to_bgr("#db7093")
    +    papayawhip = hex_to_bgr("#ffefd5")
    +    peachpuff = hex_to_bgr("#ffdab9")
    +    peru = hex_to_bgr("#cd853f")
    +    pink = hex_to_bgr("#ffc0cb")
    +    plum = hex_to_bgr("#dda0dd")
    +    powderblue = hex_to_bgr("#b0e0e6")
    +    purple = hex_to_bgr("#800080")
    +    rebeccapurple = hex_to_bgr("#663399")
    +    red = hex_to_bgr("#ff0000")
    +    rosybrown = hex_to_bgr("#bc8f8f")
    +    royalblue = hex_to_bgr("#4169e1")
    +    saddlebrown = hex_to_bgr("#8b4513")
    +    salmon = hex_to_bgr("#fa8072")
    +    sandybrown = hex_to_bgr("#f4a460")
    +    seagreen = hex_to_bgr("#2e8b57")
    +    seashell = hex_to_bgr("#fff5ee")
    +    sienna = hex_to_bgr("#a0522d")
    +    silver = hex_to_bgr("#c0c0c0")
    +    skyblue = hex_to_bgr("#87ceeb")
    +    slateblue = hex_to_bgr("#6a5acd")
    +    slategray = hex_to_bgr("#708090")
    +    slategrey = hex_to_bgr("#708090")
    +    snow = hex_to_bgr("#fffafa")
    +    springgreen = hex_to_bgr("#00ff7f")
    +    steelblue = hex_to_bgr("#4682b4")
    +    tan = hex_to_bgr("#d2b48c")
    +    teal = hex_to_bgr("#008080")
    +    thistle = hex_to_bgr("#d8bfd8")
    +    tomato = hex_to_bgr("#ff6347")
    +    turquoise = hex_to_bgr("#40e0d0")
    +    violet = hex_to_bgr("#ee82ee")
    +    wheat = hex_to_bgr("#f5deb3")
    +    white = hex_to_bgr("#ffffff")
    +    whitesmoke = hex_to_bgr("#f5f5f5")
    +    yellow = hex_to_bgr("#ffff00")
    +    yellowgreen = hex_to_bgr("#9acd32")
    +
    +    # seaborn tab20 colors
    +    tab1 = hex_to_bgr("#1f77b4")
    +    tab2 = hex_to_bgr("#aec7e8")
    +    tab3 = hex_to_bgr("#ff7f0e")
    +    tab4 = hex_to_bgr("#ffbb78")
    +    tab5 = hex_to_bgr("#2ca02c")
    +    tab6 = hex_to_bgr("#98df8a")
    +    tab7 = hex_to_bgr("#d62728")
    +    tab8 = hex_to_bgr("#ff9896")
    +    tab9 = hex_to_bgr("#9467bd")
    +    tab10 = hex_to_bgr("#c5b0d5")
    +    tab11 = hex_to_bgr("#8c564b")
    +    tab12 = hex_to_bgr("#c49c94")
    +    tab13 = hex_to_bgr("#e377c2")
    +    tab14 = hex_to_bgr("#f7b6d2")
    +    tab15 = hex_to_bgr("#7f7f7f")
    +    tab16 = hex_to_bgr("#c7c7c7")
    +    tab17 = hex_to_bgr("#bcbd22")
    +    tab18 = hex_to_bgr("#dbdb8d")
    +    tab19 = hex_to_bgr("#17becf")
    +    tab20 = hex_to_bgr("#9edae5")
    +    # seaborn colorblind
    +    cb1 = hex_to_bgr("#0173b2")
    +    cb2 = hex_to_bgr("#de8f05")
    +    cb3 = hex_to_bgr("#029e73")
    +    cb4 = hex_to_bgr("#d55e00")
    +    cb5 = hex_to_bgr("#cc78bc")
    +    cb6 = hex_to_bgr("#ca9161")
    +    cb7 = hex_to_bgr("#fbafe4")
    +    cb8 = hex_to_bgr("#949494")
    +    cb9 = hex_to_bgr("#ece133")
    +    cb10 = hex_to_bgr("#56b4e9")
    +
    +
    + + + +
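A brief usage sketch: since each attribute is a plain BGR tuple, it can be inspected directly or passed to OpenCV drawing calls as-is.
>>> from norfair import Color
>>> Color.red    # (0, 0, 255) in BGR
>>> Color.blue   # (255, 0, 0) in BGR
>>> Color.green  # (0, 128, 0) in BGR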
    + + + + + + + + + + + +
    + +
    + + +
    + +
    + + + +

    + Palette + + +#

    + + +
    + + +

    Class to control the color palette for drawing.

    + + + +

    Examples:

    +

    Change palette:

    +
    >>> from norfair import Palette
    +>>> Palette.set("colorblind")
    +>>> # or a custom palette
    +>>> from norfair import Color
    +>>> Palette.set([Color.red, Color.blue, "#ffeeff"])
    +
    + +
    + Source code in norfair/drawing/color.py, lines 312-371
    class Palette:
    +    """
+    Class to control the color palette for drawing.
    +
    +    Examples
    +    --------
    +    Change palette:
    +    >>> from norfair import Palette
    +    >>> Palette.set("colorblind")
    +    >>> # or a custom palette
    +    >>> from norfair import Color
    +    >>> Palette.set([Color.red, Color.blue, "#ffeeff"])
    +    """
    +
    +    _colors = PALETTES["tab10"]
    +    _default_color = Color.black
    +
    +    @classmethod
    +    def set(cls, palette: Union[str, Iterable[ColorLike]]):
    +        """
    +        Selects a color palette.
    +
    +        Parameters
    +        ----------
    +        palette : Union[str, Iterable[ColorLike]]
    +            can be either
    +            - the name of one of the predefined palettes `tab10`, `tab20`, or `colorblind`
    +            - a list of ColorLike objects that can be parsed by [`parse_color`][norfair.drawing.color.parse_color]
    +        """
    +        if isinstance(palette, str):
    +            try:
    +                cls._colors = PALETTES[palette]
    +            except KeyError as e:
    +                raise ValueError(
    +                    f"Invalid palette name '{palette}', valid values are {PALETTES.keys()}"
    +                ) from e
    +        else:
    +            colors = []
    +            for c in palette:
    +                colors.append(parse_color(c))
    +
    +            cls._colors = colors
    +
    +    @classmethod
    +    def set_default_color(cls, color: ColorLike):
    +        """
    +        Selects the default color of `choose_color` when hashable is None.
    +
    +        Parameters
    +        ----------
    +        color : ColorLike
    +            The new default color.
    +        """
    +        cls._default_color = parse_color(color)
    +
    +    @classmethod
    +    def choose_color(cls, hashable: Hashable) -> ColorType:
    +        if hashable is None:
    +            return cls._default_color
    +        return cls._colors[abs(hash(hashable)) % len(cls._colors)]
    +
    +
    + + + +
    + + + + + + + + + + +
    + + + +

    + set(palette) + + + classmethod + + +#

    + + +
    + +

    Selects a color palette.

    + + + +

    Parameters:

    + + + + + + + + + + + + + + + + + +
    NameTypeDescriptionDefault
    palette + Union[str, Iterable[ColorLike]] + +
    +

    can be either +- the name of one of the predefined palettes tab10, tab20, or colorblind +- a list of ColorLike objects that can be parsed by parse_color

    +
    +
    + required +
    + +
    + Source code in norfair/drawing/color.py, lines 329-353
    @classmethod
    +def set(cls, palette: Union[str, Iterable[ColorLike]]):
    +    """
    +    Selects a color palette.
    +
    +    Parameters
    +    ----------
    +    palette : Union[str, Iterable[ColorLike]]
    +        can be either
    +        - the name of one of the predefined palettes `tab10`, `tab20`, or `colorblind`
    +        - a list of ColorLike objects that can be parsed by [`parse_color`][norfair.drawing.color.parse_color]
    +    """
    +    if isinstance(palette, str):
    +        try:
    +            cls._colors = PALETTES[palette]
    +        except KeyError as e:
    +            raise ValueError(
    +                f"Invalid palette name '{palette}', valid values are {PALETTES.keys()}"
    +            ) from e
    +    else:
    +        colors = []
    +        for c in palette:
    +            colors.append(parse_color(c))
    +
    +        cls._colors = colors
    +
    +
    +
    + +
    + + +
    + + + +

    + set_default_color(color) + + + classmethod + + +#

    + + +
    + +

    Selects the default color of choose_color when hashable is None.

    + + + +

    Parameters:

    + + + + + + + + + + + + + + + + + +
    NameTypeDescriptionDefault
    color + ColorLike + +
    +

    The new default color.

    +
    +
    + required +
    + +
    + Source code in norfair/drawing/color.py, lines 355-365
    @classmethod
    +def set_default_color(cls, color: ColorLike):
    +    """
    +    Selects the default color of `choose_color` when hashable is None.
    +
    +    Parameters
    +    ----------
    +    color : ColorLike
    +        The new default color.
    +    """
    +    cls._default_color = parse_color(color)
    +
    +
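For example (a usage sketch): after changing the default, Palette.choose_color returns the new color whenever it is called with None.
>>> from norfair import Palette, Color
>>> Palette.set_default_color(Color.white)
>>> Palette.choose_color(None)  # now returns Color.white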
    +
    + +
    + + + +
    + +
    + + +
    + +
    + + + +

    + Drawable + + +#

    + + +
    + + +

    Class to standardize Drawable objects like Detections and TrackedObjects

    + + + +

    Parameters:

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    NameTypeDescriptionDefault
    obj + Union[Detection, TrackedObject] + +
    +

    A Detection or a TrackedObject that will be used to initialize the drawable. If this parameter is passed, all other arguments are ignored.

    +
    +
    + None +
    points + ndarray + +
    +

    Points included in the drawable, shape is (N_points, N_dimensions). Ignored if obj is passed

    +
    +
    + None +
    id + Any + +
    +

    Id of this object. Ignored if obj is passed

    +
    +
    + None +
    label + Any + +
    +

    Label specifying the class of the object. Ignored if obj is passed

    +
    +
    + None +
    scores + ndarray + +
    +

    Confidence scores of each point, shape is (N_points,). Ignored if obj is passed

    +
    +
    + None +
    live_points + ndarray + +
    +

    Boolean array indicating which points are alive, shape is (N_points,). Ignored if obj is passed

    +
    +
    + None +
    + + + +

    Raises:

    + + + + + + + + + + + + + +
    TypeDescription
    + ValueError + +
    +

    If obj is not an instance of the supported classes.

    +
    +
    + +
    + Source code in norfair/drawing/drawer.py, lines 301-363
    class Drawable:
    +    """
    +    Class to standardize Drawable objects like Detections and TrackedObjects
    +
    +    Parameters
    +    ----------
    +    obj : Union[Detection, TrackedObject], optional
    +        A [Detection][norfair.tracker.Detection] or a [TrackedObject][norfair.tracker.TrackedObject]
+        that will be used to initialize the drawable.
    +        If this parameter is passed, all other arguments are ignored
    +    points : np.ndarray, optional
    +        Points included in the drawable, shape is `(N_points, N_dimensions)`. Ignored if `obj` is passed
    +    id : Any, optional
    +        Id of this object. Ignored if `obj` is passed
    +    label : Any, optional
    +        Label specifying the class of the object. Ignored if `obj` is passed
    +    scores : np.ndarray, optional
    +        Confidence scores of each point, shape is `(N_points,)`. Ignored if `obj` is passed
    +    live_points : np.ndarray, optional
+        Boolean array indicating which points are alive, shape is `(N_points,)`. Ignored if `obj` is passed
    +
    +    Raises
    +    ------
    +    ValueError
    +        If obj is not an instance of the supported classes.
    +    """
    +
    +    def __init__(
    +        self,
    +        obj: Union[Detection, TrackedObject] = None,
    +        points: np.ndarray = None,
    +        id: Any = None,
    +        label: Any = None,
    +        scores: np.ndarray = None,
    +        live_points: np.ndarray = None,
    +    ) -> None:
    +        if isinstance(obj, Detection):
    +            self.points = obj.points
    +            self.id = None
    +            self.label = obj.label
    +            self.scores = obj.scores
    +            # TODO: alive points for detections could be the ones over the threshold
    +            # but that info is not available here
    +            self.live_points = np.ones(obj.points.shape[0]).astype(bool)
    +
    +        elif isinstance(obj, TrackedObject):
    +            self.points = obj.estimate
    +            self.id = obj.id
    +            self.label = obj.label
    +            # TODO: TrackedObject.scores could be an interesting thing to have
    +            # it could be the scores of the last detection or some kind of moving average
    +            self.scores = None
    +            self.live_points = obj.live_points
    +        elif obj is None:
    +            self.points = points
    +            self.id = id
    +            self.label = label
    +            self.scores = scores
    +            self.live_points = live_points
    +        else:
    +            raise ValueError(
+                f"Expecting a Detection or a TrackedObject but received {type(obj)}"
    +            )
    +
    +
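A hedged sketch of both construction paths (the variable names are ours): wrap an existing Detection or TrackedObject, or build a Drawable directly from raw arrays.
>>> import numpy as np
>>> from norfair.drawing.drawer import Drawable
>>> drawable = Drawable(obj=some_tracked_object)  # assuming some_tracked_object came from Tracker.update
>>> manual = Drawable(points=np.array([[10, 10], [20, 20]]), id=1, label="person", scores=np.array([0.9, 0.8]), live_points=np.array([True, True]))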
    + + + +
    + + + + + + + + + + + +
    + +
    + + +
    + +
    + + + +

    + FixedCamera + + +#

    + + +
    + + +

    Class used to stabilize video based on the camera motion.

    +

    Starts with a larger frame, where the original frame is drawn on top of a black background. +As the camera moves, the smaller frame moves in the opposite direction, stabilizing the objects in it.

    +

    Useful for debugging or demoing the camera motion. +Example GIF

    +
    +

    Warning

    +

    This only works with TranslationTransformation, +using HomographyTransformation will result in +unexpected behaviour.

    +
    +
    +

    Warning

    +

    If using other drawers, always apply this one last. Using other drawers on the scaled up frame will not work as expected.

    +
    +
    +

    Note

    +

    Sometimes the camera moves so far from the original point that the result won't fit in the scaled-up frame. +In this case, a warning will be logged and the frames will be cropped to avoid errors.

    +
    + + + +

    Parameters:

    + + + + + + + + + + + + + + + + + + + + + + + +
    NameTypeDescriptionDefault
    scale + float + +
    +

    The resulting video will have a resolution of scale * (H, W) where HxW is the resolution of the original video. +Use a bigger scale if the camera is moving too much.

    +
    +
    + 2 +
    attenuation + float + +
    +

    Controls how fast the older frames fade to black.

    +
    +
    + 0.05 +
    + + + +

    Examples:

    +
    >>> # setup
+>>> tracker = Tracker("frobenius", 100)
    +>>> motion_estimator = MotionEstimator()
    +>>> video = Video(input_path="video.mp4")
    +>>> fixed_camera = FixedCamera()
    +>>> # process video
    +>>> for frame in video:
    +>>>     coord_transformations = motion_estimator.update(frame)
    +>>>     detections = get_detections(frame)
    +>>>     tracked_objects = tracker.update(detections, coord_transformations)
    +>>>     draw_tracked_objects(frame, tracked_objects)  # fixed_camera should always be the last drawer
    +>>>     bigger_frame = fixed_camera.adjust_frame(frame, coord_transformations)
    +>>>     video.write(bigger_frame)
    +
    + +
    + Source code in norfair/drawing/fixed_camera.py, lines 7-141
    class FixedCamera:
    +    """
    +    Class used to stabilize video based on the camera motion.
    +
    +    Starts with a larger frame, where the original frame is drawn on top of a black background.
    +    As the camera moves, the smaller frame moves in the opposite direction, stabilizing the objects in it.
    +
    +    Useful for debugging or demoing the camera motion.
    +    ![Example GIF](../../videos/camera_stabilization.gif)
    +
    +    !!! Warning
    +        This only works with [`TranslationTransformation`][norfair.camera_motion.TranslationTransformation],
    +        using [`HomographyTransformation`][norfair.camera_motion.HomographyTransformation] will result in
    +        unexpected behaviour.
    +
    +    !!! Warning
    +        If using other drawers, always apply this one last. Using other drawers on the scaled up frame will not work as expected.
    +
    +    !!! Note
    +        Sometimes the camera moves so far from the original point that the result won't fit in the scaled-up frame.
    +        In this case, a warning will be logged and the frames will be cropped to avoid errors.
    +
    +    Parameters
    +    ----------
    +    scale : float, optional
    +        The resulting video will have a resolution of `scale * (H, W)` where HxW is the resolution of the original video.
    +        Use a bigger scale if the camera is moving too much.
    +    attenuation : float, optional
    +        Controls how fast the older frames fade to black.
    +
    +    Examples
    +    --------
    +    >>> # setup
+    >>> tracker = Tracker("frobenius", 100)
    +    >>> motion_estimator = MotionEstimator()
    +    >>> video = Video(input_path="video.mp4")
    +    >>> fixed_camera = FixedCamera()
    +    >>> # process video
    +    >>> for frame in video:
    +    >>>     coord_transformations = motion_estimator.update(frame)
    +    >>>     detections = get_detections(frame)
    +    >>>     tracked_objects = tracker.update(detections, coord_transformations)
    +    >>>     draw_tracked_objects(frame, tracked_objects)  # fixed_camera should always be the last drawer
    +    >>>     bigger_frame = fixed_camera.adjust_frame(frame, coord_transformations)
    +    >>>     video.write(bigger_frame)
    +    """
    +
    +    def __init__(self, scale: float = 2, attenuation: float = 0.05):
    +        self.scale = scale
    +        self._background = None
    +        self._attenuation_factor = 1 - attenuation
    +
    +    def adjust_frame(
    +        self, frame: np.ndarray, coord_transformation: TranslationTransformation
    +    ) -> np.ndarray:
    +        """
    +        Render scaled up frame.
    +
    +        Parameters
    +        ----------
    +        frame : np.ndarray
    +            The OpenCV frame.
    +        coord_transformation : TranslationTransformation
    +            The coordinate transformation as returned by the [`MotionEstimator`][norfair.camera_motion.MotionEstimator]
    +
    +        Returns
    +        -------
    +        np.ndarray
    +            The new bigger frame with the original frame drawn on it.
    +        """
    +
    +        # initialize background if necessary
    +        if self._background is None:
    +            original_size = (
    +                frame.shape[1],
    +                frame.shape[0],
    +            )  # OpenCV format is (width, height)
    +
    +            scaled_size = tuple(
    +                (np.array(original_size) * np.array(self.scale)).round().astype(int)
    +            )
    +            self._background = np.zeros(
    +                [scaled_size[1], scaled_size[0], frame.shape[-1]],
    +                frame.dtype,
    +            )
    +        else:
    +            self._background = (self._background * self._attenuation_factor).astype(
    +                frame.dtype
    +            )
    +
+        # top_left is the anchor coordinate from where we start drawing the frame on top of the background
    +        # aim to draw it in the center of the background but transformations will move this point
    +        top_left = (
    +            np.array(self._background.shape[:2]) // 2 - np.array(frame.shape[:2]) // 2
    +        )
    +        top_left = (
    +            coord_transformation.rel_to_abs(top_left[::-1]).round().astype(int)[::-1]
    +        )
    +        # box of the background that will be updated and the limits of it
    +        background_y0, background_y1 = (top_left[0], top_left[0] + frame.shape[0])
    +        background_x0, background_x1 = (top_left[1], top_left[1] + frame.shape[1])
    +        background_size_y, background_size_x = self._background.shape[:2]
    +
    +        # define box of the frame that will be used
    +        # if the scale is not enough to support the movement, warn the user but keep drawing
    +        # cropping the frame so that the operation doesn't fail
    +        frame_y0, frame_y1, frame_x0, frame_x1 = (0, frame.shape[0], 0, frame.shape[1])
    +        if (
    +            background_y0 < 0
    +            or background_x0 < 0
    +            or background_y1 > background_size_y
    +            or background_x1 > background_size_x
    +        ):
    +            warn_once(
    +                "moving_camera_scale is not enough to cover the range of camera movement, frame will be cropped"
    +            )
    +            # crop left or top of the frame if necessary
    +            frame_y0 = max(-background_y0, 0)
    +            frame_x0 = max(-background_x0, 0)
    +            # crop right or bottom of the frame if necessary
    +            frame_y1 = max(
    +                min(background_size_y - background_y0, background_y1 - background_y0), 0
    +            )
    +            frame_x1 = max(
    +                min(background_size_x - background_x0, background_x1 - background_x0), 0
    +            )
    +            # handle cases where the limits of the background become negative which numpy will interpret incorrectly
    +            background_y0 = max(background_y0, 0)
    +            background_x0 = max(background_x0, 0)
    +            background_y1 = max(background_y1, 0)
    +            background_x1 = max(background_x1, 0)
    +        self._background[
    +            background_y0:background_y1, background_x0:background_x1, :
    +        ] = frame[frame_y0:frame_y1, frame_x0:frame_x1, :]
    +        return self._background
    +
    +
    + + + +
    + + + + + + + + + + +
    + + + +

    + adjust_frame(frame, coord_transformation) + +#

    + + +
    + +

    Render scaled up frame.

    + + + +

    Parameters:

    + + + + + + + + + + + + + + + + + + + + + + + +
    NameTypeDescriptionDefault
    frame + ndarray + +
    +

    The OpenCV frame.

    +
    +
    + required +
    coord_transformation + TranslationTransformation + +
    +

    The coordinate transformation as returned by the MotionEstimator

    +
    +
    + required +
    + + + +

    Returns:

    + + + + + + + + + + + + + +
    TypeDescription
    + ndarray + +
    +

    The new bigger frame with the original frame drawn on it.

    +
    +
    + +
    + Source code in norfair/drawing/fixed_camera.py +
    def adjust_frame(
    +    self, frame: np.ndarray, coord_transformation: TranslationTransformation
    +) -> np.ndarray:
    +    """
    +    Render scaled up frame.
    +
    +    Parameters
    +    ----------
    +    frame : np.ndarray
    +        The OpenCV frame.
    +    coord_transformation : TranslationTransformation
    +        The coordinate transformation as returned by the [`MotionEstimator`][norfair.camera_motion.MotionEstimator]
    +
    +    Returns
    +    -------
    +    np.ndarray
    +        The new bigger frame with the original frame drawn on it.
    +    """
    +
    +    # initialize background if necessary
    +    if self._background is None:
    +        original_size = (
    +            frame.shape[1],
    +            frame.shape[0],
    +        )  # OpenCV format is (width, height)
    +
    +        scaled_size = tuple(
    +            (np.array(original_size) * np.array(self.scale)).round().astype(int)
    +        )
    +        self._background = np.zeros(
    +            [scaled_size[1], scaled_size[0], frame.shape[-1]],
    +            frame.dtype,
    +        )
    +    else:
    +        self._background = (self._background * self._attenuation_factor).astype(
    +            frame.dtype
    +        )
    +
+    # top_left is the anchor coordinate from where we start drawing the frame on top of the background
    +    # aim to draw it in the center of the background but transformations will move this point
    +    top_left = (
    +        np.array(self._background.shape[:2]) // 2 - np.array(frame.shape[:2]) // 2
    +    )
    +    top_left = (
    +        coord_transformation.rel_to_abs(top_left[::-1]).round().astype(int)[::-1]
    +    )
    +    # box of the background that will be updated and the limits of it
    +    background_y0, background_y1 = (top_left[0], top_left[0] + frame.shape[0])
    +    background_x0, background_x1 = (top_left[1], top_left[1] + frame.shape[1])
    +    background_size_y, background_size_x = self._background.shape[:2]
    +
    +    # define box of the frame that will be used
    +    # if the scale is not enough to support the movement, warn the user but keep drawing
    +    # cropping the frame so that the operation doesn't fail
    +    frame_y0, frame_y1, frame_x0, frame_x1 = (0, frame.shape[0], 0, frame.shape[1])
    +    if (
    +        background_y0 < 0
    +        or background_x0 < 0
    +        or background_y1 > background_size_y
    +        or background_x1 > background_size_x
    +    ):
    +        warn_once(
    +            "moving_camera_scale is not enough to cover the range of camera movement, frame will be cropped"
    +        )
    +        # crop left or top of the frame if necessary
    +        frame_y0 = max(-background_y0, 0)
    +        frame_x0 = max(-background_x0, 0)
    +        # crop right or bottom of the frame if necessary
    +        frame_y1 = max(
    +            min(background_size_y - background_y0, background_y1 - background_y0), 0
    +        )
    +        frame_x1 = max(
    +            min(background_size_x - background_x0, background_x1 - background_x0), 0
    +        )
    +        # handle cases where the limits of the background become negative which numpy will interpret incorrectly
    +        background_y0 = max(background_y0, 0)
    +        background_x0 = max(background_x0, 0)
    +        background_y1 = max(background_y1, 0)
    +        background_x1 = max(background_x1, 0)
    +    self._background[
    +        background_y0:background_y1, background_x0:background_x1, :
    +    ] = frame[frame_y0:frame_y1, frame_x0:frame_x1, :]
    +    return self._background
    +
    +
    +
    + +
    + + + +
    + +
    + + +
    + +
    + + + +

    + AbsolutePaths + + +#

    + + +
    + + +

    Class that draws the absolute paths taken by a set of points.

    +

    Works just like Paths but supports camera motion.

    +
    +

    Warning

    +

This drawer is not optimized so it can be extremely slow. Performance degrades linearly with max_history * number_of_tracked_objects.

    +
    + + + +

    Parameters:

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    NameTypeDescriptionDefault
    get_points_to_draw + Optional[Callable[[array], array]] + +
    +

Function that takes a list of points (the .estimate attribute of a TrackedObject) and returns a list of points for which we want to draw their paths.

    +

    By default it is the mean point of all the points in the tracker.

    +
    +
    + None +
    thickness + Optional[int] + +
    +

    Thickness of the circles representing the paths of interest.

    +
    +
    + None +
    color + Optional[Tuple[int, int, int]] + +
    +

    Color of the circles representing the paths of interest.

    +
    +
    + None +
    radius + Optional[int] + +
    +

    Radius of the circles representing the paths of interest.

    +
    +
    + None +
    max_history + int + +
    +

    Number of past points to include in the path. High values make the drawing slower

    +
    +
    + 20 +
    + + + +

    Examples:

    +
    >>> from norfair import Tracker, Video, Path
    +>>> video = Video("video.mp4")
    +>>> tracker = Tracker(...)
    +>>> path_drawer = Path()
    +>>> for frame in video:
    +>>>    detections = get_detections(frame)  # runs detector and returns Detections
    +>>>    tracked_objects = tracker.update(detections)
    +>>>    frame = path_drawer.draw(frame, tracked_objects)
    +>>>    video.write(frame)
    +
    + +
    + Source code in norfair/drawing/path.py +
    class AbsolutePaths:
    +    """
    +    Class that draws the absolute paths taken by a set of points.
    +
    +    Works just like [`Paths`][norfair.drawing.Paths] but supports camera motion.
    +
    +    !!! warning
+        This drawer is not optimized so it can be extremely slow. Performance degrades linearly with
    +        `max_history * number_of_tracked_objects`.
    +
    +    Parameters
    +    ----------
    +    get_points_to_draw : Optional[Callable[[np.array], np.array]], optional
    +        Function that takes a list of points (the `.estimate` attribute of a [`TrackedObject`][norfair.tracker.TrackedObject])
    +        and returns a list of points for which we want to draw their paths.
    +
    +        By default it is the mean point of all the points in the tracker.
    +    thickness : Optional[int], optional
    +        Thickness of the circles representing the paths of interest.
    +    color : Optional[Tuple[int, int, int]], optional
    +        [Color][norfair.drawing.Color] of the circles representing the paths of interest.
    +    radius : Optional[int], optional
    +        Radius of the circles representing the paths of interest.
    +    max_history : int, optional
    +        Number of past points to include in the path. High values make the drawing slower
    +
    +    Examples
    +    --------
    +    >>> from norfair import Tracker, Video, Path
    +    >>> video = Video("video.mp4")
    +    >>> tracker = Tracker(...)
    +    >>> path_drawer = Path()
    +    >>> for frame in video:
    +    >>>    detections = get_detections(frame)  # runs detector and returns Detections
    +    >>>    tracked_objects = tracker.update(detections)
    +    >>>    frame = path_drawer.draw(frame, tracked_objects)
    +    >>>    video.write(frame)
    +    """
    +
    +    def __init__(
    +        self,
    +        get_points_to_draw: Optional[Callable[[np.array], np.array]] = None,
    +        thickness: Optional[int] = None,
    +        color: Optional[Tuple[int, int, int]] = None,
    +        radius: Optional[int] = None,
    +        max_history=20,
    +    ):
    +
    +        if get_points_to_draw is None:
    +
    +            def get_points_to_draw(points):
    +                return [np.mean(np.array(points), axis=0)]
    +
    +        self.get_points_to_draw = get_points_to_draw
    +
    +        self.radius = radius
    +        self.thickness = thickness
    +        self.color = color
    +        self.past_points = defaultdict(lambda: [])
    +        self.max_history = max_history
    +        self.alphas = np.linspace(0.99, 0.01, max_history)
    +
    +    def draw(self, frame, tracked_objects, coord_transform=None):
    +        frame_scale = frame.shape[0] / 100
    +
    +        if self.radius is None:
    +            self.radius = int(max(frame_scale * 0.7, 1))
    +        if self.thickness is None:
    +            self.thickness = int(max(frame_scale / 7, 1))
    +        for obj in tracked_objects:
    +            if not obj.live_points.any():
    +                continue
    +
    +            if self.color is None:
    +                color = Palette.choose_color(obj.id)
    +            else:
    +                color = self.color
    +
    +            points_to_draw = self.get_points_to_draw(obj.get_estimate(absolute=True))
    +
    +            for point in coord_transform.abs_to_rel(points_to_draw):
    +                Drawer.circle(
    +                    frame,
    +                    position=tuple(point.astype(int)),
    +                    radius=self.radius,
    +                    color=color,
    +                    thickness=self.thickness,
    +                )
    +
    +            last = points_to_draw
    +            for i, past_points in enumerate(self.past_points[obj.id]):
    +                overlay = frame.copy()
    +                last = coord_transform.abs_to_rel(last)
    +                for j, point in enumerate(coord_transform.abs_to_rel(past_points)):
    +                    Drawer.line(
    +                        overlay,
    +                        tuple(last[j].astype(int)),
    +                        tuple(point.astype(int)),
    +                        color=color,
    +                        thickness=self.thickness,
    +                    )
    +                last = past_points
    +
    +                alpha = self.alphas[i]
    +                frame = Drawer.alpha_blend(overlay, frame, alpha=alpha)
    +            self.past_points[obj.id].insert(0, points_to_draw)
    +            self.past_points[obj.id] = self.past_points[obj.id][: self.max_history]
    +        return frame
    +
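The example above uses the plain Paths drawer; the following is a hedged sketch of how AbsolutePaths is typically wired together with a MotionEstimator, so that the tracker produces absolute estimates for the drawer to map back to frame coordinates. It assumes AbsolutePaths is importable from norfair.drawing and that get_detections wraps your detector, as in the other examples.

>>> from norfair import Tracker, Video
>>> from norfair.camera_motion import MotionEstimator
>>> from norfair.drawing import AbsolutePaths
>>> video = Video(input_path="video.mp4")
>>> tracker = Tracker("frobenius", 100)
>>> motion_estimator = MotionEstimator()
>>> path_drawer = AbsolutePaths(max_history=20)
>>> for frame in video:
>>>     detections = get_detections(frame)  # runs detector and returns Detections
>>>     coord_transformations = motion_estimator.update(frame)
>>>     tracked_objects = tracker.update(detections, coord_transformations=coord_transformations)
>>>     frame = path_drawer.draw(frame, tracked_objects, coord_transform=coord_transformations)
>>>     video.write(frame)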
    +
    + + + +
    + + + + + + + + + + + +
    + +
    + + +
    + +
    + + + +

    + Paths + + +#

    + + +
    + + +

    Class that draws the paths taken by a set of points of interest defined from the coordinates of each tracker estimation.

    + + + +

    Parameters:

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    NameTypeDescriptionDefault
    get_points_to_draw + Optional[Callable[[array], array]] + +
    +

Function that takes a list of points (the .estimate attribute of a TrackedObject) and returns a list of points for which we want to draw their paths.

    +

    By default it is the mean point of all the points in the tracker.

    +
    +
    + None +
    thickness + Optional[int] + +
    +

    Thickness of the circles representing the paths of interest.

    +
    +
    + None +
    color + Optional[Tuple[int, int, int]] + +
    +

    Color of the circles representing the paths of interest.

    +
    +
    + None +
    radius + Optional[int] + +
    +

    Radius of the circles representing the paths of interest.

    +
    +
    + None +
    attenuation + float + +
    +

A float number in [0, 1] that dictates the speed at which the path is erased. If it is 0, the path is never erased.

    +
    +
    + 0.01 +
    + + + +

    Examples:

    +
    >>> from norfair import Tracker, Video, Path
    +>>> video = Video("video.mp4")
    +>>> tracker = Tracker(...)
    +>>> path_drawer = Path()
    +>>> for frame in video:
    +>>>    detections = get_detections(frame)  # runs detector and returns Detections
    +>>>    tracked_objects = tracker.update(detections)
    +>>>    frame = path_drawer.draw(frame, tracked_objects)
    +>>>    video.write(frame)
    +
    + +
    + Source code in norfair/drawing/path.py +
    class Paths:
    +    """
    +    Class that draws the paths taken by a set of points of interest defined from the coordinates of each tracker estimation.
    +
    +    Parameters
    +    ----------
    +    get_points_to_draw : Optional[Callable[[np.array], np.array]], optional
    +        Function that takes a list of points (the `.estimate` attribute of a [`TrackedObject`][norfair.tracker.TrackedObject])
    +        and returns a list of points for which we want to draw their paths.
    +
    +        By default it is the mean point of all the points in the tracker.
    +    thickness : Optional[int], optional
    +        Thickness of the circles representing the paths of interest.
    +    color : Optional[Tuple[int, int, int]], optional
    +        [Color][norfair.drawing.Color] of the circles representing the paths of interest.
    +    radius : Optional[int], optional
    +        Radius of the circles representing the paths of interest.
    +    attenuation : float, optional
    +        A float number in [0, 1] that dictates the speed at which the path is erased.
+        If it is `0`, the path is never erased.
    +
    +    Examples
    +    --------
    +    >>> from norfair import Tracker, Video, Path
    +    >>> video = Video("video.mp4")
    +    >>> tracker = Tracker(...)
    +    >>> path_drawer = Path()
    +    >>> for frame in video:
    +    >>>    detections = get_detections(frame)  # runs detector and returns Detections
    +    >>>    tracked_objects = tracker.update(detections)
    +    >>>    frame = path_drawer.draw(frame, tracked_objects)
    +    >>>    video.write(frame)
    +    """
    +
    +    def __init__(
    +        self,
    +        get_points_to_draw: Optional[Callable[[np.array], np.array]] = None,
    +        thickness: Optional[int] = None,
    +        color: Optional[Tuple[int, int, int]] = None,
    +        radius: Optional[int] = None,
    +        attenuation: float = 0.01,
    +    ):
    +        if get_points_to_draw is None:
    +
    +            def get_points_to_draw(points):
    +                return [np.mean(np.array(points), axis=0)]
    +
    +        self.get_points_to_draw = get_points_to_draw
    +
    +        self.radius = radius
    +        self.thickness = thickness
    +        self.color = color
    +        self.mask = None
    +        self.attenuation_factor = 1 - attenuation
    +
    +    def draw(
    +        self, frame: np.ndarray, tracked_objects: Sequence[TrackedObject]
    +    ) -> np.array:
    +        """
+        Draw the paths of the points of interest on a frame.
    +
    +        !!! warning
    +            This method does **not** draw frames in place as other drawers do, the resulting frame is returned.
    +
    +        Parameters
    +        ----------
    +        frame : np.ndarray
    +            The OpenCV frame to draw on.
    +        tracked_objects : Sequence[TrackedObject]
    +            List of [`TrackedObject`][norfair.tracker.TrackedObject] to get the points of interest in order to update the paths.
    +
    +        Returns
    +        -------
    +        np.array
    +            The resulting frame.
    +        """
    +        if self.mask is None:
    +            frame_scale = frame.shape[0] / 100
    +
    +            if self.radius is None:
    +                self.radius = int(max(frame_scale * 0.7, 1))
    +            if self.thickness is None:
    +                self.thickness = int(max(frame_scale / 7, 1))
    +
    +            self.mask = np.zeros(frame.shape, np.uint8)
    +
    +        self.mask = (self.mask * self.attenuation_factor).astype("uint8")
    +
    +        for obj in tracked_objects:
    +            if obj.abs_to_rel is not None:
    +                warn_once(
+                    "It seems that you're using the Path drawer together with MotionEstimator. This is not fully supported and the results will not be what's expected"
    +                )
    +
    +            if self.color is None:
    +                color = Palette.choose_color(obj.id)
    +            else:
    +                color = self.color
    +
    +            points_to_draw = self.get_points_to_draw(obj.estimate)
    +
    +            for point in points_to_draw:
    +                self.mask = Drawer.circle(
    +                    self.mask,
    +                    position=tuple(point.astype(int)),
    +                    radius=self.radius,
    +                    color=color,
    +                    thickness=self.thickness,
    +                )
    +
    +        return Drawer.alpha_blend(self.mask, frame, alpha=1, beta=1)
    +
    +
    + + + +
    + + + + + + + + + + +
    + + + +

    + draw(frame, tracked_objects) + +#

    + + +
    + +

Draw the paths of the points of interest on a frame.

    +
    +

    Warning

    +

    This method does not draw frames in place as other drawers do, the resulting frame is returned.

    +
    + + + +

    Parameters:

    + + + + + + + + + + + + + + + + + + + + + + + +
    NameTypeDescriptionDefault
    frame + ndarray + +
    +

    The OpenCV frame to draw on.

    +
    +
    + required +
    tracked_objects + Sequence[TrackedObject] + +
    +

    List of TrackedObject to get the points of interest in order to update the paths.

    +
    +
    + required +
    + + + +

    Returns:

    + + + + + + + + + + + + + +
    TypeDescription
    + array + +
    +

    The resulting frame.

    +
    +
    + +
    + Source code in norfair/drawing/path.py +
    def draw(
    +    self, frame: np.ndarray, tracked_objects: Sequence[TrackedObject]
    +) -> np.array:
    +    """
+    Draw the paths of the points of interest on a frame.
    +
    +    !!! warning
    +        This method does **not** draw frames in place as other drawers do, the resulting frame is returned.
    +
    +    Parameters
    +    ----------
    +    frame : np.ndarray
    +        The OpenCV frame to draw on.
    +    tracked_objects : Sequence[TrackedObject]
    +        List of [`TrackedObject`][norfair.tracker.TrackedObject] to get the points of interest in order to update the paths.
    +
    +    Returns
    +    -------
    +    np.array
    +        The resulting frame.
    +    """
    +    if self.mask is None:
    +        frame_scale = frame.shape[0] / 100
    +
    +        if self.radius is None:
    +            self.radius = int(max(frame_scale * 0.7, 1))
    +        if self.thickness is None:
    +            self.thickness = int(max(frame_scale / 7, 1))
    +
    +        self.mask = np.zeros(frame.shape, np.uint8)
    +
    +    self.mask = (self.mask * self.attenuation_factor).astype("uint8")
    +
    +    for obj in tracked_objects:
    +        if obj.abs_to_rel is not None:
    +            warn_once(
+                "It seems that you're using the Path drawer together with MotionEstimator. This is not fully supported and the results will not be what's expected"
    +            )
    +
    +        if self.color is None:
    +            color = Palette.choose_color(obj.id)
    +        else:
    +            color = self.color
    +
    +        points_to_draw = self.get_points_to_draw(obj.estimate)
    +
    +        for point in points_to_draw:
    +            self.mask = Drawer.circle(
    +                self.mask,
    +                position=tuple(point.astype(int)),
    +                radius=self.radius,
    +                color=color,
    +                thickness=self.thickness,
    +            )
    +
    +    return Drawer.alpha_blend(self.mask, frame, alpha=1, beta=1)
    +
    +
    +
    + +
    + + + +
    + +
    + + +
    + + + +
    + + + +

    + frobenius(detection, tracked_object) + +#

    + + +
    + +

Frobenius norm on the difference of the points in detection and the estimates in tracked_object.

    +

    The Frobenius distance and norm are given by:

    +
    \[ +d_f(a, b) = ||a - b||_F +\]
    +
    \[ +||A||_F = [\sum_{i,j} abs(a_{i,j})^2]^{1/2} +\]
    + + + +

    Parameters:

    + + + + + + + + + + + + + + + + + + + + + + + +
    NameTypeDescriptionDefault
    detection + Detection + +
    +

    A detection.

    +
    +
    + required +
    tracked_object + TrackedObject + +
    +

    A tracked object.

    +
    +
    + required +
    + + + +

    Returns:

    + + + + + + + + + + + + + +
    TypeDescription
    + float + +
    +

    The distance.

    +
    +
    + +
    + See Also +

    np.linalg.norm

    +
    +
    + Source code in norfair/distances.py +
    def frobenius(detection: "Detection", tracked_object: "TrackedObject") -> float:
    +    """
+    Frobenius norm on the difference of the points in detection and the estimates in tracked_object.
    +
    +    The Frobenius distance and norm are given by:
    +
    +    $$
    +    d_f(a, b) = ||a - b||_F
    +    $$
    +
    +    $$
    +    ||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}
    +    $$
    +
    +    Parameters
    +    ----------
    +    detection : Detection
    +        A detection.
    +    tracked_object : TrackedObject
    +        A tracked object.
    +
    +    Returns
    +    -------
    +    float
    +        The distance.
    +
    +    See Also
    +    --------
    +    [`np.linalg.norm`](https://numpy.org/doc/stable/reference/generated/numpy.linalg.norm.html)
    +    """
    +    return np.linalg.norm(detection.points - tracked_object.estimate)
    +
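As a quick sanity check, the value frobenius returns can be reproduced with plain NumPy; the points below are made up for illustration.

>>> import numpy as np
>>> detection_points = np.array([[10.0, 10.0], [20.0, 20.0]])  # hypothetical detection points
>>> estimate = np.array([[13.0, 14.0], [20.0, 20.0]])  # hypothetical tracked estimate
>>> np.linalg.norm(detection_points - estimate)  # sqrt(3**2 + 4**2) = 5.0, same value frobenius() returns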
    +
    +
    + +
    + + +
    + + + +

    + mean_euclidean(detection, tracked_object) + +#

    + + +
    + +

    Average euclidean distance between the points in detection and estimates in tracked_object.

    +
    \[ +d(a, b) = \frac{\sum_{i=0}^N ||a_i - b_i||_2}{N} +\]
    + + + +

    Parameters:

    + + + + + + + + + + + + + + + + + + + + + + + +
    NameTypeDescriptionDefault
    detection + Detection + +
    +

    A detection.

    +
    +
    + required +
    tracked_object + TrackedObject + +
    +

    A tracked object

    +
    +
    + required +
    + + + +

    Returns:

    + + + + + + + + + + + + + +
    TypeDescription
    + float + +
    +

    The distance.

    +
    +
    + +
    + See Also +

    np.linalg.norm

    +
    +
    + Source code in norfair/distances.py +
    def mean_euclidean(detection: "Detection", tracked_object: "TrackedObject") -> float:
    +    """
    +    Average euclidean distance between the points in detection and estimates in tracked_object.
    +
    +    $$
    +    d(a, b) = \\frac{\\sum_{i=0}^N ||a_i - b_i||_2}{N}
    +    $$
    +
    +    Parameters
    +    ----------
    +    detection : Detection
    +        A detection.
    +    tracked_object : TrackedObject
    +        A tracked object
    +
    +    Returns
    +    -------
    +    float
    +        The distance.
    +
    +    See Also
    +    --------
    +    [`np.linalg.norm`](https://numpy.org/doc/stable/reference/generated/numpy.linalg.norm.html)
    +    """
    +    return np.linalg.norm(detection.points - tracked_object.estimate, axis=1).mean()
    +
    +
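A minimal NumPy sketch of the same computation, with made-up points:

>>> import numpy as np
>>> detection_points = np.array([[0.0, 0.0], [0.0, 0.0]])
>>> estimate = np.array([[3.0, 4.0], [6.0, 8.0]])
>>> np.linalg.norm(detection_points - estimate, axis=1).mean()  # (5 + 10) / 2 = 7.5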
    +
    + +
    + + +
    + + + +

    + mean_manhattan(detection, tracked_object) + +#

    + + +
    + +

    Average manhattan distance between the points in detection and the estimates in tracked_object

    +

    Given by:

    +
    \[ +d(a, b) = \frac{\sum_{i=0}^N ||a_i - b_i||_1}{N} +\]
    +

    Where \(||a||_1\) is the manhattan norm.

    + + + +

    Parameters:

    + + + + + + + + + + + + + + + + + + + + + + + +
    NameTypeDescriptionDefault
    detection + Detection + +
    +

    A detection.

    +
    +
    + required +
    tracked_object + TrackedObject + +
    +

    a tracked object.

    +
    +
    + required +
    + + + +

    Returns:

    + + + + + + + + + + + + + +
    TypeDescription
    + float + +
    +

    The distance.

    +
    +
    + +
    + See Also +

    np.linalg.norm

    +
    +
    + Source code in norfair/distances.py +
    def mean_manhattan(detection: "Detection", tracked_object: "TrackedObject") -> float:
    +    """
    +    Average manhattan distance between the points in detection and the estimates in tracked_object
    +
    +    Given by:
    +
    +    $$
    +    d(a, b) = \\frac{\\sum_{i=0}^N ||a_i - b_i||_1}{N}
    +    $$
    +
    +    Where $||a||_1$ is the manhattan norm.
    +
    +    Parameters
    +    ----------
    +    detection : Detection
    +        A detection.
    +    tracked_object : TrackedObject
    +        a tracked object.
    +
    +    Returns
    +    -------
    +    float
    +        The distance.
    +
    +    See Also
    +    --------
    +    [`np.linalg.norm`](https://numpy.org/doc/stable/reference/generated/numpy.linalg.norm.html)
    +    """
    +    return np.linalg.norm(
    +        detection.points - tracked_object.estimate, ord=1, axis=1
    +    ).mean()
    +
    +
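A minimal NumPy sketch with made-up points, mirroring the return expression above:

>>> import numpy as np
>>> detection_points = np.array([[0.0, 0.0], [0.0, 0.0]])
>>> estimate = np.array([[3.0, 4.0], [1.0, 2.0]])
>>> np.linalg.norm(detection_points - estimate, ord=1, axis=1).mean()  # (7 + 3) / 2 = 5.0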
    +
    + +
    + + +
    + + + +

    + iou(candidates, objects) + +#

    + + +
    + +

Calculate IoU between two sets of bounding boxes. Both sets of boxes are expected to be in [x_min, y_min, x_max, y_max] format.

    +

Normal IoU is 1 when the boxes are the same and 0 when they don't overlap; to transform that into a distance, we return 1 - iou.

    + + + +

    Parameters:

    + + + + + + + + + + + + + + + + + + + + + + + +
    NameTypeDescriptionDefault
    candidates + ndarray + +
    +

    (N, 4) numpy.ndarray containing candidates bounding boxes.

    +
    +
    + required +
    objects + ndarray + +
    +

    (K, 4) numpy.ndarray containing objects bounding boxes.

    +
    +
    + required +
    + + + +

    Returns:

    + + + + + + + + + + + + + +
    TypeDescription
    + ndarray + +
    +

    (N, K) numpy.ndarray of 1 - iou between candidates and objects.

    +
    +
    + +
    + Source code in norfair/distances.py +
    def iou(candidates: np.ndarray, objects: np.ndarray) -> np.ndarray:
    +    """
    +    Calculate IoU between two sets of bounding boxes. Both sets of boxes are expected
    +    to be in `[x_min, y_min, x_max, y_max]` format.
    +
+    Normal IoU is 1 when the boxes are the same and 0 when they don't overlap;
+    to transform that into a distance, we return `1 - iou`.
    +
    +    Parameters
    +    ----------
    +    candidates : numpy.ndarray
    +        (N, 4) numpy.ndarray containing candidates bounding boxes.
    +    objects : numpy.ndarray
    +        (K, 4) numpy.ndarray containing objects bounding boxes.
    +
    +    Returns
    +    -------
    +    numpy.ndarray
    +        (N, K) numpy.ndarray of `1 - iou` between candidates and objects.
    +    """
    +    _validate_bboxes(candidates)
    +
    +    area_candidates = _boxes_area(candidates.T)
    +    area_objects = _boxes_area(objects.T)
    +
    +    top_left = np.maximum(candidates[:, None, :2], objects[:, :2])
    +    bottom_right = np.minimum(candidates[:, None, 2:], objects[:, 2:])
    +
    +    area_intersection = np.prod(
    +        np.clip(bottom_right - top_left, a_min=0, a_max=None), 2
    +    )
    +    return 1 - area_intersection / (
    +        area_candidates[:, None] + area_objects - area_intersection
    +    )
    +
    +
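A rough usage sketch with made-up boxes; an identical box should yield a distance of 0 and a disjoint box a distance of 1:

>>> import numpy as np
>>> from norfair.distances import iou
>>> candidates = np.array([[0, 0, 10, 10]])  # one candidate box
>>> objects = np.array([[0, 0, 10, 10], [20, 20, 30, 30]])  # the same box and a disjoint box
>>> iou(candidates, objects)  # approximately [[0., 1.]]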
    +
    + +
    + + +
    + + + +

    + get_distance_by_name(name) + +#

    + + +
    + +

    Select a distance by name.

    + + + +

    Parameters:

    + + + + + + + + + + + + + + + + + +
    NameTypeDescriptionDefault
    name + str + +
    +

    A string defining the metric to get.

    +
    +
    + required +
    + + + +

    Returns:

    + + + + + + + + + + + + + +
    TypeDescription
    + Distance + +
    +

    The distance object.

    +
    +
    + +
    + Source code in norfair/distances.py +
    def get_distance_by_name(name: str) -> Distance:
    +    """
    +    Select a distance by name.
    +
    +    Parameters
    +    ----------
    +    name : str
    +        A string defining the metric to get.
    +
    +    Returns
    +    -------
    +    Distance
    +        The distance object.
    +    """
    +
    +    if name in _SCALAR_DISTANCE_FUNCTIONS:
    +        warning(
    +            "You are using a scalar distance function. If you want to speed up the"
    +            " tracking process please consider using a vectorized distance function"
    +            f" such as {AVAILABLE_VECTORIZED_DISTANCES}."
    +        )
    +        distance = _SCALAR_DISTANCE_FUNCTIONS[name]
    +        distance_function = ScalarDistance(distance)
    +    elif name in _SCIPY_DISTANCE_FUNCTIONS:
    +        distance_function = ScipyDistance(name)
    +    elif name in _VECTORIZED_DISTANCE_FUNCTIONS:
    +        if name == "iou_opt":
    +            warning("iou_opt is deprecated, use iou instead")
    +        distance = _VECTORIZED_DISTANCE_FUNCTIONS[name]
    +        distance_function = VectorizedDistance(distance)
    +    else:
    +        raise ValueError(
    +            f"Invalid distance '{name}', expecting one of"
    +            f" {list(_SCALAR_DISTANCE_FUNCTIONS.keys()) + AVAILABLE_VECTORIZED_DISTANCES}"
    +        )
    +
    +    return distance_function
    +
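As a hedged usage sketch, the lookup can be done explicitly, although in practice the same string is usually passed straight to the Tracker, which performs this lookup internally; the threshold value is illustrative only.

>>> from norfair import Tracker
>>> from norfair.distances import get_distance_by_name
>>> distance = get_distance_by_name("mean_euclidean")  # returns a Distance object (and warns that scalar functions are slower than vectorized ones)
>>> # equivalent in practice: let the Tracker resolve the name itself
>>> tracker = Tracker(distance_function="mean_euclidean", distance_threshold=50)  # 50 px is an arbitrary example threshold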
    +
    +
    + +
    + + +
    + + + +

    + create_keypoints_voting_distance(keypoint_distance_threshold, detection_threshold) + +#

    + + +
    + +

    Construct a keypoint voting distance function configured with the thresholds.

    +

Count how many points in a detection match with a tracked_object. A match is considered when the distance between the points is < keypoint_distance_threshold and the score of the last_detection of the tracked_object is > detection_threshold. Notice that if multiple points are tracked, the ith point in detection can only match the ith point in the tracked object.

    +

    Distance is 1 if no point matches and approximates 0 as more points are matched.

    + + + +

    Parameters:

    + + + + + + + + + + + + + + + + + + + + + + + +
    NameTypeDescriptionDefault
    keypoint_distance_threshold + float + +
    +

    Points closer than this threshold are considered a match.

    +
    +
    + required +
    detection_threshold + float + +
    +

    Detections and objects with score lower than this threshold are ignored.

    +
    +
    + required +
    + + + +

    Returns:

    + + + + + + + + + + + + + +
    TypeDescription
    + Callable + +
    +

The distance function that must be passed to the Tracker.

    +
    +
    + +
    + Source code in norfair/distances.py +
    def create_keypoints_voting_distance(
    +    keypoint_distance_threshold: float, detection_threshold: float
    +) -> Callable[["Detection", "TrackedObject"], float]:
    +    """
    +    Construct a keypoint voting distance function configured with the thresholds.
    +
+    Count how many points in a detection match with a tracked_object.
+    A match is considered when the distance between the points is < `keypoint_distance_threshold`
+    and the score of the last_detection of the tracked_object is > `detection_threshold`.
+    Notice that if multiple points are tracked, the ith point in detection can only match the ith
    +    point in the tracked object.
    +
    +    Distance is 1 if no point matches and approximates 0 as more points are matched.
    +
    +    Parameters
    +    ----------
    +    keypoint_distance_threshold: float
    +        Points closer than this threshold are considered a match.
    +    detection_threshold: float
    +        Detections and objects with score lower than this threshold are ignored.
    +
    +    Returns
    +    -------
    +    Callable
+        The distance function that must be passed to the Tracker.
    +    """
    +
    +    def keypoints_voting_distance(
    +        detection: "Detection", tracked_object: "TrackedObject"
    +    ) -> float:
    +        distances = np.linalg.norm(detection.points - tracked_object.estimate, axis=1)
    +        match_num = np.count_nonzero(
    +            (distances < keypoint_distance_threshold)
    +            * (detection.scores > detection_threshold)
    +            * (tracked_object.last_detection.scores > detection_threshold)
    +        )
    +        return 1 / (1 + match_num)
    +
    +    return keypoints_voting_distance
    +
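A minimal wiring sketch; both thresholds and the distance_threshold are illustrative values, and the detections are expected to carry per-keypoint scores.

>>> from norfair import Tracker
>>> from norfair.distances import create_keypoints_voting_distance
>>> distance_function = create_keypoints_voting_distance(
>>>     keypoint_distance_threshold=30, detection_threshold=0.15
>>> )
>>> # the distance is 1 when no keypoint matches, so any threshold below 1 demands at least one match
>>> tracker = Tracker(distance_function=distance_function, distance_threshold=0.9)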
    +
    +
    + +
    + + +
    + + + +

    + create_normalized_mean_euclidean_distance(height, width) + +#

    + + +
    + +

    Construct a normalized mean euclidean distance function configured with the max height and width.

    +

The resulting distance is bound to [0, 1], where 1 indicates opposite corners of the image.

    + + + +

    Parameters:

    + + + + + + + + + + + + + + + + + + + + + + + +
    NameTypeDescriptionDefault
    height + int + +
    +

    Height of the image.

    +
    +
    + required +
    width + int + +
    +

    Width of the image.

    +
    +
    + required +
    + + + +

    Returns:

    + + + + + + + + + + + + + +
    TypeDescription
    + Callable + +
    +

The distance function that must be passed to the Tracker.

    +
    +
    + +
    + Source code in norfair/distances.py +
    def create_normalized_mean_euclidean_distance(
    +    height: int, width: int
    +) -> Callable[["Detection", "TrackedObject"], float]:
    +    """
    +    Construct a normalized mean euclidean distance function configured with the max height and width.
    +
+    The resulting distance is bound to [0, 1], where 1 indicates opposite corners of the image.
    +
    +    Parameters
    +    ----------
    +    height: int
    +        Height of the image.
    +    width: int
    +        Width of the image.
    +
    +    Returns
    +    -------
    +    Callable
+        The distance function that must be passed to the Tracker.
    +    """
    +
    +    def normalized__mean_euclidean_distance(
    +        detection: "Detection", tracked_object: "TrackedObject"
    +    ) -> float:
    +        """Normalized mean euclidean distance"""
+        # calculate distances and normalize them by width and height
    +        difference = (detection.points - tracked_object.estimate).astype(float)
    +        difference[:, 0] /= width
    +        difference[:, 1] /= height
    +
+        # calculate euclidean distance and average
    +        return np.linalg.norm(difference, axis=1).mean()
    +
    +    return normalized__mean_euclidean_distance
    +
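A short wiring sketch; the frame resolution and the threshold are illustrative values.

>>> from norfair import Tracker
>>> from norfair.distances import create_normalized_mean_euclidean_distance
>>> frame_height, frame_width = 720, 1280  # example resolution of the input video
>>> distance_function = create_normalized_mean_euclidean_distance(frame_height, frame_width)
>>> # distances are normalized to [0, 1], so the threshold is a fraction of the image size
>>> tracker = Tracker(distance_function=distance_function, distance_threshold=0.05)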
    +
    +
    + +
    + + +
    + + + +

    + draw_absolute_grid(frame, coord_transformations, grid_size=20, radius=2, thickness=1, color=Color.black, polar=False) + +#

    + + +
    + +

    Draw a grid of points in absolute coordinates.

    +

    Useful for debugging camera motion.

    +

The points are drawn as if the camera were in the center of a sphere and points are drawn in the intersection of latitude and longitude lines over the surface of the sphere.

    + + + +

    Parameters:

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    NameTypeDescriptionDefault
    frame + ndarray + +
    +

    The OpenCV frame to draw on.

    +
    +
    + required +
    coord_transformations + CoordinatesTransformation + +
    +

    The coordinate transformation as returned by the MotionEstimator

    +
    +
    + required +
    grid_size + int + +
    +

    How many points to draw.

    +
    +
    + 20 +
    radius + int + +
    +

    Size of each point.

    +
    +
    + 2 +
    thickness + int + +
    +

    Thickness of each point

    +
    +
    + 1 +
    color + ColorType + +
    +

    Color of the points.

    +
    +
    + black +
    polar + Bool + +
    +

If True, the points on the first frame are drawn as if the camera were pointing to a pole (viewed from the center of the earth). By default, False is used, which means the points are drawn as if the camera were pointing to the Equator.

    +
    +
    + False +
    + +
    + Source code in norfair/drawing/absolute_grid.py +
    def draw_absolute_grid(
    +    frame: np.ndarray,
    +    coord_transformations: CoordinatesTransformation,
    +    grid_size: int = 20,
    +    radius: int = 2,
    +    thickness: int = 1,
    +    color: ColorType = Color.black,
    +    polar: bool = False,
    +):
    +    """
    +    Draw a grid of points in absolute coordinates.
    +
    +    Useful for debugging camera motion.
    +
    +    The points are drawn as if the camera were in the center of a sphere and points are drawn in the intersection
    +    of latitude and longitude lines over the surface of the sphere.
    +
    +    Parameters
    +    ----------
    +    frame : np.ndarray
    +        The OpenCV frame to draw on.
    +    coord_transformations : CoordinatesTransformation
    +        The coordinate transformation as returned by the [`MotionEstimator`][norfair.camera_motion.MotionEstimator]
    +    grid_size : int, optional
    +        How many points to draw.
    +    radius : int, optional
    +        Size of each point.
    +    thickness : int, optional
    +        Thickness of each point
    +    color : ColorType, optional
    +        Color of the points.
    +    polar : Bool, optional
    +        If True, the points on the first frame are drawn as if the camera were pointing to a pole (viewed from the center of the earth).
    +        By default, False is used which means the points are drawn as if the camera were pointing to the Equator.
    +    """
    +    h, w, _ = frame.shape
    +
    +    # get absolute points grid
    +    points = _get_grid(grid_size, w, h, polar=polar)
    +
    +    # transform the points to relative coordinates
    +    if coord_transformations is None:
    +        points_transformed = points
    +    else:
    +        points_transformed = coord_transformations.abs_to_rel(points)
    +
    +    # filter points that are not visible
    +    visible_points = points_transformed[
    +        (points_transformed <= np.array([w, h])).all(axis=1)
    +        & (points_transformed >= 0).all(axis=1)
    +    ]
    +    for point in visible_points:
    +        Drawer.cross(
    +            frame, point.astype(int), radius=radius, thickness=thickness, color=color
    +        )
    +
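A minimal debugging-loop sketch, assuming the video helpers are available and that the grid is drawn in place on every frame:

>>> from norfair import Video
>>> from norfair.camera_motion import MotionEstimator
>>> from norfair.drawing import draw_absolute_grid
>>> video = Video(input_path="video.mp4")
>>> motion_estimator = MotionEstimator()
>>> for frame in video:
>>>     coord_transformations = motion_estimator.update(frame)
>>>     draw_absolute_grid(frame, coord_transformations)  # draws the grid in place
>>>     video.write(frame)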
    +
    +
    + +
    + + +
    + + + +

    + draw_tracked_boxes(frame, objects, border_colors=None, border_width=None, id_size=None, id_thickness=None, draw_box=True, color_by_label=False, draw_labels=False, label_size=None, label_width=None) + +#

    + + +
    + +

Deprecated. Use draw_boxes

    + +
    + Source code in norfair/drawing/draw_boxes.py +
    def draw_tracked_boxes(
    +    frame: np.ndarray,
    +    objects: Sequence["TrackedObject"],
    +    border_colors: Optional[Tuple[int, int, int]] = None,
    +    border_width: Optional[int] = None,
    +    id_size: Optional[int] = None,
    +    id_thickness: Optional[int] = None,
    +    draw_box: bool = True,
    +    color_by_label: bool = False,
    +    draw_labels: bool = False,
    +    label_size: Optional[int] = None,
    +    label_width: Optional[int] = None,
    +) -> np.array:
    +    "**Deprecated**. Use [`draw_box`][norfair.drawing.draw_boxes.draw_boxes]"
    +    warn_once("draw_tracked_boxes is deprecated, use draw_box instead")
    +    return draw_boxes(
    +        frame=frame,
    +        drawables=objects,
    +        color="by_label" if color_by_label else border_colors,
    +        thickness=border_width,
    +        text_size=label_size or id_size,
    +        text_thickness=id_thickness or label_width,
    +        draw_labels=draw_labels,
    +        draw_ids=id_size is not None and id_size > 0,
    +        draw_box=draw_box,
    +    )
    +
    +
    +
    + +
    + + +
    + + + +

    + draw_tracked_objects(frame, objects, radius=None, color=None, id_size=None, id_thickness=None, draw_points=True, color_by_label=False, draw_labels=False, label_size=None) + +#

    + + +
    + +

Deprecated. Use draw_points

    + +
    + Source code in norfair/drawing/draw_points.py +
    def draw_tracked_objects(
    +    frame: np.ndarray,
    +    objects: Sequence["TrackedObject"],
    +    radius: Optional[int] = None,
    +    color: Optional[ColorLike] = None,
    +    id_size: Optional[float] = None,
    +    id_thickness: Optional[int] = None,
    +    draw_points: bool = True,  # pylint: disable=redefined-outer-name
    +    color_by_label: bool = False,
    +    draw_labels: bool = False,
    +    label_size: Optional[int] = None,
    +):
    +    """
+    **Deprecated**. Use [`draw_points`][norfair.drawing.draw_points.draw_points]
    +    """
    +    warn_once("draw_tracked_objects is deprecated, use draw_points instead")
    +
    +    frame_scale = frame.shape[0] / 100
    +    if radius is None:
    +        radius = int(frame_scale * 0.5)
    +    if id_size is None:
    +        id_size = frame_scale / 10
    +    if id_thickness is None:
    +        id_thickness = int(frame_scale / 5)
    +    if label_size is None:
    +        label_size = int(max(frame_scale / 100, 1))
    +
    +    _draw_points_alias(
    +        frame=frame,
    +        drawables=objects,
    +        color="by_label" if color_by_label else color,
    +        radius=radius,
    +        thickness=None,
    +        draw_labels=draw_labels,
    +        draw_ids=id_size is not None and id_size > 0,
    +        draw_points=draw_points,
    +        text_size=label_size or id_size,
    +        text_thickness=id_thickness,
    +        text_color=None,
    +        hide_dead_points=True,
    +    )
    +
    +
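As a rough migration sketch, based on the parameter mapping in the wrapper above (frame and tracked_objects are assumed to come from the usual tracking loop):

>>> from norfair import draw_points
>>> # previously: draw_tracked_objects(frame, tracked_objects, color_by_label=True, draw_labels=True)
>>> draw_points(
>>>     frame,
>>>     drawables=tracked_objects,
>>>     color="by_label",
>>>     draw_labels=True,
>>>     draw_ids=True,
>>> )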
    +
    + +
    diff --git a/dev/search/search_index.json b/dev/search/search_index.json index 89685e19..d81ea3af 100644 --- a/dev/search/search_index.json +++ b/dev/search/search_index.json @@ -1 +1 @@ -{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Home","text":"

    Norfair is a customizable lightweight Python library for real-time multi-object tracking.

    Using Norfair, you can add tracking capabilities to any detector with just a few lines of code.

    Tracking players with moving camera Tracking 3D objects"},{"location":"#features","title":"Features","text":"
    • Any detector expressing its detections as a series of (x, y) coordinates can be used with Norfair. This includes detectors performing tasks such as object or keypoint detection (see examples).

    • Modular. It can easily be inserted into complex video processing pipelines to add tracking to existing projects. At the same time, it is possible to build a video inference loop from scratch using just Norfair and a detector.

    • Supports moving camera, re-identification with appearance embeddings, and n-dimensional object tracking (see Advanced features).

    • Norfair provides several predefined distance functions to compare tracked objects and detections. The distance functions can also be defined by the user, enabling the implementation of different tracking strategies.

    • Fast. The only thing bounding inference speed will be the detection network feeding detections to Norfair.

    Norfair is built, used and maintained by Tryolabs.

    "},{"location":"#installation","title":"Installation","text":"

    Norfair currently supports Python 3.8+. The latest tested version to support Python 3.7 is Norfair 2.2.0. Later versions may work, but no specific support is planned.

    For the minimal version, install as:

    pip install norfair\n

    To make Norfair install the dependencies to support more features, install as:

    pip install norfair[video]  # Adds several video helper features running on OpenCV\npip install norfair[metrics]  # Supports running MOT metrics evaluation\npip install norfair[metrics,video]  # Everything included\n

    If the needed dependencies are already present in the system, installing the minimal version of Norfair is enough for enabling the extra features. This is particularly useful for embedded devices, where installing compiled dependencies can be difficult, but they can sometimes come preinstalled with the system.

    "},{"location":"#documentation","title":"Documentation","text":"

    Getting started guide.

    Official reference.

    "},{"location":"#examples-demos","title":"Examples & demos","text":"

    We provide several examples of how Norfair can be used to add tracking capabilities to different detectors, and also showcase more advanced features.

    Note: for ease of reproducibility, we provide Dockerfiles for all the demos. Even though Norfair does not need a GPU, the default configuration of most demos requires a GPU to be able to run the detectors. For this, make sure you install NVIDIA Container Toolkit so that your GPU can be shared with Docker.

    It is possible to run several demos with a CPU, but you will have to modify the scripts or tinker with the installation of their dependencies.

    "},{"location":"#adding-tracking-to-different-detectors","title":"Adding tracking to different detectors","text":"

    Most tracking demos are showcased with vehicles and pedestrians, but the detectors are generally trained with many more classes from the COCO dataset.

    1. YOLOv7: tracking object centroids or bounding boxes.
    2. YOLOv5: tracking object centroids or bounding boxes.
    3. YOLOv4: tracking object centroids.
    4. Detectron2: tracking object centroids.
5. AlphaPose: tracking human keypoints (pose estimation) and inserting Norfair into a complex existing pipeline.
    6. OpenPose: tracking human keypoints.
    7. YOLOPv2: tracking with a model for traffic object detection, drivable road area segmentation, and lane line detection.
    8. YOLO-NAS: tracking object centroids or bounding boxes.
    "},{"location":"#advanced-features","title":"Advanced features","text":"
    1. Speed up pose estimation by extrapolating detections using OpenPose.
    2. Track both bounding boxes and human keypoints (multi-class), unifying the detections from a YOLO model and OpenPose.
    3. Re-identification (ReID) of tracked objects using appearance embeddings. This is a good starting point for scenarios with a lot of occlusion, in which the Kalman filter alone would struggle.
4. Accurately track objects even if the camera is moving, by estimating the camera motion, potentially accounting for pan, tilt, rotation, movement in any direction, and zoom.
    5. Track points in 3D, using MediaPipe Objectron.
    6. Tracking of small objects, using SAHI: Slicing Aided Hyper Inference.
    "},{"location":"#ros-integration","title":"ROS integration","text":"

To make it even easier to use Norfair in robotics projects, we now offer a version that integrates with the Robot Operating System (ROS).

We provide a ROS package and a fully functional Docker environment to help you take the first steps with the package and build your first application more easily.

    "},{"location":"#benchmarking-and-profiling","title":"Benchmarking and profiling","text":"
    1. Kalman filter and distance function profiling using TRT pose estimator.
    2. Computation of MOT17 scores using motmetrics4norfair.
    "},{"location":"#how-it-works","title":"How it works","text":"

    Norfair works by estimating the future position of each point based on its past positions. It then tries to match these estimated positions with newly detected points provided by the detector. For this matching to occur, Norfair can rely on any distance function. There are some predefined distances already integrated in Norfair, and the users can also define their own custom distances. Therefore, each object tracker can be made as simple or as complex as needed.

As an example, we use Detectron2 to get single-point detections to use with a simple Euclidean distance function. We just use the centroids of the bounding boxes it produces around cars as our detections, and get the following results.

    On the left you can see the points we get from Detectron2, and on the right how Norfair tracks them assigning a unique identifier through time. Even a straightforward distance function like this one can work when the tracking needed is simple.

    Norfair also provides several useful tools for creating a video inference loop. Here is what the full code for creating the previous example looks like, including the code needed to set up Detectron2:

    import cv2\nimport numpy as np\nfrom detectron2.config import get_cfg\nfrom detectron2.engine import DefaultPredictor\n\nfrom norfair import Detection, Tracker, Video, draw_tracked_objects\n\n# Set up Detectron2 object detector\ncfg = get_cfg()\ncfg.merge_from_file(\"demos/faster_rcnn_R_50_FPN_3x.yaml\")\ncfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5\ncfg.MODEL.WEIGHTS = \"detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl\"\ndetector = DefaultPredictor(cfg)\n\n# Norfair\nvideo = Video(input_path=\"video.mp4\")\ntracker = Tracker(distance_function=\"euclidean\", distance_threshold=20)\n\nfor frame in video:\n    detections = detector(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n    detections = [Detection(p) for p in detections['instances'].pred_boxes.get_centers().cpu().numpy()]\n    tracked_objects = tracker.update(detections=detections)\n    draw_tracked_objects(frame, tracked_objects)\n    video.write(frame)\n

    The video and drawing tools use OpenCV frames, so they are compatible with most Python video code available online. The point tracking is based on SORT generalized to detections consisting of a dynamically changing number of points per detection.

    "},{"location":"#motivation","title":"Motivation","text":"

Trying out the latest state-of-the-art detectors normally requires running repositories that weren't intended to be easy to use. These tend to be repositories associated with a research paper describing a novel way of doing detection, and they are therefore intended to be run as one-off evaluation scripts that produce the metrics reported in the paper. This explains why they tend not to be easy to run as inference scripts, or why extracting the core model to use in another standalone script isn't always trivial.

Norfair was born out of the need to quickly add a simple layer of tracking over a wide range of newly released SOTA detectors. It was designed to be seamlessly plugged into complex, highly coupled code bases with minimal effort. Norfair provides a series of modular but compatible tools, which you can pick and choose to use in your project.

    "},{"location":"#comparison-to-other-trackers","title":"Comparison to other trackers","text":"

Norfair's contribution to the Python object-tracking ecosystem is that it works with any object detector, supporting a variable number of points per detection, and that it lets the user heavily customize the tracker by creating their own distance function.

    If you are looking for a tracker, here are some other projects worth noting:

• OpenCV includes several tracking solutions, like the KCF and MedianFlow trackers, which work by having the user select a part of the frame to track and then letting the tracker follow that area. They tend not to be run on top of a detector and are not very robust.
• dlib includes a correlation tracker for a single object. If you want to track multiple objects with it, you have to build your own multi-object tracker on top of it.
    • AlphaPose just released a new version of their human pose tracker. This tracker is tightly integrated into their code base, and to the task of tracking human poses.
• SORT and Deep SORT are similar to this repo in that they use Kalman filters (and a deep embedding for Deep SORT), but they are hardcoded to a fixed distance function and to tracking boxes. Norfair also adds some filtering when matching tracked objects with detections, and replaces the Hungarian Algorithm with its own distance minimizer. Both of these repos are also released under the GPL license, which might be an issue for some individuals or companies because the source code of derivative works needs to be published.
    "},{"location":"#benchmarks","title":"Benchmarks","text":"

    MOT17 and MOT20 results obtained using motmetrics4norfair demo script on the train split. We used detections obtained with ByteTrack's YOLOX object detection model.

MOT17 Train    IDF1    IDP     IDR     Rcll    Prcn    MOTA    MOTP
MOT17-02       61.3%   63.6%   59.0%   86.8%   93.5%   79.9%   14.8%
MOT17-04       93.3%   93.6%   93.0%   98.6%   99.3%   97.9%   07.9%
MOT17-05       77.8%   77.7%   77.8%   85.9%   85.8%   71.2%   14.7%
MOT17-09       65.0%   67.4%   62.9%   90.3%   96.8%   86.8%   12.2%
MOT17-10       70.2%   72.5%   68.1%   87.3%   93.0%   80.1%   18.7%
MOT17-11       80.2%   80.5%   80.0%   93.0%   93.6%   86.4%   11.3%
MOT17-13       79.0%   79.6%   78.4%   90.6%   92.0%   82.4%   16.6%
OVERALL        80.6%   81.8%   79.6%   92.9%   95.5%   88.1%   11.9%

MOT20 Train    IDF1    IDP     IDR     Rcll    Prcn    MOTA    MOTP
MOT20-01       85.9%   88.1%   83.8%   93.4%   98.2%   91.5%   12.6%
MOT20-02       72.8%   74.6%   71.0%   93.2%   97.9%   91.0%   12.7%
MOT20-03       93.0%   94.1%   92.0%   96.1%   98.3%   94.4%   13.7%
MOT20-05       87.9%   88.9%   87.0%   96.0%   98.1%   94.1%   13.0%
OVERALL        87.3%   88.4%   86.2%   95.6%   98.1%   93.7%   13.2%"},{"location":"#commercial-support","title":"Commercial support","text":"

    Tryolabs can provide commercial support, implement new features in Norfair or build video analytics tools for solving your challenging problems. Norfair powers several video analytics applications, such as the face mask detection tool.

    If you are interested, please contact us.

    "},{"location":"#citing-norfair","title":"Citing Norfair","text":"

    For citations in academic publications, please export your desired citation format (BibTeX or other) from Zenodo.

    "},{"location":"#license","title":"License","text":"

Copyright \u00a9 2022, Tryolabs. Released under the BSD 3-Clause License.

    "},{"location":"getting_started/","title":"Getting Started","text":"

    Norfair's goal is to easily track multiple objects in videos based on the frame-by-frame detections of a user-defined model.

    "},{"location":"getting_started/#model-or-detector","title":"Model or Detector","text":"

    We recommend first deciding and setting up the model and then adding Norfair on top of it. Models trained for any form of object detection or keypoint detection (including pose estimation) are all supported. You can check some of the integrations we have as examples:

    • Yolov7, Yolov5 and Yolov4
    • Detectron2
    • Alphapose
    • Openpose
    • MMDetection

Any other model trained on one of the supported tasks is also supported and should be easy to integrate with Norfair, regardless of whether it uses PyTorch, TensorFlow, or another framework.

    If you are unsure of which model to use, Yolov7 is a good starting point since it's easy to set up and offers models of different sizes pre-trained on object detection and pose estimation.

    Note

    Norfair is a Detection-Based-Tracker (DBT) and as such, its performance is highly dependent on the performance of the model of choice.

    The detections from the model will need to be wrapped in an instance of Detection before passing them to Norfair.
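A minimal sketch of that wrapping step, assuming a hypothetical detector whose output is a NumPy array with one (x1, y1, x2, y2, score, class_id) row per detected box (the helper name to_norfair_detections is made up for illustration):

import numpy as np
from norfair import Detection

def to_norfair_detections(model_output: np.ndarray) -> list:
    # Wrap raw bounding boxes into Norfair Detection objects
    detections = []
    for x1, y1, x2, y2, score, class_id in model_output:
        box = np.array([[x1, y1], [x2, y2]])    # two points per detection: the box corners
        scores = np.array([score, score])       # one score per point
        detections.append(Detection(points=box, scores=scores, label=int(class_id)))
    return detections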

    "},{"location":"getting_started/#install","title":"Install","text":"

Installing Norfair is extremely easy: simply run pip install norfair to install the latest version from PyPI.

    You can also install the latest version from the master branch using pip install git+https://github.com/tryolabs/norfair.git@master#egg=norfair

    "},{"location":"getting_started/#video","title":"Video","text":"

    Norfair offers optional functionality to process videos (mp4 and mov formats are supported) or capture a live feed from a camera. To use this functionality you need to install Norfair with the video extra using this command: pip install norfair[video].

    Check the Video class for more info on how to use it.
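A minimal sketch of the two common ways of creating a Video, assuming the video extra is installed; input_path, output_path, and camera are the relevant constructor arguments:

from norfair import Video

video = Video(input_path="video.mp4", output_path="video_out.mp4")  # process a file and save the drawn frames
# live = Video(camera=0)  # alternatively, capture a live feed from the first connected camera

for frame in video:       # frames are plain OpenCV (NumPy, BGR) arrays
    video.write(frame)    # write the (possibly annotated) frame to the output video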

    "},{"location":"getting_started/#tracking","title":"Tracking","text":"

    Let's dive right into a simple example in the following snippet:

    from norfair import Detection, Tracker, Video, draw_tracked_objects\n\ndetector = MyDetector()  # Set up a detector\nvideo = Video(input_path=\"video.mp4\")\ntracker = Tracker(distance_function=\"euclidean\", distance_threshold=100)\n\nfor frame in video:\n   detections = detector(frame)\n   norfair_detections = [Detection(points) for points in detections]\n   tracked_objects = tracker.update(detections=norfair_detections)\n   draw_tracked_objects(frame, tracked_objects)\n   video.write(frame)\n

    The tracker is created and then the detections are fed to it one frame at a time in order. This method is called online tracking and allows Norfair to be used in live feeds and real-time scenarios where future frames are not available.

Norfair includes functionality for creating an output video with drawings, which is useful for evaluating and debugging. We usually start with this simple setup and move on from there.

    "},{"location":"getting_started/#next-steps","title":"Next Steps","text":"

The next steps depend a lot on your goal and on the result of evaluating the output videos; nevertheless, here are some pointers that might help you solve common problems.

    "},{"location":"getting_started/#detection-issues","title":"Detection Issues","text":"

The most common problem is that the tracking has errors or is not precise enough. In this case, the first thing to check is whether this is a detection error or a tracking error. As mentioned above, if the detector fails, the tracking will suffer.

To debug this, use draw_points or draw_boxes to inspect the detections and analyze whether they are precise enough (see the sketch below). If you are filtering the detections based on scores, this is a good time to tweak the threshold. If you decide that the detections are not good enough, you can try a different architecture, a bigger version of the model, or consider fine-tuning the model on your domain.
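A minimal sketch of that kind of inspection loop, reusing the detector and Detection wrapping from the snippet above and skipping the tracker entirely so only raw detections are drawn:

from norfair import Detection, Video, draw_boxes

video = Video(input_path="video.mp4")
for frame in video:
    detections = [Detection(points) for points in detector(frame)]  # detector as in the Tracking snippet
    draw_boxes(frame, detections)   # or draw_points(frame, detections) for centroids/keypoints
    video.write(frame)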

    "},{"location":"getting_started/#tracking-issues","title":"Tracking Issues","text":"

After inspecting the detections you might still find issues with the tracking. Several things can go wrong, but here is a list of common errors and things to try (a configuration sketch follows this list):

• Objects take too long to start; this can have multiple causes:
  • initialization_delay is too big on the Tracker. It makes the TrackedObject stay in the initializing state for too long; 3 is usually a good value to start with.
  • distance_threshold is too small on the Tracker. It prevents the Detections from being matched with the correct TrackedObject. The best value depends on the distance function used.
  • Incorrect distance_function on the Tracker. Some distances might not be valid in some cases, for instance when using IoU but the objects in your video move so quickly that there is never an overlap between the detections of consecutive frames. Try different distances; euclidean or create_normalized_mean_euclidean_distance are good starting points.
    • Objects take too long to disappear. Lower hit_counter_max on the Tracker.
• Points or bounding boxes jitter too much. Increase R (measurement error) or lower Q (estimate or process error) on the OptimizedKalmanFilterFactory or FilterPyKalmanFilterFactory. This makes the Kalman Filter put less weight on the measurements and trust the estimate more, stabilizing the result.
    • Camera motion confuses the Tracker. If the camera moves, the apparent movement of objects can become too erratic for the Tracker. Use MotionEstimator.
• Incorrect matches between Detections and TrackedObjects; a couple of scenarios can cause this:
  • distance_threshold is too big, so the Tracker matches Detections to TrackedObjects that are simply too far away. Lower the threshold until you fix the error; the correct value will depend on the distance function that you're using.
  • Mismatches when objects overlap. In this case, tracking becomes more challenging; usually the quality of the detections degrades, causing one of the objects to be missed or producing a single big detection that includes both objects. On top of the detection issues, the tracker needs to decide which detection should be matched to which TrackedObject, which can be error-prone if only spatial information is considered. The solution is not easy, but incorporating a notion of appearance similarity based on some kind of embedding into your distance_function can help.
• Can't recover an object after occlusions. Use the ReID distance; see this demo for an example, but for real-world use you will need a good ReID model that can provide good embeddings.
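The knobs mentioned above are all set when constructing the Tracker. A minimal configuration sketch; the specific numbers are illustrative assumptions and should be tuned against your own videos:

from norfair import Tracker
from norfair.filter import OptimizedKalmanFilterFactory

tracker = Tracker(
    distance_function="euclidean",
    distance_threshold=50,       # raise or lower depending on the distance function in use
    initialization_delay=3,      # frames an object spends initializing before being confirmed
    hit_counter_max=15,          # lower this if objects take too long to disappear
    filter_factory=OptimizedKalmanFilterFactory(R=4.0),  # higher R trusts the estimate more, reducing jitter
)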
    "},{"location":"reference/","title":"Reference","text":"

    A customizable lightweight Python library for real-time multi-object tracking.

    Examples:

    >>> from norfair import Detection, Tracker, Video, draw_tracked_objects\n>>> detector = MyDetector()  # Set up a detector\n>>> video = Video(input_path=\"video.mp4\")\n>>> tracker = Tracker(distance_function=\"euclidean\", distance_threshold=50)\n>>> for frame in video:\n>>>    detections = detector(frame)\n>>>    norfair_detections = [Detection(points) for points in detections]\n>>>    tracked_objects = tracker.update(detections=norfair_detections)\n>>>    draw_tracked_objects(frame, tracked_objects)\n>>>    video.write(frame)\n
    "},{"location":"reference/camera_motion/","title":"Camera Motion","text":"

Camera motion estimation module.

    "},{"location":"reference/camera_motion/#norfair.camera_motion.CoordinatesTransformation","title":"CoordinatesTransformation","text":"

    Bases: ABC

    Abstract class representing a coordinate transformation.

Detections' and tracked objects' coordinates can be interpreted in 2 references:

• Relative: their position on the current frame; (0, 0) is the top left.
• Absolute: their position in a fixed space; (0, 0) is the top left of the first frame of the video.

Therefore, a coordinate transformation in this context is a class that can transform coordinates from one reference to another.

    Source code in norfair/camera_motion.py
    class CoordinatesTransformation(ABC):\n    \"\"\"\n    Abstract class representing a coordinate transformation.\n\n    Detections' and tracked objects' coordinates can be interpreted in 2 reference:\n\n    - _Relative_: their position on the current frame, (0, 0) is top left\n    - _Absolute_: their position on an fixed space, (0, 0)\n        is the top left of the first frame of the video.\n\n    Therefore, coordinate transformation in this context is a class that can transform\n    coordinates in one reference to another.\n    \"\"\"\n\n    @abstractmethod\n    def abs_to_rel(self, points: np.ndarray) -> np.ndarray:\n        pass\n\n    @abstractmethod\n    def rel_to_abs(self, points: np.ndarray) -> np.ndarray:\n        pass\n
    "},{"location":"reference/camera_motion/#norfair.camera_motion.TransformationGetter","title":"TransformationGetter","text":"

    Bases: ABC

    Abstract class representing a method for finding CoordinatesTransformation between 2 sets of points

    Source code in norfair/camera_motion.py
    class TransformationGetter(ABC):\n    \"\"\"\n    Abstract class representing a method for finding CoordinatesTransformation between 2 sets of points\n    \"\"\"\n\n    @abstractmethod\n    def __call__(\n        self, curr_pts: np.ndarray, prev_pts: np.ndarray\n    ) -> Tuple[bool, CoordinatesTransformation]:\n        pass\n
    "},{"location":"reference/camera_motion/#norfair.camera_motion.TranslationTransformation","title":"TranslationTransformation","text":"

    Bases: CoordinatesTransformation

    Coordinate transformation between points using a simple translation

    Parameters:

    Name Type Description Default movement_vector ndarray

    The vector representing the translation.

    required Source code in norfair/camera_motion.py
    class TranslationTransformation(CoordinatesTransformation):\n    \"\"\"\n    Coordinate transformation between points using a simple translation\n\n    Parameters\n    ----------\n    movement_vector : np.ndarray\n        The vector representing the translation.\n    \"\"\"\n\n    def __init__(self, movement_vector):\n        self.movement_vector = movement_vector\n\n    def abs_to_rel(self, points: np.ndarray):\n        return points + self.movement_vector\n\n    def rel_to_abs(self, points: np.ndarray):\n        return points - self.movement_vector\n
    "},{"location":"reference/camera_motion/#norfair.camera_motion.TranslationTransformationGetter","title":"TranslationTransformationGetter","text":"

    Bases: TransformationGetter

    Calculates TranslationTransformation between points.

    The camera movement is calculated as the mode of optical flow between the previous reference frame and the current.

Comparing consecutive frames can make differences too small to correctly estimate the translation; for this reason, the reference frame is kept fixed as we progress through the video. Eventually, if the transformation is no longer able to match enough points, the reference frame is updated.

    Parameters:

    Name Type Description Default bin_size float

Before calculating the mode, optical flow is bucketized into bins of this size.

    0.2 proportion_points_used_threshold float

    Proportion of points that must be matched, otherwise the reference frame must be updated.

    0.9 Source code in norfair/camera_motion.py
    class TranslationTransformationGetter(TransformationGetter):\n    \"\"\"\n    Calculates TranslationTransformation between points.\n\n    The camera movement is calculated as the mode of optical flow between the previous reference frame\n    and the current.\n\n    Comparing consecutive frames can make differences too small to correctly estimate the translation,\n    for this reason the reference frame is kept fixed as we progress through the video.\n    Eventually, if the transformation is no longer able to match enough points, the reference frame is updated.\n\n    Parameters\n    ----------\n    bin_size : float\n        Before calculatin the mode, optiocal flow is bucketized into bins of this size.\n    proportion_points_used_threshold: float\n        Proportion of points that must be matched, otherwise the reference frame must be updated.\n    \"\"\"\n\n    def __init__(\n        self, bin_size: float = 0.2, proportion_points_used_threshold: float = 0.9\n    ) -> None:\n        self.bin_size = bin_size\n        self.proportion_points_used_threshold = proportion_points_used_threshold\n        self.data = None\n\n    def __call__(\n        self, curr_pts: np.ndarray, prev_pts: np.ndarray\n    ) -> Tuple[bool, TranslationTransformation]:\n        # get flow\n        flow = curr_pts - prev_pts\n\n        # get mode\n        flow = np.around(flow / self.bin_size) * self.bin_size\n        unique_flows, counts = np.unique(flow, axis=0, return_counts=True)\n\n        max_index = counts.argmax()\n\n        proportion_points_used = counts[max_index] / len(prev_pts)\n        update_prvs = proportion_points_used < self.proportion_points_used_threshold\n\n        flow_mode = unique_flows[max_index]\n\n        try:\n            flow_mode += self.data\n        except TypeError:\n            pass\n\n        if update_prvs:\n            self.data = flow_mode\n\n        return update_prvs, TranslationTransformation(flow_mode)\n
    "},{"location":"reference/camera_motion/#norfair.camera_motion.HomographyTransformation","title":"HomographyTransformation","text":"

    Bases: CoordinatesTransformation

Coordinate transformation between points using a homography

    Parameters:

    Name Type Description Default homography_matrix ndarray

    The matrix representing the homography

    required Source code in norfair/camera_motion.py
    class HomographyTransformation(CoordinatesTransformation):\n    \"\"\"\n    Coordinate transformation beweent points using an homography\n\n    Parameters\n    ----------\n    homography_matrix : np.ndarray\n        The matrix representing the homography\n    \"\"\"\n\n    def __init__(self, homography_matrix: np.ndarray):\n        self.homography_matrix = homography_matrix\n        self.inverse_homography_matrix = np.linalg.inv(homography_matrix)\n\n    def abs_to_rel(self, points: np.ndarray):\n        ones = np.ones((len(points), 1))\n        points_with_ones = np.hstack((points, ones))\n        points_transformed = points_with_ones @ self.homography_matrix.T\n        last_column = points_transformed[:, -1]\n        last_column[last_column == 0] = 0.0000001\n        points_transformed = points_transformed / last_column.reshape(-1, 1)\n        new_points_transformed = points_transformed[:, :2]\n        return new_points_transformed\n\n    def rel_to_abs(self, points: np.ndarray):\n        ones = np.ones((len(points), 1))\n        points_with_ones = np.hstack((points, ones))\n        points_transformed = points_with_ones @ self.inverse_homography_matrix.T\n        last_column = points_transformed[:, -1]\n        last_column[last_column == 0] = 0.0000001\n        points_transformed = points_transformed / last_column.reshape(-1, 1)\n        return points_transformed[:, :2]\n
    "},{"location":"reference/camera_motion/#norfair.camera_motion.HomographyTransformationGetter","title":"HomographyTransformationGetter","text":"

    Bases: TransformationGetter

    Calculates HomographyTransformation between points.

The camera movement is represented as a homography that matches the optical flow between the previous reference frame and the current one.

    Comparing consecutive frames can make differences too small to correctly estimate the homography, often resulting in the identity. For this reason the reference frame is kept fixed as we progress through the video. Eventually, if the transformation is no longer able to match enough points, the reference frame is updated.

    Parameters:

    Name Type Description Default method Optional[int]

One of OpenCV's methods for finding homographies. Valid options are: [0, cv.RANSAC, cv.LMEDS, cv.RHO], by default cv.RANSAC

    None ransac_reproj_threshold int

    Maximum allowed reprojection error to treat a point pair as an inlier. More info in links below.

    3 max_iters int

    The maximum number of RANSAC iterations. More info in links below.

    2000 confidence float

    Confidence level, must be between 0 and 1. More info in links below.

    0.995 proportion_points_used_threshold float

    Proportion of points that must be matched, otherwise the reference frame must be updated.

    0.9 See Also

    opencv.findHomography

    Source code in norfair/camera_motion.py
    class HomographyTransformationGetter(TransformationGetter):\n    \"\"\"\n    Calculates HomographyTransformation between points.\n\n    The camera movement is represented as an homography that matches the optical flow between the previous reference frame\n    and the current.\n\n    Comparing consecutive frames can make differences too small to correctly estimate the homography, often resulting in the identity.\n    For this reason the reference frame is kept fixed as we progress through the video.\n    Eventually, if the transformation is no longer able to match enough points, the reference frame is updated.\n\n    Parameters\n    ----------\n    method : Optional[int], optional\n        One of openCV's method for finding homographies.\n        Valid options are: `[0, cv.RANSAC, cv.LMEDS, cv.RHO]`, by default `cv.RANSAC`\n    ransac_reproj_threshold : int, optional\n        Maximum allowed reprojection error to treat a point pair as an inlier. More info in links below.\n    max_iters : int, optional\n        The maximum number of RANSAC iterations.  More info in links below.\n    confidence : float, optional\n        Confidence level, must be between 0 and 1. More info in links below.\n    proportion_points_used_threshold : float, optional\n        Proportion of points that must be matched, otherwise the reference frame must be updated.\n\n    See Also\n    --------\n    [opencv.findHomography](https://docs.opencv.org/3.4/d9/d0c/group__calib3d.html#ga4abc2ece9fab9398f2e560d53c8c9780)\n    \"\"\"\n\n    def __init__(\n        self,\n        method: Optional[int] = None,\n        ransac_reproj_threshold: int = 3,\n        max_iters: int = 2000,\n        confidence: float = 0.995,\n        proportion_points_used_threshold: float = 0.9,\n    ) -> None:\n        self.data = None\n        if method is None:\n            method = cv2.RANSAC\n        self.method = method\n        self.ransac_reproj_threshold = ransac_reproj_threshold\n        self.max_iters = max_iters\n        self.confidence = confidence\n        self.proportion_points_used_threshold = proportion_points_used_threshold\n\n    def __call__(\n        self, curr_pts: np.ndarray, prev_pts: np.ndarray\n    ) -> Tuple[bool, Optional[HomographyTransformation]]:\n\n        if not (\n            isinstance(prev_pts, np.ndarray)\n            and prev_pts.shape[0] >= 4\n            and isinstance(curr_pts, np.ndarray)\n            and curr_pts.shape[0] >= 4\n        ):\n            warning(\n                \"The homography couldn't be computed in this frame \"\n                \"due to low amount of points\"\n            )\n            if isinstance(self.data, np.ndarray):\n                return True, HomographyTransformation(self.data)\n            else:\n                return True, None\n\n        homography_matrix, points_used = cv2.findHomography(\n            prev_pts,\n            curr_pts,\n            method=self.method,\n            ransacReprojThreshold=self.ransac_reproj_threshold,\n            maxIters=self.max_iters,\n            confidence=self.confidence,\n        )\n\n        proportion_points_used = np.sum(points_used) / len(points_used)\n\n        update_prvs = proportion_points_used < self.proportion_points_used_threshold\n\n        try:\n            homography_matrix = homography_matrix @ self.data\n        except (TypeError, ValueError):\n            pass\n\n        if update_prvs:\n            self.data = homography_matrix\n\n        return update_prvs, HomographyTransformation(homography_matrix)\n
    "},{"location":"reference/camera_motion/#norfair.camera_motion.MotionEstimator","title":"MotionEstimator","text":"

    Estimator of the motion of the camera.

    Uses optical flow to estimate the motion of the camera from frame to frame. The optical flow is calculated on a sample of strong points (corners).

    Parameters:

    Name Type Description Default max_points int

Maximum number of points sampled. More points make the estimation process slower but more precise.

    200 min_distance int

    Minimum distance between the sample points.

    15 block_size int

    Size of an average block when finding the corners. More info in links below.

    3 transformations_getter TransformationGetter

    An instance of TransformationGetter. By default HomographyTransformationGetter

    None draw_flow bool

    Draws the optical flow on the frame for debugging.

    False flow_color Optional[Tuple[int, int, int]]

    Color of the drawing, by default blue.

    None quality_level float

    Parameter characterizing the minimal accepted quality of image corners.

    0.01

    Examples:

>>> from norfair import Tracker, Video\n>>> from norfair.camera_motion import MotionEstimator\n>>> video = Video(\"video.mp4\")\n>>> tracker = Tracker(...)\n>>> motion_estimator = MotionEstimator()\n>>> for frame in video:\n>>>    detections = get_detections(frame)  # runs detector and returns Detections\n>>>    coord_transformation = motion_estimator.update(frame)\n>>>    tracked_objects = tracker.update(detections, coord_transformations=coord_transformation)\n
    See Also

For more info on how the points are sampled: OpenCV.goodFeaturesToTrack

    Source code in norfair/camera_motion.py
    class MotionEstimator:\n    \"\"\"\n    Estimator of the motion of the camera.\n\n    Uses optical flow to estimate the motion of the camera from frame to frame.\n    The optical flow is calculated on a sample of strong points (corners).\n\n    Parameters\n    ----------\n    max_points : int, optional\n        Maximum amount of points sampled.\n        More points make the estimation process slower but more precise\n    min_distance : int, optional\n        Minimum distance between the sample points.\n    block_size : int, optional\n        Size of an average block when finding the corners. More info in links below.\n    transformations_getter : TransformationGetter, optional\n        An instance of TransformationGetter. By default [`HomographyTransformationGetter`][norfair.camera_motion.HomographyTransformationGetter]\n    draw_flow : bool, optional\n        Draws the optical flow on the frame for debugging.\n    flow_color : Optional[Tuple[int, int, int]], optional\n        Color of the drawing, by default blue.\n    quality_level : float, optional\n        Parameter characterizing the minimal accepted quality of image corners.\n\n    Examples\n    --------\n    >>> from norfair import Tracker, Video\n    >>> from norfair.camera_motion MotionEstimator\n    >>> video = Video(\"video.mp4\")\n    >>> tracker = Tracker(...)\n    >>> motion_estimator = MotionEstimator()\n    >>> for frame in video:\n    >>>    detections = get_detections(frame)  # runs detector and returns Detections\n    >>>    coord_transformation = motion_estimator.update(frame)\n    >>>    tracked_objects = tracker.update(detections, coord_transformations=coord_transformation)\n\n    See Also\n    --------\n    For more infor on how the points are sampled: [OpenCV.goodFeaturesToTrack](https://docs.opencv.org/3.4/dd/d1a/group__imgproc__feature.html#ga1d6bb77486c8f92d79c8793ad995d541)\n    \"\"\"\n\n    def __init__(\n        self,\n        max_points: int = 200,\n        min_distance: int = 15,\n        block_size: int = 3,\n        transformations_getter: TransformationGetter = None,\n        draw_flow: bool = False,\n        flow_color: Optional[Tuple[int, int, int]] = None,\n        quality_level: float = 0.01,\n    ):\n\n        self.max_points = max_points\n        self.min_distance = min_distance\n        self.block_size = block_size\n\n        self.draw_flow = draw_flow\n        if self.draw_flow and flow_color is None:\n            flow_color = [0, 0, 100]\n        self.flow_color = flow_color\n\n        self.gray_prvs = None\n        self.prev_pts = None\n        if transformations_getter is None:\n            transformations_getter = HomographyTransformationGetter()\n\n        self.transformations_getter = transformations_getter\n        self.transformations_getter_copy = copy.deepcopy(transformations_getter)\n\n        self.prev_mask = None\n        self.gray_next = None\n        self.quality_level = quality_level\n\n    def update(\n        self, frame: np.ndarray, mask: np.ndarray = None\n    ) -> Optional[CoordinatesTransformation]:\n        \"\"\"\n        Estimate camera motion for each frame\n\n        Parameters\n        ----------\n        frame : np.ndarray\n            The frame.\n        mask : np.ndarray, optional\n            An optional mask to avoid areas of the frame when sampling the corner.\n            Must be an array of shape `(frame.shape[0], frame.shape[1])`, dtype same as frame,\n            and values in {0, 1}.\n\n            In general, the estimation will work best when it 
samples many points from the background;\n            with that intention, this parameters is usefull for masking out the detections/tracked objects,\n            forcing the MotionEstimator ignore the moving objects.\n            Can be used to mask static areas of the image, such as score overlays in sport transmisions or\n            timestamps in security cameras.\n\n        Returns\n        -------\n        CoordinatesTransformation\n            The CoordinatesTransformation that can transform coordinates on this frame to absolute coordinates\n            or vice versa.\n        \"\"\"\n\n        self.gray_next = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n        if self.gray_prvs is None:\n            self.gray_prvs = self.gray_next\n            self.prev_mask = mask\n\n        curr_pts, prev_pts = None, None\n        try:\n            curr_pts, prev_pts = _get_sparse_flow(\n                self.gray_next,\n                self.gray_prvs,\n                self.prev_pts,\n                self.max_points,\n                self.min_distance,\n                self.block_size,\n                self.prev_mask,\n                quality_level=self.quality_level,\n            )\n            if self.draw_flow:\n                for (curr, prev) in zip(curr_pts, prev_pts):\n                    c = tuple(curr.astype(int).ravel())\n                    p = tuple(prev.astype(int).ravel())\n                    cv2.line(frame, c, p, self.flow_color, 2)\n                    cv2.circle(frame, c, 3, self.flow_color, -1)\n        except Exception as e:\n            warning(e)\n\n        update_prvs, coord_transformations = True, None\n        try:\n            update_prvs, coord_transformations = self.transformations_getter(\n                curr_pts, prev_pts\n            )\n        except Exception as e:\n            warning(e)\n            del self.transformations_getter\n            self.transformations_getter = copy.deepcopy(\n                self.transformations_getter_copy\n            )\n\n        if update_prvs:\n            self.gray_prvs = self.gray_next\n            self.prev_pts = None\n            self.prev_mask = mask\n        else:\n            self.prev_pts = prev_pts\n\n        return coord_transformations\n
    "},{"location":"reference/camera_motion/#norfair.camera_motion.MotionEstimator.update","title":"update(frame, mask=None)","text":"

    Estimate camera motion for each frame

    Parameters:

    Name Type Description Default frame ndarray

    The frame.

    required mask ndarray

An optional mask to avoid areas of the frame when sampling the corners. Must be an array of shape (frame.shape[0], frame.shape[1]), with the same dtype as the frame, and values in {0, 1}.

In general, the estimation will work best when it samples many points from the background; with that intention, this parameter is useful for masking out the detections/tracked objects, forcing the MotionEstimator to ignore the moving objects. It can also be used to mask static areas of the image, such as score overlays in sports broadcasts or timestamps in security cameras. A short sketch of building such a mask follows the source code below.

    None

    Returns:

    Type Description CoordinatesTransformation

    The CoordinatesTransformation that can transform coordinates on this frame to absolute coordinates or vice versa.

    Source code in norfair/camera_motion.py
    def update(\n    self, frame: np.ndarray, mask: np.ndarray = None\n) -> Optional[CoordinatesTransformation]:\n    \"\"\"\n    Estimate camera motion for each frame\n\n    Parameters\n    ----------\n    frame : np.ndarray\n        The frame.\n    mask : np.ndarray, optional\n        An optional mask to avoid areas of the frame when sampling the corner.\n        Must be an array of shape `(frame.shape[0], frame.shape[1])`, dtype same as frame,\n        and values in {0, 1}.\n\n        In general, the estimation will work best when it samples many points from the background;\n        with that intention, this parameters is usefull for masking out the detections/tracked objects,\n        forcing the MotionEstimator ignore the moving objects.\n        Can be used to mask static areas of the image, such as score overlays in sport transmisions or\n        timestamps in security cameras.\n\n    Returns\n    -------\n    CoordinatesTransformation\n        The CoordinatesTransformation that can transform coordinates on this frame to absolute coordinates\n        or vice versa.\n    \"\"\"\n\n    self.gray_next = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n    if self.gray_prvs is None:\n        self.gray_prvs = self.gray_next\n        self.prev_mask = mask\n\n    curr_pts, prev_pts = None, None\n    try:\n        curr_pts, prev_pts = _get_sparse_flow(\n            self.gray_next,\n            self.gray_prvs,\n            self.prev_pts,\n            self.max_points,\n            self.min_distance,\n            self.block_size,\n            self.prev_mask,\n            quality_level=self.quality_level,\n        )\n        if self.draw_flow:\n            for (curr, prev) in zip(curr_pts, prev_pts):\n                c = tuple(curr.astype(int).ravel())\n                p = tuple(prev.astype(int).ravel())\n                cv2.line(frame, c, p, self.flow_color, 2)\n                cv2.circle(frame, c, 3, self.flow_color, -1)\n    except Exception as e:\n        warning(e)\n\n    update_prvs, coord_transformations = True, None\n    try:\n        update_prvs, coord_transformations = self.transformations_getter(\n            curr_pts, prev_pts\n        )\n    except Exception as e:\n        warning(e)\n        del self.transformations_getter\n        self.transformations_getter = copy.deepcopy(\n            self.transformations_getter_copy\n        )\n\n    if update_prvs:\n        self.gray_prvs = self.gray_next\n        self.prev_pts = None\n        self.prev_mask = mask\n    else:\n        self.prev_pts = prev_pts\n\n    return coord_transformations\n
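A minimal sketch (not part of Norfair's API) of building such a mask by blanking out the tracked objects' bounding boxes; it assumes each object's estimate is a 2x2 array of box corners, so adapt the indexing to your own point format:

import numpy as np

def build_motion_mask(frame: np.ndarray, tracked_objects) -> np.ndarray:
    mask = np.ones(frame.shape[:2], dtype=frame.dtype)  # 1 = area usable for sampling corners
    for obj in tracked_objects:
        (x1, y1), (x2, y2) = obj.estimate.astype(int)
        mask[y1:y2, x1:x2] = 0                           # 0 = ignore this moving object
    return mask

# coord_transformations = motion_estimator.update(frame, mask=build_motion_mask(frame, tracked_objects))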
    "},{"location":"reference/distances/","title":"Distances","text":"

    Predefined distances

    "},{"location":"reference/distances/#norfair.distances.Distance","title":"Distance","text":"

    Bases: ABC

    Abstract class representing a distance.

    Subclasses must implement the method get_distances

    Source code in norfair/distances.py
    class Distance(ABC):\n    \"\"\"\n    Abstract class representing a distance.\n\n    Subclasses must implement the method `get_distances`\n    \"\"\"\n\n    @abstractmethod\n    def get_distances(\n        self,\n        objects: Sequence[\"TrackedObject\"],\n        candidates: Optional[Union[List[\"Detection\"], List[\"TrackedObject\"]]],\n    ) -> np.ndarray:\n        \"\"\"\n        Method that calculates the distances between new candidates and objects.\n\n        Parameters\n        ----------\n        objects : Sequence[TrackedObject]\n            Sequence of [TrackedObject][norfair.tracker.TrackedObject] to be compared with potential [Detection][norfair.tracker.Detection] or [TrackedObject][norfair.tracker.TrackedObject]\n            candidates.\n        candidates : Union[List[Detection], List[TrackedObject]], optional\n            List of candidates ([Detection][norfair.tracker.Detection] or [TrackedObject][norfair.tracker.TrackedObject]) to be compared to [TrackedObject][norfair.tracker.TrackedObject].\n\n        Returns\n        -------\n        np.ndarray\n            A matrix containing the distances between objects and candidates.\n        \"\"\"\n
    "},{"location":"reference/distances/#norfair.distances.Distance.get_distances","title":"get_distances(objects, candidates) abstractmethod","text":"

    Method that calculates the distances between new candidates and objects.

    Parameters:

    Name Type Description Default objects Sequence[TrackedObject]

    Sequence of TrackedObject to be compared with potential Detection or TrackedObject candidates.

    required candidates Union[List[Detection], List[TrackedObject]]

    List of candidates (Detection or TrackedObject) to be compared to TrackedObject.

    required

    Returns:

    Type Description ndarray

    A matrix containing the distances between objects and candidates.

    Source code in norfair/distances.py
    @abstractmethod\ndef get_distances(\n    self,\n    objects: Sequence[\"TrackedObject\"],\n    candidates: Optional[Union[List[\"Detection\"], List[\"TrackedObject\"]]],\n) -> np.ndarray:\n    \"\"\"\n    Method that calculates the distances between new candidates and objects.\n\n    Parameters\n    ----------\n    objects : Sequence[TrackedObject]\n        Sequence of [TrackedObject][norfair.tracker.TrackedObject] to be compared with potential [Detection][norfair.tracker.Detection] or [TrackedObject][norfair.tracker.TrackedObject]\n        candidates.\n    candidates : Union[List[Detection], List[TrackedObject]], optional\n        List of candidates ([Detection][norfair.tracker.Detection] or [TrackedObject][norfair.tracker.TrackedObject]) to be compared to [TrackedObject][norfair.tracker.TrackedObject].\n\n    Returns\n    -------\n    np.ndarray\n        A matrix containing the distances between objects and candidates.\n    \"\"\"\n
    "},{"location":"reference/distances/#norfair.distances.ScalarDistance","title":"ScalarDistance","text":"

    Bases: Distance

    ScalarDistance class represents a distance that is calculated pointwise.

    Parameters:

    Name Type Description Default distance_function Union[Callable[[Detection, TrackedObject], float], Callable[[TrackedObject, TrackedObject], float]]

    Distance function used to determine the pointwise distance between new candidates and objects. This function should take 2 input arguments, the first being a Union[Detection, TrackedObject], and the second TrackedObject. It has to return a float with the distance it calculates.

    required Source code in norfair/distances.py
    class ScalarDistance(Distance):\n    \"\"\"\n    ScalarDistance class represents a distance that is calculated pointwise.\n\n    Parameters\n    ----------\n    distance_function : Union[Callable[[\"Detection\", \"TrackedObject\"], float], Callable[[\"TrackedObject\", \"TrackedObject\"], float]]\n        Distance function used to determine the pointwise distance between new candidates and objects.\n        This function should take 2 input arguments, the first being a `Union[Detection, TrackedObject]`,\n        and the second [TrackedObject][norfair.tracker.TrackedObject]. It has to return a `float` with the distance it calculates.\n    \"\"\"\n\n    def __init__(\n        self,\n        distance_function: Union[\n            Callable[[\"Detection\", \"TrackedObject\"], float],\n            Callable[[\"TrackedObject\", \"TrackedObject\"], float],\n        ],\n    ):\n        self.distance_function = distance_function\n\n    def get_distances(\n        self,\n        objects: Sequence[\"TrackedObject\"],\n        candidates: Optional[Union[List[\"Detection\"], List[\"TrackedObject\"]]],\n    ) -> np.ndarray:\n        \"\"\"\n        Method that calculates the distances between new candidates and objects.\n\n        Parameters\n        ----------\n        objects : Sequence[TrackedObject]\n            Sequence of [TrackedObject][norfair.tracker.TrackedObject] to be compared with potential [Detection][norfair.tracker.Detection] or [TrackedObject][norfair.tracker.TrackedObject]\n            candidates.\n        candidates : Union[List[Detection], List[TrackedObject]], optional\n            List of candidates ([Detection][norfair.tracker.Detection] or [TrackedObject][norfair.tracker.TrackedObject]) to be compared to [TrackedObject][norfair.tracker.TrackedObject].\n\n        Returns\n        -------\n        np.ndarray\n            A matrix containing the distances between objects and candidates.\n        \"\"\"\n        distance_matrix = np.full(\n            (len(candidates), len(objects)),\n            fill_value=np.inf,\n            dtype=np.float32,\n        )\n        if not objects or not candidates:\n            return distance_matrix\n        for c, candidate in enumerate(candidates):\n            for o, obj in enumerate(objects):\n                if candidate.label != obj.label:\n                    if (candidate.label is None) or (obj.label is None):\n                        print(\"\\nThere are detections with and without label!\")\n                    continue\n                distance = self.distance_function(candidate, obj)\n                distance_matrix[c, o] = distance\n        return distance_matrix\n
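A minimal sketch of a user-defined scalar distance matching the distance_function signature described above; the function name is made up for illustration, and such a callable can also be passed directly as the Tracker's distance_function:

import numpy as np
from norfair import Tracker

def centroid_distance(detection, tracked_object) -> float:
    # distance between the mean point of the detection and the mean point of the estimate
    return float(
        np.linalg.norm(detection.points.mean(axis=0) - tracked_object.estimate.mean(axis=0))
    )

tracker = Tracker(distance_function=centroid_distance, distance_threshold=30)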
    "},{"location":"reference/distances/#norfair.distances.ScalarDistance.get_distances","title":"get_distances(objects, candidates)","text":"

    Method that calculates the distances between new candidates and objects.

    Parameters:

    Name Type Description Default objects Sequence[TrackedObject]

    Sequence of TrackedObject to be compared with potential Detection or TrackedObject candidates.

    required candidates Union[List[Detection], List[TrackedObject]]

    List of candidates (Detection or TrackedObject) to be compared to TrackedObject.

    required

    Returns:

    Type Description ndarray

    A matrix containing the distances between objects and candidates.

    Source code in norfair/distances.py
    def get_distances(\n    self,\n    objects: Sequence[\"TrackedObject\"],\n    candidates: Optional[Union[List[\"Detection\"], List[\"TrackedObject\"]]],\n) -> np.ndarray:\n    \"\"\"\n    Method that calculates the distances between new candidates and objects.\n\n    Parameters\n    ----------\n    objects : Sequence[TrackedObject]\n        Sequence of [TrackedObject][norfair.tracker.TrackedObject] to be compared with potential [Detection][norfair.tracker.Detection] or [TrackedObject][norfair.tracker.TrackedObject]\n        candidates.\n    candidates : Union[List[Detection], List[TrackedObject]], optional\n        List of candidates ([Detection][norfair.tracker.Detection] or [TrackedObject][norfair.tracker.TrackedObject]) to be compared to [TrackedObject][norfair.tracker.TrackedObject].\n\n    Returns\n    -------\n    np.ndarray\n        A matrix containing the distances between objects and candidates.\n    \"\"\"\n    distance_matrix = np.full(\n        (len(candidates), len(objects)),\n        fill_value=np.inf,\n        dtype=np.float32,\n    )\n    if not objects or not candidates:\n        return distance_matrix\n    for c, candidate in enumerate(candidates):\n        for o, obj in enumerate(objects):\n            if candidate.label != obj.label:\n                if (candidate.label is None) or (obj.label is None):\n                    print(\"\\nThere are detections with and without label!\")\n                continue\n            distance = self.distance_function(candidate, obj)\n            distance_matrix[c, o] = distance\n    return distance_matrix\n
    "},{"location":"reference/distances/#norfair.distances.VectorizedDistance","title":"VectorizedDistance","text":"

    Bases: Distance

    VectorizedDistance class represents a distance that is calculated in a vectorized way. This means that instead of going through every pair and explicitly calculating its distance, VectorizedDistance uses the entire vectors to compare to each other in a single operation.

    Parameters:

    Name Type Description Default distance_function Callable[[ndarray, ndarray], ndarray]

    Distance function used to determine the distances between new candidates and objects. This function should take 2 input arguments, the first being a np.ndarray and the second np.ndarray. It has to return a np.ndarray with the distance matrix it calculates.

    required Source code in norfair/distances.py
    class VectorizedDistance(Distance):\n    \"\"\"\n    VectorizedDistance class represents a distance that is calculated in a vectorized way. This means\n    that instead of going through every pair and explicitly calculating its distance, VectorizedDistance\n    uses the entire vectors to compare to each other in a single operation.\n\n    Parameters\n    ----------\n    distance_function : Callable[[np.ndarray, np.ndarray], np.ndarray]\n        Distance function used to determine the distances between new candidates and objects.\n        This function should take 2 input arguments, the first being a `np.ndarray` and the second\n        `np.ndarray`. It has to return a `np.ndarray` with the distance matrix it calculates.\n    \"\"\"\n\n    def __init__(\n        self,\n        distance_function: Callable[[np.ndarray, np.ndarray], np.ndarray],\n    ):\n        self.distance_function = distance_function\n\n    def get_distances(\n        self,\n        objects: Sequence[\"TrackedObject\"],\n        candidates: Optional[Union[List[\"Detection\"], List[\"TrackedObject\"]]],\n    ) -> np.ndarray:\n        \"\"\"\n        Method that calculates the distances between new candidates and objects.\n\n        Parameters\n        ----------\n        objects : Sequence[TrackedObject]\n            Sequence of [TrackedObject][norfair.tracker.TrackedObject] to be compared with potential [Detection][norfair.tracker.Detection] or [TrackedObject][norfair.tracker.TrackedObject]\n            candidates.\n        candidates : Union[List[Detection], List[TrackedObject]], optional\n            List of candidates ([Detection][norfair.tracker.Detection] or [TrackedObject][norfair.tracker.TrackedObject]) to be compared to [TrackedObject][norfair.tracker.TrackedObject].\n\n        Returns\n        -------\n        np.ndarray\n            A matrix containing the distances between objects and candidates.\n        \"\"\"\n        distance_matrix = np.full(\n            (len(candidates), len(objects)),\n            fill_value=np.inf,\n            dtype=np.float32,\n        )\n        if not objects or not candidates:\n            return distance_matrix\n\n        object_labels = np.array([o.label for o in objects]).astype(str)\n        candidate_labels = np.array([c.label for c in candidates]).astype(str)\n\n        # iterate over labels that are present both in objects and detections\n        for label in np.intersect1d(\n            np.unique(object_labels), np.unique(candidate_labels)\n        ):\n            # generate masks of the subset of object and detections for this label\n            obj_mask = object_labels == label\n            cand_mask = candidate_labels == label\n\n            stacked_objects = []\n            for o in objects:\n                if str(o.label) == label:\n                    stacked_objects.append(o.estimate.ravel())\n            stacked_objects = np.stack(stacked_objects)\n\n            stacked_candidates = []\n            for c in candidates:\n                if str(c.label) == label:\n                    if \"Detection\" in str(type(c)):\n                        stacked_candidates.append(c.points.ravel())\n                    else:\n                        stacked_candidates.append(c.estimate.ravel())\n            stacked_candidates = np.stack(stacked_candidates)\n\n            # calculate the pairwise distances between objects and candidates with this label\n            # and assign the result to the correct positions inside distance_matrix\n            
distance_matrix[np.ix_(cand_mask, obj_mask)] = self._compute_distance(\n                stacked_candidates, stacked_objects\n            )\n\n        return distance_matrix\n\n    def _compute_distance(\n        self, stacked_candidates: np.ndarray, stacked_objects: np.ndarray\n    ) -> np.ndarray:\n        \"\"\"\n        Method that computes the pairwise distances between new candidates and objects.\n        It is intended to use the entire vectors to compare to each other in a single operation.\n\n        Parameters\n        ----------\n        stacked_candidates : np.ndarray\n            np.ndarray containing a stack of candidates to be compared with the stacked_objects.\n        stacked_objects : np.ndarray\n            np.ndarray containing a stack of objects to be compared with the stacked_objects.\n\n        Returns\n        -------\n        np.ndarray\n            A matrix containing the distances between objects and candidates.\n        \"\"\"\n        return self.distance_function(stacked_candidates, stacked_objects)\n
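A minimal sketch of a distance function with the vectorized signature described above; each input row is one flattened candidate or object, so the function must return a matrix of shape (num_candidates, num_objects) in a single broadcasted operation (the function name is illustrative):

import numpy as np
from norfair.distances import VectorizedDistance

def mean_manhattan(stacked_candidates: np.ndarray, stacked_objects: np.ndarray) -> np.ndarray:
    # pairwise mean absolute difference between every candidate row and every object row
    return np.abs(stacked_candidates[:, None, :] - stacked_objects[None, :, :]).mean(axis=2)

distance = VectorizedDistance(mean_manhattan)
# distance_matrix = distance.get_distances(tracked_objects, detections)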
    "},{"location":"reference/distances/#norfair.distances.VectorizedDistance.get_distances","title":"get_distances(objects, candidates)","text":"

    Method that calculates the distances between new candidates and objects.

    Parameters:

    Name Type Description Default objects Sequence[TrackedObject]

    Sequence of TrackedObject to be compared with potential Detection or TrackedObject candidates.

    required candidates Union[List[Detection], List[TrackedObject]]

    List of candidates (Detection or TrackedObject) to be compared to TrackedObject.

    required

    Returns:

    Type Description ndarray

    A matrix containing the distances between objects and candidates.

    Source code in norfair/distances.py
    def get_distances(\n    self,\n    objects: Sequence[\"TrackedObject\"],\n    candidates: Optional[Union[List[\"Detection\"], List[\"TrackedObject\"]]],\n) -> np.ndarray:\n    \"\"\"\n    Method that calculates the distances between new candidates and objects.\n\n    Parameters\n    ----------\n    objects : Sequence[TrackedObject]\n        Sequence of [TrackedObject][norfair.tracker.TrackedObject] to be compared with potential [Detection][norfair.tracker.Detection] or [TrackedObject][norfair.tracker.TrackedObject]\n        candidates.\n    candidates : Union[List[Detection], List[TrackedObject]], optional\n        List of candidates ([Detection][norfair.tracker.Detection] or [TrackedObject][norfair.tracker.TrackedObject]) to be compared to [TrackedObject][norfair.tracker.TrackedObject].\n\n    Returns\n    -------\n    np.ndarray\n        A matrix containing the distances between objects and candidates.\n    \"\"\"\n    distance_matrix = np.full(\n        (len(candidates), len(objects)),\n        fill_value=np.inf,\n        dtype=np.float32,\n    )\n    if not objects or not candidates:\n        return distance_matrix\n\n    object_labels = np.array([o.label for o in objects]).astype(str)\n    candidate_labels = np.array([c.label for c in candidates]).astype(str)\n\n    # iterate over labels that are present both in objects and detections\n    for label in np.intersect1d(\n        np.unique(object_labels), np.unique(candidate_labels)\n    ):\n        # generate masks of the subset of object and detections for this label\n        obj_mask = object_labels == label\n        cand_mask = candidate_labels == label\n\n        stacked_objects = []\n        for o in objects:\n            if str(o.label) == label:\n                stacked_objects.append(o.estimate.ravel())\n        stacked_objects = np.stack(stacked_objects)\n\n        stacked_candidates = []\n        for c in candidates:\n            if str(c.label) == label:\n                if \"Detection\" in str(type(c)):\n                    stacked_candidates.append(c.points.ravel())\n                else:\n                    stacked_candidates.append(c.estimate.ravel())\n        stacked_candidates = np.stack(stacked_candidates)\n\n        # calculate the pairwise distances between objects and candidates with this label\n        # and assign the result to the correct positions inside distance_matrix\n        distance_matrix[np.ix_(cand_mask, obj_mask)] = self._compute_distance(\n            stacked_candidates, stacked_objects\n        )\n\n    return distance_matrix\n
    "},{"location":"reference/distances/#norfair.distances.ScipyDistance","title":"ScipyDistance","text":"

    Bases: VectorizedDistance

    ScipyDistance class extends VectorizedDistance for the use of Scipy's vectorized distances.

    This class uses scipy.spatial.distance.cdist to calculate distances between two np.ndarray.

    Parameters:

    Name Type Description Default metric str

    Defines the specific Scipy metric to use to calculate the pairwise distances between new candidates and objects.

    'euclidean'

    Other keyword arguments are passed through to cdist.

    See Also

    scipy.spatial.distance.cdist
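
    Example (an editor's sketch, not from the library docstring): any metric name accepted by scipy.spatial.distance.cdist should work, and extra keyword arguments are passed through to cdist:

    >>> from norfair.distances import ScipyDistance\n>>> distance = ScipyDistance(metric=\"cosine\")\n>>> distance = ScipyDistance(metric=\"minkowski\", p=1)  # forwards p to cdist\n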

    Source code in norfair/distances.py
    class ScipyDistance(VectorizedDistance):\n    \"\"\"\n    ScipyDistance class extends VectorizedDistance for the use of Scipy's vectorized distances.\n\n    This class uses `scipy.spatial.distance.cdist` to calculate distances between two `np.ndarray`.\n\n    Parameters\n    ----------\n    metric : str, optional\n        Defines the specific Scipy metric to use to calculate the pairwise distances between\n        new candidates and objects.\n\n    Other kwargs are passed through to cdist\n\n    See Also\n    --------\n    [`scipy.spatial.distance.cdist`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html)\n    \"\"\"\n\n    def __init__(self, metric: str = \"euclidean\", **kwargs):\n        self.metric = metric\n        super().__init__(distance_function=partial(cdist, metric=self.metric, **kwargs))\n
    "},{"location":"reference/distances/#norfair.distances.frobenius","title":"frobenius(detection, tracked_object)","text":"

    Frobenius norm of the difference between the points in detection and the estimates in tracked_object.

    The Frobenius distance and norm are given by:

    \\[ d_f(a, b) = ||a - b||_F \\] \\[ ||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2} \\]

    Parameters:

    Name Type Description Default detection Detection

    A detection.

    required tracked_object TrackedObject

    A tracked object.

    required

    Returns:

    Type Description float

    The distance.

    See Also

    np.linalg.norm
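
    Example (an editor's sketch, not from the library docstring): scalar distances like this one are usually selected by name when building a Tracker; the threshold below is only a placeholder:

    >>> from norfair import Tracker\n>>> tracker = Tracker(distance_function=\"frobenius\", distance_threshold=100)\n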

    Source code in norfair/distances.py
    def frobenius(detection: \"Detection\", tracked_object: \"TrackedObject\") -> float:\n    \"\"\"\n    Frobernius norm on the difference of the points in detection and the estimates in tracked_object.\n\n    The Frobenius distance and norm are given by:\n\n    $$\n    d_f(a, b) = ||a - b||_F\n    $$\n\n    $$\n    ||A||_F = [\\\\sum_{i,j} abs(a_{i,j})^2]^{1/2}\n    $$\n\n    Parameters\n    ----------\n    detection : Detection\n        A detection.\n    tracked_object : TrackedObject\n        A tracked object.\n\n    Returns\n    -------\n    float\n        The distance.\n\n    See Also\n    --------\n    [`np.linalg.norm`](https://numpy.org/doc/stable/reference/generated/numpy.linalg.norm.html)\n    \"\"\"\n    return np.linalg.norm(detection.points - tracked_object.estimate)\n
    "},{"location":"reference/distances/#norfair.distances.mean_euclidean","title":"mean_euclidean(detection, tracked_object)","text":"

    Average euclidean distance between the points in detection and the estimates in tracked_object.

    \\[ d(a, b) = \\frac{\\sum_{i=0}^N ||a_i - b_i||_2}{N} \\]

    Parameters:

    Name Type Description Default detection Detection

    A detection.

    required tracked_object TrackedObject

    A tracked object.

    required

    Returns:

    Type Description float

    The distance.

    See Also

    np.linalg.norm
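
    Example (an editor's sketch, not from the library docstring): the value equals the NumPy expression used in the source, here evaluated on two hand-made point arrays of matching shape:

    >>> import numpy as np\n>>> a = np.array([[0.0, 0.0], [10.0, 0.0]])\n>>> b = np.array([[3.0, 4.0], [10.0, 0.0]])\n>>> np.linalg.norm(a - b, axis=1).mean()  # mean of 5.0 and 0.0 -> 2.5\n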

    Source code in norfair/distances.py
    def mean_euclidean(detection: \"Detection\", tracked_object: \"TrackedObject\") -> float:\n    \"\"\"\n    Average euclidean distance between the points in detection and estimates in tracked_object.\n\n    $$\n    d(a, b) = \\\\frac{\\\\sum_{i=0}^N ||a_i - b_i||_2}{N}\n    $$\n\n    Parameters\n    ----------\n    detection : Detection\n        A detection.\n    tracked_object : TrackedObject\n        A tracked object\n\n    Returns\n    -------\n    float\n        The distance.\n\n    See Also\n    --------\n    [`np.linalg.norm`](https://numpy.org/doc/stable/reference/generated/numpy.linalg.norm.html)\n    \"\"\"\n    return np.linalg.norm(detection.points - tracked_object.estimate, axis=1).mean()\n
    "},{"location":"reference/distances/#norfair.distances.mean_manhattan","title":"mean_manhattan(detection, tracked_object)","text":"

    Average manhattan distance between the points in detection and the estimates in tracked_object.

    Given by:

    \\[ d(a, b) = \\frac{\\sum_{i=0}^N ||a_i - b_i||_1}{N} \\]

    Where \\(||a||_1\\) is the manhattan norm.

    Parameters:

    Name Type Description Default detection Detection

    A detection.

    required tracked_object TrackedObject

    A tracked object.

    required

    Returns:

    Type Description float

    The distance.

    See Also

    np.linalg.norm
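
    Example (an editor's sketch, not from the library docstring): the same point arrays as above, but averaging L1 norms instead of L2 norms:

    >>> import numpy as np\n>>> a = np.array([[0.0, 0.0], [10.0, 0.0]])\n>>> b = np.array([[3.0, 4.0], [10.0, 0.0]])\n>>> np.linalg.norm(a - b, ord=1, axis=1).mean()  # mean of 7.0 and 0.0 -> 3.5\n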

    Source code in norfair/distances.py
    def mean_manhattan(detection: \"Detection\", tracked_object: \"TrackedObject\") -> float:\n    \"\"\"\n    Average manhattan distance between the points in detection and the estimates in tracked_object\n\n    Given by:\n\n    $$\n    d(a, b) = \\\\frac{\\\\sum_{i=0}^N ||a_i - b_i||_1}{N}\n    $$\n\n    Where $||a||_1$ is the manhattan norm.\n\n    Parameters\n    ----------\n    detection : Detection\n        A detection.\n    tracked_object : TrackedObject\n        a tracked object.\n\n    Returns\n    -------\n    float\n        The distance.\n\n    See Also\n    --------\n    [`np.linalg.norm`](https://numpy.org/doc/stable/reference/generated/numpy.linalg.norm.html)\n    \"\"\"\n    return np.linalg.norm(\n        detection.points - tracked_object.estimate, ord=1, axis=1\n    ).mean()\n
    "},{"location":"reference/distances/#norfair.distances.iou","title":"iou(candidates, objects)","text":"

    Calculate IoU between two sets of bounding boxes. Both sets of boxes are expected to be in [x_min, y_min, x_max, y_max] format.

    Normal IoU is 1 when the boxes are the same and 0 when they don't overlap; to transform that into a distance that makes sense, we return 1 - iou.

    Parameters:

    Name Type Description Default candidates ndarray

    (N, 4) numpy.ndarray containing the candidates' bounding boxes.

    required objects ndarray

    (K, 4) numpy.ndarray containing the objects' bounding boxes.

    required

    Returns:

    Type Description ndarray

    (N, K) numpy.ndarray of 1 - iou between candidates and objects.
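
    Example (an editor's sketch, not from the library docstring): calling the function directly on two small sets of boxes in [x_min, y_min, x_max, y_max] format; the numbers are illustrative:

    >>> import numpy as np\n>>> from norfair.distances import iou\n>>> candidates = np.array([[0.0, 0.0, 10.0, 10.0]])\n>>> objects = np.array([[0.0, 0.0, 10.0, 10.0], [5.0, 5.0, 15.0, 15.0]])\n>>> iou(candidates, objects)  # roughly [[0.0, 0.857]], i.e. 1 - IoU\n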

    Source code in norfair/distances.py
    def iou(candidates: np.ndarray, objects: np.ndarray) -> np.ndarray:\n    \"\"\"\n    Calculate IoU between two sets of bounding boxes. Both sets of boxes are expected\n    to be in `[x_min, y_min, x_max, y_max]` format.\n\n    Normal IoU is 1 when the boxes are the same and 0 when they don't overlap,\n    to transform that into a distance that makes sense we return `1 - iou`.\n\n    Parameters\n    ----------\n    candidates : numpy.ndarray\n        (N, 4) numpy.ndarray containing candidates bounding boxes.\n    objects : numpy.ndarray\n        (K, 4) numpy.ndarray containing objects bounding boxes.\n\n    Returns\n    -------\n    numpy.ndarray\n        (N, K) numpy.ndarray of `1 - iou` between candidates and objects.\n    \"\"\"\n    _validate_bboxes(candidates)\n\n    area_candidates = _boxes_area(candidates.T)\n    area_objects = _boxes_area(objects.T)\n\n    top_left = np.maximum(candidates[:, None, :2], objects[:, :2])\n    bottom_right = np.minimum(candidates[:, None, 2:], objects[:, 2:])\n\n    area_intersection = np.prod(\n        np.clip(bottom_right - top_left, a_min=0, a_max=None), 2\n    )\n    return 1 - area_intersection / (\n        area_candidates[:, None] + area_objects - area_intersection\n    )\n
    "},{"location":"reference/distances/#norfair.distances.get_distance_by_name","title":"get_distance_by_name(name)","text":"

    Select a distance by name.

    Parameters:

    Name Type Description Default name str

    A string defining the metric to get.

    required

    Returns:

    Type Description Distance

    The distance object.
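
    Example (an editor's sketch, not from the library docstring): the name can be any of the scalar, Scipy, or vectorized distances documented on this page:

    >>> from norfair.distances import get_distance_by_name\n>>> distance = get_distance_by_name(\"euclidean\")  # should return a ScipyDistance\n>>> distance = get_distance_by_name(\"iou\")        # should return a VectorizedDistance\n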

    Source code in norfair/distances.py
    def get_distance_by_name(name: str) -> Distance:\n    \"\"\"\n    Select a distance by name.\n\n    Parameters\n    ----------\n    name : str\n        A string defining the metric to get.\n\n    Returns\n    -------\n    Distance\n        The distance object.\n    \"\"\"\n\n    if name in _SCALAR_DISTANCE_FUNCTIONS:\n        warning(\n            \"You are using a scalar distance function. If you want to speed up the\"\n            \" tracking process please consider using a vectorized distance function\"\n            f\" such as {AVAILABLE_VECTORIZED_DISTANCES}.\"\n        )\n        distance = _SCALAR_DISTANCE_FUNCTIONS[name]\n        distance_function = ScalarDistance(distance)\n    elif name in _SCIPY_DISTANCE_FUNCTIONS:\n        distance_function = ScipyDistance(name)\n    elif name in _VECTORIZED_DISTANCE_FUNCTIONS:\n        if name == \"iou_opt\":\n            warning(\"iou_opt is deprecated, use iou instead\")\n        distance = _VECTORIZED_DISTANCE_FUNCTIONS[name]\n        distance_function = VectorizedDistance(distance)\n    else:\n        raise ValueError(\n            f\"Invalid distance '{name}', expecting one of\"\n            f\" {list(_SCALAR_DISTANCE_FUNCTIONS.keys()) + AVAILABLE_VECTORIZED_DISTANCES}\"\n        )\n\n    return distance_function\n
    "},{"location":"reference/distances/#norfair.distances.create_keypoints_voting_distance","title":"create_keypoints_voting_distance(keypoint_distance_threshold, detection_threshold)","text":"

    Construct a keypoint voting distance function configured with the thresholds.

    Count how many points in a detection match with a tracked_object. A match is considered when the distance between the points is < keypoint_distance_threshold and the score of the last_detection of the tracked_object is > detection_threshold. Notice that if multiple points are tracked, the ith point in the detection can only match the ith point in the tracked object.

    The distance is 1 if no point matches and approaches 0 as more points are matched.

    Parameters:

    Name Type Description Default keypoint_distance_threshold float

    Points closer than this threshold are considered a match.

    required detection_threshold float

    Detections and objects with score lower than this threshold are ignored.

    required

    Returns:

    Type Description Callable

    The distance function that must be passed to the Tracker.
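
    Example (an editor's sketch, not from the library docstring): build the distance and hand it to a Tracker; the threshold values below are placeholders:

    >>> from norfair import Tracker\n>>> from norfair.distances import create_keypoints_voting_distance\n>>> distance = create_keypoints_voting_distance(\n...     keypoint_distance_threshold=15, detection_threshold=0.2\n... )\n>>> tracker = Tracker(distance_function=distance, distance_threshold=0.5)\n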

    Source code in norfair/distances.py
    def create_keypoints_voting_distance(\n    keypoint_distance_threshold: float, detection_threshold: float\n) -> Callable[[\"Detection\", \"TrackedObject\"], float]:\n    \"\"\"\n    Construct a keypoint voting distance function configured with the thresholds.\n\n    Count how many points in a detection match the with a tracked_object.\n    A match is considered when distance between the points is < `keypoint_distance_threshold`\n    and the score of the last_detection of the tracked_object is > `detection_threshold`.\n    Notice the if multiple points are tracked, the ith point in detection can only match the ith\n    point in the tracked object.\n\n    Distance is 1 if no point matches and approximates 0 as more points are matched.\n\n    Parameters\n    ----------\n    keypoint_distance_threshold: float\n        Points closer than this threshold are considered a match.\n    detection_threshold: float\n        Detections and objects with score lower than this threshold are ignored.\n\n    Returns\n    -------\n    Callable\n        The distance funtion that must be passed to the Tracker.\n    \"\"\"\n\n    def keypoints_voting_distance(\n        detection: \"Detection\", tracked_object: \"TrackedObject\"\n    ) -> float:\n        distances = np.linalg.norm(detection.points - tracked_object.estimate, axis=1)\n        match_num = np.count_nonzero(\n            (distances < keypoint_distance_threshold)\n            * (detection.scores > detection_threshold)\n            * (tracked_object.last_detection.scores > detection_threshold)\n        )\n        return 1 / (1 + match_num)\n\n    return keypoints_voting_distance\n
    "},{"location":"reference/distances/#norfair.distances.create_normalized_mean_euclidean_distance","title":"create_normalized_mean_euclidean_distance(height, width)","text":"

    Construct a normalized mean euclidean distance function configured with the max height and width.

    The resulting distance is bounded to [0, 1], where 1 indicates opposite corners of the image.

    Parameters:

    Name Type Description Default height int

    Height of the image.

    required width int

    Width of the image.

    required

    Returns:

    Type Description Callable

    The distance function that must be passed to the Tracker.
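
    Example (an editor's sketch, not from the library docstring): the image size is used to normalize the distance, so the same threshold works across resolutions; the values below are placeholders:

    >>> from norfair import Tracker\n>>> from norfair.distances import create_normalized_mean_euclidean_distance\n>>> distance = create_normalized_mean_euclidean_distance(height=720, width=1280)\n>>> tracker = Tracker(distance_function=distance, distance_threshold=0.05)\n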

    Source code in norfair/distances.py
    def create_normalized_mean_euclidean_distance(\n    height: int, width: int\n) -> Callable[[\"Detection\", \"TrackedObject\"], float]:\n    \"\"\"\n    Construct a normalized mean euclidean distance function configured with the max height and width.\n\n    The result distance is bound to [0, 1] where 1 indicates oposite corners of the image.\n\n    Parameters\n    ----------\n    height: int\n        Height of the image.\n    width: int\n        Width of the image.\n\n    Returns\n    -------\n    Callable\n        The distance funtion that must be passed to the Tracker.\n    \"\"\"\n\n    def normalized__mean_euclidean_distance(\n        detection: \"Detection\", tracked_object: \"TrackedObject\"\n    ) -> float:\n        \"\"\"Normalized mean euclidean distance\"\"\"\n        # calculate distances and normalized it by width and height\n        difference = (detection.points - tracked_object.estimate).astype(float)\n        difference[:, 0] /= width\n        difference[:, 1] /= height\n\n        # calculate eucledean distance and average\n        return np.linalg.norm(difference, axis=1).mean()\n\n    return normalized__mean_euclidean_distance\n
    "},{"location":"reference/drawing/","title":"Drawing","text":"

    Collection of drawing functions

    "},{"location":"reference/drawing/#norfair.drawing.draw_points","title":"draw_points","text":""},{"location":"reference/drawing/#norfair.drawing.draw_points.draw_points","title":"draw_points(frame, drawables=None, radius=None, thickness=None, color='by_id', color_by_label=None, draw_labels=True, text_size=None, draw_ids=True, draw_points=True, text_thickness=None, text_color=None, hide_dead_points=True, detections=None, label_size=None, draw_scores=False)","text":"

    Draw the points included in a list of Detections or TrackedObjects.

    Parameters:

    Name Type Description Default frame ndarray

    The OpenCV frame to draw on. Modified in place.

    required drawables Union[Sequence[Detection], Sequence[TrackedObject]]

    List of objects to draw, Detections and TrackedObjects are accepted.

    None radius Optional[int]

    Radius of the circles representing each point. By default a sensible value is picked considering the frame size.

    None thickness Optional[int]

    Thickness or width of the line.

    None color ColorLike

    This parameter can take:

    1. A color as a tuple of ints describing the BGR (0, 0, 255)
    2. A 6-digit hex string \"#FF0000\"
    3. One of the defined color names \"red\"
    4. A string defining the strategy to choose colors from the Palette:

      1. based on the id of the objects \"by_id\"
      2. based on the label of the objects \"by_label\"
      3. random choice \"random\"

    If using by_id or by_label strategy but your objects don't have that field defined (Detections never have ids), the selected color will be the same for all objects (Palette's default Color).

    'by_id' color_by_label bool

    Deprecated. Set color=\"by_label\".

    None draw_labels bool

    If set to True, the label is added to a title that is drawn on top of the box. If an object doesn't have a label this parameter is ignored.

    True draw_scores bool

    If set to True, the score is added to a title that is drawn on top of the box. If an object doesn't have a label this parameter is ignored.

    False text_size Optional[int]

    Size of the title, the value is used as a multiplier of the base size of the font. By default the size is scaled automatically based on the frame size.

    None draw_ids bool

    If set to True, the id is added to a title that is drawn on top of the box. If an object doesn't have an id this parameter is ignored.

    True draw_points bool

    Set to False to hide the points and just draw the text.

    True text_thickness Optional[int]

    Thickness of the font. By default it's scaled with the text_size.

    None text_color Optional[ColorLike]

    Color of the text. By default the same color as the box is used.

    None hide_dead_points bool

    Set this param to False to always draw all points, even the ones considered \"dead\". A point is \"dead\" when the corresponding value of TrackedObject.live_points is set to False. If all the points of an object are dead, the object is not drawn. All points of a detection are considered to be alive.

    True detections Sequence[Detection]

    Deprecated. Use drawables.

    None label_size Optional[int]

    Deprecated. Use text_size.

    None

    Returns:

    Type Description ndarray

    The resulting frame.
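
    Example (an editor's sketch, not from the library docstring): drawing the tracked objects returned by a Tracker on the current frame; tracker, detections, and frame are assumed to exist:

    >>> from norfair import draw_points\n>>> tracked_objects = tracker.update(detections)\n>>> frame = draw_points(frame, drawables=tracked_objects, draw_labels=True, draw_ids=True)\n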

    Source code in norfair/drawing/draw_points.py
    def draw_points(\n    frame: np.ndarray,\n    drawables: Union[Sequence[Detection], Sequence[TrackedObject]] = None,\n    radius: Optional[int] = None,\n    thickness: Optional[int] = None,\n    color: ColorLike = \"by_id\",\n    color_by_label: bool = None,  # deprecated\n    draw_labels: bool = True,\n    text_size: Optional[int] = None,\n    draw_ids: bool = True,\n    draw_points: bool = True,  # pylint: disable=redefined-outer-name\n    text_thickness: Optional[int] = None,\n    text_color: Optional[ColorLike] = None,\n    hide_dead_points: bool = True,\n    detections: Sequence[\"Detection\"] = None,  # deprecated\n    label_size: Optional[int] = None,  # deprecated\n    draw_scores: bool = False,\n) -> np.ndarray:\n    \"\"\"\n    Draw the points included in a list of Detections or TrackedObjects.\n\n    Parameters\n    ----------\n    frame : np.ndarray\n        The OpenCV frame to draw on. Modified in place.\n    drawables : Union[Sequence[Detection], Sequence[TrackedObject]], optional\n        List of objects to draw, Detections and TrackedObjects are accepted.\n    radius : Optional[int], optional\n        Radius of the circles representing each point.\n        By default a sensible value is picked considering the frame size.\n    thickness : Optional[int], optional\n        Thickness or width of the line.\n    color : ColorLike, optional\n        This parameter can take:\n\n        1. A color as a tuple of ints describing the BGR `(0, 0, 255)`\n        2. A 6-digit hex string `\"#FF0000\"`\n        3. One of the defined color names `\"red\"`\n        4. A string defining the strategy to choose colors from the Palette:\n\n            1. based on the id of the objects `\"by_id\"`\n            2. based on the label of the objects `\"by_label\"`\n            3. random choice `\"random\"`\n\n        If using `by_id` or `by_label` strategy but your objects don't\n        have that field defined (Detections never have ids) the\n        selected color will be the same for all objects (Palette's default Color).\n    color_by_label : bool, optional\n        **Deprecated**. set `color=\"by_label\"`.\n    draw_labels : bool, optional\n        If set to True, the label is added to a title that is drawn on top of the box.\n        If an object doesn't have a label this parameter is ignored.\n    draw_scores : bool, optional\n        If set to True, the score is added to a title that is drawn on top of the box.\n        If an object doesn't have a label this parameter is ignored.\n    text_size : Optional[int], optional\n        Size of the title, the value is used as a multiplier of the base size of the font.\n        By default the size is scaled automatically based on the frame size.\n    draw_ids : bool, optional\n        If set to True, the id is added to a title that is drawn on top of the box.\n        If an object doesn't have an id this parameter is ignored.\n    draw_points : bool, optional\n        Set to False to hide the points and just draw the text.\n    text_thickness : Optional[int], optional\n        Thickness of the font. By default it's scaled with the `text_size`.\n    text_color : Optional[ColorLike], optional\n        Color of the text. By default the same color as the box is used.\n    hide_dead_points : bool, optional\n        Set this param to False to always draw all points, even the ones considered \"dead\".\n        A point is \"dead\" when the corresponding value of `TrackedObject.live_points`\n        is set to False. 
If all objects are dead the object is not drawn.\n        All points of a detection are considered to be alive.\n    detections : Sequence[Detection], optional\n        **Deprecated**. use drawables.\n    label_size : Optional[int], optional\n        **Deprecated**. text_size.\n\n    Returns\n    -------\n    np.ndarray\n        The resulting frame.\n    \"\"\"\n    #\n    # handle deprecated parameters\n    #\n    if color_by_label is not None:\n        warn_once(\n            'Parameter \"color_by_label\" on function draw_points is deprecated, set `color=\"by_label\"` instead'\n        )\n        color = \"by_label\"\n    if detections is not None:\n        warn_once(\n            \"Parameter 'detections' on function draw_points is deprecated, use 'drawables' instead\"\n        )\n        drawables = detections\n    if label_size is not None:\n        warn_once(\n            \"Parameter 'label_size' on function draw_points is deprecated, use 'text_size' instead\"\n        )\n        text_size = label_size\n    # end\n\n    if drawables is None:\n        return\n\n    if text_color is not None:\n        text_color = parse_color(text_color)\n\n    if color is None:\n        color = \"by_id\"\n    if thickness is None:\n        thickness = -1\n    if radius is None:\n        radius = int(round(max(max(frame.shape) * 0.002, 1)))\n\n    for o in drawables:\n        if not isinstance(o, Drawable):\n            d = Drawable(o)\n        else:\n            d = o\n\n        if hide_dead_points and not d.live_points.any():\n            continue\n\n        if color == \"by_id\":\n            obj_color = Palette.choose_color(d.id)\n        elif color == \"by_label\":\n            obj_color = Palette.choose_color(d.label)\n        elif color == \"random\":\n            obj_color = Palette.choose_color(np.random.rand())\n        else:\n            obj_color = parse_color(color)\n\n        if text_color is None:\n            obj_text_color = obj_color\n        else:\n            obj_text_color = text_color\n\n        if draw_points:\n            for point, live in zip(d.points, d.live_points):\n                if live or not hide_dead_points:\n                    Drawer.circle(\n                        frame,\n                        tuple(point.astype(int)),\n                        radius=radius,\n                        color=obj_color,\n                        thickness=thickness,\n                    )\n\n        if draw_labels or draw_ids or draw_scores:\n            position = d.points[d.live_points].mean(axis=0)\n            position -= radius\n            text = _build_text(\n                d, draw_labels=draw_labels, draw_ids=draw_ids, draw_scores=draw_scores\n            )\n\n            Drawer.text(\n                frame,\n                text,\n                tuple(position.astype(int)),\n                size=text_size,\n                color=obj_text_color,\n                thickness=text_thickness,\n            )\n\n    return frame\n
    "},{"location":"reference/drawing/#norfair.drawing.draw_points.draw_tracked_objects","title":"draw_tracked_objects(frame, objects, radius=None, color=None, id_size=None, id_thickness=None, draw_points=True, color_by_label=False, draw_labels=False, label_size=None)","text":"

    Deprecated. Use draw_points.

    Source code in norfair/drawing/draw_points.py
    def draw_tracked_objects(\n    frame: np.ndarray,\n    objects: Sequence[\"TrackedObject\"],\n    radius: Optional[int] = None,\n    color: Optional[ColorLike] = None,\n    id_size: Optional[float] = None,\n    id_thickness: Optional[int] = None,\n    draw_points: bool = True,  # pylint: disable=redefined-outer-name\n    color_by_label: bool = False,\n    draw_labels: bool = False,\n    label_size: Optional[int] = None,\n):\n    \"\"\"\n    **Deprecated** use [`draw_points`][norfair.drawing.draw_points.draw_points]\n    \"\"\"\n    warn_once(\"draw_tracked_objects is deprecated, use draw_points instead\")\n\n    frame_scale = frame.shape[0] / 100\n    if radius is None:\n        radius = int(frame_scale * 0.5)\n    if id_size is None:\n        id_size = frame_scale / 10\n    if id_thickness is None:\n        id_thickness = int(frame_scale / 5)\n    if label_size is None:\n        label_size = int(max(frame_scale / 100, 1))\n\n    _draw_points_alias(\n        frame=frame,\n        drawables=objects,\n        color=\"by_label\" if color_by_label else color,\n        radius=radius,\n        thickness=None,\n        draw_labels=draw_labels,\n        draw_ids=id_size is not None and id_size > 0,\n        draw_points=draw_points,\n        text_size=label_size or id_size,\n        text_thickness=id_thickness,\n        text_color=None,\n        hide_dead_points=True,\n    )\n
    "},{"location":"reference/drawing/#norfair.drawing.draw_boxes","title":"draw_boxes","text":""},{"location":"reference/drawing/#norfair.drawing.draw_boxes.draw_boxes","title":"draw_boxes(frame, drawables=None, color='by_id', thickness=None, random_color=None, color_by_label=None, draw_labels=False, text_size=None, draw_ids=True, text_color=None, text_thickness=None, draw_box=True, detections=None, line_color=None, line_width=None, label_size=None, draw_scores=False)","text":"

    Draw bounding boxes corresponding to Detections or TrackedObjects.

    Parameters:

    Name Type Description Default frame ndarray

    The OpenCV frame to draw on. Modified in place.

    required drawables Union[Sequence[Detection], Sequence[TrackedObject]]

    List of objects to draw, Detections and TrackedObjects are accepted. These objects are assumed to contain 2 bi-dimensional points defining the bounding box as [[x0, y0], [x1, y1]].

    None color ColorLike

    This parameter can take:

    1. A color as a tuple of ints describing the BGR (0, 0, 255)
    2. A 6-digit hex string \"#FF0000\"
    3. One of the defined color names \"red\"
    4. A string defining the strategy to choose colors from the Palette:

      1. based on the id of the objects \"by_id\"
      2. based on the label of the objects \"by_label\"
      3. random choice \"random\"

    If using by_id or by_label strategy but your objects don't have that field defined (Detections never have ids), the selected color will be the same for all objects (Palette's default Color).

    'by_id' thickness Optional[int]

    Thickness or width of the line.

    None random_color bool

    Deprecated. Set color=\"random\".

    None color_by_label bool

    Deprecated. Set color=\"by_label\".

    None draw_labels bool

    If set to True, the label is added to a title that is drawn on top of the box. If an object doesn't have a label this parameter is ignored.

    False draw_scores bool

    If set to True, the score is added to a title that is drawn on top of the box. If an object doesn't have a label this parameter is ignored.

    False text_size Optional[float]

    Size of the title, the value is used as a multiplier of the base size of the font. By default the size is scaled automatically based on the frame size.

    None draw_ids bool

    If set to True, the id is added to a title that is drawn on top of the box. If an object doesn't have an id this parameter is ignored.

    True text_color Optional[ColorLike]

    Color of the text. By default the same color as the box is used.

    None text_thickness Optional[int]

    Thickness of the font. By default it's scaled with the text_size.

    None draw_box bool

    Set to False to hide the box and just draw the text.

    True detections Sequence[Detection]

    Deprecated. Use drawables.

    None line_color Optional[ColorLike]

    Deprecated. Use color.

    None line_width Optional[int]

    Deprecated. Use thickness.

    None label_size Optional[int]

    Deprecated. Use text_size.

    None

    Returns:

    Type Description ndarray

    The resulting frame.
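
    Example (an editor's sketch, not from the library docstring): drawing bounding boxes for a list of detections, coloring them by label; detections and frame are assumed to exist:

    >>> from norfair import draw_boxes\n>>> frame = draw_boxes(frame, drawables=detections, color=\"by_label\", draw_labels=True)\n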

    Source code in norfair/drawing/draw_boxes.py
    def draw_boxes(\n    frame: np.ndarray,\n    drawables: Union[Sequence[Detection], Sequence[TrackedObject]] = None,\n    color: ColorLike = \"by_id\",\n    thickness: Optional[int] = None,\n    random_color: bool = None,  # Deprecated\n    color_by_label: bool = None,  # Deprecated\n    draw_labels: bool = False,\n    text_size: Optional[float] = None,\n    draw_ids: bool = True,\n    text_color: Optional[ColorLike] = None,\n    text_thickness: Optional[int] = None,\n    draw_box: bool = True,\n    detections: Sequence[\"Detection\"] = None,  # Deprecated\n    line_color: Optional[ColorLike] = None,  # Deprecated\n    line_width: Optional[int] = None,  # Deprecated\n    label_size: Optional[int] = None,  # Deprecated\u00b4\n    draw_scores: bool = False,\n) -> np.ndarray:\n    \"\"\"\n    Draw bounding boxes corresponding to Detections or TrackedObjects.\n\n    Parameters\n    ----------\n    frame : np.ndarray\n        The OpenCV frame to draw on. Modified in place.\n    drawables : Union[Sequence[Detection], Sequence[TrackedObject]], optional\n        List of objects to draw, Detections and TrackedObjects are accepted.\n        This objects are assumed to contain 2 bi-dimensional points defining\n        the bounding box as `[[x0, y0], [x1, y1]]`.\n    color : ColorLike, optional\n        This parameter can take:\n\n        1. A color as a tuple of ints describing the BGR `(0, 0, 255)`\n        2. A 6-digit hex string `\"#FF0000\"`\n        3. One of the defined color names `\"red\"`\n        4. A string defining the strategy to choose colors from the Palette:\n\n            1. based on the id of the objects `\"by_id\"`\n            2. based on the label of the objects `\"by_label\"`\n            3. random choice `\"random\"`\n\n        If using `by_id` or `by_label` strategy but your objects don't\n        have that field defined (Detections never have ids) the\n        selected color will be the same for all objects (Palette's default Color).\n    thickness : Optional[int], optional\n        Thickness or width of the line.\n    random_color : bool, optional\n        **Deprecated**. Set color=\"random\".\n    color_by_label : bool, optional\n        **Deprecated**. Set color=\"by_label\".\n    draw_labels : bool, optional\n        If set to True, the label is added to a title that is drawn on top of the box.\n        If an object doesn't have a label this parameter is ignored.\n    draw_scores : bool, optional\n        If set to True, the score is added to a title that is drawn on top of the box.\n        If an object doesn't have a label this parameter is ignored.\n    text_size : Optional[float], optional\n        Size of the title, the value is used as a multiplier of the base size of the font.\n        By default the size is scaled automatically based on the frame size.\n    draw_ids : bool, optional\n        If set to True, the id is added to a title that is drawn on top of the box.\n        If an object doesn't have an id this parameter is ignored.\n    text_color : Optional[ColorLike], optional\n        Color of the text. By default the same color as the box is used.\n    text_thickness : Optional[int], optional\n        Thickness of the font. By default it's scaled with the `text_size`.\n    draw_box : bool, optional\n        Set to False to hide the box and just draw the text.\n    detections : Sequence[Detection], optional\n        **Deprecated**. Use drawables.\n    line_color: Optional[ColorLike], optional\n        **Deprecated**. 
Use color.\n    line_width: Optional[int], optional\n        **Deprecated**. Use thickness.\n    label_size: Optional[int], optional\n        **Deprecated**. Use text_size.\n\n    Returns\n    -------\n    np.ndarray\n        The resulting frame.\n    \"\"\"\n    #\n    # handle deprecated parameters\n    #\n    if random_color is not None:\n        warn_once(\n            'Parameter \"random_color\" is deprecated, set `color=\"random\"` instead'\n        )\n        color = \"random\"\n    if color_by_label is not None:\n        warn_once(\n            'Parameter \"color_by_label\" is deprecated, set `color=\"by_label\"` instead'\n        )\n        color = \"by_label\"\n    if detections is not None:\n        warn_once('Parameter \"detections\" is deprecated, use \"drawables\" instead')\n        drawables = detections\n    if line_color is not None:\n        warn_once('Parameter \"line_color\" is deprecated, use \"color\" instead')\n        color = line_color\n    if line_width is not None:\n        warn_once('Parameter \"line_width\" is deprecated, use \"thickness\" instead')\n        thickness = line_width\n    if label_size is not None:\n        warn_once('Parameter \"label_size\" is deprecated, use \"text_size\" instead')\n        text_size = label_size\n    # end\n\n    if color is None:\n        color = \"by_id\"\n    if thickness is None:\n        thickness = int(max(frame.shape) / 500)\n\n    if drawables is None:\n        return frame\n\n    if text_color is not None:\n        text_color = parse_color(text_color)\n\n    for obj in drawables:\n        if not isinstance(obj, Drawable):\n            d = Drawable(obj)\n        else:\n            d = obj\n\n        if color == \"by_id\":\n            obj_color = Palette.choose_color(d.id)\n        elif color == \"by_label\":\n            obj_color = Palette.choose_color(d.label)\n        elif color == \"random\":\n            obj_color = Palette.choose_color(np.random.rand())\n        else:\n            obj_color = parse_color(color)\n\n        points = d.points.astype(int)\n        if draw_box:\n            Drawer.rectangle(\n                frame,\n                tuple(points),\n                color=obj_color,\n                thickness=thickness,\n            )\n\n        text = _build_text(\n            d, draw_labels=draw_labels, draw_ids=draw_ids, draw_scores=draw_scores\n        )\n        if text:\n            if text_color is None:\n                obj_text_color = obj_color\n            else:\n                obj_text_color = text_color\n            # the anchor will become the bottom-left of the text,\n            # we select-top left of the bbox compensating for the thickness of the box\n            text_anchor = (\n                points[0, 0] - thickness // 2,\n                points[0, 1] - thickness // 2 - 1,\n            )\n            frame = Drawer.text(\n                frame,\n                text,\n                position=text_anchor,\n                size=text_size,\n                color=obj_text_color,\n                thickness=text_thickness,\n            )\n\n    return frame\n
    "},{"location":"reference/drawing/#norfair.drawing.draw_boxes.draw_tracked_boxes","title":"draw_tracked_boxes(frame, objects, border_colors=None, border_width=None, id_size=None, id_thickness=None, draw_box=True, color_by_label=False, draw_labels=False, label_size=None, label_width=None)","text":"

    Deprecated. Use draw_box.

    Source code in norfair/drawing/draw_boxes.py
    def draw_tracked_boxes(\n    frame: np.ndarray,\n    objects: Sequence[\"TrackedObject\"],\n    border_colors: Optional[Tuple[int, int, int]] = None,\n    border_width: Optional[int] = None,\n    id_size: Optional[int] = None,\n    id_thickness: Optional[int] = None,\n    draw_box: bool = True,\n    color_by_label: bool = False,\n    draw_labels: bool = False,\n    label_size: Optional[int] = None,\n    label_width: Optional[int] = None,\n) -> np.array:\n    \"**Deprecated**. Use [`draw_box`][norfair.drawing.draw_boxes.draw_boxes]\"\n    warn_once(\"draw_tracked_boxes is deprecated, use draw_box instead\")\n    return draw_boxes(\n        frame=frame,\n        drawables=objects,\n        color=\"by_label\" if color_by_label else border_colors,\n        thickness=border_width,\n        text_size=label_size or id_size,\n        text_thickness=id_thickness or label_width,\n        draw_labels=draw_labels,\n        draw_ids=id_size is not None and id_size > 0,\n        draw_box=draw_box,\n    )\n
    "},{"location":"reference/drawing/#norfair.drawing.color","title":"color","text":""},{"location":"reference/drawing/#norfair.drawing.color.Color","title":"Color","text":"

    Contains predefined colors.

    Colors are defined as a Tuple of integers between 0 and 255 expressing the values in BGR. This is the format OpenCV uses.
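
    Example (an editor's sketch, not from the library docstring): colors are plain BGR tuples, so Color.red corresponds to the hex value \"#ff0000\":

    >>> from norfair import Color\n>>> Color.red\n(0, 0, 255)\n>>> Color.green\n(0, 128, 0)\n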

    Source code in norfair/drawing/color.py
    class Color:\n    \"\"\"\n    Contains predefined colors.\n\n    Colors are defined as a Tuple of integers between 0 and 255 expressing the values in BGR\n    This is the format opencv uses.\n    \"\"\"\n\n    # from PIL.ImageColors.colormap\n    aliceblue = hex_to_bgr(\"#f0f8ff\")\n    antiquewhite = hex_to_bgr(\"#faebd7\")\n    aqua = hex_to_bgr(\"#00ffff\")\n    aquamarine = hex_to_bgr(\"#7fffd4\")\n    azure = hex_to_bgr(\"#f0ffff\")\n    beige = hex_to_bgr(\"#f5f5dc\")\n    bisque = hex_to_bgr(\"#ffe4c4\")\n    black = hex_to_bgr(\"#000000\")\n    blanchedalmond = hex_to_bgr(\"#ffebcd\")\n    blue = hex_to_bgr(\"#0000ff\")\n    blueviolet = hex_to_bgr(\"#8a2be2\")\n    brown = hex_to_bgr(\"#a52a2a\")\n    burlywood = hex_to_bgr(\"#deb887\")\n    cadetblue = hex_to_bgr(\"#5f9ea0\")\n    chartreuse = hex_to_bgr(\"#7fff00\")\n    chocolate = hex_to_bgr(\"#d2691e\")\n    coral = hex_to_bgr(\"#ff7f50\")\n    cornflowerblue = hex_to_bgr(\"#6495ed\")\n    cornsilk = hex_to_bgr(\"#fff8dc\")\n    crimson = hex_to_bgr(\"#dc143c\")\n    cyan = hex_to_bgr(\"#00ffff\")\n    darkblue = hex_to_bgr(\"#00008b\")\n    darkcyan = hex_to_bgr(\"#008b8b\")\n    darkgoldenrod = hex_to_bgr(\"#b8860b\")\n    darkgray = hex_to_bgr(\"#a9a9a9\")\n    darkgrey = hex_to_bgr(\"#a9a9a9\")\n    darkgreen = hex_to_bgr(\"#006400\")\n    darkkhaki = hex_to_bgr(\"#bdb76b\")\n    darkmagenta = hex_to_bgr(\"#8b008b\")\n    darkolivegreen = hex_to_bgr(\"#556b2f\")\n    darkorange = hex_to_bgr(\"#ff8c00\")\n    darkorchid = hex_to_bgr(\"#9932cc\")\n    darkred = hex_to_bgr(\"#8b0000\")\n    darksalmon = hex_to_bgr(\"#e9967a\")\n    darkseagreen = hex_to_bgr(\"#8fbc8f\")\n    darkslateblue = hex_to_bgr(\"#483d8b\")\n    darkslategray = hex_to_bgr(\"#2f4f4f\")\n    darkslategrey = hex_to_bgr(\"#2f4f4f\")\n    darkturquoise = hex_to_bgr(\"#00ced1\")\n    darkviolet = hex_to_bgr(\"#9400d3\")\n    deeppink = hex_to_bgr(\"#ff1493\")\n    deepskyblue = hex_to_bgr(\"#00bfff\")\n    dimgray = hex_to_bgr(\"#696969\")\n    dimgrey = hex_to_bgr(\"#696969\")\n    dodgerblue = hex_to_bgr(\"#1e90ff\")\n    firebrick = hex_to_bgr(\"#b22222\")\n    floralwhite = hex_to_bgr(\"#fffaf0\")\n    forestgreen = hex_to_bgr(\"#228b22\")\n    fuchsia = hex_to_bgr(\"#ff00ff\")\n    gainsboro = hex_to_bgr(\"#dcdcdc\")\n    ghostwhite = hex_to_bgr(\"#f8f8ff\")\n    gold = hex_to_bgr(\"#ffd700\")\n    goldenrod = hex_to_bgr(\"#daa520\")\n    gray = hex_to_bgr(\"#808080\")\n    grey = hex_to_bgr(\"#808080\")\n    green = (0, 128, 0)\n    greenyellow = hex_to_bgr(\"#adff2f\")\n    honeydew = hex_to_bgr(\"#f0fff0\")\n    hotpink = hex_to_bgr(\"#ff69b4\")\n    indianred = hex_to_bgr(\"#cd5c5c\")\n    indigo = hex_to_bgr(\"#4b0082\")\n    ivory = hex_to_bgr(\"#fffff0\")\n    khaki = hex_to_bgr(\"#f0e68c\")\n    lavender = hex_to_bgr(\"#e6e6fa\")\n    lavenderblush = hex_to_bgr(\"#fff0f5\")\n    lawngreen = hex_to_bgr(\"#7cfc00\")\n    lemonchiffon = hex_to_bgr(\"#fffacd\")\n    lightblue = hex_to_bgr(\"#add8e6\")\n    lightcoral = hex_to_bgr(\"#f08080\")\n    lightcyan = hex_to_bgr(\"#e0ffff\")\n    lightgoldenrodyellow = hex_to_bgr(\"#fafad2\")\n    lightgreen = hex_to_bgr(\"#90ee90\")\n    lightgray = hex_to_bgr(\"#d3d3d3\")\n    lightgrey = hex_to_bgr(\"#d3d3d3\")\n    lightpink = hex_to_bgr(\"#ffb6c1\")\n    lightsalmon = hex_to_bgr(\"#ffa07a\")\n    lightseagreen = hex_to_bgr(\"#20b2aa\")\n    lightskyblue = hex_to_bgr(\"#87cefa\")\n    lightslategray = hex_to_bgr(\"#778899\")\n    lightslategrey = hex_to_bgr(\"#778899\")\n    lightsteelblue = 
hex_to_bgr(\"#b0c4de\")\n    lightyellow = hex_to_bgr(\"#ffffe0\")\n    lime = hex_to_bgr(\"#00ff00\")\n    limegreen = hex_to_bgr(\"#32cd32\")\n    linen = hex_to_bgr(\"#faf0e6\")\n    magenta = hex_to_bgr(\"#ff00ff\")\n    maroon = hex_to_bgr(\"#800000\")\n    mediumaquamarine = hex_to_bgr(\"#66cdaa\")\n    mediumblue = hex_to_bgr(\"#0000cd\")\n    mediumorchid = hex_to_bgr(\"#ba55d3\")\n    mediumpurple = hex_to_bgr(\"#9370db\")\n    mediumseagreen = hex_to_bgr(\"#3cb371\")\n    mediumslateblue = hex_to_bgr(\"#7b68ee\")\n    mediumspringgreen = hex_to_bgr(\"#00fa9a\")\n    mediumturquoise = hex_to_bgr(\"#48d1cc\")\n    mediumvioletred = hex_to_bgr(\"#c71585\")\n    midnightblue = hex_to_bgr(\"#191970\")\n    mintcream = hex_to_bgr(\"#f5fffa\")\n    mistyrose = hex_to_bgr(\"#ffe4e1\")\n    moccasin = hex_to_bgr(\"#ffe4b5\")\n    navajowhite = hex_to_bgr(\"#ffdead\")\n    navy = hex_to_bgr(\"#000080\")\n    oldlace = hex_to_bgr(\"#fdf5e6\")\n    olive = hex_to_bgr(\"#808000\")\n    olivedrab = hex_to_bgr(\"#6b8e23\")\n    orange = hex_to_bgr(\"#ffa500\")\n    orangered = hex_to_bgr(\"#ff4500\")\n    orchid = hex_to_bgr(\"#da70d6\")\n    palegoldenrod = hex_to_bgr(\"#eee8aa\")\n    palegreen = hex_to_bgr(\"#98fb98\")\n    paleturquoise = hex_to_bgr(\"#afeeee\")\n    palevioletred = hex_to_bgr(\"#db7093\")\n    papayawhip = hex_to_bgr(\"#ffefd5\")\n    peachpuff = hex_to_bgr(\"#ffdab9\")\n    peru = hex_to_bgr(\"#cd853f\")\n    pink = hex_to_bgr(\"#ffc0cb\")\n    plum = hex_to_bgr(\"#dda0dd\")\n    powderblue = hex_to_bgr(\"#b0e0e6\")\n    purple = hex_to_bgr(\"#800080\")\n    rebeccapurple = hex_to_bgr(\"#663399\")\n    red = hex_to_bgr(\"#ff0000\")\n    rosybrown = hex_to_bgr(\"#bc8f8f\")\n    royalblue = hex_to_bgr(\"#4169e1\")\n    saddlebrown = hex_to_bgr(\"#8b4513\")\n    salmon = hex_to_bgr(\"#fa8072\")\n    sandybrown = hex_to_bgr(\"#f4a460\")\n    seagreen = hex_to_bgr(\"#2e8b57\")\n    seashell = hex_to_bgr(\"#fff5ee\")\n    sienna = hex_to_bgr(\"#a0522d\")\n    silver = hex_to_bgr(\"#c0c0c0\")\n    skyblue = hex_to_bgr(\"#87ceeb\")\n    slateblue = hex_to_bgr(\"#6a5acd\")\n    slategray = hex_to_bgr(\"#708090\")\n    slategrey = hex_to_bgr(\"#708090\")\n    snow = hex_to_bgr(\"#fffafa\")\n    springgreen = hex_to_bgr(\"#00ff7f\")\n    steelblue = hex_to_bgr(\"#4682b4\")\n    tan = hex_to_bgr(\"#d2b48c\")\n    teal = hex_to_bgr(\"#008080\")\n    thistle = hex_to_bgr(\"#d8bfd8\")\n    tomato = hex_to_bgr(\"#ff6347\")\n    turquoise = hex_to_bgr(\"#40e0d0\")\n    violet = hex_to_bgr(\"#ee82ee\")\n    wheat = hex_to_bgr(\"#f5deb3\")\n    white = hex_to_bgr(\"#ffffff\")\n    whitesmoke = hex_to_bgr(\"#f5f5f5\")\n    yellow = hex_to_bgr(\"#ffff00\")\n    yellowgreen = hex_to_bgr(\"#9acd32\")\n\n    # seaborn tab20 colors\n    tab1 = hex_to_bgr(\"#1f77b4\")\n    tab2 = hex_to_bgr(\"#aec7e8\")\n    tab3 = hex_to_bgr(\"#ff7f0e\")\n    tab4 = hex_to_bgr(\"#ffbb78\")\n    tab5 = hex_to_bgr(\"#2ca02c\")\n    tab6 = hex_to_bgr(\"#98df8a\")\n    tab7 = hex_to_bgr(\"#d62728\")\n    tab8 = hex_to_bgr(\"#ff9896\")\n    tab9 = hex_to_bgr(\"#9467bd\")\n    tab10 = hex_to_bgr(\"#c5b0d5\")\n    tab11 = hex_to_bgr(\"#8c564b\")\n    tab12 = hex_to_bgr(\"#c49c94\")\n    tab13 = hex_to_bgr(\"#e377c2\")\n    tab14 = hex_to_bgr(\"#f7b6d2\")\n    tab15 = hex_to_bgr(\"#7f7f7f\")\n    tab16 = hex_to_bgr(\"#c7c7c7\")\n    tab17 = hex_to_bgr(\"#bcbd22\")\n    tab18 = hex_to_bgr(\"#dbdb8d\")\n    tab19 = hex_to_bgr(\"#17becf\")\n    tab20 = hex_to_bgr(\"#9edae5\")\n    # seaborn colorblind\n    cb1 = 
hex_to_bgr(\"#0173b2\")\n    cb2 = hex_to_bgr(\"#de8f05\")\n    cb3 = hex_to_bgr(\"#029e73\")\n    cb4 = hex_to_bgr(\"#d55e00\")\n    cb5 = hex_to_bgr(\"#cc78bc\")\n    cb6 = hex_to_bgr(\"#ca9161\")\n    cb7 = hex_to_bgr(\"#fbafe4\")\n    cb8 = hex_to_bgr(\"#949494\")\n    cb9 = hex_to_bgr(\"#ece133\")\n    cb10 = hex_to_bgr(\"#56b4e9\")\n
    "},{"location":"reference/drawing/#norfair.drawing.color.Palette","title":"Palette","text":"

    Class to control the color palette for drawing.

    Examples:

    Change palette:

    >>> from norfair import Palette\n>>> Palette.set(\"colorblind\")\n>>> # or a custom palette\n>>> from norfair import Color\n>>> Palette.set([Color.red, Color.blue, \"#ffeeff\"])\n
    Source code in norfair/drawing/color.py
    class Palette:\n    \"\"\"\n    Class to control the color pallete for drawing.\n\n    Examples\n    --------\n    Change palette:\n    >>> from norfair import Palette\n    >>> Palette.set(\"colorblind\")\n    >>> # or a custom palette\n    >>> from norfair import Color\n    >>> Palette.set([Color.red, Color.blue, \"#ffeeff\"])\n    \"\"\"\n\n    _colors = PALETTES[\"tab10\"]\n    _default_color = Color.black\n\n    @classmethod\n    def set(cls, palette: Union[str, Iterable[ColorLike]]):\n        \"\"\"\n        Selects a color palette.\n\n        Parameters\n        ----------\n        palette : Union[str, Iterable[ColorLike]]\n            can be either\n            - the name of one of the predefined palettes `tab10`, `tab20`, or `colorblind`\n            - a list of ColorLike objects that can be parsed by [`parse_color`][norfair.drawing.color.parse_color]\n        \"\"\"\n        if isinstance(palette, str):\n            try:\n                cls._colors = PALETTES[palette]\n            except KeyError as e:\n                raise ValueError(\n                    f\"Invalid palette name '{palette}', valid values are {PALETTES.keys()}\"\n                ) from e\n        else:\n            colors = []\n            for c in palette:\n                colors.append(parse_color(c))\n\n            cls._colors = colors\n\n    @classmethod\n    def set_default_color(cls, color: ColorLike):\n        \"\"\"\n        Selects the default color of `choose_color` when hashable is None.\n\n        Parameters\n        ----------\n        color : ColorLike\n            The new default color.\n        \"\"\"\n        cls._default_color = parse_color(color)\n\n    @classmethod\n    def choose_color(cls, hashable: Hashable) -> ColorType:\n        if hashable is None:\n            return cls._default_color\n        return cls._colors[abs(hash(hashable)) % len(cls._colors)]\n
    "},{"location":"reference/drawing/#norfair.drawing.color.Palette.set","title":"set(palette) classmethod","text":"

    Selects a color palette.

    Parameters:

    Name Type Description Default palette Union[str, Iterable[ColorLike]]

    Can be either the name of one of the predefined palettes (tab10, tab20, or colorblind), or a list of ColorLike objects that can be parsed by parse_color.

    required Source code in norfair/drawing/color.py
    @classmethod\ndef set(cls, palette: Union[str, Iterable[ColorLike]]):\n    \"\"\"\n    Selects a color palette.\n\n    Parameters\n    ----------\n    palette : Union[str, Iterable[ColorLike]]\n        can be either\n        - the name of one of the predefined palettes `tab10`, `tab20`, or `colorblind`\n        - a list of ColorLike objects that can be parsed by [`parse_color`][norfair.drawing.color.parse_color]\n    \"\"\"\n    if isinstance(palette, str):\n        try:\n            cls._colors = PALETTES[palette]\n        except KeyError as e:\n            raise ValueError(\n                f\"Invalid palette name '{palette}', valid values are {PALETTES.keys()}\"\n            ) from e\n    else:\n        colors = []\n        for c in palette:\n            colors.append(parse_color(c))\n\n        cls._colors = colors\n
    "},{"location":"reference/drawing/#norfair.drawing.color.Palette.set_default_color","title":"set_default_color(color) classmethod","text":"

    Selects the default color of choose_color when hashable is None.

    Parameters:

    Name Type Description Default color ColorLike

    The new default color.
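
    Example (an editor's sketch, not from the library docstring): the default color is used by choose_color when the object has no id or label to hash:

    >>> from norfair import Palette\n>>> Palette.set_default_color(\"#808080\")\n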

    required Source code in norfair/drawing/color.py
    @classmethod\ndef set_default_color(cls, color: ColorLike):\n    \"\"\"\n    Selects the default color of `choose_color` when hashable is None.\n\n    Parameters\n    ----------\n    color : ColorLike\n        The new default color.\n    \"\"\"\n    cls._default_color = parse_color(color)\n
    "},{"location":"reference/drawing/#norfair.drawing.color.hex_to_bgr","title":"hex_to_bgr(hex_value)","text":"

    Converts conventional 6-digit hex colors to BGR tuples.

    Parameters:

    Name Type Description Default hex_value str

    Hex value with a leading #, for instance \"#ff0000\".

    required

    Returns:

    Type Description Tuple[int, int, int]

    BGR values

    Raises:

    Type Description ValueError

    if the string is invalid
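
    Example (an editor's sketch, not from the library docstring): both 6-digit and 3-digit lowercase hex strings are accepted:

    >>> from norfair.drawing.color import hex_to_bgr\n>>> hex_to_bgr(\"#ff0000\")\n(0, 0, 255)\n>>> hex_to_bgr(\"#f0a\")\n(170, 0, 255)\n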

    Source code in norfair/drawing/color.py
    def hex_to_bgr(hex_value: str) -> ColorType:\n    \"\"\"Converts conventional 6 digits hex colors to BGR tuples\n\n    Parameters\n    ----------\n    hex_value : str\n        hex value with leading `#` for instance `\"#ff0000\"`\n\n    Returns\n    -------\n    Tuple[int, int, int]\n        BGR values\n\n    Raises\n    ------\n    ValueError\n        if the string is invalid\n    \"\"\"\n    if re.match(\"#[a-f0-9]{6}$\", hex_value):\n        return (\n            int(hex_value[5:7], 16),\n            int(hex_value[3:5], 16),\n            int(hex_value[1:3], 16),\n        )\n\n    if re.match(\"#[a-f0-9]{3}$\", hex_value):\n        return (\n            int(hex_value[3] * 2, 16),\n            int(hex_value[2] * 2, 16),\n            int(hex_value[1] * 2, 16),\n        )\n    raise ValueError(f\"'{hex_value}' is not a valid color\")\n
    "},{"location":"reference/drawing/#norfair.drawing.color.parse_color","title":"parse_color(color_like)","text":"

    Makes a best effort to parse the given value into a Color.

    Parameters:

    Name Type Description Default color_like ColorLike

    Can be one of:

    1. a string with the 6-digit hex value (\"#ff0000\")
    2. a string with one of the names defined in Colors (\"red\")
    3. a BGR tuple ((0, 0, 255))
    required

    Returns:

    Type Description Color

    The BGR tuple.
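
    Example (an editor's sketch, not from the library docstring): the three accepted forms all resolve to the same BGR tuple:

    >>> from norfair.drawing.color import parse_color\n>>> parse_color(\"#ff0000\")\n(0, 0, 255)\n>>> parse_color(\"red\")\n(0, 0, 255)\n>>> parse_color((0, 0, 255))\n(0, 0, 255)\n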

    Source code in norfair/drawing/color.py
    def parse_color(color_like: ColorLike) -> ColorType:\n    \"\"\"Makes best effort to parse the given value to a Color\n\n    Parameters\n    ----------\n    color_like : ColorLike\n        Can be one of:\n\n        1. a string with the 6 digits hex value (`\"#ff0000\"`)\n        2. a string with one of the names defined in Colors (`\"red\"`)\n        3. a BGR tuple (`(0, 0, 255)`)\n\n    Returns\n    -------\n    Color\n        The BGR tuple.\n    \"\"\"\n    if isinstance(color_like, str):\n        if color_like.startswith(\"#\"):\n            return hex_to_bgr(color_like)\n        else:\n            return getattr(Color, color_like)\n    # TODO: validate?\n    return tuple([int(v) for v in color_like])\n
    "},{"location":"reference/drawing/#norfair.drawing.path","title":"path","text":""},{"location":"reference/drawing/#norfair.drawing.path.Paths","title":"Paths","text":"

    Class that draws the paths taken by a set of points of interest defined from the coordinates of each tracker estimation.

    Parameters:

    Name Type Description Default get_points_to_draw Optional[Callable[[array], array]]

    Function that takes a list of points (the .estimate attribute of a TrackedObject) and returns a list of points for which we want to draw their paths.

    By default it is the mean point of all the points in the tracker.

    None thickness Optional[int]

    Thickness of the circles representing the paths of interest.

    None color Optional[Tuple[int, int, int]]

    Color of the circles representing the paths of interest.

    None radius Optional[int]

    Radius of the circles representing the paths of interest.

    None attenuation float

    A float number in [0, 1] that dictates the speed at which the path is erased. If it is 0, the path is never erased.

    0.01

    Examples:

    >>> from norfair import Tracker, Video, Path\n>>> video = Video(\"video.mp4\")\n>>> tracker = Tracker(...)\n>>> path_drawer = Path()\n>>> for frame in video:\n>>>    detections = get_detections(frame)  # runs detector and returns Detections\n>>>    tracked_objects = tracker.update(detections)\n>>>    frame = path_drawer.draw(frame, tracked_objects)\n>>>    video.write(frame)\n
    Source code in norfair/drawing/path.py
    class Paths:\n    \"\"\"\n    Class that draws the paths taken by a set of points of interest defined from the coordinates of each tracker estimation.\n\n    Parameters\n    ----------\n    get_points_to_draw : Optional[Callable[[np.array], np.array]], optional\n        Function that takes a list of points (the `.estimate` attribute of a [`TrackedObject`][norfair.tracker.TrackedObject])\n        and returns a list of points for which we want to draw their paths.\n\n        By default it is the mean point of all the points in the tracker.\n    thickness : Optional[int], optional\n        Thickness of the circles representing the paths of interest.\n    color : Optional[Tuple[int, int, int]], optional\n        [Color][norfair.drawing.Color] of the circles representing the paths of interest.\n    radius : Optional[int], optional\n        Radius of the circles representing the paths of interest.\n    attenuation : float, optional\n        A float number in [0, 1] that dictates the speed at which the path is erased.\n        if it is `0` then the path is never erased.\n\n    Examples\n    --------\n    >>> from norfair import Tracker, Video, Path\n    >>> video = Video(\"video.mp4\")\n    >>> tracker = Tracker(...)\n    >>> path_drawer = Path()\n    >>> for frame in video:\n    >>>    detections = get_detections(frame)  # runs detector and returns Detections\n    >>>    tracked_objects = tracker.update(detections)\n    >>>    frame = path_drawer.draw(frame, tracked_objects)\n    >>>    video.write(frame)\n    \"\"\"\n\n    def __init__(\n        self,\n        get_points_to_draw: Optional[Callable[[np.array], np.array]] = None,\n        thickness: Optional[int] = None,\n        color: Optional[Tuple[int, int, int]] = None,\n        radius: Optional[int] = None,\n        attenuation: float = 0.01,\n    ):\n        if get_points_to_draw is None:\n\n            def get_points_to_draw(points):\n                return [np.mean(np.array(points), axis=0)]\n\n        self.get_points_to_draw = get_points_to_draw\n\n        self.radius = radius\n        self.thickness = thickness\n        self.color = color\n        self.mask = None\n        self.attenuation_factor = 1 - attenuation\n\n    def draw(\n        self, frame: np.ndarray, tracked_objects: Sequence[TrackedObject]\n    ) -> np.array:\n        \"\"\"\n        Draw the paths of the points interest on a frame.\n\n        !!! 
warning\n            This method does **not** draw frames in place as other drawers do, the resulting frame is returned.\n\n        Parameters\n        ----------\n        frame : np.ndarray\n            The OpenCV frame to draw on.\n        tracked_objects : Sequence[TrackedObject]\n            List of [`TrackedObject`][norfair.tracker.TrackedObject] to get the points of interest in order to update the paths.\n\n        Returns\n        -------\n        np.array\n            The resulting frame.\n        \"\"\"\n        if self.mask is None:\n            frame_scale = frame.shape[0] / 100\n\n            if self.radius is None:\n                self.radius = int(max(frame_scale * 0.7, 1))\n            if self.thickness is None:\n                self.thickness = int(max(frame_scale / 7, 1))\n\n            self.mask = np.zeros(frame.shape, np.uint8)\n\n        self.mask = (self.mask * self.attenuation_factor).astype(\"uint8\")\n\n        for obj in tracked_objects:\n            if obj.abs_to_rel is not None:\n                warn_once(\n                    \"It seems that your using the Path drawer together with MotionEstimator. This is not fully supported and the results will not be what's expected\"\n                )\n\n            if self.color is None:\n                color = Palette.choose_color(obj.id)\n            else:\n                color = self.color\n\n            points_to_draw = self.get_points_to_draw(obj.estimate)\n\n            for point in points_to_draw:\n                self.mask = Drawer.circle(\n                    self.mask,\n                    position=tuple(point.astype(int)),\n                    radius=self.radius,\n                    color=color,\n                    thickness=self.thickness,\n                )\n\n        return Drawer.alpha_blend(self.mask, frame, alpha=1, beta=1)\n
    "},{"location":"reference/drawing/#norfair.drawing.path.Paths.draw","title":"draw(frame, tracked_objects)","text":"

    Draw the paths of the points of interest on a frame.

    Warning

    This method does not draw frames in place as other drawers do; the resulting frame is returned.

    Parameters:

    Name Type Description Default frame ndarray

    The OpenCV frame to draw on.

    required tracked_objects Sequence[TrackedObject]

    List of TrackedObject to get the points of interest in order to update the paths.

    required

    Returns:

    Type Description array

    The resulting frame.

    Source code in norfair/drawing/path.py
    def draw(\n    self, frame: np.ndarray, tracked_objects: Sequence[TrackedObject]\n) -> np.array:\n    \"\"\"\n    Draw the paths of the points interest on a frame.\n\n    !!! warning\n        This method does **not** draw frames in place as other drawers do, the resulting frame is returned.\n\n    Parameters\n    ----------\n    frame : np.ndarray\n        The OpenCV frame to draw on.\n    tracked_objects : Sequence[TrackedObject]\n        List of [`TrackedObject`][norfair.tracker.TrackedObject] to get the points of interest in order to update the paths.\n\n    Returns\n    -------\n    np.array\n        The resulting frame.\n    \"\"\"\n    if self.mask is None:\n        frame_scale = frame.shape[0] / 100\n\n        if self.radius is None:\n            self.radius = int(max(frame_scale * 0.7, 1))\n        if self.thickness is None:\n            self.thickness = int(max(frame_scale / 7, 1))\n\n        self.mask = np.zeros(frame.shape, np.uint8)\n\n    self.mask = (self.mask * self.attenuation_factor).astype(\"uint8\")\n\n    for obj in tracked_objects:\n        if obj.abs_to_rel is not None:\n            warn_once(\n                \"It seems that your using the Path drawer together with MotionEstimator. This is not fully supported and the results will not be what's expected\"\n            )\n\n        if self.color is None:\n            color = Palette.choose_color(obj.id)\n        else:\n            color = self.color\n\n        points_to_draw = self.get_points_to_draw(obj.estimate)\n\n        for point in points_to_draw:\n            self.mask = Drawer.circle(\n                self.mask,\n                position=tuple(point.astype(int)),\n                radius=self.radius,\n                color=color,\n                thickness=self.thickness,\n            )\n\n    return Drawer.alpha_blend(self.mask, frame, alpha=1, beta=1)\n
    "},{"location":"reference/drawing/#norfair.drawing.path.AbsolutePaths","title":"AbsolutePaths","text":"

    Class that draws the absolute paths taken by a set of points.

    Works just like Paths but supports camera motion.

    Warning

    This drawer is not optimized, so it can be extremely slow. Performance degrades linearly with max_history * number_of_tracked_objects.

    Parameters:

    Name Type Description Default get_points_to_draw Optional[Callable[[array], array]]

    Function that takes a list of points (the .estimate attribute of a TrackedObject) and returns a list of points for which we want to draw their paths.

    By default it is the mean point of all the points in the tracker.

    None thickness Optional[int]

    Thickness of the circles representing the paths of interest.

    None color Optional[Tuple[int, int, int]]

    Color of the circles representing the paths of interest.

    None radius Optional[int]

    Radius of the circles representing the paths of interest.

    None max_history int

    Number of past points to include in the path. High values make the drawing slower.

    20

    Examples:

    >>> from norfair import Tracker, Video, Path\n>>> video = Video(\"video.mp4\")\n>>> tracker = Tracker(...)\n>>> path_drawer = Path()\n>>> for frame in video:\n>>>    detections = get_detections(frame)  # runs detector and returns Detections\n>>>    tracked_objects = tracker.update(detections)\n>>>    frame = path_drawer.draw(frame, tracked_objects)\n>>>    video.write(frame)\n
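
    The example above is inherited from Paths; a minimal sketch of using AbsolutePaths together with a MotionEstimator might look as follows (the get_detections helper is hypothetical, and AbsolutePaths is assumed to be importable from the top-level package; otherwise import it from norfair.drawing):

    >>> from norfair import AbsolutePaths, Tracker, Video
    >>> from norfair.camera_motion import MotionEstimator
    >>> video = Video("video.mp4")
    >>> tracker = Tracker("euclidean", distance_threshold=50)
    >>> motion_estimator = MotionEstimator()
    >>> path_drawer = AbsolutePaths()
    >>> for frame in video:
    >>>     coord_transformations = motion_estimator.update(frame)
    >>>     detections = get_detections(frame)  # hypothetical detector wrapper
    >>>     tracked_objects = tracker.update(detections, coord_transformations=coord_transformations)
    >>>     frame = path_drawer.draw(frame, tracked_objects, coord_transform=coord_transformations)
    >>>     video.write(frame)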
    Source code in norfair/drawing/path.py
    class AbsolutePaths:\n    \"\"\"\n    Class that draws the absolute paths taken by a set of points.\n\n    Works just like [`Paths`][norfair.drawing.Paths] but supports camera motion.\n\n    !!! warning\n        This drawer is not optimized so it can be stremely slow. Performance degrades linearly with\n        `max_history * number_of_tracked_objects`.\n\n    Parameters\n    ----------\n    get_points_to_draw : Optional[Callable[[np.array], np.array]], optional\n        Function that takes a list of points (the `.estimate` attribute of a [`TrackedObject`][norfair.tracker.TrackedObject])\n        and returns a list of points for which we want to draw their paths.\n\n        By default it is the mean point of all the points in the tracker.\n    thickness : Optional[int], optional\n        Thickness of the circles representing the paths of interest.\n    color : Optional[Tuple[int, int, int]], optional\n        [Color][norfair.drawing.Color] of the circles representing the paths of interest.\n    radius : Optional[int], optional\n        Radius of the circles representing the paths of interest.\n    max_history : int, optional\n        Number of past points to include in the path. High values make the drawing slower\n\n    Examples\n    --------\n    >>> from norfair import Tracker, Video, Path\n    >>> video = Video(\"video.mp4\")\n    >>> tracker = Tracker(...)\n    >>> path_drawer = Path()\n    >>> for frame in video:\n    >>>    detections = get_detections(frame)  # runs detector and returns Detections\n    >>>    tracked_objects = tracker.update(detections)\n    >>>    frame = path_drawer.draw(frame, tracked_objects)\n    >>>    video.write(frame)\n    \"\"\"\n\n    def __init__(\n        self,\n        get_points_to_draw: Optional[Callable[[np.array], np.array]] = None,\n        thickness: Optional[int] = None,\n        color: Optional[Tuple[int, int, int]] = None,\n        radius: Optional[int] = None,\n        max_history=20,\n    ):\n\n        if get_points_to_draw is None:\n\n            def get_points_to_draw(points):\n                return [np.mean(np.array(points), axis=0)]\n\n        self.get_points_to_draw = get_points_to_draw\n\n        self.radius = radius\n        self.thickness = thickness\n        self.color = color\n        self.past_points = defaultdict(lambda: [])\n        self.max_history = max_history\n        self.alphas = np.linspace(0.99, 0.01, max_history)\n\n    def draw(self, frame, tracked_objects, coord_transform=None):\n        frame_scale = frame.shape[0] / 100\n\n        if self.radius is None:\n            self.radius = int(max(frame_scale * 0.7, 1))\n        if self.thickness is None:\n            self.thickness = int(max(frame_scale / 7, 1))\n        for obj in tracked_objects:\n            if not obj.live_points.any():\n                continue\n\n            if self.color is None:\n                color = Palette.choose_color(obj.id)\n            else:\n                color = self.color\n\n            points_to_draw = self.get_points_to_draw(obj.get_estimate(absolute=True))\n\n            for point in coord_transform.abs_to_rel(points_to_draw):\n                Drawer.circle(\n                    frame,\n                    position=tuple(point.astype(int)),\n                    radius=self.radius,\n                    color=color,\n                    thickness=self.thickness,\n                )\n\n            last = points_to_draw\n            for i, past_points in enumerate(self.past_points[obj.id]):\n                overlay = frame.copy()\n  
              last = coord_transform.abs_to_rel(last)\n                for j, point in enumerate(coord_transform.abs_to_rel(past_points)):\n                    Drawer.line(\n                        overlay,\n                        tuple(last[j].astype(int)),\n                        tuple(point.astype(int)),\n                        color=color,\n                        thickness=self.thickness,\n                    )\n                last = past_points\n\n                alpha = self.alphas[i]\n                frame = Drawer.alpha_blend(overlay, frame, alpha=alpha)\n            self.past_points[obj.id].insert(0, points_to_draw)\n            self.past_points[obj.id] = self.past_points[obj.id][: self.max_history]\n        return frame\n
    "},{"location":"reference/drawing/#norfair.drawing.fixed_camera","title":"fixed_camera","text":""},{"location":"reference/drawing/#norfair.drawing.fixed_camera.FixedCamera","title":"FixedCamera","text":"

    Class used to stabilize video based on the camera motion.

    Starts with a larger frame, where the original frame is drawn on top of a black background. As the camera moves, the smaller frame moves in the opposite direction, stabilizing the objects in it.

    Useful for debugging or demoing the camera motion.

    Warning

    This only works with TranslationTransformation, using HomographyTransformation will result in unexpected behaviour.

    Warning

    If using other drawers, always apply this one last. Using other drawers on the scaled up frame will not work as expected.

    Note

    Sometimes the camera moves so far from the original point that the result won't fit in the scaled-up frame. In this case, a warning will be logged and the frames will be cropped to avoid errors.

    Parameters:

    Name Type Description Default scale float

    The resulting video will have a resolution of scale * (H, W) where HxW is the resolution of the original video. Use a bigger scale if the camera is moving too much.

    2 attenuation float

    Controls how fast the older frames fade to black.

    0.05

    Examples:

    >>> # setup\n>>> tracker = Tracker(\"frobenious\", 100)\n>>> motion_estimator = MotionEstimator()\n>>> video = Video(input_path=\"video.mp4\")\n>>> fixed_camera = FixedCamera()\n>>> # process video\n>>> for frame in video:\n>>>     coord_transformations = motion_estimator.update(frame)\n>>>     detections = get_detections(frame)\n>>>     tracked_objects = tracker.update(detections, coord_transformations)\n>>>     draw_tracked_objects(frame, tracked_objects)  # fixed_camera should always be the last drawer\n>>>     bigger_frame = fixed_camera.adjust_frame(frame, coord_transformations)\n>>>     video.write(bigger_frame)\n
    Source code in norfair/drawing/fixed_camera.py
    class FixedCamera:\n    \"\"\"\n    Class used to stabilize video based on the camera motion.\n\n    Starts with a larger frame, where the original frame is drawn on top of a black background.\n    As the camera moves, the smaller frame moves in the opposite direction, stabilizing the objects in it.\n\n    Useful for debugging or demoing the camera motion.\n    ![Example GIF](../../videos/camera_stabilization.gif)\n\n    !!! Warning\n        This only works with [`TranslationTransformation`][norfair.camera_motion.TranslationTransformation],\n        using [`HomographyTransformation`][norfair.camera_motion.HomographyTransformation] will result in\n        unexpected behaviour.\n\n    !!! Warning\n        If using other drawers, always apply this one last. Using other drawers on the scaled up frame will not work as expected.\n\n    !!! Note\n        Sometimes the camera moves so far from the original point that the result won't fit in the scaled-up frame.\n        In this case, a warning will be logged and the frames will be cropped to avoid errors.\n\n    Parameters\n    ----------\n    scale : float, optional\n        The resulting video will have a resolution of `scale * (H, W)` where HxW is the resolution of the original video.\n        Use a bigger scale if the camera is moving too much.\n    attenuation : float, optional\n        Controls how fast the older frames fade to black.\n\n    Examples\n    --------\n    >>> # setup\n    >>> tracker = Tracker(\"frobenious\", 100)\n    >>> motion_estimator = MotionEstimator()\n    >>> video = Video(input_path=\"video.mp4\")\n    >>> fixed_camera = FixedCamera()\n    >>> # process video\n    >>> for frame in video:\n    >>>     coord_transformations = motion_estimator.update(frame)\n    >>>     detections = get_detections(frame)\n    >>>     tracked_objects = tracker.update(detections, coord_transformations)\n    >>>     draw_tracked_objects(frame, tracked_objects)  # fixed_camera should always be the last drawer\n    >>>     bigger_frame = fixed_camera.adjust_frame(frame, coord_transformations)\n    >>>     video.write(bigger_frame)\n    \"\"\"\n\n    def __init__(self, scale: float = 2, attenuation: float = 0.05):\n        self.scale = scale\n        self._background = None\n        self._attenuation_factor = 1 - attenuation\n\n    def adjust_frame(\n        self, frame: np.ndarray, coord_transformation: TranslationTransformation\n    ) -> np.ndarray:\n        \"\"\"\n        Render scaled up frame.\n\n        Parameters\n        ----------\n        frame : np.ndarray\n            The OpenCV frame.\n        coord_transformation : TranslationTransformation\n            The coordinate transformation as returned by the [`MotionEstimator`][norfair.camera_motion.MotionEstimator]\n\n        Returns\n        -------\n        np.ndarray\n            The new bigger frame with the original frame drawn on it.\n        \"\"\"\n\n        # initialize background if necessary\n        if self._background is None:\n            original_size = (\n                frame.shape[1],\n                frame.shape[0],\n            )  # OpenCV format is (width, height)\n\n            scaled_size = tuple(\n                (np.array(original_size) * np.array(self.scale)).round().astype(int)\n            )\n            self._background = np.zeros(\n                [scaled_size[1], scaled_size[0], frame.shape[-1]],\n                frame.dtype,\n            )\n        else:\n            self._background = (self._background * self._attenuation_factor).astype(\n          
      frame.dtype\n            )\n\n        # top_left is the anchor coordinate from where we start drawing the fame on top of the background\n        # aim to draw it in the center of the background but transformations will move this point\n        top_left = (\n            np.array(self._background.shape[:2]) // 2 - np.array(frame.shape[:2]) // 2\n        )\n        top_left = (\n            coord_transformation.rel_to_abs(top_left[::-1]).round().astype(int)[::-1]\n        )\n        # box of the background that will be updated and the limits of it\n        background_y0, background_y1 = (top_left[0], top_left[0] + frame.shape[0])\n        background_x0, background_x1 = (top_left[1], top_left[1] + frame.shape[1])\n        background_size_y, background_size_x = self._background.shape[:2]\n\n        # define box of the frame that will be used\n        # if the scale is not enough to support the movement, warn the user but keep drawing\n        # cropping the frame so that the operation doesn't fail\n        frame_y0, frame_y1, frame_x0, frame_x1 = (0, frame.shape[0], 0, frame.shape[1])\n        if (\n            background_y0 < 0\n            or background_x0 < 0\n            or background_y1 > background_size_y\n            or background_x1 > background_size_x\n        ):\n            warn_once(\n                \"moving_camera_scale is not enough to cover the range of camera movement, frame will be cropped\"\n            )\n            # crop left or top of the frame if necessary\n            frame_y0 = max(-background_y0, 0)\n            frame_x0 = max(-background_x0, 0)\n            # crop right or bottom of the frame if necessary\n            frame_y1 = max(\n                min(background_size_y - background_y0, background_y1 - background_y0), 0\n            )\n            frame_x1 = max(\n                min(background_size_x - background_x0, background_x1 - background_x0), 0\n            )\n            # handle cases where the limits of the background become negative which numpy will interpret incorrectly\n            background_y0 = max(background_y0, 0)\n            background_x0 = max(background_x0, 0)\n            background_y1 = max(background_y1, 0)\n            background_x1 = max(background_x1, 0)\n        self._background[\n            background_y0:background_y1, background_x0:background_x1, :\n        ] = frame[frame_y0:frame_y1, frame_x0:frame_x1, :]\n        return self._background\n
    "},{"location":"reference/drawing/#norfair.drawing.fixed_camera.FixedCamera.adjust_frame","title":"adjust_frame(frame, coord_transformation)","text":"

    Render scaled up frame.

    Parameters:

    Name Type Description Default frame ndarray

    The OpenCV frame.

    required coord_transformation TranslationTransformation

    The coordinate transformation as returned by the MotionEstimator

    required

    Returns:

    Type Description ndarray

    The new bigger frame with the original frame drawn on it.

    Source code in norfair/drawing/fixed_camera.py
    def adjust_frame(\n    self, frame: np.ndarray, coord_transformation: TranslationTransformation\n) -> np.ndarray:\n    \"\"\"\n    Render scaled up frame.\n\n    Parameters\n    ----------\n    frame : np.ndarray\n        The OpenCV frame.\n    coord_transformation : TranslationTransformation\n        The coordinate transformation as returned by the [`MotionEstimator`][norfair.camera_motion.MotionEstimator]\n\n    Returns\n    -------\n    np.ndarray\n        The new bigger frame with the original frame drawn on it.\n    \"\"\"\n\n    # initialize background if necessary\n    if self._background is None:\n        original_size = (\n            frame.shape[1],\n            frame.shape[0],\n        )  # OpenCV format is (width, height)\n\n        scaled_size = tuple(\n            (np.array(original_size) * np.array(self.scale)).round().astype(int)\n        )\n        self._background = np.zeros(\n            [scaled_size[1], scaled_size[0], frame.shape[-1]],\n            frame.dtype,\n        )\n    else:\n        self._background = (self._background * self._attenuation_factor).astype(\n            frame.dtype\n        )\n\n    # top_left is the anchor coordinate from where we start drawing the fame on top of the background\n    # aim to draw it in the center of the background but transformations will move this point\n    top_left = (\n        np.array(self._background.shape[:2]) // 2 - np.array(frame.shape[:2]) // 2\n    )\n    top_left = (\n        coord_transformation.rel_to_abs(top_left[::-1]).round().astype(int)[::-1]\n    )\n    # box of the background that will be updated and the limits of it\n    background_y0, background_y1 = (top_left[0], top_left[0] + frame.shape[0])\n    background_x0, background_x1 = (top_left[1], top_left[1] + frame.shape[1])\n    background_size_y, background_size_x = self._background.shape[:2]\n\n    # define box of the frame that will be used\n    # if the scale is not enough to support the movement, warn the user but keep drawing\n    # cropping the frame so that the operation doesn't fail\n    frame_y0, frame_y1, frame_x0, frame_x1 = (0, frame.shape[0], 0, frame.shape[1])\n    if (\n        background_y0 < 0\n        or background_x0 < 0\n        or background_y1 > background_size_y\n        or background_x1 > background_size_x\n    ):\n        warn_once(\n            \"moving_camera_scale is not enough to cover the range of camera movement, frame will be cropped\"\n        )\n        # crop left or top of the frame if necessary\n        frame_y0 = max(-background_y0, 0)\n        frame_x0 = max(-background_x0, 0)\n        # crop right or bottom of the frame if necessary\n        frame_y1 = max(\n            min(background_size_y - background_y0, background_y1 - background_y0), 0\n        )\n        frame_x1 = max(\n            min(background_size_x - background_x0, background_x1 - background_x0), 0\n        )\n        # handle cases where the limits of the background become negative which numpy will interpret incorrectly\n        background_y0 = max(background_y0, 0)\n        background_x0 = max(background_x0, 0)\n        background_y1 = max(background_y1, 0)\n        background_x1 = max(background_x1, 0)\n    self._background[\n        background_y0:background_y1, background_x0:background_x1, :\n    ] = frame[frame_y0:frame_y1, frame_x0:frame_x1, :]\n    return self._background\n
    "},{"location":"reference/drawing/#norfair.drawing.absolute_grid","title":"absolute_grid","text":""},{"location":"reference/drawing/#norfair.drawing.absolute_grid.draw_absolute_grid","title":"draw_absolute_grid(frame, coord_transformations, grid_size=20, radius=2, thickness=1, color=Color.black, polar=False)","text":"

    Draw a grid of points in absolute coordinates.

    Useful for debugging camera motion.

    The points are drawn as if the camera were at the center of a sphere, at the intersections of latitude and longitude lines over the surface of the sphere.
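
    A minimal usage sketch, assuming draw_absolute_grid is re-exported from norfair.drawing (otherwise import it from norfair.drawing.absolute_grid); it is typically called on every frame after estimating the camera motion:

    >>> from norfair import Video
    >>> from norfair.camera_motion import MotionEstimator
    >>> from norfair.drawing import draw_absolute_grid
    >>> video = Video("video.mp4")
    >>> motion_estimator = MotionEstimator()
    >>> for frame in video:
    >>>     coord_transformations = motion_estimator.update(frame)
    >>>     draw_absolute_grid(frame, coord_transformations)
    >>>     video.write(frame)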

    Parameters:

    Name Type Description Default frame ndarray

    The OpenCV frame to draw on.

    required coord_transformations CoordinatesTransformation

    The coordinate transformation as returned by the MotionEstimator

    required grid_size int

    How many points to draw.

    20 radius int

    Size of each point.

    2 thickness int

    Thickness of each point

    1 color ColorType

    Color of the points.

    black polar Bool

    If True, the points on the first frame are drawn as if the camera were pointing to a pole (viewed from the center of the earth). By default, False is used which means the points are drawn as if the camera were pointing to the Equator.

    False Source code in norfair/drawing/absolute_grid.py
    def draw_absolute_grid(\n    frame: np.ndarray,\n    coord_transformations: CoordinatesTransformation,\n    grid_size: int = 20,\n    radius: int = 2,\n    thickness: int = 1,\n    color: ColorType = Color.black,\n    polar: bool = False,\n):\n    \"\"\"\n    Draw a grid of points in absolute coordinates.\n\n    Useful for debugging camera motion.\n\n    The points are drawn as if the camera were in the center of a sphere and points are drawn in the intersection\n    of latitude and longitude lines over the surface of the sphere.\n\n    Parameters\n    ----------\n    frame : np.ndarray\n        The OpenCV frame to draw on.\n    coord_transformations : CoordinatesTransformation\n        The coordinate transformation as returned by the [`MotionEstimator`][norfair.camera_motion.MotionEstimator]\n    grid_size : int, optional\n        How many points to draw.\n    radius : int, optional\n        Size of each point.\n    thickness : int, optional\n        Thickness of each point\n    color : ColorType, optional\n        Color of the points.\n    polar : Bool, optional\n        If True, the points on the first frame are drawn as if the camera were pointing to a pole (viewed from the center of the earth).\n        By default, False is used which means the points are drawn as if the camera were pointing to the Equator.\n    \"\"\"\n    h, w, _ = frame.shape\n\n    # get absolute points grid\n    points = _get_grid(grid_size, w, h, polar=polar)\n\n    # transform the points to relative coordinates\n    if coord_transformations is None:\n        points_transformed = points\n    else:\n        points_transformed = coord_transformations.abs_to_rel(points)\n\n    # filter points that are not visible\n    visible_points = points_transformed[\n        (points_transformed <= np.array([w, h])).all(axis=1)\n        & (points_transformed >= 0).all(axis=1)\n    ]\n    for point in visible_points:\n        Drawer.cross(\n            frame, point.astype(int), radius=radius, thickness=thickness, color=color\n        )\n
    "},{"location":"reference/filter/","title":"Filter","text":""},{"location":"reference/filter/#norfair.filter.FilterPyKalmanFilterFactory","title":"FilterPyKalmanFilterFactory","text":"

    Bases: FilterFactory

    This class can be used either to change some parameters of the KalmanFilter that the tracker uses, or to fully customize the predictive filter implementation to use (as long as the methods and properties are compatible).

    The former case only requires changing the default parameters upon tracker creation: tracker = Tracker(..., filter_factory=FilterPyKalmanFilterFactory(R=100)), while the latter requires creating your own class extending FilterPyKalmanFilterFactory, and rewriting its create_filter method to return your own customized filter.
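
    As a sketch of the latter case, relying only on the documented create_filter hook, a subclass might tweak the filter returned by the parent factory (the extra noise scaling below is purely illustrative, as are the tracker arguments):

    >>> import numpy as np
    >>> from norfair import Tracker
    >>> from norfair.filter import FilterPyKalmanFilterFactory
    >>> class MyFilterFactory(FilterPyKalmanFilterFactory):
    >>>     def create_filter(self, initial_detection: np.ndarray):
    >>>         kalman_filter = super().create_filter(initial_detection)
    >>>         kalman_filter.R *= 2  # illustrative: extra measurement noise
    >>>         return kalman_filter
    >>> tracker = Tracker("euclidean", distance_threshold=50, filter_factory=MyFilterFactory())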

    Parameters:

    Name Type Description Default R float

    Multiplier for the sensor measurement noise matrix, by default 4.0

    4.0 Q float

    Multiplier for the process uncertainty, by default 0.1

    0.1 P float

    Multiplier for the initial covariance matrix estimation, only in the entries that correspond to position (not speed) variables, by default 10.0

    10.0 See Also

    filterpy.KalmanFilter.

    Source code in norfair/filter.py
    class FilterPyKalmanFilterFactory(FilterFactory):\n    \"\"\"\n    This class can be used either to change some parameters of the [KalmanFilter](https://filterpy.readthedocs.io/en/latest/kalman/KalmanFilter.html)\n    that the tracker uses, or to fully customize the predictive filter implementation to use (as long as the methods and properties are compatible).\n\n    The former case only requires changing the default parameters upon tracker creation: `tracker = Tracker(..., filter_factory=FilterPyKalmanFilterFactory(R=100))`,\n    while the latter requires creating your own class extending `FilterPyKalmanFilterFactory`, and rewriting its `create_filter` method to return your own customized filter.\n\n    Parameters\n    ----------\n    R : float, optional\n        Multiplier for the sensor measurement noise matrix, by default 4.0\n    Q : float, optional\n        Multiplier for the process uncertainty, by default 0.1\n    P : float, optional\n        Multiplier for the initial covariance matrix estimation, only in the entries that correspond to position (not speed) variables, by default 10.0\n\n    See Also\n    --------\n    [`filterpy.KalmanFilter`](https://filterpy.readthedocs.io/en/latest/kalman/KalmanFilter.html).\n    \"\"\"\n\n    def __init__(self, R: float = 4.0, Q: float = 0.1, P: float = 10.0):\n        self.R = R\n        self.Q = Q\n        self.P = P\n\n    def create_filter(self, initial_detection: np.ndarray) -> KalmanFilter:\n        \"\"\"\n        This method returns a new predictive filter instance with the current setup, to be used by each new [`TrackedObject`][norfair.tracker.TrackedObject] that is created.\n        This predictive filter will be used to estimate speed and future positions of the object, to better match the detections during its trajectory.\n\n        Parameters\n        ----------\n        initial_detection : np.ndarray\n            numpy array of shape `(number of points per object, 2)`, corresponding to the [`Detection.points`][norfair.tracker.Detection] of the tracked object being born,\n            which shall be used as initial position estimation for it.\n\n        Returns\n        -------\n        KalmanFilter\n            The kalman filter\n        \"\"\"\n        num_points = initial_detection.shape[0]\n        dim_points = initial_detection.shape[1]\n        dim_z = dim_points * num_points\n        dim_x = 2 * dim_z  # We need to accommodate for velocities\n\n        filter = KalmanFilter(dim_x=dim_x, dim_z=dim_z)\n\n        # State transition matrix (models physics): numpy.array()\n        filter.F = np.eye(dim_x)\n        dt = 1  # At each step we update pos with v * dt\n\n        filter.F[:dim_z, dim_z:] = dt * np.eye(dim_z)\n\n        # Measurement function: numpy.array(dim_z, dim_x)\n        filter.H = np.eye(\n            dim_z,\n            dim_x,\n        )\n\n        # Measurement uncertainty (sensor noise): numpy.array(dim_z, dim_z)\n        filter.R *= self.R\n\n        # Process uncertainty: numpy.array(dim_x, dim_x)\n        # Don't decrease it too much or trackers pay too little attention to detections\n        filter.Q[dim_z:, dim_z:] *= self.Q\n\n        # Initial state: numpy.array(dim_x, 1)\n        filter.x[:dim_z] = np.expand_dims(initial_detection.flatten(), 0).T\n        filter.x[dim_z:] = 0\n\n        # Estimation uncertainty: numpy.array(dim_x, dim_x)\n        filter.P[dim_z:, dim_z:] *= self.P\n\n        return filter\n
    "},{"location":"reference/filter/#norfair.filter.FilterPyKalmanFilterFactory.create_filter","title":"create_filter(initial_detection)","text":"

    This method returns a new predictive filter instance with the current setup, to be used by each new TrackedObject that is created. This predictive filter will be used to estimate speed and future positions of the object, to better match the detections during its trajectory.

    Parameters:

    Name Type Description Default initial_detection ndarray

    numpy array of shape (number of points per object, 2), corresponding to the Detection.points of the tracked object being born, which shall be used as initial position estimation for it.

    required

    Returns:

    Type Description KalmanFilter

    The kalman filter

    Source code in norfair/filter.py
    def create_filter(self, initial_detection: np.ndarray) -> KalmanFilter:\n    \"\"\"\n    This method returns a new predictive filter instance with the current setup, to be used by each new [`TrackedObject`][norfair.tracker.TrackedObject] that is created.\n    This predictive filter will be used to estimate speed and future positions of the object, to better match the detections during its trajectory.\n\n    Parameters\n    ----------\n    initial_detection : np.ndarray\n        numpy array of shape `(number of points per object, 2)`, corresponding to the [`Detection.points`][norfair.tracker.Detection] of the tracked object being born,\n        which shall be used as initial position estimation for it.\n\n    Returns\n    -------\n    KalmanFilter\n        The kalman filter\n    \"\"\"\n    num_points = initial_detection.shape[0]\n    dim_points = initial_detection.shape[1]\n    dim_z = dim_points * num_points\n    dim_x = 2 * dim_z  # We need to accommodate for velocities\n\n    filter = KalmanFilter(dim_x=dim_x, dim_z=dim_z)\n\n    # State transition matrix (models physics): numpy.array()\n    filter.F = np.eye(dim_x)\n    dt = 1  # At each step we update pos with v * dt\n\n    filter.F[:dim_z, dim_z:] = dt * np.eye(dim_z)\n\n    # Measurement function: numpy.array(dim_z, dim_x)\n    filter.H = np.eye(\n        dim_z,\n        dim_x,\n    )\n\n    # Measurement uncertainty (sensor noise): numpy.array(dim_z, dim_z)\n    filter.R *= self.R\n\n    # Process uncertainty: numpy.array(dim_x, dim_x)\n    # Don't decrease it too much or trackers pay too little attention to detections\n    filter.Q[dim_z:, dim_z:] *= self.Q\n\n    # Initial state: numpy.array(dim_x, 1)\n    filter.x[:dim_z] = np.expand_dims(initial_detection.flatten(), 0).T\n    filter.x[dim_z:] = 0\n\n    # Estimation uncertainty: numpy.array(dim_x, dim_x)\n    filter.P[dim_z:, dim_z:] *= self.P\n\n    return filter\n
    "},{"location":"reference/filter/#norfair.filter.OptimizedKalmanFilterFactory","title":"OptimizedKalmanFilterFactory","text":"

    Bases: FilterFactory

    Creates faster Filters than FilterPyKalmanFilterFactory.

    It allows the user to create a Kalman Filter optimized for tracking and to set its parameters.
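
    For example, a tracker using this factory with explicit noise multipliers might be created like this (the distance settings are illustrative):

    >>> from norfair import Tracker
    >>> from norfair.filter import OptimizedKalmanFilterFactory
    >>> tracker = Tracker(
    >>>     distance_function="euclidean",
    >>>     distance_threshold=50,
    >>>     filter_factory=OptimizedKalmanFilterFactory(R=4.0, Q=0.1),
    >>> )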

    Parameters:

    Name Type Description Default R float

    Multiplier for the sensor measurement noise matrix.

    4.0 Q float

    Multiplier for the process uncertainty.

    0.1 pos_variance float

    Multiplier for the initial covariance matrix estimation, only in the entries that correspond to position (not speed) variables.

    10 pos_vel_covariance float

    Multiplier for the initial covariance matrix estimation, only in the entries that correspond to the covariance between position and speed.

    0 vel_variance float

    Multiplier for the initial covariance matrix estimation, only in the entries that correspond to velocity (not position) variables.

    1 Source code in norfair/filter.py
    class OptimizedKalmanFilterFactory(FilterFactory):\n    \"\"\"\n    Creates faster Filters than [`FilterPyKalmanFilterFactory`][norfair.filter.FilterPyKalmanFilterFactory].\n\n    It allows the user to create Kalman Filter optimized for tracking and set its parameters.\n\n    Parameters\n    ----------\n    R : float, optional\n        Multiplier for the sensor measurement noise matrix.\n    Q : float, optional\n        Multiplier for the process uncertainty.\n    pos_variance : float, optional\n        Multiplier for the initial covariance matrix estimation, only in the entries that correspond to position (not speed) variables.\n    pos_vel_covariance : float, optional\n        Multiplier for the initial covariance matrix estimation, only in the entries that correspond to the covariance between position and speed.\n    vel_variance : float, optional\n        Multiplier for the initial covariance matrix estimation, only in the entries that correspond to velocity (not position) variables.\n    \"\"\"\n\n    def __init__(\n        self,\n        R: float = 4.0,\n        Q: float = 0.1,\n        pos_variance: float = 10,\n        pos_vel_covariance: float = 0,\n        vel_variance: float = 1,\n    ):\n        self.R = R\n        self.Q = Q\n\n        # entrances P matrix of KF\n        self.pos_variance = pos_variance\n        self.pos_vel_covariance = pos_vel_covariance\n        self.vel_variance = vel_variance\n\n    def create_filter(self, initial_detection: np.ndarray):\n        num_points = initial_detection.shape[0]\n        dim_points = initial_detection.shape[1]\n        dim_z = dim_points * num_points  # flattened positions\n        dim_x = 2 * dim_z  # We need to accommodate for velocities\n\n        custom_filter = OptimizedKalmanFilter(\n            dim_x,\n            dim_z,\n            pos_variance=self.pos_variance,\n            pos_vel_covariance=self.pos_vel_covariance,\n            vel_variance=self.vel_variance,\n            q=self.Q,\n            r=self.R,\n        )\n        custom_filter.x[:dim_z] = np.expand_dims(initial_detection.flatten(), 0).T\n\n        return custom_filter\n
    "},{"location":"reference/metrics/","title":"Metrics","text":""},{"location":"reference/metrics/#norfair.metrics.PredictionsTextFile","title":"PredictionsTextFile","text":"

    Generates a text file with your predicted tracked objects, in the MOTChallenge format. It needs the 'input_path', which is the path to the sequence being processed, the 'save_path', and optionally the 'information_file' (if you don't provide an 'information_file', it is assumed there is one in the input_path folder).
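
    A minimal sketch of writing tracker predictions for a MOTChallenge sequence; the sequence path, the tracker, and the per-frame detections are illustrative, and the folder is assumed to contain a seqinfo.ini file:

    >>> from norfair.metrics import PredictionsTextFile
    >>> input_path = "MOT17/train/MOT17-04"  # hypothetical sequence folder
    >>> predictions_file = PredictionsTextFile(input_path, save_path=".")
    >>> for detections in detections_per_frame:  # e.g. from the DetectionFileParser below
    >>>     tracked_objects = tracker.update(detections)
    >>>     predictions_file.update(tracked_objects)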

    Source code in norfair/metrics.py
    class PredictionsTextFile:\n    \"\"\"Generates a text file with your predicted tracked objects, in the MOTChallenge format.\n    It needs the 'input_path', which is the path to the sequence being processed,\n    the 'save_path', and optionally the 'information_file' (in case you don't give an\n    'information_file', is assumed there is one in the input_path folder).\n    \"\"\"\n\n    def __init__(self, input_path, save_path=\".\", information_file=None):\n\n        file_name = os.path.split(input_path)[1]\n\n        if information_file is None:\n            seqinfo_path = os.path.join(input_path, \"seqinfo.ini\")\n            information_file = InformationFile(file_path=seqinfo_path)\n\n        self.length = information_file.search(variable_name=\"seqLength\")\n\n        predictions_folder = os.path.join(save_path, \"predictions\")\n        if not os.path.exists(predictions_folder):\n            os.makedirs(predictions_folder)\n\n        out_file_name = os.path.join(predictions_folder, file_name + \".txt\")\n        self.text_file = open(out_file_name, \"w+\")\n\n        self.frame_number = 1\n\n    def update(self, predictions, frame_number=None):\n        if frame_number is None:\n            frame_number = self.frame_number\n        \"\"\"\n        Write tracked object information in the output file (for this frame), in the format\n        frame_number, id, bb_left, bb_top, bb_width, bb_height, -1, -1, -1, -1\n        \"\"\"\n        for obj in predictions:\n            frame_str = str(int(frame_number))\n            id_str = str(int(obj.id))\n            bb_left_str = str((obj.estimate[0, 0]))\n            bb_top_str = str((obj.estimate[0, 1]))  # [0,1]\n            bb_width_str = str((obj.estimate[1, 0] - obj.estimate[0, 0]))\n            bb_height_str = str((obj.estimate[1, 1] - obj.estimate[0, 1]))\n            row_text_out = (\n                frame_str\n                + \",\"\n                + id_str\n                + \",\"\n                + bb_left_str\n                + \",\"\n                + bb_top_str\n                + \",\"\n                + bb_width_str\n                + \",\"\n                + bb_height_str\n                + \",-1,-1,-1,-1\"\n            )\n            self.text_file.write(row_text_out)\n            self.text_file.write(\"\\n\")\n\n        self.frame_number += 1\n\n        if self.frame_number > self.length:\n            self.text_file.close()\n
    "},{"location":"reference/metrics/#norfair.metrics.DetectionFileParser","title":"DetectionFileParser","text":"

    Gets Norfair detections from MOTChallenge text files containing detections.
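
    A minimal sketch, assuming the sequence folder contains det/det.txt and seqinfo.ini; iterating the parser yields the detections of each frame in order (the sequence path and the tracker are illustrative):

    >>> from norfair.metrics import DetectionFileParser
    >>> parser = DetectionFileParser("MOT17/train/MOT17-04")  # hypothetical sequence folder
    >>> first_frame = parser.get_dets_from_frame(1)  # list of Detection for frame 1
    >>> for detections in parser:
    >>>     tracked_objects = tracker.update(detections)  # tracker defined elsewhere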

    Source code in norfair/metrics.py
    class DetectionFileParser:\n    \"\"\"Get Norfair detections from MOTChallenge text files containing detections\"\"\"\n\n    def __init__(self, input_path, information_file=None):\n        self.frame_number = 1\n\n        # Get detecions matrix data with rows corresponding to:\n        # frame, id, bb_left, bb_top, bb_right, bb_down, conf, x, y, z\n        detections_path = os.path.join(input_path, \"det/det.txt\")\n\n        self.matrix_detections = np.loadtxt(detections_path, dtype=\"f\", delimiter=\",\")\n        row_order = np.argsort(self.matrix_detections[:, 0])\n        self.matrix_detections = self.matrix_detections[row_order]\n        # Coordinates refer to box corners\n        self.matrix_detections[:, 4] = (\n            self.matrix_detections[:, 2] + self.matrix_detections[:, 4]\n        )\n        self.matrix_detections[:, 5] = (\n            self.matrix_detections[:, 3] + self.matrix_detections[:, 5]\n        )\n\n        if information_file is None:\n            seqinfo_path = os.path.join(input_path, \"seqinfo.ini\")\n            information_file = InformationFile(file_path=seqinfo_path)\n        self.length = information_file.search(variable_name=\"seqLength\")\n\n        self.sorted_by_frame = []\n        for frame_number in range(1, self.length + 1):\n            self.sorted_by_frame.append(self.get_dets_from_frame(frame_number))\n\n    def get_dets_from_frame(self, frame_number):\n        \"\"\"this function returns a list of norfair Detections class, corresponding to frame=frame_number\"\"\"\n\n        indexes = np.argwhere(self.matrix_detections[:, 0] == frame_number)\n        detections = []\n        if len(indexes) > 0:\n            actual_det = self.matrix_detections[indexes]\n            actual_det.shape = [actual_det.shape[0], actual_det.shape[2]]\n            for det in actual_det:\n                points = np.array([[det[2], det[3]], [det[4], det[5]]])\n                conf = det[6]\n                new_detection = Detection(points, np.array([conf, conf]))\n                detections.append(new_detection)\n        self.actual_detections = detections\n        return detections\n\n    def __iter__(self):\n        self.frame_number = 1\n        return self\n\n    def __next__(self):\n        if self.frame_number <= self.length:\n            self.frame_number += 1\n            # Frame_number is always 1 unit bigger than the corresponding index in self.sorted_by_frame, and\n            # also we just incremented the frame_number, so now is 2 units bigger than the corresponding index\n            return self.sorted_by_frame[self.frame_number - 2]\n\n        raise StopIteration()\n
    "},{"location":"reference/metrics/#norfair.metrics.DetectionFileParser.get_dets_from_frame","title":"get_dets_from_frame(frame_number)","text":"

    This function returns a list of Norfair Detection objects corresponding to frame=frame_number.

    Source code in norfair/metrics.py
    def get_dets_from_frame(self, frame_number):\n    \"\"\"this function returns a list of norfair Detections class, corresponding to frame=frame_number\"\"\"\n\n    indexes = np.argwhere(self.matrix_detections[:, 0] == frame_number)\n    detections = []\n    if len(indexes) > 0:\n        actual_det = self.matrix_detections[indexes]\n        actual_det.shape = [actual_det.shape[0], actual_det.shape[2]]\n        for det in actual_det:\n            points = np.array([[det[2], det[3]], [det[4], det[5]]])\n            conf = det[6]\n            new_detection = Detection(points, np.array([conf, conf]))\n            detections.append(new_detection)\n    self.actual_detections = detections\n    return detections\n
    "},{"location":"reference/metrics/#norfair.metrics.load_motchallenge","title":"load_motchallenge(matrix_data, min_confidence=-1)","text":"

    Load MOT challenge data.

    This is a modification of the function load_motchallenge from the py-motmetrics library, defined in io.py. In this version, the pandas dataframe is generated from a numpy array (matrix_data) instead of a text file.

    Params

    matrix_data : array of floats with [frame, id, X, Y, width, height, conf, classId, visibility] in each row, one row per prediction on a particular video

    min_confidence : float Rows with confidence less than this threshold are removed. Defaults to -1. You should set this to 1 when loading ground truth MOTChallenge data, so that invalid rectangles in the ground truth are not considered during matching.

    Returns:

    Name Type Description df DataFrame

    The returned dataframe has the following columns: 'X', 'Y', 'Width', 'Height', 'Confidence', 'ClassId', 'Visibility'. The dataframe is indexed by ('FrameId', 'Id').
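
    A hedged sketch of loading a predictions matrix, assuming matrix_data already has the ten columns described above (the file path is hypothetical):

    >>> import numpy as np
    >>> from norfair.metrics import load_motchallenge
    >>> matrix_data = np.loadtxt("predictions/MOT17-04.txt", delimiter=",")  # hypothetical path
    >>> df = load_motchallenge(matrix_data)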

    Source code in norfair/metrics.py
    def load_motchallenge(matrix_data, min_confidence=-1):\n    \"\"\"Load MOT challenge data.\n\n    This is a modification of the function load_motchallenge from the py-motmetrics library, defined in io.py\n    In this version, the pandas dataframe is generated from a numpy array (matrix_data) instead of a text file.\n\n    Params\n    ------\n    matrix_data : array  of float that has [frame, id, X, Y, width, height, conf, cassId, visibility] in each row, for each prediction on a particular video\n\n    min_confidence : float\n        Rows with confidence less than this threshold are removed.\n        Defaults to -1. You should set this to 1 when loading\n        ground truth MOTChallenge data, so that invalid rectangles in\n        the ground truth are not considered during matching.\n\n    Returns\n    ------\n    df : pandas.DataFrame\n        The returned dataframe has the following columns\n            'X', 'Y', 'Width', 'Height', 'Confidence', 'ClassId', 'Visibility'\n        The dataframe is indexed by ('FrameId', 'Id')\n    \"\"\"\n\n    df = pd.DataFrame(\n        data=matrix_data,\n        columns=[\n            \"FrameId\",\n            \"Id\",\n            \"X\",\n            \"Y\",\n            \"Width\",\n            \"Height\",\n            \"Confidence\",\n            \"ClassId\",\n            \"Visibility\",\n            \"unused\",\n        ],\n    )\n    df = df.set_index([\"FrameId\", \"Id\"])\n    # Account for matlab convention.\n    df[[\"X\", \"Y\"]] -= (1, 1)\n\n    # Removed trailing column\n    del df[\"unused\"]\n\n    # Remove all rows without sufficient confidence\n    return df[df[\"Confidence\"] >= min_confidence]\n
    "},{"location":"reference/metrics/#norfair.metrics.compare_dataframes","title":"compare_dataframes(gts, ts)","text":"

    Builds an accumulator for each sequence.
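
    A minimal sketch, assuming gt_df and pred_df are dataframes produced by load_motchallenge and indexed by ('FrameId', 'Id'); the dictionary keys are sequence names:

    >>> from norfair.metrics import compare_dataframes
    >>> gts = {"MOT17-04": gt_df}
    >>> ts = {"MOT17-04": pred_df}
    >>> accs, names = compare_dataframes(gts, ts)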

    Source code in norfair/metrics.py
    def compare_dataframes(gts, ts):\n    \"\"\"Builds accumulator for each sequence.\"\"\"\n    accs = []\n    names = []\n    for k, tsacc in ts.items():\n        print(\"Comparing \", k, \"...\")\n        if k in gts:\n            accs.append(\n                mm.utils.compare_to_groundtruth(gts[k], tsacc, \"iou\", distth=0.5)\n            )\n            names.append(k)\n\n    return accs, names\n
    "},{"location":"reference/tracker/","title":"Tracker","text":""},{"location":"reference/tracker/#norfair.tracker.Tracker","title":"Tracker","text":"

    The class in charge of performing the tracking of the detections produced by a detector.
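
    As a quick sketch, a tracker can be created with one of the predefined distances or with a custom scalar distance function (the threshold values below are illustrative):

    >>> import numpy as np
    >>> from norfair import Tracker
    >>> tracker = Tracker(distance_function="euclidean", distance_threshold=50)
    >>> # or, with a custom scalar distance (slower than the vectorized built-ins)
    >>> def centroid_distance(detection, tracked_object):
    >>>     return np.linalg.norm(detection.points.mean(axis=0) - tracked_object.estimate.mean(axis=0))
    >>> tracker = Tracker(distance_function=centroid_distance, distance_threshold=50)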

    Parameters:

    Name Type Description Default distance_function Union[str, Callable[[Detection, TrackedObject], float]]

    Function used by the tracker to determine the distance between newly detected objects and the objects that are currently being tracked. This function should take 2 input arguments, the first being a Detection, and the second a TrackedObject. It has to return a float with the distance it calculates. Some common distances are implemented in distances; as a shortcut, the tracker accepts the name of these predefined distances. Scipy's predefined distances are also accepted: a str with one of the available metrics in scipy.spatial.distance.cdist.

    required distance_threshold float

    Defines the maximum distance that can constitute a match. Detections and tracked objects whose distances are above this threshold won't be matched by the tracker.

    required hit_counter_max int

    Each tracked object keeps an internal hit counter which tracks how often it's getting matched to a detection; each time it gets a match this counter goes up, and each time it doesn't it goes down.

    If it goes below 0 the object gets destroyed. This argument defines how large this inertia can grow, and therefore how long an object can live without getting matched to any detections before it is marked as a dead object; if no ReID distance function is implemented, it will be destroyed.

    15 initialization_delay Optional[int]

    Determines how large the object's hit counter must be for it to be considered initialized and returned to the user as a real object. It must be smaller than hit_counter_max, otherwise the object would never be initialized.

    If set to 0, objects will get returned to the user as soon as they are detected for the first time, which can be problematic as this can result in objects appearing and immediately disappearing.

    Defaults to hit_counter_max / 2

    None pointwise_hit_counter_max int

    Each tracked object keeps track of how often the points it's tracking have been getting matched. Points that are getting matched (pointwise_hit_counter > 0) are said to be live, and points which aren't (pointwise_hit_counter = 0) are said to not be live.

    This is used to determine things like which individual points in a tracked object get drawn by draw_tracked_objects and which don't. This argument defines how large the inertia for each point of a tracker can grow.

    4 detection_threshold float

    Sets the threshold below which the scores of the points in a detection fed into the tracker must fall for those points to be ignored by the tracker.

    0 filter_factory FilterFactory

    This parameter can be used to change what filter the TrackedObject instances created by the tracker will use. Defaults to OptimizedKalmanFilterFactory()

    OptimizedKalmanFilterFactory() past_detections_length int

    How many past detections to save for each tracked object. Norfair tries to distribute these past detections uniformly through the object's lifetime so they're more representative. Very useful if you want to add metric learning to your model, as you can associate an embedding to each detection and access them in your distance function.

    4 reid_distance_function Optional[Callable[[TrackedObject, TrackedObject], float]]

    Function used by the tracker to determine the ReID distance between newly detected tracked objects and tracked objects left unmatched by the distance function.

    This function should take 2 input arguments, the first being tracked objects in the initialization phase of type TrackedObject, and the second being tracked objects that have been unmatched of type TrackedObject. It returns a float with the distance it calculates.

    None reid_distance_threshold float

    Defines the maximum ReID distance that can constitute a match.

    Tracked objects whose distance is above this threshold won't be merged; if they are merged, the oldest tracked object will be maintained with the position of the new tracked object.

    0 reid_hit_counter_max Optional[int]

    Each tracked object keeps an internal ReID hit counter which tracks how often it's getting recognized by another tracker, each time it gets a match this counter goes up, and each time it doesn't it goes down. If it goes below 0 the object gets destroyed. If used, this argument (reid_hit_counter_max) defines how long an object can live without getting matched to any detections, before it is destroyed.

    None Source code in norfair/tracker.py
    class Tracker:\n    \"\"\"\n    The class in charge of performing the tracking of the detections produced by a detector.\n\n    Parameters\n    ----------\n    distance_function : Union[str, Callable[[Detection, TrackedObject], float]]\n        Function used by the tracker to determine the distance between newly detected objects and the objects that are currently being tracked.\n        This function should take 2 input arguments, the first being a [Detection][norfair.tracker.Detection], and the second a [TrackedObject][norfair.tracker.TrackedObject].\n        It has to return a `float` with the distance it calculates.\n        Some common distances are implemented in [distances][], as a shortcut the tracker accepts the name of these [predefined distances][norfair.distances.get_distance_by_name].\n        Scipy's predefined distances are also accepted. A `str` with one of the available metrics in\n        [`scipy.spatial.distance.cdist`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html).\n    distance_threshold : float\n        Defines what is the maximum distance that can constitute a match.\n        Detections and tracked objects whose distances are above this threshold won't be matched by the tracker.\n    hit_counter_max : int, optional\n        Each tracked objects keeps an internal hit counter which tracks how often it's getting matched to a detection,\n        each time it gets a match this counter goes up, and each time it doesn't it goes down.\n\n        If it goes below 0 the object gets destroyed. This argument defines how large this inertia can grow,\n        and therefore defines how long an object can live without getting matched to any detections, before it is displaced as a dead object, if no ReID distance function is implemented it will be destroyed.\n    initialization_delay : Optional[int], optional\n         Determines how large the object's hit counter must be in order to be considered as initialized, and get returned to the user as a real object.\n         It must be smaller than `hit_counter_max` or otherwise the object would never be initialized.\n\n         If set to 0, objects will get returned to the user as soon as they are detected for the first time,\n         which can be problematic as this can result in objects appearing and immediately dissapearing.\n\n         Defaults to `hit_counter_max / 2`\n    pointwise_hit_counter_max : int, optional\n        Each tracked object keeps track of how often the points it's tracking have been getting matched.\n        Points that are getting matched (`pointwise_hit_counter > 0`) are said to be live, and points which aren't (`pointwise_hit_counter = 0`)\n        are said to not be live.\n\n        This is used to determine things like which individual points in a tracked object get drawn by [`draw_tracked_objects`][norfair.drawing.draw_tracked_objects] and which don't.\n        This argument defines how large the inertia for each point of a tracker can grow.\n    detection_threshold : float, optional\n        Sets the threshold at which the scores of the points in a detection being fed into the tracker must dip below to be ignored by the tracker.\n    filter_factory : FilterFactory, optional\n        This parameter can be used to change what filter the [`TrackedObject`][norfair.tracker.TrackedObject] instances created by the tracker will use.\n        Defaults to [`OptimizedKalmanFilterFactory()`][norfair.filter.OptimizedKalmanFilterFactory]\n    past_detections_length : int, 
optional\n        How many past detections to save for each tracked object.\n        Norfair tries to distribute these past detections uniformly through the object's lifetime so they're more representative.\n        Very useful if you want to add metric learning to your model, as you can associate an embedding to each detection and access them in your distance function.\n    reid_distance_function: Optional[Callable[[\"TrackedObject\", \"TrackedObject\"], float]]\n        Function used by the tracker to determine the ReID distance between newly detected trackers and unmatched trackers by the distance function.\n\n        This function should take 2 input arguments, the first being tracked objects in the initialization phase of type [`TrackedObject`][norfair.tracker.TrackedObject],\n        and the second being tracked objects that have been unmatched of type [`TrackedObject`][norfair.tracker.TrackedObject]. It returns a `float` with the distance it\n        calculates.\n    reid_distance_threshold: float\n        Defines what is the maximum ReID distance that can constitute a match.\n\n        Tracked objects whose distance is above this threshold won't be merged, if they are the oldest tracked object will be maintained\n        with the position of the new tracked object.\n    reid_hit_counter_max: Optional[int]\n        Each tracked object keeps an internal ReID hit counter which tracks how often it's getting recognized by another tracker,\n        each time it gets a match this counter goes up, and each time it doesn't it goes down. If it goes below 0 the object gets destroyed.\n        If used, this argument (`reid_hit_counter_max`) defines how long an object can live without getting matched to any detections, before it is destroyed.\n    \"\"\"\n\n    def __init__(\n        self,\n        distance_function: Union[str, Callable[[\"Detection\", \"TrackedObject\"], float]],\n        distance_threshold: float,\n        hit_counter_max: int = 15,\n        initialization_delay: Optional[int] = None,\n        pointwise_hit_counter_max: int = 4,\n        detection_threshold: float = 0,\n        filter_factory: FilterFactory = OptimizedKalmanFilterFactory(),\n        past_detections_length: int = 4,\n        reid_distance_function: Optional[\n            Callable[[\"TrackedObject\", \"TrackedObject\"], float]\n        ] = None,\n        reid_distance_threshold: float = 0,\n        reid_hit_counter_max: Optional[int] = None,\n    ):\n        self.tracked_objects: Sequence[\"TrackedObject\"] = []\n\n        if isinstance(distance_function, str):\n            distance_function = get_distance_by_name(distance_function)\n        elif isinstance(distance_function, Callable):\n            warning(\n                \"You are using a scalar distance function. 
If you want to speed up the\"\n                \" tracking process please consider using a vectorized distance\"\n                f\" function such as {AVAILABLE_VECTORIZED_DISTANCES}.\"\n            )\n            distance_function = ScalarDistance(distance_function)\n        else:\n            raise ValueError(\n                \"Argument `distance_function` should be a string or function but is\"\n                f\" {type(distance_function)} instead.\"\n            )\n        self.distance_function = distance_function\n\n        self.hit_counter_max = hit_counter_max\n        self.reid_hit_counter_max = reid_hit_counter_max\n        self.pointwise_hit_counter_max = pointwise_hit_counter_max\n        self.filter_factory = filter_factory\n        if past_detections_length >= 0:\n            self.past_detections_length = past_detections_length\n        else:\n            raise ValueError(\n                f\"Argument `past_detections_length` is {past_detections_length} and should be larger than 0.\"\n            )\n\n        if initialization_delay is None:\n            self.initialization_delay = int(self.hit_counter_max / 2)\n        elif initialization_delay < 0 or initialization_delay >= self.hit_counter_max:\n            raise ValueError(\n                f\"Argument 'initialization_delay' for 'Tracker' class should be an int between 0 and (hit_counter_max = {hit_counter_max}). The selected value is {initialization_delay}.\\n\"\n            )\n        else:\n            self.initialization_delay = initialization_delay\n\n        self.distance_threshold = distance_threshold\n        self.detection_threshold = detection_threshold\n        if reid_distance_function is not None:\n            self.reid_distance_function = ScalarDistance(reid_distance_function)\n        else:\n            self.reid_distance_function = reid_distance_function\n        self.reid_distance_threshold = reid_distance_threshold\n        self._obj_factory = _TrackedObjectFactory()\n\n    def update(\n        self,\n        detections: Optional[List[\"Detection\"]] = None,\n        period: int = 1,\n        coord_transformations: Optional[CoordinatesTransformation] = None,\n    ) -> List[\"TrackedObject\"]:\n        \"\"\"\n        Process detections found in each frame.\n\n        The detections can be matched to previous tracked objects or new ones will be created\n        according to the configuration of the Tracker.\n        The currently alive and initialized tracked objects are returned\n\n        Parameters\n        ----------\n        detections : Optional[List[Detection]], optional\n            A list of [`Detection`][norfair.tracker.Detection] which represent the detections found in the current frame being processed.\n\n            If no detections have been found in the current frame, or the user is purposely skipping frames to improve video processing time,\n            this argument should be set to None or ignored, as the update function is needed to advance the state of the Kalman Filters inside the tracker.\n        period : int, optional\n            The user can chose not to run their detector on all frames, so as to process video faster.\n            This parameter sets every how many frames the detector is getting ran,\n            so that the tracker is aware of this situation and can handle it properly.\n\n            This argument can be reset on each frame processed,\n            which is useful if the user is dynamically changing how many frames the detector is skipping on a video when 
working in real-time.\n        coord_transformations: Optional[CoordinatesTransformation]\n            The coordinate transformation calculated by the [MotionEstimator][norfair.camera_motion.MotionEstimator].\n\n        Returns\n        -------\n        List[TrackedObject]\n            The list of active tracked objects.\n        \"\"\"\n        if coord_transformations is not None:\n            for det in detections:\n                det.update_coordinate_transformation(coord_transformations)\n\n        # Remove stale trackers and make candidate object real if the hit counter is positive\n        alive_objects = []\n        dead_objects = []\n        if self.reid_hit_counter_max is None:\n            self.tracked_objects = [\n                o for o in self.tracked_objects if o.hit_counter_is_positive\n            ]\n            alive_objects = self.tracked_objects\n        else:\n            tracked_objects = []\n            for o in self.tracked_objects:\n                if o.reid_hit_counter_is_positive:\n                    tracked_objects.append(o)\n                    if o.hit_counter_is_positive:\n                        alive_objects.append(o)\n                    else:\n                        dead_objects.append(o)\n            self.tracked_objects = tracked_objects\n\n        # Update tracker\n        for obj in self.tracked_objects:\n            obj.tracker_step()\n            obj.update_coordinate_transformation(coord_transformations)\n\n        # Update initialized tracked objects with detections\n        (\n            unmatched_detections,\n            _,\n            unmatched_init_trackers,\n        ) = self._update_objects_in_place(\n            self.distance_function,\n            self.distance_threshold,\n            [o for o in alive_objects if not o.is_initializing],\n            detections,\n            period,\n        )\n\n        # Update not yet initialized tracked objects with yet unmatched detections\n        (\n            unmatched_detections,\n            matched_not_init_trackers,\n            _,\n        ) = self._update_objects_in_place(\n            self.distance_function,\n            self.distance_threshold,\n            [o for o in alive_objects if o.is_initializing],\n            unmatched_detections,\n            period,\n        )\n\n        if self.reid_distance_function is not None:\n            # Match unmatched initialized tracked objects with not yet initialized tracked objects\n            _, _, _ = self._update_objects_in_place(\n                self.reid_distance_function,\n                self.reid_distance_threshold,\n                unmatched_init_trackers + dead_objects,\n                matched_not_init_trackers,\n                period,\n            )\n\n        # Create new tracked objects from remaining unmatched detections\n        for detection in unmatched_detections:\n            self.tracked_objects.append(\n                self._obj_factory.create(\n                    initial_detection=detection,\n                    hit_counter_max=self.hit_counter_max,\n                    initialization_delay=self.initialization_delay,\n                    pointwise_hit_counter_max=self.pointwise_hit_counter_max,\n                    detection_threshold=self.detection_threshold,\n                    period=period,\n                    filter_factory=self.filter_factory,\n                    past_detections_length=self.past_detections_length,\n                    reid_hit_counter_max=self.reid_hit_counter_max,\n                    
coord_transformations=coord_transformations,\n                )\n            )\n\n        return self.get_active_objects()\n\n    @property\n    def current_object_count(self) -> int:\n        \"\"\"Number of active TrackedObjects\"\"\"\n        return len(self.get_active_objects())\n\n    @property\n    def total_object_count(self) -> int:\n        \"\"\"Total number of TrackedObjects initialized in the by this Tracker\"\"\"\n        return self._obj_factory.count\n\n    def get_active_objects(self) -> List[\"TrackedObject\"]:\n        \"\"\"Get the list of active objects\n\n        Returns\n        -------\n        List[\"TrackedObject\"]\n            The list of active objects\n        \"\"\"\n        return [\n            o\n            for o in self.tracked_objects\n            if not o.is_initializing and o.hit_counter_is_positive\n        ]\n\n    def _update_objects_in_place(\n        self,\n        distance_function,\n        distance_threshold,\n        objects: Sequence[\"TrackedObject\"],\n        candidates: Optional[Union[List[\"Detection\"], List[\"TrackedObject\"]]],\n        period: int,\n    ):\n        if candidates is not None and len(candidates) > 0:\n            distance_matrix = distance_function.get_distances(objects, candidates)\n            if np.isnan(distance_matrix).any():\n                raise ValueError(\n                    \"\\nReceived nan values from distance function, please check your distance function for errors!\"\n                )\n\n            # Used just for debugging distance function\n            if distance_matrix.any():\n                for i, minimum in enumerate(distance_matrix.min(axis=0)):\n                    objects[i].current_min_distance = (\n                        minimum if minimum < distance_threshold else None\n                    )\n\n            matched_cand_indices, matched_obj_indices = self.match_dets_and_objs(\n                distance_matrix, distance_threshold\n            )\n            if len(matched_cand_indices) > 0:\n                unmatched_candidates = [\n                    d for i, d in enumerate(candidates) if i not in matched_cand_indices\n                ]\n                unmatched_objects = [\n                    d for i, d in enumerate(objects) if i not in matched_obj_indices\n                ]\n                matched_objects = []\n\n                # Handle matched people/detections\n                for (match_cand_idx, match_obj_idx) in zip(\n                    matched_cand_indices, matched_obj_indices\n                ):\n                    match_distance = distance_matrix[match_cand_idx, match_obj_idx]\n                    matched_candidate = candidates[match_cand_idx]\n                    matched_object = objects[match_obj_idx]\n                    if match_distance < distance_threshold:\n                        if isinstance(matched_candidate, Detection):\n                            matched_object.hit(matched_candidate, period=period)\n                            matched_object.last_distance = match_distance\n                            matched_objects.append(matched_object)\n                        elif isinstance(matched_candidate, TrackedObject):\n                            # Merge new TrackedObject with the old one\n                            matched_object.merge(matched_candidate)\n                            # If we are matching TrackedObject instances we want to get rid of the\n                            # already matched candidate to avoid matching it again in future frames\n          
                  self.tracked_objects.remove(matched_candidate)\n                    else:\n                        unmatched_candidates.append(matched_candidate)\n                        unmatched_objects.append(matched_object)\n            else:\n                unmatched_candidates, matched_objects, unmatched_objects = (\n                    candidates,\n                    [],\n                    objects,\n                )\n        else:\n            unmatched_candidates, matched_objects, unmatched_objects = [], [], objects\n\n        return unmatched_candidates, matched_objects, unmatched_objects\n\n    def match_dets_and_objs(self, distance_matrix: np.ndarray, distance_threshold):\n        \"\"\"Matches detections with tracked_objects from a distance matrix\n\n        I used to match by minimizing the global distances, but found several\n        cases in which this was not optimal. So now I just match by starting\n        with the global minimum distance and matching the det-obj corresponding\n        to that distance, then taking the second minimum, and so on until we\n        reach the distance_threshold.\n\n        This avoids the the algorithm getting cute with us and matching things\n        that shouldn't be matching just for the sake of minimizing the global\n        distance, which is what used to happen\n        \"\"\"\n        # NOTE: This implementation is terribly inefficient, but it doesn't\n        #       seem to affect the fps at all.\n        distance_matrix = distance_matrix.copy()\n        if distance_matrix.size > 0:\n            det_idxs = []\n            obj_idxs = []\n            current_min = distance_matrix.min()\n\n            while current_min < distance_threshold:\n                flattened_arg_min = distance_matrix.argmin()\n                det_idx = flattened_arg_min // distance_matrix.shape[1]\n                obj_idx = flattened_arg_min % distance_matrix.shape[1]\n                det_idxs.append(det_idx)\n                obj_idxs.append(obj_idx)\n                distance_matrix[det_idx, :] = distance_threshold + 1\n                distance_matrix[:, obj_idx] = distance_threshold + 1\n                current_min = distance_matrix.min()\n\n            return det_idxs, obj_idxs\n        else:\n            return [], []\n
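To illustrate the ReID parameters described above, here is a minimal sketch (not the library's own implementation) of an embedding-based ReID distance. It assumes each Detection was created with a 1-D numpy array in its embedding argument:

>>> import numpy as np
>>> from norfair import Tracker
>>> def embedding_distance(new_obj, unmatched_obj):
...     # Cosine distance between the embeddings of the last matched detections.
...     a = new_obj.last_detection.embedding
...     b = unmatched_obj.last_detection.embedding
...     if a is None or b is None:
...         return 1  # no embedding available -> force "no match"
...     cosine = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-8)
...     return 1 - cosine
>>> tracker = Tracker(
...     distance_function="euclidean",
...     distance_threshold=30,
...     reid_distance_function=embedding_distance,
...     reid_distance_threshold=0.3,
...     reid_hit_counter_max=300,
... )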
    "},{"location":"reference/tracker/#norfair.tracker.Tracker.current_object_count","title":"current_object_count: int property","text":"

    Number of active TrackedObjects

    "},{"location":"reference/tracker/#norfair.tracker.Tracker.total_object_count","title":"total_object_count: int property","text":"

    Total number of TrackedObjects initialized by this Tracker

    "},{"location":"reference/tracker/#norfair.tracker.Tracker.update","title":"update(detections=None, period=1, coord_transformations=None)","text":"

    Process detections found in each frame.

    Detections are matched to previously tracked objects, or new ones are created, according to the configuration of the Tracker. The currently alive and initialized tracked objects are returned.

    Parameters:

    Name Type Description Default detections Optional[List[Detection]]

    A list of Detection objects representing the detections found in the current frame being processed.

    If no detections have been found in the current frame, or the user is purposely skipping frames to improve video processing time, this argument should be set to None or ignored, as the update function is needed to advance the state of the Kalman Filters inside the tracker.

    None period int

    The user can choose not to run their detector on every frame, so as to process video faster. This parameter sets how many frames apart the detector is run, so that the tracker is aware of this situation and can handle it properly.

    This argument can be reset on each frame processed, which is useful if the user is dynamically changing how many frames the detector is skipping on a video when working in real-time.

    1 coord_transformations Optional[CoordinatesTransformation]

    The coordinate transformation calculated by the MotionEstimator.

    None

    Returns:

    Type Description List[TrackedObject]

    The list of active tracked objects.

    Source code in norfair/tracker.py
    def update(\n    self,\n    detections: Optional[List[\"Detection\"]] = None,\n    period: int = 1,\n    coord_transformations: Optional[CoordinatesTransformation] = None,\n) -> List[\"TrackedObject\"]:\n    \"\"\"\n    Process detections found in each frame.\n\n    The detections can be matched to previous tracked objects or new ones will be created\n    according to the configuration of the Tracker.\n    The currently alive and initialized tracked objects are returned\n\n    Parameters\n    ----------\n    detections : Optional[List[Detection]], optional\n        A list of [`Detection`][norfair.tracker.Detection] which represent the detections found in the current frame being processed.\n\n        If no detections have been found in the current frame, or the user is purposely skipping frames to improve video processing time,\n        this argument should be set to None or ignored, as the update function is needed to advance the state of the Kalman Filters inside the tracker.\n    period : int, optional\n        The user can chose not to run their detector on all frames, so as to process video faster.\n        This parameter sets every how many frames the detector is getting ran,\n        so that the tracker is aware of this situation and can handle it properly.\n\n        This argument can be reset on each frame processed,\n        which is useful if the user is dynamically changing how many frames the detector is skipping on a video when working in real-time.\n    coord_transformations: Optional[CoordinatesTransformation]\n        The coordinate transformation calculated by the [MotionEstimator][norfair.camera_motion.MotionEstimator].\n\n    Returns\n    -------\n    List[TrackedObject]\n        The list of active tracked objects.\n    \"\"\"\n    if coord_transformations is not None:\n        for det in detections:\n            det.update_coordinate_transformation(coord_transformations)\n\n    # Remove stale trackers and make candidate object real if the hit counter is positive\n    alive_objects = []\n    dead_objects = []\n    if self.reid_hit_counter_max is None:\n        self.tracked_objects = [\n            o for o in self.tracked_objects if o.hit_counter_is_positive\n        ]\n        alive_objects = self.tracked_objects\n    else:\n        tracked_objects = []\n        for o in self.tracked_objects:\n            if o.reid_hit_counter_is_positive:\n                tracked_objects.append(o)\n                if o.hit_counter_is_positive:\n                    alive_objects.append(o)\n                else:\n                    dead_objects.append(o)\n        self.tracked_objects = tracked_objects\n\n    # Update tracker\n    for obj in self.tracked_objects:\n        obj.tracker_step()\n        obj.update_coordinate_transformation(coord_transformations)\n\n    # Update initialized tracked objects with detections\n    (\n        unmatched_detections,\n        _,\n        unmatched_init_trackers,\n    ) = self._update_objects_in_place(\n        self.distance_function,\n        self.distance_threshold,\n        [o for o in alive_objects if not o.is_initializing],\n        detections,\n        period,\n    )\n\n    # Update not yet initialized tracked objects with yet unmatched detections\n    (\n        unmatched_detections,\n        matched_not_init_trackers,\n        _,\n    ) = self._update_objects_in_place(\n        self.distance_function,\n        self.distance_threshold,\n        [o for o in alive_objects if o.is_initializing],\n        unmatched_detections,\n        period,\n 
   )\n\n    if self.reid_distance_function is not None:\n        # Match unmatched initialized tracked objects with not yet initialized tracked objects\n        _, _, _ = self._update_objects_in_place(\n            self.reid_distance_function,\n            self.reid_distance_threshold,\n            unmatched_init_trackers + dead_objects,\n            matched_not_init_trackers,\n            period,\n        )\n\n    # Create new tracked objects from remaining unmatched detections\n    for detection in unmatched_detections:\n        self.tracked_objects.append(\n            self._obj_factory.create(\n                initial_detection=detection,\n                hit_counter_max=self.hit_counter_max,\n                initialization_delay=self.initialization_delay,\n                pointwise_hit_counter_max=self.pointwise_hit_counter_max,\n                detection_threshold=self.detection_threshold,\n                period=period,\n                filter_factory=self.filter_factory,\n                past_detections_length=self.past_detections_length,\n                reid_hit_counter_max=self.reid_hit_counter_max,\n                coord_transformations=coord_transformations,\n            )\n        )\n\n    return self.get_active_objects()\n
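    Putting update together with Detection and Video, a typical per-frame loop looks roughly like the sketch below. Here my_detector is a hypothetical function returning one (x, y) centroid per detected object; any real detector would need its output converted in a similar way:

>>> import numpy as np
>>> from norfair import Detection, Tracker, Video
>>> tracker = Tracker(distance_function="euclidean", distance_threshold=30)
>>> video = Video(input_path="video.mp4")
>>> for frame in video:
...     centroids = my_detector(frame)  # hypothetical detector, returns a list of (x, y) pairs
...     detections = [Detection(points=np.array([c])) for c in centroids]
...     tracked_objects = tracker.update(detections=detections)
...     video.write(frame)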
    "},{"location":"reference/tracker/#norfair.tracker.Tracker.get_active_objects","title":"get_active_objects()","text":"

    Get the list of active objects

    Returns:

    Type Description List[TrackedObject]

    The list of active objects

    Source code in norfair/tracker.py
    def get_active_objects(self) -> List[\"TrackedObject\"]:\n    \"\"\"Get the list of active objects\n\n    Returns\n    -------\n    List[\"TrackedObject\"]\n        The list of active objects\n    \"\"\"\n    return [\n        o\n        for o in self.tracked_objects\n        if not o.is_initializing and o.hit_counter_is_positive\n    ]\n
    "},{"location":"reference/tracker/#norfair.tracker.Tracker.match_dets_and_objs","title":"match_dets_and_objs(distance_matrix, distance_threshold)","text":"

    Matches detections with tracked_objects from a distance matrix

    I used to match by minimizing the global distances, but found several cases in which this was not optimal. So now I just match by starting with the global minimum distance and matching the det-obj corresponding to that distance, then taking the second minimum, and so on until we reach the distance_threshold.

    This avoids the algorithm getting cute with us and matching things that shouldn't be matched just for the sake of minimizing the global distance, which is what used to happen

    Source code in norfair/tracker.py
    def match_dets_and_objs(self, distance_matrix: np.ndarray, distance_threshold):\n    \"\"\"Matches detections with tracked_objects from a distance matrix\n\n    I used to match by minimizing the global distances, but found several\n    cases in which this was not optimal. So now I just match by starting\n    with the global minimum distance and matching the det-obj corresponding\n    to that distance, then taking the second minimum, and so on until we\n    reach the distance_threshold.\n\n    This avoids the the algorithm getting cute with us and matching things\n    that shouldn't be matching just for the sake of minimizing the global\n    distance, which is what used to happen\n    \"\"\"\n    # NOTE: This implementation is terribly inefficient, but it doesn't\n    #       seem to affect the fps at all.\n    distance_matrix = distance_matrix.copy()\n    if distance_matrix.size > 0:\n        det_idxs = []\n        obj_idxs = []\n        current_min = distance_matrix.min()\n\n        while current_min < distance_threshold:\n            flattened_arg_min = distance_matrix.argmin()\n            det_idx = flattened_arg_min // distance_matrix.shape[1]\n            obj_idx = flattened_arg_min % distance_matrix.shape[1]\n            det_idxs.append(det_idx)\n            obj_idxs.append(obj_idx)\n            distance_matrix[det_idx, :] = distance_threshold + 1\n            distance_matrix[:, obj_idx] = distance_threshold + 1\n            current_min = distance_matrix.min()\n\n        return det_idxs, obj_idxs\n    else:\n        return [], []\n
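    The greedy procedure described above can be reproduced in a few lines of numpy. The sketch below uses a made-up 2x2 distance matrix (rows are candidates, columns are objects) and is not the library code itself:

>>> import numpy as np
>>> d = np.array([[0.2, 5.0], [0.3, 0.1]])
>>> threshold = 1.0
>>> det_idxs, obj_idxs = [], []
>>> while d.size > 0 and d.min() < threshold:
...     i, j = divmod(int(d.argmin()), d.shape[1])  # candidate row, object column
...     det_idxs.append(i)
...     obj_idxs.append(j)
...     d[i, :] = threshold + 1  # exclude this candidate from further matches
...     d[:, j] = threshold + 1  # exclude this object from further matches
>>> det_idxs, obj_idxs
([1, 0], [1, 0])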
    "},{"location":"reference/tracker/#norfair.tracker.TrackedObject","title":"TrackedObject","text":"

    The objects returned by the tracker's update function on each iteration.

    They represent the objects currently being tracked by the tracker.

    Users should not instantiate TrackedObjects manually; the Tracker will be in charge of creating them.

    Attributes:

    Name Type Description estimate ndarray

    Where the tracker predicts the point will be in the current frame based on past detections. A numpy array with the same shape as the detections being fed to the tracker that produced it.

    id Optional[int]

    The unique identifier assigned to this object by the tracker. Set to None if the object is initializing.

    global_id Optional[int]

    The globally unique identifier assigned to this object. Set to None if the object is initializing.

    last_detection Detection

    The last detection that matched with this tracked object. Useful if you are storing embeddings in your detections and want to do metric learning, or for debugging.

    last_distance Optional[float]

    The distance computed the last time this object was matched.

    age int

    The age of this object measured in number of frames.

    live_points

    A boolean mask with shape (n_points,). Points marked as True have recently been matched with detections; points marked as False haven't, should be considered stale, and should be ignored.

    Functions like draw_tracked_objects use this property to determine which points not to draw.

    initializing_id int

    On top of id, objects also have an initializing_id which is the id they are given internally by the Tracker; this id is used solely for debugging.

    Each new object created by the Tracker starts as an uninitialized TrackedObject, which needs to reach a certain match rate to be converted into a full-blown TrackedObject. initializing_id is the id temporarily assigned to TrackedObjects while they are getting initialized.

    Source code in norfair/tracker.py
    class TrackedObject:\n    \"\"\"\n    The objects returned by the tracker's `update` function on each iteration.\n\n    They represent the objects currently being tracked by the tracker.\n\n    Users should not instantiate TrackedObjects manually;\n    the Tracker will be in charge of creating them.\n\n    Attributes\n    ----------\n    estimate : np.ndarray\n        Where the tracker predicts the point will be in the current frame based on past detections.\n        A numpy array with the same shape as the detections being fed to the tracker that produced it.\n    id : Optional[int]\n        The unique identifier assigned to this object by the tracker. Set to `None` if the object is initializing.\n    global_id : Optional[int]\n        The globally unique identifier assigned to this object. Set to `None` if the object is initializing\n    last_detection : Detection\n        The last detection that matched with this tracked object.\n        Useful if you are storing embeddings in your detections and want to do metric learning, or for debugging.\n    last_distance : Optional[float]\n        The distance the tracker had with the last object it matched with.\n    age : int\n        The age of this object measured in number of frames.\n    live_points :\n        A boolean mask with shape `(n_points,)`. Points marked as `True` have recently been matched with detections.\n        Points marked as `False` haven't and are to be considered stale, and should be ignored.\n\n        Functions like [`draw_tracked_objects`][norfair.drawing.draw_tracked_objects] use this property to determine which points not to draw.\n    initializing_id : int\n        On top of `id`, objects also have an `initializing_id` which is the id they are given internally by the `Tracker`;\n        this id is used solely for debugging.\n\n        Each new object created by the `Tracker` starts as an uninitialized `TrackedObject`,\n        which needs to reach a certain match rate to be converted into a full blown `TrackedObject`.\n        `initializing_id` is the id temporarily assigned to `TrackedObject` while they are getting initialized.\n    \"\"\"\n\n    def __init__(\n        self,\n        obj_factory: _TrackedObjectFactory,\n        initial_detection: \"Detection\",\n        hit_counter_max: int,\n        initialization_delay: int,\n        pointwise_hit_counter_max: int,\n        detection_threshold: float,\n        period: int,\n        filter_factory: \"FilterFactory\",\n        past_detections_length: int,\n        reid_hit_counter_max: Optional[int],\n        coord_transformations: Optional[CoordinatesTransformation] = None,\n    ):\n        if not isinstance(initial_detection, Detection):\n            raise ValueError(\n                f\"\\n[red]ERROR[/red]: The detection list fed into `tracker.update()` should be composed of {Detection} objects not {type(initial_detection)}.\\n\"\n            )\n        self._obj_factory = obj_factory\n        self.dim_points = initial_detection.absolute_points.shape[1]\n        self.num_points = initial_detection.absolute_points.shape[0]\n        self.hit_counter_max: int = hit_counter_max\n        self.pointwise_hit_counter_max: int = max(pointwise_hit_counter_max, period)\n        self.initialization_delay = initialization_delay\n        self.detection_threshold: float = detection_threshold\n        self.initial_period: int = period\n        self.hit_counter: int = period\n        self.reid_hit_counter_max = reid_hit_counter_max\n        self.reid_hit_counter: 
Optional[int] = None\n        self.last_distance: Optional[float] = None\n        self.current_min_distance: Optional[float] = None\n        self.last_detection: \"Detection\" = initial_detection\n        self.age: int = 0\n        self.is_initializing: bool = self.hit_counter <= self.initialization_delay\n\n        self.initializing_id: Optional[int] = self._obj_factory.get_initializing_id()\n        self.id: Optional[int] = None\n        self.global_id: Optional[int] = None\n        if not self.is_initializing:\n            self._acquire_ids()\n\n        if initial_detection.scores is None:\n            self.detected_at_least_once_points = np.array([True] * self.num_points)\n        else:\n            self.detected_at_least_once_points = (\n                initial_detection.scores > self.detection_threshold\n            )\n        self.point_hit_counter: np.ndarray = self.detected_at_least_once_points.astype(\n            int\n        )\n        initial_detection.age = self.age\n        self.past_detections_length = past_detections_length\n        if past_detections_length > 0:\n            self.past_detections: Sequence[\"Detection\"] = [initial_detection]\n        else:\n            self.past_detections: Sequence[\"Detection\"] = []\n\n        # Create Kalman Filter\n        self.filter = filter_factory.create_filter(initial_detection.absolute_points)\n        self.dim_z = self.dim_points * self.num_points\n        self.label = initial_detection.label\n        self.abs_to_rel = None\n        if coord_transformations is not None:\n            self.update_coordinate_transformation(coord_transformations)\n\n    def tracker_step(self):\n        if self.reid_hit_counter is None:\n            if self.hit_counter <= 0:\n                self.reid_hit_counter = self.reid_hit_counter_max\n        else:\n            self.reid_hit_counter -= 1\n        self.hit_counter -= 1\n        self.point_hit_counter -= 1\n        self.age += 1\n        # Advances the tracker's state\n        self.filter.predict()\n\n    @property\n    def hit_counter_is_positive(self):\n        return self.hit_counter >= 0\n\n    @property\n    def reid_hit_counter_is_positive(self):\n        return self.reid_hit_counter is None or self.reid_hit_counter >= 0\n\n    @property\n    def estimate_velocity(self) -> np.ndarray:\n        \"\"\"Get the velocity estimate of the object from the Kalman filter. 
This velocity is in the absolute coordinate system.\n\n        Returns\n        -------\n        np.ndarray\n            An array of shape (self.num_points, self.dim_points) containing the velocity estimate of the object on each axis.\n        \"\"\"\n        return self.filter.x.T.flatten()[self.dim_z :].reshape(-1, self.dim_points)\n\n    @property\n    def estimate(self) -> np.ndarray:\n        \"\"\"Get the position estimate of the object from the Kalman filter.\n\n        Returns\n        -------\n        np.ndarray\n            An array of shape (self.num_points, self.dim_points) containing the position estimate of the object on each axis.\n        \"\"\"\n        return self.get_estimate()\n\n    def get_estimate(self, absolute=False) -> np.ndarray:\n        \"\"\"Get the position estimate of the object from the Kalman filter in an absolute or relative format.\n\n        Parameters\n        ----------\n        absolute : bool, optional\n            If true the coordinates are returned in absolute format, by default False, by default False.\n\n        Returns\n        -------\n        np.ndarray\n            An array of shape (self.num_points, self.dim_points) containing the position estimate of the object on each axis.\n\n        Raises\n        ------\n        ValueError\n            Alert if the coordinates are requested in absolute format but the tracker has no coordinate transformation.\n        \"\"\"\n        positions = self.filter.x.T.flatten()[: self.dim_z].reshape(-1, self.dim_points)\n        if self.abs_to_rel is None:\n            if not absolute:\n                return positions\n            else:\n                raise ValueError(\n                    \"You must provide 'coord_transformations' to the tracker to get absolute coordinates\"\n                )\n        else:\n            if absolute:\n                return positions\n            else:\n                return self.abs_to_rel(positions)\n\n    @property\n    def live_points(self):\n        return self.point_hit_counter > 0\n\n    def hit(self, detection: \"Detection\", period: int = 1):\n        \"\"\"Update tracked object with a new detection\n\n        Parameters\n        ----------\n        detection : Detection\n            the new detection matched to this tracked object\n        period : int, optional\n            frames corresponding to the period of time since last update.\n        \"\"\"\n        self._conditionally_add_to_past_detections(detection)\n\n        self.last_detection = detection\n        self.hit_counter = min(self.hit_counter + 2 * period, self.hit_counter_max)\n\n        if self.is_initializing and self.hit_counter > self.initialization_delay:\n            self.is_initializing = False\n            self._acquire_ids()\n\n        # We use a kalman filter in which we consider each coordinate on each point as a sensor.\n        # This is a hacky way to update only certain sensors (only x, y coordinates for\n        # points which were detected).\n        # TODO: Use keypoint confidence information to change R on each sensor instead?\n        if detection.scores is not None:\n            assert len(detection.scores.shape) == 1\n            points_over_threshold_mask = detection.scores > self.detection_threshold\n            matched_sensors_mask = np.array(\n                [(m,) * self.dim_points for m in points_over_threshold_mask]\n            ).flatten()\n            H_pos = np.diag(matched_sensors_mask).astype(\n                float\n            )  # We measure x, y positions\n     
       self.point_hit_counter[points_over_threshold_mask] += 2 * period\n        else:\n            points_over_threshold_mask = np.array([True] * self.num_points)\n            H_pos = np.identity(self.num_points * self.dim_points)\n            self.point_hit_counter += 2 * period\n        self.point_hit_counter[\n            self.point_hit_counter >= self.pointwise_hit_counter_max\n        ] = self.pointwise_hit_counter_max\n        self.point_hit_counter[self.point_hit_counter < 0] = 0\n        H_vel = np.zeros(H_pos.shape)  # But we don't directly measure velocity\n        H = np.hstack([H_pos, H_vel])\n        self.filter.update(\n            np.expand_dims(detection.absolute_points.flatten(), 0).T, None, H\n        )\n\n        detected_at_least_once_mask = np.array(\n            [(m,) * self.dim_points for m in self.detected_at_least_once_points]\n        ).flatten()\n        now_detected_mask = np.hstack(\n            (points_over_threshold_mask,) * self.dim_points\n        ).flatten()\n        first_detection_mask = np.logical_and(\n            now_detected_mask, np.logical_not(detected_at_least_once_mask)\n        )\n\n        self.filter.x[: self.dim_z][first_detection_mask] = np.expand_dims(\n            detection.absolute_points.flatten(), 0\n        ).T[first_detection_mask]\n\n        # Force points being detected for the first time to have velocity = 0\n        # This is needed because some detectors (like OpenPose) set points with\n        # low confidence to coordinates (0, 0). And when they then get their first\n        # real detection this creates a huge velocity vector in our KalmanFilter\n        # and causes the tracker to start with wildly inaccurate estimations which\n        # eventually coverge to the real detections.\n        self.filter.x[self.dim_z :][np.logical_not(detected_at_least_once_mask)] = 0\n        self.detected_at_least_once_points = np.logical_or(\n            self.detected_at_least_once_points, points_over_threshold_mask\n        )\n\n    def __repr__(self):\n        if self.last_distance is None:\n            placeholder_text = \"\\033[1mObject_{}\\033[0m(age: {}, hit_counter: {}, last_distance: {}, init_id: {})\"\n        else:\n            placeholder_text = \"\\033[1mObject_{}\\033[0m(age: {}, hit_counter: {}, last_distance: {:.2f}, init_id: {})\"\n        return placeholder_text.format(\n            self.id,\n            self.age,\n            self.hit_counter,\n            self.last_distance,\n            self.initializing_id,\n        )\n\n    def _conditionally_add_to_past_detections(self, detection):\n        \"\"\"Adds detections into (and pops detections away) from `past_detections`\n\n        It does so by keeping a fixed amount of past detections saved into each\n        TrackedObject, while maintaining them distributed uniformly through the object's\n        lifetime.\n        \"\"\"\n        if self.past_detections_length == 0:\n            return\n        if len(self.past_detections) < self.past_detections_length:\n            detection.age = self.age\n            self.past_detections.append(detection)\n        elif self.age >= self.past_detections[0].age * self.past_detections_length:\n            self.past_detections.pop(0)\n            detection.age = self.age\n            self.past_detections.append(detection)\n\n    def merge(self, tracked_object):\n        \"\"\"Merge with a not yet initialized TrackedObject instance\"\"\"\n        self.reid_hit_counter = None\n        self.hit_counter = self.initial_period * 2\n        
self.point_hit_counter = tracked_object.point_hit_counter\n        self.last_distance = tracked_object.last_distance\n        self.current_min_distance = tracked_object.current_min_distance\n        self.last_detection = tracked_object.last_detection\n        self.detected_at_least_once_points = (\n            tracked_object.detected_at_least_once_points\n        )\n        self.filter = tracked_object.filter\n\n        for past_detection in tracked_object.past_detections:\n            self._conditionally_add_to_past_detections(past_detection)\n\n    def update_coordinate_transformation(\n        self, coordinate_transformation: CoordinatesTransformation\n    ):\n        if coordinate_transformation is not None:\n            self.abs_to_rel = coordinate_transformation.abs_to_rel\n\n    def _acquire_ids(self):\n        self.id, self.global_id = self._obj_factory.get_ids()\n
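    Inside a tracking loop, the attributes listed above are typically consumed like this (a sketch; tracker and detections are assumed to come from the surrounding code, and embeddings are only present if you stored them on your detections):

>>> for obj in tracker.update(detections=detections):
...     print(obj.id, obj.label, obj.age)
...     live_estimate = obj.estimate[obj.live_points]  # keep only recently matched points
...     embedding = obj.last_detection.embedding       # None unless set when creating the Detection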
    "},{"location":"reference/tracker/#norfair.tracker.TrackedObject.estimate_velocity","title":"estimate_velocity: np.ndarray property","text":"

    Get the velocity estimate of the object from the Kalman filter. This velocity is in the absolute coordinate system.

    Returns:

    Type Description ndarray

    An array of shape (self.num_points, self.dim_points) containing the velocity estimate of the object on each axis.

    "},{"location":"reference/tracker/#norfair.tracker.TrackedObject.estimate","title":"estimate: np.ndarray property","text":"

    Get the position estimate of the object from the Kalman filter.

    Returns:

    Type Description ndarray

    An array of shape (self.num_points, self.dim_points) containing the position estimate of the object on each axis.

    "},{"location":"reference/tracker/#norfair.tracker.TrackedObject.get_estimate","title":"get_estimate(absolute=False)","text":"

    Get the position estimate of the object from the Kalman filter in an absolute or relative format.

    Parameters:

    Name Type Description Default absolute bool

    If True, the coordinates are returned in absolute format; by default False.

    False

    Returns:

    Type Description ndarray

    An array of shape (self.num_points, self.dim_points) containing the position estimate of the object on each axis.

    Raises:

    Type Description ValueError

    Raised if the coordinates are requested in absolute format but the tracker has no coordinate transformation.

    Source code in norfair/tracker.py
    def get_estimate(self, absolute=False) -> np.ndarray:\n    \"\"\"Get the position estimate of the object from the Kalman filter in an absolute or relative format.\n\n    Parameters\n    ----------\n    absolute : bool, optional\n        If true the coordinates are returned in absolute format, by default False, by default False.\n\n    Returns\n    -------\n    np.ndarray\n        An array of shape (self.num_points, self.dim_points) containing the position estimate of the object on each axis.\n\n    Raises\n    ------\n    ValueError\n        Alert if the coordinates are requested in absolute format but the tracker has no coordinate transformation.\n    \"\"\"\n    positions = self.filter.x.T.flatten()[: self.dim_z].reshape(-1, self.dim_points)\n    if self.abs_to_rel is None:\n        if not absolute:\n            return positions\n        else:\n            raise ValueError(\n                \"You must provide 'coord_transformations' to the tracker to get absolute coordinates\"\n            )\n    else:\n        if absolute:\n            return positions\n        else:\n            return self.abs_to_rel(positions)\n
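    For example (a sketch, assuming obj comes from a tracker that received coord_transformations from a MotionEstimator):

>>> relative_points = obj.get_estimate()               # frame (relative) coordinates, same as obj.estimate
>>> absolute_points = obj.get_estimate(absolute=True)  # raises ValueError if no transformations were provided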
    "},{"location":"reference/tracker/#norfair.tracker.TrackedObject.hit","title":"hit(detection, period=1)","text":"

    Update tracked object with a new detection

    Parameters:

    Name Type Description Default detection Detection

    The new detection matched to this tracked object.

    required period int

    Frames corresponding to the period of time since the last update.

    1 Source code in norfair/tracker.py
    def hit(self, detection: \"Detection\", period: int = 1):\n    \"\"\"Update tracked object with a new detection\n\n    Parameters\n    ----------\n    detection : Detection\n        the new detection matched to this tracked object\n    period : int, optional\n        frames corresponding to the period of time since last update.\n    \"\"\"\n    self._conditionally_add_to_past_detections(detection)\n\n    self.last_detection = detection\n    self.hit_counter = min(self.hit_counter + 2 * period, self.hit_counter_max)\n\n    if self.is_initializing and self.hit_counter > self.initialization_delay:\n        self.is_initializing = False\n        self._acquire_ids()\n\n    # We use a kalman filter in which we consider each coordinate on each point as a sensor.\n    # This is a hacky way to update only certain sensors (only x, y coordinates for\n    # points which were detected).\n    # TODO: Use keypoint confidence information to change R on each sensor instead?\n    if detection.scores is not None:\n        assert len(detection.scores.shape) == 1\n        points_over_threshold_mask = detection.scores > self.detection_threshold\n        matched_sensors_mask = np.array(\n            [(m,) * self.dim_points for m in points_over_threshold_mask]\n        ).flatten()\n        H_pos = np.diag(matched_sensors_mask).astype(\n            float\n        )  # We measure x, y positions\n        self.point_hit_counter[points_over_threshold_mask] += 2 * period\n    else:\n        points_over_threshold_mask = np.array([True] * self.num_points)\n        H_pos = np.identity(self.num_points * self.dim_points)\n        self.point_hit_counter += 2 * period\n    self.point_hit_counter[\n        self.point_hit_counter >= self.pointwise_hit_counter_max\n    ] = self.pointwise_hit_counter_max\n    self.point_hit_counter[self.point_hit_counter < 0] = 0\n    H_vel = np.zeros(H_pos.shape)  # But we don't directly measure velocity\n    H = np.hstack([H_pos, H_vel])\n    self.filter.update(\n        np.expand_dims(detection.absolute_points.flatten(), 0).T, None, H\n    )\n\n    detected_at_least_once_mask = np.array(\n        [(m,) * self.dim_points for m in self.detected_at_least_once_points]\n    ).flatten()\n    now_detected_mask = np.hstack(\n        (points_over_threshold_mask,) * self.dim_points\n    ).flatten()\n    first_detection_mask = np.logical_and(\n        now_detected_mask, np.logical_not(detected_at_least_once_mask)\n    )\n\n    self.filter.x[: self.dim_z][first_detection_mask] = np.expand_dims(\n        detection.absolute_points.flatten(), 0\n    ).T[first_detection_mask]\n\n    # Force points being detected for the first time to have velocity = 0\n    # This is needed because some detectors (like OpenPose) set points with\n    # low confidence to coordinates (0, 0). And when they then get their first\n    # real detection this creates a huge velocity vector in our KalmanFilter\n    # and causes the tracker to start with wildly inaccurate estimations which\n    # eventually coverge to the real detections.\n    self.filter.x[self.dim_z :][np.logical_not(detected_at_least_once_mask)] = 0\n    self.detected_at_least_once_points = np.logical_or(\n        self.detected_at_least_once_points, points_over_threshold_mask\n    )\n
    "},{"location":"reference/tracker/#norfair.tracker.TrackedObject.merge","title":"merge(tracked_object)","text":"

    Merge with a not yet initialized TrackedObject instance

    Source code in norfair/tracker.py
    def merge(self, tracked_object):\n    \"\"\"Merge with a not yet initialized TrackedObject instance\"\"\"\n    self.reid_hit_counter = None\n    self.hit_counter = self.initial_period * 2\n    self.point_hit_counter = tracked_object.point_hit_counter\n    self.last_distance = tracked_object.last_distance\n    self.current_min_distance = tracked_object.current_min_distance\n    self.last_detection = tracked_object.last_detection\n    self.detected_at_least_once_points = (\n        tracked_object.detected_at_least_once_points\n    )\n    self.filter = tracked_object.filter\n\n    for past_detection in tracked_object.past_detections:\n        self._conditionally_add_to_past_detections(past_detection)\n
    "},{"location":"reference/tracker/#norfair.tracker.Detection","title":"Detection","text":"

    Detections returned by the detector must be converted to a Detection object before being used by Norfair.

    Parameters:

    Name Type Description Default points ndarray

    Points detected. Must be a rank 2 array with shape (n_points, n_dimensions) where n_dimensions is 2 or 3.

    required scores ndarray

    An array of length n_points which assigns a score to each of the points defined in points.

    This is used to inform the tracker of which points to ignore; any point with a score below detection_threshold will be ignored.

    This is useful for cases in which detections don't always have every point present, as is often the case in pose estimators.

    None data Any

    The place to store any extra data which may be useful when calculating the distance function. Anything stored here will be available to use inside the distance function.

    This enables the development of more interesting trackers which can do things like assign an appearance embedding to each detection to aid in its tracking.

    None label Hashable

    When working with multiple classes the detection's label can be stored to be used as a matching condition when associating tracked objects with new detections. Label's type must be hashable for drawing purposes.

    None embedding Any

    The embedding used by the ReID distance function (reid_distance_function).

    None Source code in norfair/tracker.py
    class Detection:\n    \"\"\"Detections returned by the detector must be converted to a `Detection` object before being used by Norfair.\n\n    Parameters\n    ----------\n    points : np.ndarray\n        Points detected. Must be a rank 2 array with shape `(n_points, n_dimensions)` where n_dimensions is 2 or 3.\n    scores : np.ndarray, optional\n        An array of length `n_points` which assigns a score to each of the points defined in `points`.\n\n        This is used to inform the tracker of which points to ignore;\n        any point with a score below `detection_threshold` will be ignored.\n\n        This useful for cases in which detections don't always have every point present, as is often the case in pose estimators.\n    data : Any, optional\n        The place to store any extra data which may be useful when calculating the distance function.\n        Anything stored here will be available to use inside the distance function.\n\n        This enables the development of more interesting trackers which can do things like assign an appearance embedding to each\n        detection to aid in its tracking.\n    label : Hashable, optional\n        When working with multiple classes the detection's label can be stored to be used as a matching condition when associating\n        tracked objects with new detections. Label's type must be hashable for drawing purposes.\n    embedding : Any, optional\n        The embedding for the reid_distance.\n    \"\"\"\n\n    def __init__(\n        self,\n        points: np.ndarray,\n        scores: np.ndarray = None,\n        data: Any = None,\n        label: Hashable = None,\n        embedding=None,\n    ):\n        self.points = validate_points(points)\n        self.scores = scores\n        self.data = data\n        self.label = label\n        self.absolute_points = self.points.copy()\n        self.embedding = embedding\n        self.age = None\n\n    def update_coordinate_transformation(\n        self, coordinate_transformation: CoordinatesTransformation\n    ):\n        if coordinate_transformation is not None:\n            self.absolute_points = coordinate_transformation.rel_to_abs(\n                self.absolute_points\n            )\n
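    As a quick illustration of the conversion described above (a minimal sketch with made-up coordinate values), a centroid detection and a two-point bounding-box detection could be built like this:

>>> import numpy as np
>>> from norfair import Detection
>>> centroid_det = Detection(points=np.array([[300.0, 120.0]]))
>>> bbox_det = Detection(
...     points=np.array([[10.0, 20.0], [50.0, 80.0]]),  # top-left and bottom-right corners
...     scores=np.array([0.9, 0.9]),
...     label="car",
... )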
    "},{"location":"reference/utils/","title":"Utils","text":""},{"location":"reference/utils/#norfair.utils.print_objects_as_table","title":"print_objects_as_table(tracked_objects)","text":"

    Helper function used for debugging

    Source code in norfair/utils.py
    def print_objects_as_table(tracked_objects: Sequence):\n    \"\"\"Used for helping in debugging\"\"\"\n    print()\n    console = Console()\n    table = Table(show_header=True, header_style=\"bold magenta\")\n    table.add_column(\"Id\", style=\"yellow\", justify=\"center\")\n    table.add_column(\"Age\", justify=\"right\")\n    table.add_column(\"Hit Counter\", justify=\"right\")\n    table.add_column(\"Last distance\", justify=\"right\")\n    table.add_column(\"Init Id\", justify=\"center\")\n    for obj in tracked_objects:\n        table.add_row(\n            str(obj.id),\n            str(obj.age),\n            str(obj.hit_counter),\n            f\"{obj.last_distance:.4f}\",\n            str(obj.initializing_id),\n        )\n    console.print(table)\n
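    Typical usage while debugging a tracking loop (a sketch, assuming tracked_objects is the list returned by Tracker.update):

>>> from norfair.utils import print_objects_as_table
>>> print_objects_as_table(tracked_objects)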
    "},{"location":"reference/utils/#norfair.utils.get_cutout","title":"get_cutout(points, image)","text":"

    Returns a rectangular cut-out from a set of points on an image

    Source code in norfair/utils.py
    def get_cutout(points, image):\n    \"\"\"Returns a rectangular cut-out from a set of points on an image\"\"\"\n    max_x = int(max(points[:, 0]))\n    min_x = int(min(points[:, 0]))\n    max_y = int(max(points[:, 1]))\n    min_y = int(min(points[:, 1]))\n    return image[min_y:max_y, min_x:max_x]\n
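    For example, to crop the image region enclosing a tracked object's last detection (a sketch; obj and frame are assumed to come from a tracking loop like the one shown earlier):

>>> from norfair.utils import get_cutout
>>> cutout = get_cutout(obj.last_detection.points, frame)  # rectangular crop around the detected points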
    "},{"location":"reference/utils/#norfair.utils.warn_once","title":"warn_once(message) cached","text":"

    Write a warning message only once.

    Source code in norfair/utils.py
    @lru_cache(maxsize=None)\ndef warn_once(message):\n    \"\"\"\n    Write a warning message only once.\n    \"\"\"\n    warn(message)\n
    "},{"location":"reference/video/","title":"Video","text":""},{"location":"reference/video/#norfair.video.Video","title":"Video","text":"

    Class that provides a simple and pythonic way to interact with video.

    It returns regular OpenCV frames, which enables the use of the large number of tools OpenCV provides to modify images.

    Parameters:

    Name Type Description Default camera Optional[int]

    An integer representing the device id of the camera to be used as the video source.

    Webcams tend to have an id of 0. Arguments camera and input_path can't be used at the same time; exactly one must be chosen.

    None input_path Optional[str]

    A string consisting of the path to the video file to be used as the video source.

    Arguments camera and input_path can't be used at the same time; exactly one must be chosen.

    None output_path str

    The path to the output video to be generated. Can be a folder where the file will be created, or a full path with a file name.

    '.' output_fps Optional[float]

    The frames per second at which to encode the output video file.

    If not provided it is set to be equal to the input video source's fps. This argument is useful when using live video cameras as a video source, where the user may know the input fps, but where the frames are being fed to the output video at a rate that is lower than the video source's fps, due to the latency added by the detector.

    None label str

    Label to add to the progress bar that appears when processing the current video.

    '' output_fourcc Optional[str]

    OpenCV encoding for the output video file. By default we use mp4v for .mp4 and XVID for .avi. This combination works on most systems but results in larger files. To get smaller files, use avc1 or H264 if available. Note that some fourcc codes are not compatible with some extensions.

    None output_extension str

    File extension used for the output video. Ignored if output_path is not a folder.

    'mp4'

    Examples:

    >>> video = Video(input_path=\"video.mp4\")\n>>> for frame in video:\n>>>     # << Your modifications to the frame would go here >>\n>>>     video.write(frame)\n
    Source code in norfair/video.py
    class Video:\n    \"\"\"\n    Class that provides a simple and pythonic way to interact with video.\n\n    It returns regular OpenCV frames which enables the usage of the huge number of tools OpenCV provides to modify images.\n\n    Parameters\n    ----------\n    camera : Optional[int], optional\n        An integer representing the device id of the camera to be used as the video source.\n\n        Webcams tend to have an id of `0`. Arguments `camera` and `input_path` can't be used at the same time, one must be chosen.\n    input_path : Optional[str], optional\n        A string consisting of the path to the video file to be used as the video source.\n\n        Arguments `camera` and `input_path` can't be used at the same time, one must be chosen.\n    output_path : str, optional\n        The path to the output video to be generated.\n        Can be a folder were the file will be created or a full path with a file name.\n    output_fps : Optional[float], optional\n        The frames per second at which to encode the output video file.\n\n        If not provided it is set to be equal to the input video source's fps.\n        This argument is useful when using live video cameras as a video source,\n        where the user may know the input fps,\n        but where the frames are being fed to the output video at a rate that is lower than the video source's fps,\n        due to the latency added by the detector.\n    label : str, optional\n        Label to add to the progress bar that appears when processing the current video.\n    output_fourcc : Optional[str], optional\n        OpenCV encoding for output video file.\n        By default we use `mp4v` for `.mp4` and `XVID` for `.avi`. This is a combination that works on most systems but\n        it results in larger files. To get smaller files use `avc1` or `H264` if available.\n        Notice that some fourcc are not compatible with some extensions.\n    output_extension : str, optional\n        File extension used for the output video. Ignored if `output_path` is not a folder.\n\n    Examples\n    --------\n    >>> video = Video(input_path=\"video.mp4\")\n    >>> for frame in video:\n    >>>     # << Your modifications to the frame would go here >>\n    >>>     video.write(frame)\n    \"\"\"\n\n    def __init__(\n        self,\n        camera: Optional[int] = None,\n        input_path: Optional[str] = None,\n        output_path: str = \".\",\n        output_fps: Optional[float] = None,\n        label: str = \"\",\n        output_fourcc: Optional[str] = None,\n        output_extension: str = \"mp4\",\n    ):\n        self.camera = camera\n        self.input_path = input_path\n        self.output_path = output_path\n        self.label = label\n        self.output_fourcc = output_fourcc\n        self.output_extension = output_extension\n        self.output_video: Optional[cv2.VideoWriter] = None\n\n        # Input validation\n        if (input_path is None and camera is None) or (\n            input_path is not None and camera is not None\n        ):\n            raise ValueError(\n                \"You must set either 'camera' or 'input_path' arguments when setting 'Video' class\"\n            )\n        if camera is not None and type(camera) is not int:\n            raise ValueError(\n                \"Argument 'camera' refers to the device-id of your camera, and must be an int. 
Setting it to 0 usually works if you don't know the id.\"\n            )\n\n        # Read Input Video\n        if self.input_path is not None:\n            if \"~\" in self.input_path:\n                self.input_path = os.path.expanduser(self.input_path)\n            if not os.path.isfile(self.input_path):\n                self._fail(\n                    f\"[bold red]Error:[/bold red] File '{self.input_path}' does not exist.\"\n                )\n            self.video_capture = cv2.VideoCapture(self.input_path)\n            total_frames = int(self.video_capture.get(cv2.CAP_PROP_FRAME_COUNT))\n            if total_frames == 0:\n                self._fail(\n                    f\"[bold red]Error:[/bold red] '{self.input_path}' does not seem to be a video file supported by OpenCV. If the video file is not the problem, please check that your OpenCV installation is working correctly.\"\n                )\n            description = os.path.basename(self.input_path)\n        else:\n            self.video_capture = cv2.VideoCapture(self.camera)\n            total_frames = 0\n            description = f\"Camera({self.camera})\"\n        self.output_fps = (\n            output_fps\n            if output_fps is not None\n            else self.video_capture.get(cv2.CAP_PROP_FPS)\n        )\n        self.input_height = self.video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)\n        self.input_width = self.video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)\n        self.frame_counter = 0\n\n        # Setup progressbar\n        if self.label:\n            description += f\" | {self.label}\"\n        progress_bar_fields: List[Union[str, ProgressColumn]] = [\n            \"[progress.description]{task.description}\",\n            BarColumn(),\n            \"[yellow]{task.fields[process_fps]:.2f}fps[/yellow]\",\n        ]\n        if self.input_path is not None:\n            progress_bar_fields.insert(\n                2, \"[progress.percentage]{task.percentage:>3.0f}%\"\n            )\n            progress_bar_fields.insert(\n                3,\n                TimeRemainingColumn(),\n            )\n        self.progress_bar = Progress(\n            *progress_bar_fields,\n            auto_refresh=False,\n            redirect_stdout=False,\n            redirect_stderr=False,\n        )\n        self.task = self.progress_bar.add_task(\n            self.abbreviate_description(description),\n            total=total_frames,\n            start=self.input_path is not None,\n            process_fps=0,\n        )\n\n    # This is a generator, note the yield keyword below.\n    def __iter__(self):\n        with self.progress_bar as progress_bar:\n            start = time.time()\n\n            # Iterate over video\n            while True:\n                self.frame_counter += 1\n                ret, frame = self.video_capture.read()\n                if ret is False or frame is None:\n                    break\n                process_fps = self.frame_counter / (time.time() - start)\n                progress_bar.update(\n                    self.task, advance=1, refresh=True, process_fps=process_fps\n                )\n                yield frame\n\n        # Cleanup\n        if self.output_video is not None:\n            self.output_video.release()\n            print(\n                f\"[white]Output video file saved to: {self.get_output_file_path()}[/white]\"\n            )\n        self.video_capture.release()\n        cv2.destroyAllWindows()\n\n    def _fail(self, msg: str):\n        raise RuntimeError(msg)\n\n    def 
write(self, frame: np.ndarray) -> int:\n        \"\"\"\n        Write one frame to the output video.\n\n        Parameters\n        ----------\n        frame : np.ndarray\n            The OpenCV frame to write to file.\n\n        Returns\n        -------\n        int\n            _description_\n        \"\"\"\n        if self.output_video is None:\n            # The user may need to access the output file path on their code\n            output_file_path = self.get_output_file_path()\n            fourcc = cv2.VideoWriter_fourcc(*self.get_codec_fourcc(output_file_path))\n            # Set on first frame write in case the user resizes the frame in some way\n            output_size = (\n                frame.shape[1],\n                frame.shape[0],\n            )  # OpenCV format is (width, height)\n            self.output_video = cv2.VideoWriter(\n                output_file_path,\n                fourcc,\n                self.output_fps,\n                output_size,\n            )\n\n        self.output_video.write(frame)\n        return cv2.waitKey(1)\n\n    def show(self, frame: np.ndarray, downsample_ratio: float = 1.0) -> int:\n        \"\"\"\n        Display a frame through a GUI. Usually used inside a video inference loop to show the output video.\n\n        Parameters\n        ----------\n        frame : np.ndarray\n            The OpenCV frame to be displayed.\n        downsample_ratio : float, optional\n            How much to downsample the frame being show.\n\n            Useful when streaming the GUI video display through a slow internet connection using something like X11 forwarding on an ssh connection.\n\n        Returns\n        -------\n        int\n            _description_\n        \"\"\"\n        # Resize to lower resolution for faster streaming over slow connections\n        if downsample_ratio != 1.0:\n            frame = cv2.resize(\n                frame,\n                (\n                    frame.shape[1] // downsample_ratio,\n                    frame.shape[0] // downsample_ratio,\n                ),\n            )\n        cv2.imshow(\"Output\", frame)\n        return cv2.waitKey(1)\n\n    def get_output_file_path(self) -> str:\n        \"\"\"\n        Calculate the output path being used in case you are writing your frames to a video file.\n\n        Useful if you didn't set `output_path`, and want to know what the autogenerated output file path by Norfair will be.\n\n        Returns\n        -------\n        str\n            The path to the file.\n        \"\"\"\n        if not os.path.isdir(self.output_path):\n            return self.output_path\n\n        if self.input_path is not None:\n            file_name = self.input_path.split(\"/\")[-1].split(\".\")[0]\n        else:\n            file_name = \"camera_{self.camera}\"\n        file_name = f\"{file_name}_out.{self.output_extension}\"\n\n        return os.path.join(self.output_path, file_name)\n\n    def get_codec_fourcc(self, filename: str) -> Optional[str]:\n        if self.output_fourcc is not None:\n            return self.output_fourcc\n\n        # Default codecs for each extension\n        extension = filename[-3:].lower()\n        if \"avi\" == extension:\n            return \"XVID\"\n        elif \"mp4\" == extension:\n            return \"mp4v\"  # When available, \"avc1\" is better\n        else:\n            self._fail(\n                f\"[bold red]Could not determine video codec for the provided output filename[/bold red]: \"\n                f\"[yellow]{filename}[/yellow]\\n\"\n           
     f\"Please use '.mp4', '.avi', or provide a custom OpenCV fourcc codec name.\"\n            )\n            return (\n                None  # Had to add this return to make mypya happy. I don't like this.\n            )\n\n    def abbreviate_description(self, description: str) -> str:\n        \"\"\"Conditionally abbreviate description so that progress bar fits in small terminals\"\"\"\n        terminal_columns, _ = get_terminal_size()\n        space_for_description = (\n            int(terminal_columns) - 25\n        )  # Leave 25 space for progressbar\n        if len(description) < space_for_description:\n            return description\n        else:\n            return \"{} ... {}\".format(\n                description[: space_for_description // 2 - 3],\n                description[-space_for_description // 2 + 3 :],\n            )\n
    "},{"location":"reference/video/#norfair.video.Video.write","title":"write(frame)","text":"

    Write one frame to the output video.

Parameters:

Name | Type | Description | Default
frame | ndarray | The OpenCV frame to write to file. | required

Returns:

Type | Description
int | The result of calling cv2.waitKey(1) after writing the frame (the code of any pressed key, or -1 if none was pressed).

    Source code in norfair/video.py
    def write(self, frame: np.ndarray) -> int:\n    \"\"\"\n    Write one frame to the output video.\n\n    Parameters\n    ----------\n    frame : np.ndarray\n        The OpenCV frame to write to file.\n\n    Returns\n    -------\n    int\n        _description_\n    \"\"\"\n    if self.output_video is None:\n        # The user may need to access the output file path on their code\n        output_file_path = self.get_output_file_path()\n        fourcc = cv2.VideoWriter_fourcc(*self.get_codec_fourcc(output_file_path))\n        # Set on first frame write in case the user resizes the frame in some way\n        output_size = (\n            frame.shape[1],\n            frame.shape[0],\n        )  # OpenCV format is (width, height)\n        self.output_video = cv2.VideoWriter(\n            output_file_path,\n            fourcc,\n            self.output_fps,\n            output_size,\n        )\n\n    self.output_video.write(frame)\n    return cv2.waitKey(1)\n
    "},{"location":"reference/video/#norfair.video.Video.show","title":"show(frame, downsample_ratio=1.0)","text":"

    Display a frame through a GUI. Usually used inside a video inference loop to show the output video.

Parameters:

Name | Type | Description | Default
frame | ndarray | The OpenCV frame to be displayed. | required
downsample_ratio | float | How much to downsample the frame being shown. Useful when streaming the GUI video display through a slow internet connection using something like X11 forwarding on an ssh connection. | 1.0

Returns:

Type | Description
int | The result of calling cv2.waitKey(1) after displaying the frame (the code of any pressed key, or -1 if none was pressed).

    Source code in norfair/video.py
    def show(self, frame: np.ndarray, downsample_ratio: float = 1.0) -> int:\n    \"\"\"\n    Display a frame through a GUI. Usually used inside a video inference loop to show the output video.\n\n    Parameters\n    ----------\n    frame : np.ndarray\n        The OpenCV frame to be displayed.\n    downsample_ratio : float, optional\n        How much to downsample the frame being show.\n\n        Useful when streaming the GUI video display through a slow internet connection using something like X11 forwarding on an ssh connection.\n\n    Returns\n    -------\n    int\n        _description_\n    \"\"\"\n    # Resize to lower resolution for faster streaming over slow connections\n    if downsample_ratio != 1.0:\n        frame = cv2.resize(\n            frame,\n            (\n                frame.shape[1] // downsample_ratio,\n                frame.shape[0] // downsample_ratio,\n            ),\n        )\n    cv2.imshow(\"Output\", frame)\n    return cv2.waitKey(1)\n
    "},{"location":"reference/video/#norfair.video.Video.get_output_file_path","title":"get_output_file_path()","text":"

    Calculate the output path being used in case you are writing your frames to a video file.

    Useful if you didn't set output_path, and want to know what the autogenerated output file path by Norfair will be.

Returns:

Type | Description
str | The path to the file.

    Source code in norfair/video.py
    def get_output_file_path(self) -> str:\n    \"\"\"\n    Calculate the output path being used in case you are writing your frames to a video file.\n\n    Useful if you didn't set `output_path`, and want to know what the autogenerated output file path by Norfair will be.\n\n    Returns\n    -------\n    str\n        The path to the file.\n    \"\"\"\n    if not os.path.isdir(self.output_path):\n        return self.output_path\n\n    if self.input_path is not None:\n        file_name = self.input_path.split(\"/\")[-1].split(\".\")[0]\n    else:\n        file_name = \"camera_{self.camera}\"\n    file_name = f\"{file_name}_out.{self.output_extension}\"\n\n    return os.path.join(self.output_path, file_name)\n
    "},{"location":"reference/video/#norfair.video.Video.abbreviate_description","title":"abbreviate_description(description)","text":"

    Conditionally abbreviate description so that progress bar fits in small terminals

    Source code in norfair/video.py
    def abbreviate_description(self, description: str) -> str:\n    \"\"\"Conditionally abbreviate description so that progress bar fits in small terminals\"\"\"\n    terminal_columns, _ = get_terminal_size()\n    space_for_description = (\n        int(terminal_columns) - 25\n    )  # Leave 25 space for progressbar\n    if len(description) < space_for_description:\n        return description\n    else:\n        return \"{} ... {}\".format(\n            description[: space_for_description // 2 - 3],\n            description[-space_for_description // 2 + 3 :],\n        )\n
    "}]} \ No newline at end of file +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Home","text":"

    Norfair is a customizable lightweight Python library for real-time multi-object tracking.

    Using Norfair, you can add tracking capabilities to any detector with just a few lines of code.

    Tracking players with moving camera Tracking 3D objects"},{"location":"#features","title":"Features","text":"
    • Any detector expressing its detections as a series of (x, y) coordinates can be used with Norfair. This includes detectors performing tasks such as object or keypoint detection (see examples).

    • Modular. It can easily be inserted into complex video processing pipelines to add tracking to existing projects. At the same time, it is possible to build a video inference loop from scratch using just Norfair and a detector.

    • Supports moving camera, re-identification with appearance embeddings, and n-dimensional object tracking (see Advanced features).

    • Norfair provides several predefined distance functions to compare tracked objects and detections. The distance functions can also be defined by the user, enabling the implementation of different tracking strategies.

    • Fast. The only thing bounding inference speed will be the detection network feeding detections to Norfair.

    Norfair is built, used and maintained by Tryolabs.

    "},{"location":"#installation","title":"Installation","text":"

Norfair currently supports Python 3.8+. The last Norfair version tested to support Python 3.7 is 2.2.0; later Norfair versions may still work on Python 3.7, but no specific support is planned.

    For the minimal version, install as:

    pip install norfair\n

To install Norfair together with the optional dependencies that enable more features, install as:

    pip install norfair[video]  # Adds several video helper features running on OpenCV\npip install norfair[metrics]  # Supports running MOT metrics evaluation\npip install norfair[metrics,video]  # Everything included\n

    If the needed dependencies are already present in the system, installing the minimal version of Norfair is enough for enabling the extra features. This is particularly useful for embedded devices, where installing compiled dependencies can be difficult, but they can sometimes come preinstalled with the system.

    "},{"location":"#documentation","title":"Documentation","text":"

    Getting started guide.

    Official reference.

    "},{"location":"#examples-demos","title":"Examples & demos","text":"

    We provide several examples of how Norfair can be used to add tracking capabilities to different detectors, and also showcase more advanced features.

    Note: for ease of reproducibility, we provide Dockerfiles for all the demos. Even though Norfair does not need a GPU, the default configuration of most demos requires a GPU to be able to run the detectors. For this, make sure you install NVIDIA Container Toolkit so that your GPU can be shared with Docker.

    It is possible to run several demos with a CPU, but you will have to modify the scripts or tinker with the installation of their dependencies.

    "},{"location":"#adding-tracking-to-different-detectors","title":"Adding tracking to different detectors","text":"

    Most tracking demos are showcased with vehicles and pedestrians, but the detectors are generally trained with many more classes from the COCO dataset.

    1. YOLOv7: tracking object centroids or bounding boxes.
    2. YOLOv5: tracking object centroids or bounding boxes.
    3. YOLOv4: tracking object centroids.
    4. Detectron2: tracking object centroids.
    5. AlphaPose: tracking human keypoints (pose estimation) and inserting Norfair into a complex existing pipeline.
    6. OpenPose: tracking human keypoints.
    7. YOLOPv2: tracking with a model for traffic object detection, drivable road area segmentation, and lane line detection.
    8. YOLO-NAS: tracking object centroids or bounding boxes.
    "},{"location":"#advanced-features","title":"Advanced features","text":"
    1. Speed up pose estimation by extrapolating detections using OpenPose.
    2. Track both bounding boxes and human keypoints (multi-class), unifying the detections from a YOLO model and OpenPose.
    3. Re-identification (ReID) of tracked objects using appearance embeddings. This is a good starting point for scenarios with a lot of occlusion, in which the Kalman filter alone would struggle.
4. Accurately track objects even if the camera is moving, by estimating camera motion, potentially accounting for pan, tilt, rotation, movement in any direction, and zoom.
    5. Track points in 3D, using MediaPipe Objectron.
    6. Tracking of small objects, using SAHI: Slicing Aided Hyper Inference.
    "},{"location":"#ros-integration","title":"ROS integration","text":"

To make it even easier to use Norfair in robotics projects, we now offer a version that integrates with the Robot Operating System (ROS).

We provide a ROS package and a fully functional Docker environment to help you take the first steps with the package and get your first application running more easily.

    "},{"location":"#benchmarking-and-profiling","title":"Benchmarking and profiling","text":"
    1. Kalman filter and distance function profiling using TRT pose estimator.
    2. Computation of MOT17 scores using motmetrics4norfair.
    "},{"location":"#how-it-works","title":"How it works","text":"

    Norfair works by estimating the future position of each point based on its past positions. It then tries to match these estimated positions with newly detected points provided by the detector. For this matching to occur, Norfair can rely on any distance function. There are some predefined distances already integrated in Norfair, and the users can also define their own custom distances. Therefore, each object tracker can be made as simple or as complex as needed.

As an example, we use Detectron2 to get single-point detections to use with a simple Euclidean distance function (sketched below). We just use the centroids of the bounding boxes it produces around cars as our detections, and get the following results.

    On the left you can see the points we get from Detectron2, and on the right how Norfair tracks them assigning a unique identifier through time. Even a straightforward distance function like this one can work when the tracking needed is simple.
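For reference, a distance of that kind can be written as a short function that compares a candidate detection against a tracked object's current estimate. The snippet below is a minimal sketch assuming the standard signature Norfair custom distances take (a detection and a tracked object); the predefined euclidean distance used in the full example further down behaves similarly.

import numpy as np\n\ndef euclidean_distance(detection, tracked_object):\n    # Smaller values mean a better match between the detection and the tracked object\n    return np.linalg.norm(detection.points - tracked_object.estimate)\n\n# The function can be passed directly to the tracker:\n# tracker = Tracker(distance_function=euclidean_distance, distance_threshold=20)\n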

    Norfair also provides several useful tools for creating a video inference loop. Here is what the full code for creating the previous example looks like, including the code needed to set up Detectron2:

    import cv2\nimport numpy as np\nfrom detectron2.config import get_cfg\nfrom detectron2.engine import DefaultPredictor\n\nfrom norfair import Detection, Tracker, Video, draw_tracked_objects\n\n# Set up Detectron2 object detector\ncfg = get_cfg()\ncfg.merge_from_file(\"demos/faster_rcnn_R_50_FPN_3x.yaml\")\ncfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5\ncfg.MODEL.WEIGHTS = \"detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl\"\ndetector = DefaultPredictor(cfg)\n\n# Norfair\nvideo = Video(input_path=\"video.mp4\")\ntracker = Tracker(distance_function=\"euclidean\", distance_threshold=20)\n\nfor frame in video:\n    detections = detector(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n    detections = [Detection(p) for p in detections['instances'].pred_boxes.get_centers().cpu().numpy()]\n    tracked_objects = tracker.update(detections=detections)\n    draw_tracked_objects(frame, tracked_objects)\n    video.write(frame)\n

The video and drawing tools use OpenCV frames, so they are compatible with most Python video code available online. The point tracking is based on SORT, generalized to detections consisting of a dynamically changing number of points per detection.

    "},{"location":"#motivation","title":"Motivation","text":"

Trying out the latest state-of-the-art detectors normally requires running repositories that weren't intended to be easy to use. These tend to be repositories associated with a research paper describing a novel approach to detection, and they are therefore intended to be run as a one-off evaluation script to produce the metrics published in that paper. This explains why they tend not to be easy to run as inference scripts, or why extracting the core model to use in another standalone script isn't always trivial.

Norfair was born out of the need to quickly add a simple layer of tracking over a wide range of newly released SOTA detectors. It was designed to be plugged seamlessly into a complex, highly coupled code base with minimal effort. Norfair provides a series of modular but compatible tools, which you can pick and choose for your project.

    "},{"location":"#comparison-to-other-trackers","title":"Comparison to other trackers","text":"

Norfair's contribution to the Python object-tracking ecosystem is that it works with any object detector, since it supports a variable number of points per detection, and that it lets users heavily customize the tracker by creating their own distance functions.

    If you are looking for a tracker, here are some other projects worth noting:

    • OpenCV includes several tracking solutions like KCF Tracker and MedianFlow Tracker which are run by making the user select a part of the frame to track, and then letting the tracker follow that area. They tend not to be run on top of a detector and are not very robust.
• dlib includes a correlation single-object tracker. You have to build your own multiple-object tracker on top of it if you want to track multiple objects.
• AlphaPose just released a new version of their human pose tracker. This tracker is tightly coupled to their code base and to the task of tracking human poses.
• SORT and Deep SORT are similar to this repo in that they use Kalman filters (and a deep embedding for Deep SORT), but they are hardcoded to a fixed distance function and to tracking boxes. Norfair also adds some filtering when matching tracked objects with detections, and replaces the Hungarian Algorithm with its own distance minimizer. Both repos are also released under the GPL license, which might be an issue for some individuals or companies, because the source code of derivative works needs to be published.
    "},{"location":"#benchmarks","title":"Benchmarks","text":"

    MOT17 and MOT20 results obtained using motmetrics4norfair demo script on the train split. We used detections obtained with ByteTrack's YOLOX object detection model.

MOT17 Train | IDF1 | IDP | IDR | Rcll | Prcn | MOTA | MOTP
MOT17-02 | 61.3% | 63.6% | 59.0% | 86.8% | 93.5% | 79.9% | 14.8%
MOT17-04 | 93.3% | 93.6% | 93.0% | 98.6% | 99.3% | 97.9% | 07.9%
MOT17-05 | 77.8% | 77.7% | 77.8% | 85.9% | 85.8% | 71.2% | 14.7%
MOT17-09 | 65.0% | 67.4% | 62.9% | 90.3% | 96.8% | 86.8% | 12.2%
MOT17-10 | 70.2% | 72.5% | 68.1% | 87.3% | 93.0% | 80.1% | 18.7%
MOT17-11 | 80.2% | 80.5% | 80.0% | 93.0% | 93.6% | 86.4% | 11.3%
MOT17-13 | 79.0% | 79.6% | 78.4% | 90.6% | 92.0% | 82.4% | 16.6%
OVERALL | 80.6% | 81.8% | 79.6% | 92.9% | 95.5% | 88.1% | 11.9%

MOT20 Train | IDF1 | IDP | IDR | Rcll | Prcn | MOTA | MOTP
MOT20-01 | 85.9% | 88.1% | 83.8% | 93.4% | 98.2% | 91.5% | 12.6%
MOT20-02 | 72.8% | 74.6% | 71.0% | 93.2% | 97.9% | 91.0% | 12.7%
MOT20-03 | 93.0% | 94.1% | 92.0% | 96.1% | 98.3% | 94.4% | 13.7%
MOT20-05 | 87.9% | 88.9% | 87.0% | 96.0% | 98.1% | 94.1% | 13.0%
OVERALL | 87.3% | 88.4% | 86.2% | 95.6% | 98.1% | 93.7% | 13.2%
"},{"location":"#commercial-support","title":"Commercial support","text":"

    Tryolabs can provide commercial support, implement new features in Norfair or build video analytics tools for solving your challenging problems. Norfair powers several video analytics applications, such as the face mask detection tool.

    If you are interested, please contact us.

    "},{"location":"#citing-norfair","title":"Citing Norfair","text":"

    For citations in academic publications, please export your desired citation format (BibTeX or other) from Zenodo.

    "},{"location":"#license","title":"License","text":"

    Copyright \u00a9 2022, Tryolabs. Released under the BSD 3-Clause.

    "},{"location":"getting_started/","title":"Getting Started","text":"

    Norfair's goal is to easily track multiple objects in videos based on the frame-by-frame detections of a user-defined model.

    "},{"location":"getting_started/#model-or-detector","title":"Model or Detector","text":"

    We recommend first deciding and setting up the model and then adding Norfair on top of it. Models trained for any form of object detection or keypoint detection (including pose estimation) are all supported. You can check some of the integrations we have as examples:

    • Yolov7, Yolov5 and Yolov4
    • Detectron2
    • Alphapose
    • Openpose
    • MMDetection

Any other model trained on one of the supported tasks is also supported and should be easy to integrate with Norfair, regardless of whether it uses PyTorch, TensorFlow, or another framework.

    If you are unsure of which model to use, Yolov7 is a good starting point since it's easy to set up and offers models of different sizes pre-trained on object detection and pose estimation.

    Note

    Norfair is a Detection-Based-Tracker (DBT) and as such, its performance is highly dependent on the performance of the model of choice.

    The detections from the model will need to be wrapped in an instance of Detection before passing them to Norfair.
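As a minimal sketch of what that wrapping usually looks like (the raw detector output format below is hypothetical), each detected object becomes one Detection, with points as an array of shape (N_points, N_dimensions) and optional per-point scores:

import numpy as np\nfrom norfair import Detection\n\n# Hypothetical raw model output: one (x, y) centroid and a confidence score per object\nraw_detections = [((650.0, 300.0), 0.87), ((128.0, 410.0), 0.55)]\n\nnorfair_detections = [\n    Detection(points=np.array([point]), scores=np.array([score]))\n    for point, score in raw_detections\n]\n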

    "},{"location":"getting_started/#install","title":"Install","text":"

    Installing Norfair is extremely easy, simply run pip install norfair to install the latest version from PyPI.

    You can also install the latest version from the master branch using pip install git+https://github.com/tryolabs/norfair.git@master#egg=norfair

    "},{"location":"getting_started/#video","title":"Video","text":"

    Norfair offers optional functionality to process videos (mp4 and mov formats are supported) or capture a live feed from a camera. To use this functionality you need to install Norfair with the video extra using this command: pip install norfair[video].

    Check the Video class for more info on how to use it.
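For instance, a minimal sketch of capturing a live feed from a webcam instead of reading a file (device id 0 is an assumption; adjust it to your setup):

from norfair import Video\n\nvideo = Video(camera=0)  # or Video(input_path=\"video.mp4\") for a file\nfor frame in video:\n    # << detection and tracking would go here >>\n    video.show(frame)\n    video.write(frame)  # output_path defaults to the current directory\n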

    "},{"location":"getting_started/#tracking","title":"Tracking","text":"

    Let's dive right into a simple example in the following snippet:

    from norfair import Detection, Tracker, Video, draw_tracked_objects\n\ndetector = MyDetector()  # Set up a detector\nvideo = Video(input_path=\"video.mp4\")\ntracker = Tracker(distance_function=\"euclidean\", distance_threshold=100)\n\nfor frame in video:\n   detections = detector(frame)\n   norfair_detections = [Detection(points) for points in detections]\n   tracked_objects = tracker.update(detections=norfair_detections)\n   draw_tracked_objects(frame, tracked_objects)\n   video.write(frame)\n

    The tracker is created and then the detections are fed to it one frame at a time in order. This method is called online tracking and allows Norfair to be used in live feeds and real-time scenarios where future frames are not available.

    Norfair includes functionality for creating an output video with drawings which is useful for evaluating and debugging. We usually start with this simple setup and move from there.

    "},{"location":"getting_started/#next-steps","title":"Next Steps","text":"

The next steps depend a lot on your goal and the result of evaluating the output videos; nevertheless, here are some pointers that might help you solve common problems.

    "},{"location":"getting_started/#detection-issues","title":"Detection Issues","text":"

The most common problem is that the tracking has errors or is not precise enough. In this case, the first thing to check is whether this is a detection error or a tracking error. As mentioned above, if the detector fails, the tracking will suffer.

To debug this, use draw_points or draw_boxes to inspect the detections and analyze whether they are precise enough. If you are filtering the detections based on scores, this is a good time to tweak the threshold. If you decide that the detections are not good enough, you can try a different architecture, a bigger version of the model, or consider fine-tuning the model on your domain.
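A minimal sketch of such an inspection loop is shown below; the detector call, its output format, and the 0.5 score threshold are assumptions to adapt to your model, and draw_boxes is used because the sketch assumes bounding-box detections:

import numpy as np\nfrom norfair import Detection, Video, draw_boxes\n\nSCORE_THRESHOLD = 0.5  # tweak this value while inspecting the output video\n\nvideo = Video(input_path=\"video.mp4\", output_path=\"detections_debug.mp4\")\nfor frame in video:\n    raw = my_detector(frame)  # hypothetical detector returning (box, scores) pairs, box shape (2, 2)\n    detections = [\n        Detection(points=box, scores=scores)\n        for box, scores in raw\n        if scores.mean() > SCORE_THRESHOLD\n    ]\n    draw_boxes(frame, detections)\n    video.write(frame)\n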

    "},{"location":"getting_started/#tracking-issues","title":"Tracking Issues","text":"

After inspecting the detections you might find issues with the tracking. Several things can go wrong with tracking, but here is a list of common errors and things to try (a small configuration sketch follows the list):

• Objects take too long to start; this can have multiple causes:
  • initialization_delay is too big on the Tracker. It makes the TrackedObject stay in the initializing state for too long; 3 is usually a good value to start with.
  • distance_threshold is too small on the Tracker. It prevents the Detections from being matched with the correct TrackedObject. The best value depends on the distance function used.
      • Incorrect distance_function on the Tracker. Some distances might not be valid in some cases, for instance, if using IoU but the objects in your video move so quickly that there is never an overlap between the detections of consecutive frames. Try different distances, euclidean or create_normalized_mean_euclidean_distance are good starting points.
    • Objects take too long to disappear. Lower hit_counter_max on the Tracker.
• Points or bounding boxes jitter too much. Increase R (measurement error) or lower Q (estimate or process error) on the OptimizedKalmanFilterFactory or FilterPyKalmanFilterFactory. This makes the Kalman Filter put less weight on the measurements and trust the estimate more, stabilizing the result.
    • Camera motion confuses the Tracker. If the camera moves, the apparent movement of objects can become too erratic for the Tracker. Use MotionEstimator.
• Incorrect matches between Detections and TrackedObjects; a couple of scenarios can cause this:
  • distance_threshold is too big, so the Tracker matches Detections to TrackedObjects that are simply too far away. Lower the threshold until you fix the error; the correct value will depend on the distance function that you're using.
  • Mismatches when objects overlap. In this case, tracking becomes more challenging: usually the quality of the detection degrades, causing one of the objects to be missed or producing a single big detection that includes both objects. On top of the detection issues, the tracker needs to decide which detection should be matched to which TrackedObject, which can be error-prone if only spatial information is considered. The solution is not easy, but incorporating a notion of appearance similarity based on some kind of embedding into your distance_function can help.
• Can't recover an object after occlusions. Use ReID distance; see this demo for an example, but for real-world use you will need a good ReID model that can provide good embeddings.
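As referenced above, here is a minimal configuration sketch showing where those knobs live when creating the Tracker; the specific values are illustrative starting points, not recommendations:

from norfair import Tracker\nfrom norfair.filter import OptimizedKalmanFilterFactory\n\ntracker = Tracker(\n    distance_function=\"euclidean\",\n    distance_threshold=100,  # raise or lower it when matches are missed or wrong\n    initialization_delay=3,  # how long objects stay in the initializing state\n    hit_counter_max=15,  # lower it if objects take too long to disappear\n    filter_factory=OptimizedKalmanFilterFactory(R=4.0, Q=0.1),  # higher R / lower Q reduces jitter\n)\n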
    "},{"location":"reference/","title":"Reference","text":"

    A customizable lightweight Python library for real-time multi-object tracking.

    Examples:

    >>> from norfair import Detection, Tracker, Video, draw_tracked_objects\n>>> detector = MyDetector()  # Set up a detector\n>>> video = Video(input_path=\"video.mp4\")\n>>> tracker = Tracker(distance_function=\"euclidean\", distance_threshold=50)\n>>> for frame in video:\n>>>    detections = detector(frame)\n>>>    norfair_detections = [Detection(points) for points in detections]\n>>>    tracked_objects = tracker.update(detections=norfair_detections)\n>>>    draw_tracked_objects(frame, tracked_objects)\n>>>    video.write(frame)\n
    "},{"location":"reference/#norfair.Color","title":"Color","text":"

    Contains predefined colors.

Colors are defined as a Tuple of integers between 0 and 255 expressing the values in BGR, which is the format OpenCV uses.

    Source code in norfair/drawing/color.py
    class Color:\n    \"\"\"\n    Contains predefined colors.\n\n    Colors are defined as a Tuple of integers between 0 and 255 expressing the values in BGR\n    This is the format opencv uses.\n    \"\"\"\n\n    # from PIL.ImageColors.colormap\n    aliceblue = hex_to_bgr(\"#f0f8ff\")\n    antiquewhite = hex_to_bgr(\"#faebd7\")\n    aqua = hex_to_bgr(\"#00ffff\")\n    aquamarine = hex_to_bgr(\"#7fffd4\")\n    azure = hex_to_bgr(\"#f0ffff\")\n    beige = hex_to_bgr(\"#f5f5dc\")\n    bisque = hex_to_bgr(\"#ffe4c4\")\n    black = hex_to_bgr(\"#000000\")\n    blanchedalmond = hex_to_bgr(\"#ffebcd\")\n    blue = hex_to_bgr(\"#0000ff\")\n    blueviolet = hex_to_bgr(\"#8a2be2\")\n    brown = hex_to_bgr(\"#a52a2a\")\n    burlywood = hex_to_bgr(\"#deb887\")\n    cadetblue = hex_to_bgr(\"#5f9ea0\")\n    chartreuse = hex_to_bgr(\"#7fff00\")\n    chocolate = hex_to_bgr(\"#d2691e\")\n    coral = hex_to_bgr(\"#ff7f50\")\n    cornflowerblue = hex_to_bgr(\"#6495ed\")\n    cornsilk = hex_to_bgr(\"#fff8dc\")\n    crimson = hex_to_bgr(\"#dc143c\")\n    cyan = hex_to_bgr(\"#00ffff\")\n    darkblue = hex_to_bgr(\"#00008b\")\n    darkcyan = hex_to_bgr(\"#008b8b\")\n    darkgoldenrod = hex_to_bgr(\"#b8860b\")\n    darkgray = hex_to_bgr(\"#a9a9a9\")\n    darkgrey = hex_to_bgr(\"#a9a9a9\")\n    darkgreen = hex_to_bgr(\"#006400\")\n    darkkhaki = hex_to_bgr(\"#bdb76b\")\n    darkmagenta = hex_to_bgr(\"#8b008b\")\n    darkolivegreen = hex_to_bgr(\"#556b2f\")\n    darkorange = hex_to_bgr(\"#ff8c00\")\n    darkorchid = hex_to_bgr(\"#9932cc\")\n    darkred = hex_to_bgr(\"#8b0000\")\n    darksalmon = hex_to_bgr(\"#e9967a\")\n    darkseagreen = hex_to_bgr(\"#8fbc8f\")\n    darkslateblue = hex_to_bgr(\"#483d8b\")\n    darkslategray = hex_to_bgr(\"#2f4f4f\")\n    darkslategrey = hex_to_bgr(\"#2f4f4f\")\n    darkturquoise = hex_to_bgr(\"#00ced1\")\n    darkviolet = hex_to_bgr(\"#9400d3\")\n    deeppink = hex_to_bgr(\"#ff1493\")\n    deepskyblue = hex_to_bgr(\"#00bfff\")\n    dimgray = hex_to_bgr(\"#696969\")\n    dimgrey = hex_to_bgr(\"#696969\")\n    dodgerblue = hex_to_bgr(\"#1e90ff\")\n    firebrick = hex_to_bgr(\"#b22222\")\n    floralwhite = hex_to_bgr(\"#fffaf0\")\n    forestgreen = hex_to_bgr(\"#228b22\")\n    fuchsia = hex_to_bgr(\"#ff00ff\")\n    gainsboro = hex_to_bgr(\"#dcdcdc\")\n    ghostwhite = hex_to_bgr(\"#f8f8ff\")\n    gold = hex_to_bgr(\"#ffd700\")\n    goldenrod = hex_to_bgr(\"#daa520\")\n    gray = hex_to_bgr(\"#808080\")\n    grey = hex_to_bgr(\"#808080\")\n    green = (0, 128, 0)\n    greenyellow = hex_to_bgr(\"#adff2f\")\n    honeydew = hex_to_bgr(\"#f0fff0\")\n    hotpink = hex_to_bgr(\"#ff69b4\")\n    indianred = hex_to_bgr(\"#cd5c5c\")\n    indigo = hex_to_bgr(\"#4b0082\")\n    ivory = hex_to_bgr(\"#fffff0\")\n    khaki = hex_to_bgr(\"#f0e68c\")\n    lavender = hex_to_bgr(\"#e6e6fa\")\n    lavenderblush = hex_to_bgr(\"#fff0f5\")\n    lawngreen = hex_to_bgr(\"#7cfc00\")\n    lemonchiffon = hex_to_bgr(\"#fffacd\")\n    lightblue = hex_to_bgr(\"#add8e6\")\n    lightcoral = hex_to_bgr(\"#f08080\")\n    lightcyan = hex_to_bgr(\"#e0ffff\")\n    lightgoldenrodyellow = hex_to_bgr(\"#fafad2\")\n    lightgreen = hex_to_bgr(\"#90ee90\")\n    lightgray = hex_to_bgr(\"#d3d3d3\")\n    lightgrey = hex_to_bgr(\"#d3d3d3\")\n    lightpink = hex_to_bgr(\"#ffb6c1\")\n    lightsalmon = hex_to_bgr(\"#ffa07a\")\n    lightseagreen = hex_to_bgr(\"#20b2aa\")\n    lightskyblue = hex_to_bgr(\"#87cefa\")\n    lightslategray = hex_to_bgr(\"#778899\")\n    lightslategrey = hex_to_bgr(\"#778899\")\n    lightsteelblue = 
hex_to_bgr(\"#b0c4de\")\n    lightyellow = hex_to_bgr(\"#ffffe0\")\n    lime = hex_to_bgr(\"#00ff00\")\n    limegreen = hex_to_bgr(\"#32cd32\")\n    linen = hex_to_bgr(\"#faf0e6\")\n    magenta = hex_to_bgr(\"#ff00ff\")\n    maroon = hex_to_bgr(\"#800000\")\n    mediumaquamarine = hex_to_bgr(\"#66cdaa\")\n    mediumblue = hex_to_bgr(\"#0000cd\")\n    mediumorchid = hex_to_bgr(\"#ba55d3\")\n    mediumpurple = hex_to_bgr(\"#9370db\")\n    mediumseagreen = hex_to_bgr(\"#3cb371\")\n    mediumslateblue = hex_to_bgr(\"#7b68ee\")\n    mediumspringgreen = hex_to_bgr(\"#00fa9a\")\n    mediumturquoise = hex_to_bgr(\"#48d1cc\")\n    mediumvioletred = hex_to_bgr(\"#c71585\")\n    midnightblue = hex_to_bgr(\"#191970\")\n    mintcream = hex_to_bgr(\"#f5fffa\")\n    mistyrose = hex_to_bgr(\"#ffe4e1\")\n    moccasin = hex_to_bgr(\"#ffe4b5\")\n    navajowhite = hex_to_bgr(\"#ffdead\")\n    navy = hex_to_bgr(\"#000080\")\n    oldlace = hex_to_bgr(\"#fdf5e6\")\n    olive = hex_to_bgr(\"#808000\")\n    olivedrab = hex_to_bgr(\"#6b8e23\")\n    orange = hex_to_bgr(\"#ffa500\")\n    orangered = hex_to_bgr(\"#ff4500\")\n    orchid = hex_to_bgr(\"#da70d6\")\n    palegoldenrod = hex_to_bgr(\"#eee8aa\")\n    palegreen = hex_to_bgr(\"#98fb98\")\n    paleturquoise = hex_to_bgr(\"#afeeee\")\n    palevioletred = hex_to_bgr(\"#db7093\")\n    papayawhip = hex_to_bgr(\"#ffefd5\")\n    peachpuff = hex_to_bgr(\"#ffdab9\")\n    peru = hex_to_bgr(\"#cd853f\")\n    pink = hex_to_bgr(\"#ffc0cb\")\n    plum = hex_to_bgr(\"#dda0dd\")\n    powderblue = hex_to_bgr(\"#b0e0e6\")\n    purple = hex_to_bgr(\"#800080\")\n    rebeccapurple = hex_to_bgr(\"#663399\")\n    red = hex_to_bgr(\"#ff0000\")\n    rosybrown = hex_to_bgr(\"#bc8f8f\")\n    royalblue = hex_to_bgr(\"#4169e1\")\n    saddlebrown = hex_to_bgr(\"#8b4513\")\n    salmon = hex_to_bgr(\"#fa8072\")\n    sandybrown = hex_to_bgr(\"#f4a460\")\n    seagreen = hex_to_bgr(\"#2e8b57\")\n    seashell = hex_to_bgr(\"#fff5ee\")\n    sienna = hex_to_bgr(\"#a0522d\")\n    silver = hex_to_bgr(\"#c0c0c0\")\n    skyblue = hex_to_bgr(\"#87ceeb\")\n    slateblue = hex_to_bgr(\"#6a5acd\")\n    slategray = hex_to_bgr(\"#708090\")\n    slategrey = hex_to_bgr(\"#708090\")\n    snow = hex_to_bgr(\"#fffafa\")\n    springgreen = hex_to_bgr(\"#00ff7f\")\n    steelblue = hex_to_bgr(\"#4682b4\")\n    tan = hex_to_bgr(\"#d2b48c\")\n    teal = hex_to_bgr(\"#008080\")\n    thistle = hex_to_bgr(\"#d8bfd8\")\n    tomato = hex_to_bgr(\"#ff6347\")\n    turquoise = hex_to_bgr(\"#40e0d0\")\n    violet = hex_to_bgr(\"#ee82ee\")\n    wheat = hex_to_bgr(\"#f5deb3\")\n    white = hex_to_bgr(\"#ffffff\")\n    whitesmoke = hex_to_bgr(\"#f5f5f5\")\n    yellow = hex_to_bgr(\"#ffff00\")\n    yellowgreen = hex_to_bgr(\"#9acd32\")\n\n    # seaborn tab20 colors\n    tab1 = hex_to_bgr(\"#1f77b4\")\n    tab2 = hex_to_bgr(\"#aec7e8\")\n    tab3 = hex_to_bgr(\"#ff7f0e\")\n    tab4 = hex_to_bgr(\"#ffbb78\")\n    tab5 = hex_to_bgr(\"#2ca02c\")\n    tab6 = hex_to_bgr(\"#98df8a\")\n    tab7 = hex_to_bgr(\"#d62728\")\n    tab8 = hex_to_bgr(\"#ff9896\")\n    tab9 = hex_to_bgr(\"#9467bd\")\n    tab10 = hex_to_bgr(\"#c5b0d5\")\n    tab11 = hex_to_bgr(\"#8c564b\")\n    tab12 = hex_to_bgr(\"#c49c94\")\n    tab13 = hex_to_bgr(\"#e377c2\")\n    tab14 = hex_to_bgr(\"#f7b6d2\")\n    tab15 = hex_to_bgr(\"#7f7f7f\")\n    tab16 = hex_to_bgr(\"#c7c7c7\")\n    tab17 = hex_to_bgr(\"#bcbd22\")\n    tab18 = hex_to_bgr(\"#dbdb8d\")\n    tab19 = hex_to_bgr(\"#17becf\")\n    tab20 = hex_to_bgr(\"#9edae5\")\n    # seaborn colorblind\n    cb1 = 
hex_to_bgr(\"#0173b2\")\n    cb2 = hex_to_bgr(\"#de8f05\")\n    cb3 = hex_to_bgr(\"#029e73\")\n    cb4 = hex_to_bgr(\"#d55e00\")\n    cb5 = hex_to_bgr(\"#cc78bc\")\n    cb6 = hex_to_bgr(\"#ca9161\")\n    cb7 = hex_to_bgr(\"#fbafe4\")\n    cb8 = hex_to_bgr(\"#949494\")\n    cb9 = hex_to_bgr(\"#ece133\")\n    cb10 = hex_to_bgr(\"#56b4e9\")\n
    "},{"location":"reference/#norfair.Palette","title":"Palette","text":"

Class to control the color palette for drawing.

    Examples:

    Change palette:

    >>> from norfair import Palette\n>>> Palette.set(\"colorblind\")\n>>> # or a custom palette\n>>> from norfair import Color\n>>> Palette.set([Color.red, Color.blue, \"#ffeeff\"])\n
    Source code in norfair/drawing/color.py
    class Palette:\n    \"\"\"\n    Class to control the color pallete for drawing.\n\n    Examples\n    --------\n    Change palette:\n    >>> from norfair import Palette\n    >>> Palette.set(\"colorblind\")\n    >>> # or a custom palette\n    >>> from norfair import Color\n    >>> Palette.set([Color.red, Color.blue, \"#ffeeff\"])\n    \"\"\"\n\n    _colors = PALETTES[\"tab10\"]\n    _default_color = Color.black\n\n    @classmethod\n    def set(cls, palette: Union[str, Iterable[ColorLike]]):\n        \"\"\"\n        Selects a color palette.\n\n        Parameters\n        ----------\n        palette : Union[str, Iterable[ColorLike]]\n            can be either\n            - the name of one of the predefined palettes `tab10`, `tab20`, or `colorblind`\n            - a list of ColorLike objects that can be parsed by [`parse_color`][norfair.drawing.color.parse_color]\n        \"\"\"\n        if isinstance(palette, str):\n            try:\n                cls._colors = PALETTES[palette]\n            except KeyError as e:\n                raise ValueError(\n                    f\"Invalid palette name '{palette}', valid values are {PALETTES.keys()}\"\n                ) from e\n        else:\n            colors = []\n            for c in palette:\n                colors.append(parse_color(c))\n\n            cls._colors = colors\n\n    @classmethod\n    def set_default_color(cls, color: ColorLike):\n        \"\"\"\n        Selects the default color of `choose_color` when hashable is None.\n\n        Parameters\n        ----------\n        color : ColorLike\n            The new default color.\n        \"\"\"\n        cls._default_color = parse_color(color)\n\n    @classmethod\n    def choose_color(cls, hashable: Hashable) -> ColorType:\n        if hashable is None:\n            return cls._default_color\n        return cls._colors[abs(hash(hashable)) % len(cls._colors)]\n
    "},{"location":"reference/#norfair.Palette.set","title":"set(palette) classmethod","text":"

    Selects a color palette.

Parameters:

Name | Type | Description | Default
palette | Union[str, Iterable[ColorLike]] | Can be either the name of one of the predefined palettes (tab10, tab20, or colorblind) or a list of ColorLike objects that can be parsed by parse_color. | required

Source code in norfair/drawing/color.py
    @classmethod\ndef set(cls, palette: Union[str, Iterable[ColorLike]]):\n    \"\"\"\n    Selects a color palette.\n\n    Parameters\n    ----------\n    palette : Union[str, Iterable[ColorLike]]\n        can be either\n        - the name of one of the predefined palettes `tab10`, `tab20`, or `colorblind`\n        - a list of ColorLike objects that can be parsed by [`parse_color`][norfair.drawing.color.parse_color]\n    \"\"\"\n    if isinstance(palette, str):\n        try:\n            cls._colors = PALETTES[palette]\n        except KeyError as e:\n            raise ValueError(\n                f\"Invalid palette name '{palette}', valid values are {PALETTES.keys()}\"\n            ) from e\n    else:\n        colors = []\n        for c in palette:\n            colors.append(parse_color(c))\n\n        cls._colors = colors\n
    "},{"location":"reference/#norfair.Palette.set_default_color","title":"set_default_color(color) classmethod","text":"

    Selects the default color of choose_color when hashable is None.

Parameters:

Name | Type | Description | Default
color | ColorLike | The new default color. | required

Source code in norfair/drawing/color.py
    @classmethod\ndef set_default_color(cls, color: ColorLike):\n    \"\"\"\n    Selects the default color of `choose_color` when hashable is None.\n\n    Parameters\n    ----------\n    color : ColorLike\n        The new default color.\n    \"\"\"\n    cls._default_color = parse_color(color)\n
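For example, a one-line sketch of changing the fallback color returned by choose_color when the hashable is None (any ColorLike works: a Color attribute, a hex string, or a BGR tuple):

from norfair import Palette\n\nPalette.set_default_color(\"#808080\")  # or Color.grey, or (128, 128, 128)\n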
    "},{"location":"reference/#norfair.Drawable","title":"Drawable","text":"

    Class to standardize Drawable objects like Detections and TrackedObjects

Parameters:

Name | Type | Description | Default
obj | Union[Detection, TrackedObject] | A Detection or a TrackedObject that will be used to initialize the drawable. If this parameter is passed, all other arguments are ignored. | None
points | ndarray | Points included in the drawable, shape is (N_points, N_dimensions). Ignored if obj is passed. | None
id | Any | Id of this object. Ignored if obj is passed. | None
label | Any | Label specifying the class of the object. Ignored if obj is passed. | None
scores | ndarray | Confidence scores of each point, shape is (N_points,). Ignored if obj is passed. | None
live_points | ndarray | Boolean array indicating which points are alive, shape is (N_points,). Ignored if obj is passed. | None

Raises:

Type | Description
ValueError | If obj is not an instance of the supported classes.

    Source code in norfair/drawing/drawer.py
    class Drawable:\n    \"\"\"\n    Class to standardize Drawable objects like Detections and TrackedObjects\n\n    Parameters\n    ----------\n    obj : Union[Detection, TrackedObject], optional\n        A [Detection][norfair.tracker.Detection] or a [TrackedObject][norfair.tracker.TrackedObject]\n        that will be used to initialized the drawable.\n        If this parameter is passed, all other arguments are ignored\n    points : np.ndarray, optional\n        Points included in the drawable, shape is `(N_points, N_dimensions)`. Ignored if `obj` is passed\n    id : Any, optional\n        Id of this object. Ignored if `obj` is passed\n    label : Any, optional\n        Label specifying the class of the object. Ignored if `obj` is passed\n    scores : np.ndarray, optional\n        Confidence scores of each point, shape is `(N_points,)`. Ignored if `obj` is passed\n    live_points : np.ndarray, optional\n        Bolean array indicating which points are alive, shape is `(N_points,)`. Ignored if `obj` is passed\n\n    Raises\n    ------\n    ValueError\n        If obj is not an instance of the supported classes.\n    \"\"\"\n\n    def __init__(\n        self,\n        obj: Union[Detection, TrackedObject] = None,\n        points: np.ndarray = None,\n        id: Any = None,\n        label: Any = None,\n        scores: np.ndarray = None,\n        live_points: np.ndarray = None,\n    ) -> None:\n        if isinstance(obj, Detection):\n            self.points = obj.points\n            self.id = None\n            self.label = obj.label\n            self.scores = obj.scores\n            # TODO: alive points for detections could be the ones over the threshold\n            # but that info is not available here\n            self.live_points = np.ones(obj.points.shape[0]).astype(bool)\n\n        elif isinstance(obj, TrackedObject):\n            self.points = obj.estimate\n            self.id = obj.id\n            self.label = obj.label\n            # TODO: TrackedObject.scores could be an interesting thing to have\n            # it could be the scores of the last detection or some kind of moving average\n            self.scores = None\n            self.live_points = obj.live_points\n        elif obj is None:\n            self.points = points\n            self.id = id\n            self.label = label\n            self.scores = scores\n            self.live_points = live_points\n        else:\n            raise ValueError(\n                f\"Extecting a Detection or a TrackedObject but received {type(obj)}\"\n            )\n
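As a small usage sketch, a Drawable can be built either from an existing Detection or TrackedObject, or from raw fields; the values below are made up for illustration:

import numpy as np\nfrom norfair.drawing.drawer import Drawable\n\n# From raw fields\ndrawable = Drawable(\n    points=np.array([[100.0, 200.0], [150.0, 260.0]]),\n    id=7,\n    label=\"person\",\n    scores=np.array([0.9, 0.8]),\n    live_points=np.array([True, True]),\n)\n\n# Or directly from a Detection or TrackedObject:\n# drawable = Drawable(obj=my_detection)\n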
    "},{"location":"reference/#norfair.FixedCamera","title":"FixedCamera","text":"

    Class used to stabilize video based on the camera motion.

    Starts with a larger frame, where the original frame is drawn on top of a black background. As the camera moves, the smaller frame moves in the opposite direction, stabilizing the objects in it.

    Useful for debugging or demoing the camera motion.

    Warning

    This only works with TranslationTransformation, using HomographyTransformation will result in unexpected behaviour.

    Warning

    If using other drawers, always apply this one last. Using other drawers on the scaled up frame will not work as expected.

    Note

    Sometimes the camera moves so far from the original point that the result won't fit in the scaled-up frame. In this case, a warning will be logged and the frames will be cropped to avoid errors.

Parameters:

Name | Type | Description | Default
scale | float | The resulting video will have a resolution of scale * (H, W), where HxW is the resolution of the original video. Use a bigger scale if the camera is moving too much. | 2
attenuation | float | Controls how fast the older frames fade to black. | 0.05

    Examples:

    >>> # setup\n>>> tracker = Tracker(\"frobenious\", 100)\n>>> motion_estimator = MotionEstimator()\n>>> video = Video(input_path=\"video.mp4\")\n>>> fixed_camera = FixedCamera()\n>>> # process video\n>>> for frame in video:\n>>>     coord_transformations = motion_estimator.update(frame)\n>>>     detections = get_detections(frame)\n>>>     tracked_objects = tracker.update(detections, coord_transformations)\n>>>     draw_tracked_objects(frame, tracked_objects)  # fixed_camera should always be the last drawer\n>>>     bigger_frame = fixed_camera.adjust_frame(frame, coord_transformations)\n>>>     video.write(bigger_frame)\n
    Source code in norfair/drawing/fixed_camera.py
    class FixedCamera:\n    \"\"\"\n    Class used to stabilize video based on the camera motion.\n\n    Starts with a larger frame, where the original frame is drawn on top of a black background.\n    As the camera moves, the smaller frame moves in the opposite direction, stabilizing the objects in it.\n\n    Useful for debugging or demoing the camera motion.\n    ![Example GIF](../../videos/camera_stabilization.gif)\n\n    !!! Warning\n        This only works with [`TranslationTransformation`][norfair.camera_motion.TranslationTransformation],\n        using [`HomographyTransformation`][norfair.camera_motion.HomographyTransformation] will result in\n        unexpected behaviour.\n\n    !!! Warning\n        If using other drawers, always apply this one last. Using other drawers on the scaled up frame will not work as expected.\n\n    !!! Note\n        Sometimes the camera moves so far from the original point that the result won't fit in the scaled-up frame.\n        In this case, a warning will be logged and the frames will be cropped to avoid errors.\n\n    Parameters\n    ----------\n    scale : float, optional\n        The resulting video will have a resolution of `scale * (H, W)` where HxW is the resolution of the original video.\n        Use a bigger scale if the camera is moving too much.\n    attenuation : float, optional\n        Controls how fast the older frames fade to black.\n\n    Examples\n    --------\n    >>> # setup\n    >>> tracker = Tracker(\"frobenious\", 100)\n    >>> motion_estimator = MotionEstimator()\n    >>> video = Video(input_path=\"video.mp4\")\n    >>> fixed_camera = FixedCamera()\n    >>> # process video\n    >>> for frame in video:\n    >>>     coord_transformations = motion_estimator.update(frame)\n    >>>     detections = get_detections(frame)\n    >>>     tracked_objects = tracker.update(detections, coord_transformations)\n    >>>     draw_tracked_objects(frame, tracked_objects)  # fixed_camera should always be the last drawer\n    >>>     bigger_frame = fixed_camera.adjust_frame(frame, coord_transformations)\n    >>>     video.write(bigger_frame)\n    \"\"\"\n\n    def __init__(self, scale: float = 2, attenuation: float = 0.05):\n        self.scale = scale\n        self._background = None\n        self._attenuation_factor = 1 - attenuation\n\n    def adjust_frame(\n        self, frame: np.ndarray, coord_transformation: TranslationTransformation\n    ) -> np.ndarray:\n        \"\"\"\n        Render scaled up frame.\n\n        Parameters\n        ----------\n        frame : np.ndarray\n            The OpenCV frame.\n        coord_transformation : TranslationTransformation\n            The coordinate transformation as returned by the [`MotionEstimator`][norfair.camera_motion.MotionEstimator]\n\n        Returns\n        -------\n        np.ndarray\n            The new bigger frame with the original frame drawn on it.\n        \"\"\"\n\n        # initialize background if necessary\n        if self._background is None:\n            original_size = (\n                frame.shape[1],\n                frame.shape[0],\n            )  # OpenCV format is (width, height)\n\n            scaled_size = tuple(\n                (np.array(original_size) * np.array(self.scale)).round().astype(int)\n            )\n            self._background = np.zeros(\n                [scaled_size[1], scaled_size[0], frame.shape[-1]],\n                frame.dtype,\n            )\n        else:\n            self._background = (self._background * self._attenuation_factor).astype(\n          
      frame.dtype\n            )\n\n        # top_left is the anchor coordinate from where we start drawing the fame on top of the background\n        # aim to draw it in the center of the background but transformations will move this point\n        top_left = (\n            np.array(self._background.shape[:2]) // 2 - np.array(frame.shape[:2]) // 2\n        )\n        top_left = (\n            coord_transformation.rel_to_abs(top_left[::-1]).round().astype(int)[::-1]\n        )\n        # box of the background that will be updated and the limits of it\n        background_y0, background_y1 = (top_left[0], top_left[0] + frame.shape[0])\n        background_x0, background_x1 = (top_left[1], top_left[1] + frame.shape[1])\n        background_size_y, background_size_x = self._background.shape[:2]\n\n        # define box of the frame that will be used\n        # if the scale is not enough to support the movement, warn the user but keep drawing\n        # cropping the frame so that the operation doesn't fail\n        frame_y0, frame_y1, frame_x0, frame_x1 = (0, frame.shape[0], 0, frame.shape[1])\n        if (\n            background_y0 < 0\n            or background_x0 < 0\n            or background_y1 > background_size_y\n            or background_x1 > background_size_x\n        ):\n            warn_once(\n                \"moving_camera_scale is not enough to cover the range of camera movement, frame will be cropped\"\n            )\n            # crop left or top of the frame if necessary\n            frame_y0 = max(-background_y0, 0)\n            frame_x0 = max(-background_x0, 0)\n            # crop right or bottom of the frame if necessary\n            frame_y1 = max(\n                min(background_size_y - background_y0, background_y1 - background_y0), 0\n            )\n            frame_x1 = max(\n                min(background_size_x - background_x0, background_x1 - background_x0), 0\n            )\n            # handle cases where the limits of the background become negative which numpy will interpret incorrectly\n            background_y0 = max(background_y0, 0)\n            background_x0 = max(background_x0, 0)\n            background_y1 = max(background_y1, 0)\n            background_x1 = max(background_x1, 0)\n        self._background[\n            background_y0:background_y1, background_x0:background_x1, :\n        ] = frame[frame_y0:frame_y1, frame_x0:frame_x1, :]\n        return self._background\n
    "},{"location":"reference/#norfair.FixedCamera.adjust_frame","title":"adjust_frame(frame, coord_transformation)","text":"

    Render scaled up frame.

Parameters:

Name | Type | Description | Default
frame | ndarray | The OpenCV frame. | required
coord_transformation | TranslationTransformation | The coordinate transformation as returned by the MotionEstimator. | required

Returns:

Type | Description
ndarray | The new bigger frame with the original frame drawn on it.

    Source code in norfair/drawing/fixed_camera.py
    def adjust_frame(\n    self, frame: np.ndarray, coord_transformation: TranslationTransformation\n) -> np.ndarray:\n    \"\"\"\n    Render scaled up frame.\n\n    Parameters\n    ----------\n    frame : np.ndarray\n        The OpenCV frame.\n    coord_transformation : TranslationTransformation\n        The coordinate transformation as returned by the [`MotionEstimator`][norfair.camera_motion.MotionEstimator]\n\n    Returns\n    -------\n    np.ndarray\n        The new bigger frame with the original frame drawn on it.\n    \"\"\"\n\n    # initialize background if necessary\n    if self._background is None:\n        original_size = (\n            frame.shape[1],\n            frame.shape[0],\n        )  # OpenCV format is (width, height)\n\n        scaled_size = tuple(\n            (np.array(original_size) * np.array(self.scale)).round().astype(int)\n        )\n        self._background = np.zeros(\n            [scaled_size[1], scaled_size[0], frame.shape[-1]],\n            frame.dtype,\n        )\n    else:\n        self._background = (self._background * self._attenuation_factor).astype(\n            frame.dtype\n        )\n\n    # top_left is the anchor coordinate from where we start drawing the fame on top of the background\n    # aim to draw it in the center of the background but transformations will move this point\n    top_left = (\n        np.array(self._background.shape[:2]) // 2 - np.array(frame.shape[:2]) // 2\n    )\n    top_left = (\n        coord_transformation.rel_to_abs(top_left[::-1]).round().astype(int)[::-1]\n    )\n    # box of the background that will be updated and the limits of it\n    background_y0, background_y1 = (top_left[0], top_left[0] + frame.shape[0])\n    background_x0, background_x1 = (top_left[1], top_left[1] + frame.shape[1])\n    background_size_y, background_size_x = self._background.shape[:2]\n\n    # define box of the frame that will be used\n    # if the scale is not enough to support the movement, warn the user but keep drawing\n    # cropping the frame so that the operation doesn't fail\n    frame_y0, frame_y1, frame_x0, frame_x1 = (0, frame.shape[0], 0, frame.shape[1])\n    if (\n        background_y0 < 0\n        or background_x0 < 0\n        or background_y1 > background_size_y\n        or background_x1 > background_size_x\n    ):\n        warn_once(\n            \"moving_camera_scale is not enough to cover the range of camera movement, frame will be cropped\"\n        )\n        # crop left or top of the frame if necessary\n        frame_y0 = max(-background_y0, 0)\n        frame_x0 = max(-background_x0, 0)\n        # crop right or bottom of the frame if necessary\n        frame_y1 = max(\n            min(background_size_y - background_y0, background_y1 - background_y0), 0\n        )\n        frame_x1 = max(\n            min(background_size_x - background_x0, background_x1 - background_x0), 0\n        )\n        # handle cases where the limits of the background become negative which numpy will interpret incorrectly\n        background_y0 = max(background_y0, 0)\n        background_x0 = max(background_x0, 0)\n        background_y1 = max(background_y1, 0)\n        background_x1 = max(background_x1, 0)\n    self._background[\n        background_y0:background_y1, background_x0:background_x1, :\n    ] = frame[frame_y0:frame_y1, frame_x0:frame_x1, :]\n    return self._background\n
    "},{"location":"reference/#norfair.AbsolutePaths","title":"AbsolutePaths","text":"

    Class that draws the absolute paths taken by a set of points.

    Works just like Paths but supports camera motion.

    Warning

This drawer is not optimized, so it can be extremely slow. Performance degrades linearly with max_history * number_of_tracked_objects.

    Parameters:

    Name Type Description Default get_points_to_draw Optional[Callable[[array], array]]

    Function that takes a list of points (the .estimate attribute of a TrackedObject) and returns a list of points for which we want to draw their paths.

    By default it is the mean point of all the points in the tracker.

    None thickness Optional[int]

    Thickness of the circles representing the paths of interest.

    None color Optional[Tuple[int, int, int]]

    Color of the circles representing the paths of interest.

    None radius Optional[int]

    Radius of the circles representing the paths of interest.

    None max_history int

Number of past points to include in the path. High values make the drawing slower.

    20

    Examples:

    >>> from norfair import Tracker, Video, Path\n>>> video = Video(\"video.mp4\")\n>>> tracker = Tracker(...)\n>>> path_drawer = Path()\n>>> for frame in video:\n>>>    detections = get_detections(frame)  # runs detector and returns Detections\n>>>    tracked_objects = tracker.update(detections)\n>>>    frame = path_drawer.draw(frame, tracked_objects)\n>>>    video.write(frame)\n
    Source code in norfair/drawing/path.py
    class AbsolutePaths:\n    \"\"\"\n    Class that draws the absolute paths taken by a set of points.\n\n    Works just like [`Paths`][norfair.drawing.Paths] but supports camera motion.\n\n    !!! warning\n        This drawer is not optimized so it can be stremely slow. Performance degrades linearly with\n        `max_history * number_of_tracked_objects`.\n\n    Parameters\n    ----------\n    get_points_to_draw : Optional[Callable[[np.array], np.array]], optional\n        Function that takes a list of points (the `.estimate` attribute of a [`TrackedObject`][norfair.tracker.TrackedObject])\n        and returns a list of points for which we want to draw their paths.\n\n        By default it is the mean point of all the points in the tracker.\n    thickness : Optional[int], optional\n        Thickness of the circles representing the paths of interest.\n    color : Optional[Tuple[int, int, int]], optional\n        [Color][norfair.drawing.Color] of the circles representing the paths of interest.\n    radius : Optional[int], optional\n        Radius of the circles representing the paths of interest.\n    max_history : int, optional\n        Number of past points to include in the path. High values make the drawing slower\n\n    Examples\n    --------\n    >>> from norfair import Tracker, Video, Path\n    >>> video = Video(\"video.mp4\")\n    >>> tracker = Tracker(...)\n    >>> path_drawer = Path()\n    >>> for frame in video:\n    >>>    detections = get_detections(frame)  # runs detector and returns Detections\n    >>>    tracked_objects = tracker.update(detections)\n    >>>    frame = path_drawer.draw(frame, tracked_objects)\n    >>>    video.write(frame)\n    \"\"\"\n\n    def __init__(\n        self,\n        get_points_to_draw: Optional[Callable[[np.array], np.array]] = None,\n        thickness: Optional[int] = None,\n        color: Optional[Tuple[int, int, int]] = None,\n        radius: Optional[int] = None,\n        max_history=20,\n    ):\n\n        if get_points_to_draw is None:\n\n            def get_points_to_draw(points):\n                return [np.mean(np.array(points), axis=0)]\n\n        self.get_points_to_draw = get_points_to_draw\n\n        self.radius = radius\n        self.thickness = thickness\n        self.color = color\n        self.past_points = defaultdict(lambda: [])\n        self.max_history = max_history\n        self.alphas = np.linspace(0.99, 0.01, max_history)\n\n    def draw(self, frame, tracked_objects, coord_transform=None):\n        frame_scale = frame.shape[0] / 100\n\n        if self.radius is None:\n            self.radius = int(max(frame_scale * 0.7, 1))\n        if self.thickness is None:\n            self.thickness = int(max(frame_scale / 7, 1))\n        for obj in tracked_objects:\n            if not obj.live_points.any():\n                continue\n\n            if self.color is None:\n                color = Palette.choose_color(obj.id)\n            else:\n                color = self.color\n\n            points_to_draw = self.get_points_to_draw(obj.get_estimate(absolute=True))\n\n            for point in coord_transform.abs_to_rel(points_to_draw):\n                Drawer.circle(\n                    frame,\n                    position=tuple(point.astype(int)),\n                    radius=self.radius,\n                    color=color,\n                    thickness=self.thickness,\n                )\n\n            last = points_to_draw\n            for i, past_points in enumerate(self.past_points[obj.id]):\n                overlay = frame.copy()\n  
              last = coord_transform.abs_to_rel(last)\n                for j, point in enumerate(coord_transform.abs_to_rel(past_points)):\n                    Drawer.line(\n                        overlay,\n                        tuple(last[j].astype(int)),\n                        tuple(point.astype(int)),\n                        color=color,\n                        thickness=self.thickness,\n                    )\n                last = past_points\n\n                alpha = self.alphas[i]\n                frame = Drawer.alpha_blend(overlay, frame, alpha=alpha)\n            self.past_points[obj.id].insert(0, points_to_draw)\n            self.past_points[obj.id] = self.past_points[obj.id][: self.max_history]\n        return frame\n
    "},{"location":"reference/#norfair.Paths","title":"Paths","text":"

    Class that draws the paths taken by a set of points of interest defined from the coordinates of each tracker estimation.

    Parameters:

    Name Type Description Default get_points_to_draw Optional[Callable[[array], array]]

    Function that takes a list of points (the .estimate attribute of a TrackedObject) and returns a list of points for which we want to draw their paths.

    By default it is the mean point of all the points in the tracker.

    None thickness Optional[int]

    Thickness of the circles representing the paths of interest.

    None color Optional[Tuple[int, int, int]]

    Color of the circles representing the paths of interest.

    None radius Optional[int]

    Radius of the circles representing the paths of interest.

    None attenuation float

A float in [0, 1] that dictates the speed at which the path is erased. If it is 0, the path is never erased.

    0.01

    Examples:

    >>> from norfair import Tracker, Video, Path\n>>> video = Video(\"video.mp4\")\n>>> tracker = Tracker(...)\n>>> path_drawer = Path()\n>>> for frame in video:\n>>>    detections = get_detections(frame)  # runs detector and returns Detections\n>>>    tracked_objects = tracker.update(detections)\n>>>    frame = path_drawer.draw(frame, tracked_objects)\n>>>    video.write(frame)\n
    Source code in norfair/drawing/path.py
    class Paths:\n    \"\"\"\n    Class that draws the paths taken by a set of points of interest defined from the coordinates of each tracker estimation.\n\n    Parameters\n    ----------\n    get_points_to_draw : Optional[Callable[[np.array], np.array]], optional\n        Function that takes a list of points (the `.estimate` attribute of a [`TrackedObject`][norfair.tracker.TrackedObject])\n        and returns a list of points for which we want to draw their paths.\n\n        By default it is the mean point of all the points in the tracker.\n    thickness : Optional[int], optional\n        Thickness of the circles representing the paths of interest.\n    color : Optional[Tuple[int, int, int]], optional\n        [Color][norfair.drawing.Color] of the circles representing the paths of interest.\n    radius : Optional[int], optional\n        Radius of the circles representing the paths of interest.\n    attenuation : float, optional\n        A float number in [0, 1] that dictates the speed at which the path is erased.\n        if it is `0` then the path is never erased.\n\n    Examples\n    --------\n    >>> from norfair import Tracker, Video, Path\n    >>> video = Video(\"video.mp4\")\n    >>> tracker = Tracker(...)\n    >>> path_drawer = Path()\n    >>> for frame in video:\n    >>>    detections = get_detections(frame)  # runs detector and returns Detections\n    >>>    tracked_objects = tracker.update(detections)\n    >>>    frame = path_drawer.draw(frame, tracked_objects)\n    >>>    video.write(frame)\n    \"\"\"\n\n    def __init__(\n        self,\n        get_points_to_draw: Optional[Callable[[np.array], np.array]] = None,\n        thickness: Optional[int] = None,\n        color: Optional[Tuple[int, int, int]] = None,\n        radius: Optional[int] = None,\n        attenuation: float = 0.01,\n    ):\n        if get_points_to_draw is None:\n\n            def get_points_to_draw(points):\n                return [np.mean(np.array(points), axis=0)]\n\n        self.get_points_to_draw = get_points_to_draw\n\n        self.radius = radius\n        self.thickness = thickness\n        self.color = color\n        self.mask = None\n        self.attenuation_factor = 1 - attenuation\n\n    def draw(\n        self, frame: np.ndarray, tracked_objects: Sequence[TrackedObject]\n    ) -> np.array:\n        \"\"\"\n        Draw the paths of the points interest on a frame.\n\n        !!! 
warning\n            This method does **not** draw frames in place as other drawers do, the resulting frame is returned.\n\n        Parameters\n        ----------\n        frame : np.ndarray\n            The OpenCV frame to draw on.\n        tracked_objects : Sequence[TrackedObject]\n            List of [`TrackedObject`][norfair.tracker.TrackedObject] to get the points of interest in order to update the paths.\n\n        Returns\n        -------\n        np.array\n            The resulting frame.\n        \"\"\"\n        if self.mask is None:\n            frame_scale = frame.shape[0] / 100\n\n            if self.radius is None:\n                self.radius = int(max(frame_scale * 0.7, 1))\n            if self.thickness is None:\n                self.thickness = int(max(frame_scale / 7, 1))\n\n            self.mask = np.zeros(frame.shape, np.uint8)\n\n        self.mask = (self.mask * self.attenuation_factor).astype(\"uint8\")\n\n        for obj in tracked_objects:\n            if obj.abs_to_rel is not None:\n                warn_once(\n                    \"It seems that your using the Path drawer together with MotionEstimator. This is not fully supported and the results will not be what's expected\"\n                )\n\n            if self.color is None:\n                color = Palette.choose_color(obj.id)\n            else:\n                color = self.color\n\n            points_to_draw = self.get_points_to_draw(obj.estimate)\n\n            for point in points_to_draw:\n                self.mask = Drawer.circle(\n                    self.mask,\n                    position=tuple(point.astype(int)),\n                    radius=self.radius,\n                    color=color,\n                    thickness=self.thickness,\n                )\n\n        return Drawer.alpha_blend(self.mask, frame, alpha=1, beta=1)\n
    "},{"location":"reference/#norfair.Paths.draw","title":"draw(frame, tracked_objects)","text":"

Draw the paths of the points of interest on a frame.

    Warning

This method does not draw frames in place as other drawers do; the resulting frame is returned.

    Parameters:

    Name Type Description Default frame ndarray

    The OpenCV frame to draw on.

    required tracked_objects Sequence[TrackedObject]

    List of TrackedObject to get the points of interest in order to update the paths.

    required

    Returns:

    Type Description array

    The resulting frame.

    Source code in norfair/drawing/path.py
    def draw(\n    self, frame: np.ndarray, tracked_objects: Sequence[TrackedObject]\n) -> np.array:\n    \"\"\"\n    Draw the paths of the points interest on a frame.\n\n    !!! warning\n        This method does **not** draw frames in place as other drawers do, the resulting frame is returned.\n\n    Parameters\n    ----------\n    frame : np.ndarray\n        The OpenCV frame to draw on.\n    tracked_objects : Sequence[TrackedObject]\n        List of [`TrackedObject`][norfair.tracker.TrackedObject] to get the points of interest in order to update the paths.\n\n    Returns\n    -------\n    np.array\n        The resulting frame.\n    \"\"\"\n    if self.mask is None:\n        frame_scale = frame.shape[0] / 100\n\n        if self.radius is None:\n            self.radius = int(max(frame_scale * 0.7, 1))\n        if self.thickness is None:\n            self.thickness = int(max(frame_scale / 7, 1))\n\n        self.mask = np.zeros(frame.shape, np.uint8)\n\n    self.mask = (self.mask * self.attenuation_factor).astype(\"uint8\")\n\n    for obj in tracked_objects:\n        if obj.abs_to_rel is not None:\n            warn_once(\n                \"It seems that your using the Path drawer together with MotionEstimator. This is not fully supported and the results will not be what's expected\"\n            )\n\n        if self.color is None:\n            color = Palette.choose_color(obj.id)\n        else:\n            color = self.color\n\n        points_to_draw = self.get_points_to_draw(obj.estimate)\n\n        for point in points_to_draw:\n            self.mask = Drawer.circle(\n                self.mask,\n                position=tuple(point.astype(int)),\n                radius=self.radius,\n                color=color,\n                thickness=self.thickness,\n            )\n\n    return Drawer.alpha_blend(self.mask, frame, alpha=1, beta=1)\n
    "},{"location":"reference/#norfair.frobenius","title":"frobenius(detection, tracked_object)","text":"

Frobenius norm on the difference of the points in detection and the estimates in tracked_object.

    The Frobenius distance and norm are given by:

    \\[ d_f(a, b) = ||a - b||_F \\] \\[ ||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2} \\]

    Parameters:

    Name Type Description Default detection Detection

    A detection.

    required tracked_object TrackedObject

    A tracked object.

    required

    Returns:

    Type Description float

    The distance.

    See Also

    np.linalg.norm

    Source code in norfair/distances.py
    def frobenius(detection: \"Detection\", tracked_object: \"TrackedObject\") -> float:\n    \"\"\"\n    Frobernius norm on the difference of the points in detection and the estimates in tracked_object.\n\n    The Frobenius distance and norm are given by:\n\n    $$\n    d_f(a, b) = ||a - b||_F\n    $$\n\n    $$\n    ||A||_F = [\\\\sum_{i,j} abs(a_{i,j})^2]^{1/2}\n    $$\n\n    Parameters\n    ----------\n    detection : Detection\n        A detection.\n    tracked_object : TrackedObject\n        A tracked object.\n\n    Returns\n    -------\n    float\n        The distance.\n\n    See Also\n    --------\n    [`np.linalg.norm`](https://numpy.org/doc/stable/reference/generated/numpy.linalg.norm.html)\n    \"\"\"\n    return np.linalg.norm(detection.points - tracked_object.estimate)\n
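
As a quick numerical illustration of the formula above, here is a minimal sketch using plain NumPy arrays as stand-ins for detection.points and tracked_object.estimate (the values are illustrative only):

>>> import numpy as np
>>> points = np.array([[0.0, 0.0], [3.0, 4.0]])     # stand-in for detection.points
>>> estimate = np.array([[0.0, 0.0], [0.0, 0.0]])   # stand-in for tracked_object.estimate
>>> np.linalg.norm(points - estimate)               # sqrt(3**2 + 4**2) = 5.0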
    "},{"location":"reference/#norfair.mean_euclidean","title":"mean_euclidean(detection, tracked_object)","text":"

    Average euclidean distance between the points in detection and estimates in tracked_object.

    \\[ d(a, b) = \\frac{\\sum_{i=0}^N ||a_i - b_i||_2}{N} \\]

    Parameters:

    Name Type Description Default detection Detection

    A detection.

    required tracked_object TrackedObject

    A tracked object

    required

    Returns:

    Type Description float

    The distance.

    See Also

    np.linalg.norm

    Source code in norfair/distances.py
    def mean_euclidean(detection: \"Detection\", tracked_object: \"TrackedObject\") -> float:\n    \"\"\"\n    Average euclidean distance between the points in detection and estimates in tracked_object.\n\n    $$\n    d(a, b) = \\\\frac{\\\\sum_{i=0}^N ||a_i - b_i||_2}{N}\n    $$\n\n    Parameters\n    ----------\n    detection : Detection\n        A detection.\n    tracked_object : TrackedObject\n        A tracked object\n\n    Returns\n    -------\n    float\n        The distance.\n\n    See Also\n    --------\n    [`np.linalg.norm`](https://numpy.org/doc/stable/reference/generated/numpy.linalg.norm.html)\n    \"\"\"\n    return np.linalg.norm(detection.points - tracked_object.estimate, axis=1).mean()\n
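
For contrast with the Frobenius distance above, a minimal sketch with the same stand-in arrays, averaging the per-point euclidean distances (again, the values are only illustrative):

>>> import numpy as np
>>> points = np.array([[0.0, 0.0], [3.0, 4.0]])
>>> estimate = np.array([[0.0, 0.0], [0.0, 0.0]])
>>> np.linalg.norm(points - estimate, axis=1).mean()  # (0.0 + 5.0) / 2 = 2.5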
    "},{"location":"reference/#norfair.mean_manhattan","title":"mean_manhattan(detection, tracked_object)","text":"

Average manhattan distance between the points in detection and the estimates in tracked_object.

    Given by:

    \\[ d(a, b) = \\frac{\\sum_{i=0}^N ||a_i - b_i||_1}{N} \\]

    Where \\(||a||_1\\) is the manhattan norm.

    Parameters:

    Name Type Description Default detection Detection

    A detection.

    required tracked_object TrackedObject

    a tracked object.

    required

    Returns:

    Type Description float

    The distance.

    See Also

    np.linalg.norm

    Source code in norfair/distances.py
    def mean_manhattan(detection: \"Detection\", tracked_object: \"TrackedObject\") -> float:\n    \"\"\"\n    Average manhattan distance between the points in detection and the estimates in tracked_object\n\n    Given by:\n\n    $$\n    d(a, b) = \\\\frac{\\\\sum_{i=0}^N ||a_i - b_i||_1}{N}\n    $$\n\n    Where $||a||_1$ is the manhattan norm.\n\n    Parameters\n    ----------\n    detection : Detection\n        A detection.\n    tracked_object : TrackedObject\n        a tracked object.\n\n    Returns\n    -------\n    float\n        The distance.\n\n    See Also\n    --------\n    [`np.linalg.norm`](https://numpy.org/doc/stable/reference/generated/numpy.linalg.norm.html)\n    \"\"\"\n    return np.linalg.norm(\n        detection.points - tracked_object.estimate, ord=1, axis=1\n    ).mean()\n
    "},{"location":"reference/#norfair.iou","title":"iou(candidates, objects)","text":"

    Calculate IoU between two sets of bounding boxes. Both sets of boxes are expected to be in [x_min, y_min, x_max, y_max] format.

Normal IoU is 1 when the boxes are the same and 0 when they don't overlap; to turn that into a distance that makes sense, we return 1 - iou.

    Parameters:

    Name Type Description Default candidates ndarray

    (N, 4) numpy.ndarray containing candidates bounding boxes.

    required objects ndarray

    (K, 4) numpy.ndarray containing objects bounding boxes.

    required

    Returns:

    Type Description ndarray

    (N, K) numpy.ndarray of 1 - iou between candidates and objects.

    Source code in norfair/distances.py
    def iou(candidates: np.ndarray, objects: np.ndarray) -> np.ndarray:\n    \"\"\"\n    Calculate IoU between two sets of bounding boxes. Both sets of boxes are expected\n    to be in `[x_min, y_min, x_max, y_max]` format.\n\n    Normal IoU is 1 when the boxes are the same and 0 when they don't overlap,\n    to transform that into a distance that makes sense we return `1 - iou`.\n\n    Parameters\n    ----------\n    candidates : numpy.ndarray\n        (N, 4) numpy.ndarray containing candidates bounding boxes.\n    objects : numpy.ndarray\n        (K, 4) numpy.ndarray containing objects bounding boxes.\n\n    Returns\n    -------\n    numpy.ndarray\n        (N, K) numpy.ndarray of `1 - iou` between candidates and objects.\n    \"\"\"\n    _validate_bboxes(candidates)\n\n    area_candidates = _boxes_area(candidates.T)\n    area_objects = _boxes_area(objects.T)\n\n    top_left = np.maximum(candidates[:, None, :2], objects[:, :2])\n    bottom_right = np.minimum(candidates[:, None, 2:], objects[:, 2:])\n\n    area_intersection = np.prod(\n        np.clip(bottom_right - top_left, a_min=0, a_max=None), 2\n    )\n    return 1 - area_intersection / (\n        area_candidates[:, None] + area_objects - area_intersection\n    )\n
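
A minimal sketch of the returned 1 - IoU matrix, assuming iou is imported from norfair.distances (its source location above) and using two illustrative boxes:

>>> import numpy as np
>>> from norfair.distances import iou
>>> candidates = np.array([[0.0, 0.0, 10.0, 10.0]])
>>> objects = np.array([[0.0, 0.0, 10.0, 10.0], [5.0, 5.0, 15.0, 15.0]])
>>> iou(candidates, objects)  # identical boxes give 0.0; the partially overlapping pair gives 1 - 25/175, roughly 0.857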
    "},{"location":"reference/#norfair.get_distance_by_name","title":"get_distance_by_name(name)","text":"

    Select a distance by name.

    Parameters:

    Name Type Description Default name str

    A string defining the metric to get.

    required

    Returns:

    Type Description Distance

    The distance object.

    Source code in norfair/distances.py
    def get_distance_by_name(name: str) -> Distance:\n    \"\"\"\n    Select a distance by name.\n\n    Parameters\n    ----------\n    name : str\n        A string defining the metric to get.\n\n    Returns\n    -------\n    Distance\n        The distance object.\n    \"\"\"\n\n    if name in _SCALAR_DISTANCE_FUNCTIONS:\n        warning(\n            \"You are using a scalar distance function. If you want to speed up the\"\n            \" tracking process please consider using a vectorized distance function\"\n            f\" such as {AVAILABLE_VECTORIZED_DISTANCES}.\"\n        )\n        distance = _SCALAR_DISTANCE_FUNCTIONS[name]\n        distance_function = ScalarDistance(distance)\n    elif name in _SCIPY_DISTANCE_FUNCTIONS:\n        distance_function = ScipyDistance(name)\n    elif name in _VECTORIZED_DISTANCE_FUNCTIONS:\n        if name == \"iou_opt\":\n            warning(\"iou_opt is deprecated, use iou instead\")\n        distance = _VECTORIZED_DISTANCE_FUNCTIONS[name]\n        distance_function = VectorizedDistance(distance)\n    else:\n        raise ValueError(\n            f\"Invalid distance '{name}', expecting one of\"\n            f\" {list(_SCALAR_DISTANCE_FUNCTIONS.keys()) + AVAILABLE_VECTORIZED_DISTANCES}\"\n        )\n\n    return distance_function\n
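
A minimal usage sketch; the import path follows the source location shown above, and note that scalar distances such as "frobenius" trigger the performance warning mentioned in the code:

>>> from norfair.distances import get_distance_by_name
>>> distance = get_distance_by_name("iou")                # vectorized distance, no warning
>>> scalar_distance = get_distance_by_name("frobenius")   # scalar distance, warns about speed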
    "},{"location":"reference/#norfair.create_keypoints_voting_distance","title":"create_keypoints_voting_distance(keypoint_distance_threshold, detection_threshold)","text":"

    Construct a keypoint voting distance function configured with the thresholds.

Count how many points in a detection match those of a tracked_object. A match is counted when the distance between the points is < keypoint_distance_threshold and the score of the last_detection of the tracked_object is > detection_threshold. Notice that if multiple points are tracked, the ith point in the detection can only match the ith point in the tracked object.

The distance is 1 if no point matches and approaches 0 as more points are matched.

    Parameters:

    Name Type Description Default keypoint_distance_threshold float

    Points closer than this threshold are considered a match.

    required detection_threshold float

    Detections and objects with score lower than this threshold are ignored.

    required

    Returns:

    Type Description Callable

The distance function that must be passed to the Tracker.

    Source code in norfair/distances.py
    def create_keypoints_voting_distance(\n    keypoint_distance_threshold: float, detection_threshold: float\n) -> Callable[[\"Detection\", \"TrackedObject\"], float]:\n    \"\"\"\n    Construct a keypoint voting distance function configured with the thresholds.\n\n    Count how many points in a detection match the with a tracked_object.\n    A match is considered when distance between the points is < `keypoint_distance_threshold`\n    and the score of the last_detection of the tracked_object is > `detection_threshold`.\n    Notice the if multiple points are tracked, the ith point in detection can only match the ith\n    point in the tracked object.\n\n    Distance is 1 if no point matches and approximates 0 as more points are matched.\n\n    Parameters\n    ----------\n    keypoint_distance_threshold: float\n        Points closer than this threshold are considered a match.\n    detection_threshold: float\n        Detections and objects with score lower than this threshold are ignored.\n\n    Returns\n    -------\n    Callable\n        The distance funtion that must be passed to the Tracker.\n    \"\"\"\n\n    def keypoints_voting_distance(\n        detection: \"Detection\", tracked_object: \"TrackedObject\"\n    ) -> float:\n        distances = np.linalg.norm(detection.points - tracked_object.estimate, axis=1)\n        match_num = np.count_nonzero(\n            (distances < keypoint_distance_threshold)\n            * (detection.scores > detection_threshold)\n            * (tracked_object.last_detection.scores > detection_threshold)\n        )\n        return 1 / (1 + match_num)\n\n    return keypoints_voting_distance\n
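
A usage sketch, assuming a pose-estimation pipeline; the threshold values below are illustrative, not recommendations:

>>> from norfair import Tracker
>>> from norfair.distances import create_keypoints_voting_distance
>>> keypoint_distance = create_keypoints_voting_distance(keypoint_distance_threshold=30, detection_threshold=0.15)
>>> tracker = Tracker(distance_function=keypoint_distance, distance_threshold=0.5)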
    "},{"location":"reference/#norfair.create_normalized_mean_euclidean_distance","title":"create_normalized_mean_euclidean_distance(height, width)","text":"

    Construct a normalized mean euclidean distance function configured with the max height and width.

The resulting distance is bounded to [0, 1], where 1 indicates opposite corners of the image.

    Parameters:

    Name Type Description Default height int

    Height of the image.

    required width int

    Width of the image.

    required

    Returns:

    Type Description Callable

The distance function that must be passed to the Tracker.

    Source code in norfair/distances.py
    def create_normalized_mean_euclidean_distance(\n    height: int, width: int\n) -> Callable[[\"Detection\", \"TrackedObject\"], float]:\n    \"\"\"\n    Construct a normalized mean euclidean distance function configured with the max height and width.\n\n    The result distance is bound to [0, 1] where 1 indicates oposite corners of the image.\n\n    Parameters\n    ----------\n    height: int\n        Height of the image.\n    width: int\n        Width of the image.\n\n    Returns\n    -------\n    Callable\n        The distance funtion that must be passed to the Tracker.\n    \"\"\"\n\n    def normalized__mean_euclidean_distance(\n        detection: \"Detection\", tracked_object: \"TrackedObject\"\n    ) -> float:\n        \"\"\"Normalized mean euclidean distance\"\"\"\n        # calculate distances and normalized it by width and height\n        difference = (detection.points - tracked_object.estimate).astype(float)\n        difference[:, 0] /= width\n        difference[:, 1] /= height\n\n        # calculate eucledean distance and average\n        return np.linalg.norm(difference, axis=1).mean()\n\n    return normalized__mean_euclidean_distance\n
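
A usage sketch, assuming the video resolution is known in advance; the frame size and threshold below are illustrative:

>>> from norfair import Tracker
>>> from norfair.distances import create_normalized_mean_euclidean_distance
>>> distance = create_normalized_mean_euclidean_distance(height=720, width=1280)
>>> tracker = Tracker(distance_function=distance, distance_threshold=0.05)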
    "},{"location":"reference/#norfair.draw_absolute_grid","title":"draw_absolute_grid(frame, coord_transformations, grid_size=20, radius=2, thickness=1, color=Color.black, polar=False)","text":"

    Draw a grid of points in absolute coordinates.

    Useful for debugging camera motion.

The points are drawn as if the camera were in the center of a sphere, and they are placed at the intersections of latitude and longitude lines over the surface of the sphere.

    Parameters:

    Name Type Description Default frame ndarray

    The OpenCV frame to draw on.

    required coord_transformations CoordinatesTransformation

    The coordinate transformation as returned by the MotionEstimator

    required grid_size int

    How many points to draw.

    20 radius int

    Size of each point.

    2 thickness int

    Thickness of each point

    1 color ColorType

    Color of the points.

    black polar Bool

If True, the points on the first frame are drawn as if the camera were pointing to a pole (viewed from the center of the earth). By default, False is used, which means the points are drawn as if the camera were pointing to the Equator.

    False Source code in norfair/drawing/absolute_grid.py
    def draw_absolute_grid(\n    frame: np.ndarray,\n    coord_transformations: CoordinatesTransformation,\n    grid_size: int = 20,\n    radius: int = 2,\n    thickness: int = 1,\n    color: ColorType = Color.black,\n    polar: bool = False,\n):\n    \"\"\"\n    Draw a grid of points in absolute coordinates.\n\n    Useful for debugging camera motion.\n\n    The points are drawn as if the camera were in the center of a sphere and points are drawn in the intersection\n    of latitude and longitude lines over the surface of the sphere.\n\n    Parameters\n    ----------\n    frame : np.ndarray\n        The OpenCV frame to draw on.\n    coord_transformations : CoordinatesTransformation\n        The coordinate transformation as returned by the [`MotionEstimator`][norfair.camera_motion.MotionEstimator]\n    grid_size : int, optional\n        How many points to draw.\n    radius : int, optional\n        Size of each point.\n    thickness : int, optional\n        Thickness of each point\n    color : ColorType, optional\n        Color of the points.\n    polar : Bool, optional\n        If True, the points on the first frame are drawn as if the camera were pointing to a pole (viewed from the center of the earth).\n        By default, False is used which means the points are drawn as if the camera were pointing to the Equator.\n    \"\"\"\n    h, w, _ = frame.shape\n\n    # get absolute points grid\n    points = _get_grid(grid_size, w, h, polar=polar)\n\n    # transform the points to relative coordinates\n    if coord_transformations is None:\n        points_transformed = points\n    else:\n        points_transformed = coord_transformations.abs_to_rel(points)\n\n    # filter points that are not visible\n    visible_points = points_transformed[\n        (points_transformed <= np.array([w, h])).all(axis=1)\n        & (points_transformed >= 0).all(axis=1)\n    ]\n    for point in visible_points:\n        Drawer.cross(\n            frame, point.astype(int), radius=radius, thickness=thickness, color=color\n        )\n
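
A usage sketch inside a typical Norfair loop; it assumes draw_absolute_grid is importable from norfair as listed on this page, and reuses the Video/MotionEstimator setup shown in the MotionEstimator example further down:

>>> from norfair import Video, draw_absolute_grid
>>> from norfair.camera_motion import MotionEstimator
>>> video = Video("video.mp4")
>>> motion_estimator = MotionEstimator()
>>> for frame in video:
>>>    coord_transformations = motion_estimator.update(frame)
>>>    draw_absolute_grid(frame, coord_transformations)
>>>    video.write(frame)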
    "},{"location":"reference/#norfair.draw_tracked_boxes","title":"draw_tracked_boxes(frame, objects, border_colors=None, border_width=None, id_size=None, id_thickness=None, draw_box=True, color_by_label=False, draw_labels=False, label_size=None, label_width=None)","text":"

    Deprecated. Use draw_box

    Source code in norfair/drawing/draw_boxes.py
    def draw_tracked_boxes(\n    frame: np.ndarray,\n    objects: Sequence[\"TrackedObject\"],\n    border_colors: Optional[Tuple[int, int, int]] = None,\n    border_width: Optional[int] = None,\n    id_size: Optional[int] = None,\n    id_thickness: Optional[int] = None,\n    draw_box: bool = True,\n    color_by_label: bool = False,\n    draw_labels: bool = False,\n    label_size: Optional[int] = None,\n    label_width: Optional[int] = None,\n) -> np.array:\n    \"**Deprecated**. Use [`draw_box`][norfair.drawing.draw_boxes.draw_boxes]\"\n    warn_once(\"draw_tracked_boxes is deprecated, use draw_box instead\")\n    return draw_boxes(\n        frame=frame,\n        drawables=objects,\n        color=\"by_label\" if color_by_label else border_colors,\n        thickness=border_width,\n        text_size=label_size or id_size,\n        text_thickness=id_thickness or label_width,\n        draw_labels=draw_labels,\n        draw_ids=id_size is not None and id_size > 0,\n        draw_box=draw_box,\n    )\n
    "},{"location":"reference/#norfair.draw_tracked_objects","title":"draw_tracked_objects(frame, objects, radius=None, color=None, id_size=None, id_thickness=None, draw_points=True, color_by_label=False, draw_labels=False, label_size=None)","text":"

Deprecated. Use draw_points

    Source code in norfair/drawing/draw_points.py
    def draw_tracked_objects(\n    frame: np.ndarray,\n    objects: Sequence[\"TrackedObject\"],\n    radius: Optional[int] = None,\n    color: Optional[ColorLike] = None,\n    id_size: Optional[float] = None,\n    id_thickness: Optional[int] = None,\n    draw_points: bool = True,  # pylint: disable=redefined-outer-name\n    color_by_label: bool = False,\n    draw_labels: bool = False,\n    label_size: Optional[int] = None,\n):\n    \"\"\"\n    **Deprecated** use [`draw_points`][norfair.drawing.draw_points.draw_points]\n    \"\"\"\n    warn_once(\"draw_tracked_objects is deprecated, use draw_points instead\")\n\n    frame_scale = frame.shape[0] / 100\n    if radius is None:\n        radius = int(frame_scale * 0.5)\n    if id_size is None:\n        id_size = frame_scale / 10\n    if id_thickness is None:\n        id_thickness = int(frame_scale / 5)\n    if label_size is None:\n        label_size = int(max(frame_scale / 100, 1))\n\n    _draw_points_alias(\n        frame=frame,\n        drawables=objects,\n        color=\"by_label\" if color_by_label else color,\n        radius=radius,\n        thickness=None,\n        draw_labels=draw_labels,\n        draw_ids=id_size is not None and id_size > 0,\n        draw_points=draw_points,\n        text_size=label_size or id_size,\n        text_thickness=id_thickness,\n        text_color=None,\n        hide_dead_points=True,\n    )\n
    "},{"location":"reference/camera_motion/","title":"Camera Motion","text":"

Camera motion estimation module.

    "},{"location":"reference/camera_motion/#norfair.camera_motion.CoordinatesTransformation","title":"CoordinatesTransformation","text":"

    Bases: ABC

    Abstract class representing a coordinate transformation.

Detections' and tracked objects' coordinates can be interpreted in 2 references:

    • Relative: their position on the current frame, (0, 0) is the top left
    • Absolute: their position in a fixed space, (0, 0) is the top left of the first frame of the video.

Therefore, a coordinate transformation in this context is a class that can transform coordinates from one reference to the other.

    Source code in norfair/camera_motion.py
    class CoordinatesTransformation(ABC):\n    \"\"\"\n    Abstract class representing a coordinate transformation.\n\n    Detections' and tracked objects' coordinates can be interpreted in 2 reference:\n\n    - _Relative_: their position on the current frame, (0, 0) is top left\n    - _Absolute_: their position on an fixed space, (0, 0)\n        is the top left of the first frame of the video.\n\n    Therefore, coordinate transformation in this context is a class that can transform\n    coordinates in one reference to another.\n    \"\"\"\n\n    @abstractmethod\n    def abs_to_rel(self, points: np.ndarray) -> np.ndarray:\n        pass\n\n    @abstractmethod\n    def rel_to_abs(self, points: np.ndarray) -> np.ndarray:\n        pass\n
    "},{"location":"reference/camera_motion/#norfair.camera_motion.TransformationGetter","title":"TransformationGetter","text":"

    Bases: ABC

Abstract class representing a method for finding a CoordinatesTransformation between 2 sets of points.

    Source code in norfair/camera_motion.py
    class TransformationGetter(ABC):\n    \"\"\"\n    Abstract class representing a method for finding CoordinatesTransformation between 2 sets of points\n    \"\"\"\n\n    @abstractmethod\n    def __call__(\n        self, curr_pts: np.ndarray, prev_pts: np.ndarray\n    ) -> Tuple[bool, CoordinatesTransformation]:\n        pass\n
    "},{"location":"reference/camera_motion/#norfair.camera_motion.TranslationTransformation","title":"TranslationTransformation","text":"

    Bases: CoordinatesTransformation

    Coordinate transformation between points using a simple translation

    Parameters:

    Name Type Description Default movement_vector ndarray

    The vector representing the translation.

    required Source code in norfair/camera_motion.py
    class TranslationTransformation(CoordinatesTransformation):\n    \"\"\"\n    Coordinate transformation between points using a simple translation\n\n    Parameters\n    ----------\n    movement_vector : np.ndarray\n        The vector representing the translation.\n    \"\"\"\n\n    def __init__(self, movement_vector):\n        self.movement_vector = movement_vector\n\n    def abs_to_rel(self, points: np.ndarray):\n        return points + self.movement_vector\n\n    def rel_to_abs(self, points: np.ndarray):\n        return points - self.movement_vector\n
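
A minimal numerical sketch of the two methods, using an illustrative movement vector:

>>> import numpy as np
>>> transformation = TranslationTransformation(np.array([5, -3]))
>>> transformation.abs_to_rel(np.array([[10, 10]]))   # adds the vector -> [[15, 7]]
>>> transformation.rel_to_abs(np.array([[15, 7]]))    # subtracts it back -> [[10, 10]]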
    "},{"location":"reference/camera_motion/#norfair.camera_motion.TranslationTransformationGetter","title":"TranslationTransformationGetter","text":"

    Bases: TransformationGetter

    Calculates TranslationTransformation between points.

    The camera movement is calculated as the mode of optical flow between the previous reference frame and the current.

Comparing consecutive frames can make differences too small to correctly estimate the translation; for this reason, the reference frame is kept fixed as we progress through the video. Eventually, if the transformation is no longer able to match enough points, the reference frame is updated.

    Parameters:

    Name Type Description Default bin_size float

Before calculating the mode, optical flow is bucketized into bins of this size.

    0.2 proportion_points_used_threshold float

    Proportion of points that must be matched, otherwise the reference frame must be updated.

    0.9 Source code in norfair/camera_motion.py
    class TranslationTransformationGetter(TransformationGetter):\n    \"\"\"\n    Calculates TranslationTransformation between points.\n\n    The camera movement is calculated as the mode of optical flow between the previous reference frame\n    and the current.\n\n    Comparing consecutive frames can make differences too small to correctly estimate the translation,\n    for this reason the reference frame is kept fixed as we progress through the video.\n    Eventually, if the transformation is no longer able to match enough points, the reference frame is updated.\n\n    Parameters\n    ----------\n    bin_size : float\n        Before calculatin the mode, optiocal flow is bucketized into bins of this size.\n    proportion_points_used_threshold: float\n        Proportion of points that must be matched, otherwise the reference frame must be updated.\n    \"\"\"\n\n    def __init__(\n        self, bin_size: float = 0.2, proportion_points_used_threshold: float = 0.9\n    ) -> None:\n        self.bin_size = bin_size\n        self.proportion_points_used_threshold = proportion_points_used_threshold\n        self.data = None\n\n    def __call__(\n        self, curr_pts: np.ndarray, prev_pts: np.ndarray\n    ) -> Tuple[bool, TranslationTransformation]:\n        # get flow\n        flow = curr_pts - prev_pts\n\n        # get mode\n        flow = np.around(flow / self.bin_size) * self.bin_size\n        unique_flows, counts = np.unique(flow, axis=0, return_counts=True)\n\n        max_index = counts.argmax()\n\n        proportion_points_used = counts[max_index] / len(prev_pts)\n        update_prvs = proportion_points_used < self.proportion_points_used_threshold\n\n        flow_mode = unique_flows[max_index]\n\n        try:\n            flow_mode += self.data\n        except TypeError:\n            pass\n\n        if update_prvs:\n            self.data = flow_mode\n\n        return update_prvs, TranslationTransformation(flow_mode)\n
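
A usage sketch: in practice this getter is usually not called directly but plugged into a MotionEstimator (described below); the default arguments are kept here:

>>> from norfair.camera_motion import MotionEstimator, TranslationTransformationGetter
>>> motion_estimator = MotionEstimator(transformations_getter=TranslationTransformationGetter())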
    "},{"location":"reference/camera_motion/#norfair.camera_motion.HomographyTransformation","title":"HomographyTransformation","text":"

    Bases: CoordinatesTransformation

Coordinate transformation between points using a homography

    Parameters:

    Name Type Description Default homography_matrix ndarray

    The matrix representing the homography

    required Source code in norfair/camera_motion.py
    class HomographyTransformation(CoordinatesTransformation):\n    \"\"\"\n    Coordinate transformation beweent points using an homography\n\n    Parameters\n    ----------\n    homography_matrix : np.ndarray\n        The matrix representing the homography\n    \"\"\"\n\n    def __init__(self, homography_matrix: np.ndarray):\n        self.homography_matrix = homography_matrix\n        self.inverse_homography_matrix = np.linalg.inv(homography_matrix)\n\n    def abs_to_rel(self, points: np.ndarray):\n        ones = np.ones((len(points), 1))\n        points_with_ones = np.hstack((points, ones))\n        points_transformed = points_with_ones @ self.homography_matrix.T\n        last_column = points_transformed[:, -1]\n        last_column[last_column == 0] = 0.0000001\n        points_transformed = points_transformed / last_column.reshape(-1, 1)\n        new_points_transformed = points_transformed[:, :2]\n        return new_points_transformed\n\n    def rel_to_abs(self, points: np.ndarray):\n        ones = np.ones((len(points), 1))\n        points_with_ones = np.hstack((points, ones))\n        points_transformed = points_with_ones @ self.inverse_homography_matrix.T\n        last_column = points_transformed[:, -1]\n        last_column[last_column == 0] = 0.0000001\n        points_transformed = points_transformed / last_column.reshape(-1, 1)\n        return points_transformed[:, :2]\n
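
A minimal numerical sketch, using a pure-translation homography so the result is easy to verify by hand:

>>> import numpy as np
>>> homography = np.array([[1.0, 0.0, 5.0], [0.0, 1.0, -3.0], [0.0, 0.0, 1.0]])
>>> transformation = HomographyTransformation(homography)
>>> transformation.abs_to_rel(np.array([[10.0, 10.0]]))  # -> [[15., 7.]]
>>> transformation.rel_to_abs(np.array([[15.0, 7.0]]))   # -> [[10., 10.]]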
    "},{"location":"reference/camera_motion/#norfair.camera_motion.HomographyTransformationGetter","title":"HomographyTransformationGetter","text":"

    Bases: TransformationGetter

    Calculates HomographyTransformation between points.

The camera movement is represented as a homography that matches the optical flow between the previous reference frame and the current.

Comparing consecutive frames can make differences too small to correctly estimate the homography, often resulting in the identity. For this reason, the reference frame is kept fixed as we progress through the video. Eventually, if the transformation is no longer able to match enough points, the reference frame is updated.

    Parameters:

    Name Type Description Default method Optional[int]

One of OpenCV's methods for finding homographies. Valid options are: [0, cv.RANSAC, cv.LMEDS, cv.RHO], by default cv.RANSAC

    None ransac_reproj_threshold int

    Maximum allowed reprojection error to treat a point pair as an inlier. More info in links below.

    3 max_iters int

    The maximum number of RANSAC iterations. More info in links below.

    2000 confidence float

    Confidence level, must be between 0 and 1. More info in links below.

    0.995 proportion_points_used_threshold float

    Proportion of points that must be matched, otherwise the reference frame must be updated.

    0.9 See Also

    opencv.findHomography

    Source code in norfair/camera_motion.py
    class HomographyTransformationGetter(TransformationGetter):\n    \"\"\"\n    Calculates HomographyTransformation between points.\n\n    The camera movement is represented as an homography that matches the optical flow between the previous reference frame\n    and the current.\n\n    Comparing consecutive frames can make differences too small to correctly estimate the homography, often resulting in the identity.\n    For this reason the reference frame is kept fixed as we progress through the video.\n    Eventually, if the transformation is no longer able to match enough points, the reference frame is updated.\n\n    Parameters\n    ----------\n    method : Optional[int], optional\n        One of openCV's method for finding homographies.\n        Valid options are: `[0, cv.RANSAC, cv.LMEDS, cv.RHO]`, by default `cv.RANSAC`\n    ransac_reproj_threshold : int, optional\n        Maximum allowed reprojection error to treat a point pair as an inlier. More info in links below.\n    max_iters : int, optional\n        The maximum number of RANSAC iterations.  More info in links below.\n    confidence : float, optional\n        Confidence level, must be between 0 and 1. More info in links below.\n    proportion_points_used_threshold : float, optional\n        Proportion of points that must be matched, otherwise the reference frame must be updated.\n\n    See Also\n    --------\n    [opencv.findHomography](https://docs.opencv.org/3.4/d9/d0c/group__calib3d.html#ga4abc2ece9fab9398f2e560d53c8c9780)\n    \"\"\"\n\n    def __init__(\n        self,\n        method: Optional[int] = None,\n        ransac_reproj_threshold: int = 3,\n        max_iters: int = 2000,\n        confidence: float = 0.995,\n        proportion_points_used_threshold: float = 0.9,\n    ) -> None:\n        self.data = None\n        if method is None:\n            method = cv2.RANSAC\n        self.method = method\n        self.ransac_reproj_threshold = ransac_reproj_threshold\n        self.max_iters = max_iters\n        self.confidence = confidence\n        self.proportion_points_used_threshold = proportion_points_used_threshold\n\n    def __call__(\n        self, curr_pts: np.ndarray, prev_pts: np.ndarray\n    ) -> Tuple[bool, Optional[HomographyTransformation]]:\n\n        if not (\n            isinstance(prev_pts, np.ndarray)\n            and prev_pts.shape[0] >= 4\n            and isinstance(curr_pts, np.ndarray)\n            and curr_pts.shape[0] >= 4\n        ):\n            warning(\n                \"The homography couldn't be computed in this frame \"\n                \"due to low amount of points\"\n            )\n            if isinstance(self.data, np.ndarray):\n                return True, HomographyTransformation(self.data)\n            else:\n                return True, None\n\n        homography_matrix, points_used = cv2.findHomography(\n            prev_pts,\n            curr_pts,\n            method=self.method,\n            ransacReprojThreshold=self.ransac_reproj_threshold,\n            maxIters=self.max_iters,\n            confidence=self.confidence,\n        )\n\n        proportion_points_used = np.sum(points_used) / len(points_used)\n\n        update_prvs = proportion_points_used < self.proportion_points_used_threshold\n\n        try:\n            homography_matrix = homography_matrix @ self.data\n        except (TypeError, ValueError):\n            pass\n\n        if update_prvs:\n            self.data = homography_matrix\n\n        return update_prvs, HomographyTransformation(homography_matrix)\n
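
A configuration sketch, assuming OpenCV is available; the parameter values are illustrative, not recommendations:

>>> import cv2
>>> from norfair.camera_motion import MotionEstimator, HomographyTransformationGetter
>>> transformations_getter = HomographyTransformationGetter(method=cv2.RANSAC, max_iters=4000, confidence=0.995)
>>> motion_estimator = MotionEstimator(transformations_getter=transformations_getter)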
    "},{"location":"reference/camera_motion/#norfair.camera_motion.MotionEstimator","title":"MotionEstimator","text":"

    Estimator of the motion of the camera.

    Uses optical flow to estimate the motion of the camera from frame to frame. The optical flow is calculated on a sample of strong points (corners).

    Parameters:

    Name Type Description Default max_points int

Maximum number of points sampled. More points make the estimation process slower but more precise.

    200 min_distance int

    Minimum distance between the sample points.

    15 block_size int

    Size of an average block when finding the corners. More info in links below.

    3 transformations_getter TransformationGetter

An instance of TransformationGetter. By default, HomographyTransformationGetter.

    None draw_flow bool

    Draws the optical flow on the frame for debugging.

    False flow_color Optional[Tuple[int, int, int]]

    Color of the drawing, by default blue.

    None quality_level float

    Parameter characterizing the minimal accepted quality of image corners.

    0.01

    Examples:

>>> from norfair import Tracker, Video\n>>> from norfair.camera_motion import MotionEstimator\n>>> video = Video(\"video.mp4\")\n>>> tracker = Tracker(...)\n>>> motion_estimator = MotionEstimator()\n>>> for frame in video:\n>>>    detections = get_detections(frame)  # runs detector and returns Detections\n>>>    coord_transformation = motion_estimator.update(frame)\n>>>    tracked_objects = tracker.update(detections, coord_transformations=coord_transformation)\n
    See Also

For more info on how the points are sampled: OpenCV.goodFeaturesToTrack

    Source code in norfair/camera_motion.py
    class MotionEstimator:\n    \"\"\"\n    Estimator of the motion of the camera.\n\n    Uses optical flow to estimate the motion of the camera from frame to frame.\n    The optical flow is calculated on a sample of strong points (corners).\n\n    Parameters\n    ----------\n    max_points : int, optional\n        Maximum amount of points sampled.\n        More points make the estimation process slower but more precise\n    min_distance : int, optional\n        Minimum distance between the sample points.\n    block_size : int, optional\n        Size of an average block when finding the corners. More info in links below.\n    transformations_getter : TransformationGetter, optional\n        An instance of TransformationGetter. By default [`HomographyTransformationGetter`][norfair.camera_motion.HomographyTransformationGetter]\n    draw_flow : bool, optional\n        Draws the optical flow on the frame for debugging.\n    flow_color : Optional[Tuple[int, int, int]], optional\n        Color of the drawing, by default blue.\n    quality_level : float, optional\n        Parameter characterizing the minimal accepted quality of image corners.\n\n    Examples\n    --------\n    >>> from norfair import Tracker, Video\n    >>> from norfair.camera_motion MotionEstimator\n    >>> video = Video(\"video.mp4\")\n    >>> tracker = Tracker(...)\n    >>> motion_estimator = MotionEstimator()\n    >>> for frame in video:\n    >>>    detections = get_detections(frame)  # runs detector and returns Detections\n    >>>    coord_transformation = motion_estimator.update(frame)\n    >>>    tracked_objects = tracker.update(detections, coord_transformations=coord_transformation)\n\n    See Also\n    --------\n    For more infor on how the points are sampled: [OpenCV.goodFeaturesToTrack](https://docs.opencv.org/3.4/dd/d1a/group__imgproc__feature.html#ga1d6bb77486c8f92d79c8793ad995d541)\n    \"\"\"\n\n    def __init__(\n        self,\n        max_points: int = 200,\n        min_distance: int = 15,\n        block_size: int = 3,\n        transformations_getter: TransformationGetter = None,\n        draw_flow: bool = False,\n        flow_color: Optional[Tuple[int, int, int]] = None,\n        quality_level: float = 0.01,\n    ):\n\n        self.max_points = max_points\n        self.min_distance = min_distance\n        self.block_size = block_size\n\n        self.draw_flow = draw_flow\n        if self.draw_flow and flow_color is None:\n            flow_color = [0, 0, 100]\n        self.flow_color = flow_color\n\n        self.gray_prvs = None\n        self.prev_pts = None\n        if transformations_getter is None:\n            transformations_getter = HomographyTransformationGetter()\n\n        self.transformations_getter = transformations_getter\n        self.transformations_getter_copy = copy.deepcopy(transformations_getter)\n\n        self.prev_mask = None\n        self.gray_next = None\n        self.quality_level = quality_level\n\n    def update(\n        self, frame: np.ndarray, mask: np.ndarray = None\n    ) -> Optional[CoordinatesTransformation]:\n        \"\"\"\n        Estimate camera motion for each frame\n\n        Parameters\n        ----------\n        frame : np.ndarray\n            The frame.\n        mask : np.ndarray, optional\n            An optional mask to avoid areas of the frame when sampling the corner.\n            Must be an array of shape `(frame.shape[0], frame.shape[1])`, dtype same as frame,\n            and values in {0, 1}.\n\n            In general, the estimation will work best when it 
samples many points from the background;\n            with that intention, this parameters is usefull for masking out the detections/tracked objects,\n            forcing the MotionEstimator ignore the moving objects.\n            Can be used to mask static areas of the image, such as score overlays in sport transmisions or\n            timestamps in security cameras.\n\n        Returns\n        -------\n        CoordinatesTransformation\n            The CoordinatesTransformation that can transform coordinates on this frame to absolute coordinates\n            or vice versa.\n        \"\"\"\n\n        self.gray_next = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n        if self.gray_prvs is None:\n            self.gray_prvs = self.gray_next\n            self.prev_mask = mask\n\n        curr_pts, prev_pts = None, None\n        try:\n            curr_pts, prev_pts = _get_sparse_flow(\n                self.gray_next,\n                self.gray_prvs,\n                self.prev_pts,\n                self.max_points,\n                self.min_distance,\n                self.block_size,\n                self.prev_mask,\n                quality_level=self.quality_level,\n            )\n            if self.draw_flow:\n                for (curr, prev) in zip(curr_pts, prev_pts):\n                    c = tuple(curr.astype(int).ravel())\n                    p = tuple(prev.astype(int).ravel())\n                    cv2.line(frame, c, p, self.flow_color, 2)\n                    cv2.circle(frame, c, 3, self.flow_color, -1)\n        except Exception as e:\n            warning(e)\n\n        update_prvs, coord_transformations = True, None\n        try:\n            update_prvs, coord_transformations = self.transformations_getter(\n                curr_pts, prev_pts\n            )\n        except Exception as e:\n            warning(e)\n            del self.transformations_getter\n            self.transformations_getter = copy.deepcopy(\n                self.transformations_getter_copy\n            )\n\n        if update_prvs:\n            self.gray_prvs = self.gray_next\n            self.prev_pts = None\n            self.prev_mask = mask\n        else:\n            self.prev_pts = prev_pts\n\n        return coord_transformations\n
    "},{"location":"reference/camera_motion/#norfair.camera_motion.MotionEstimator.update","title":"update(frame, mask=None)","text":"

    Estimate camera motion for each frame

    Parameters:

    Name Type Description Default frame ndarray

    The frame.

    required mask ndarray

An optional mask to avoid areas of the frame when sampling the corners. Must be an array of shape (frame.shape[0], frame.shape[1]), with the same dtype as frame, and values in {0, 1}.

In general, the estimation will work best when it samples many points from the background; with that intention, this parameter is useful for masking out the detections/tracked objects, forcing the MotionEstimator to ignore the moving objects. It can also be used to mask static areas of the image, such as score overlays in sports transmissions or timestamps in security cameras.

    None

    Returns:

    Type Description CoordinatesTransformation

    The CoordinatesTransformation that can transform coordinates on this frame to absolute coordinates or vice versa.

    Source code in norfair/camera_motion.py
    def update(\n    self, frame: np.ndarray, mask: np.ndarray = None\n) -> Optional[CoordinatesTransformation]:\n    \"\"\"\n    Estimate camera motion for each frame\n\n    Parameters\n    ----------\n    frame : np.ndarray\n        The frame.\n    mask : np.ndarray, optional\n        An optional mask to avoid areas of the frame when sampling the corner.\n        Must be an array of shape `(frame.shape[0], frame.shape[1])`, dtype same as frame,\n        and values in {0, 1}.\n\n        In general, the estimation will work best when it samples many points from the background;\n        with that intention, this parameters is usefull for masking out the detections/tracked objects,\n        forcing the MotionEstimator ignore the moving objects.\n        Can be used to mask static areas of the image, such as score overlays in sport transmisions or\n        timestamps in security cameras.\n\n    Returns\n    -------\n    CoordinatesTransformation\n        The CoordinatesTransformation that can transform coordinates on this frame to absolute coordinates\n        or vice versa.\n    \"\"\"\n\n    self.gray_next = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n    if self.gray_prvs is None:\n        self.gray_prvs = self.gray_next\n        self.prev_mask = mask\n\n    curr_pts, prev_pts = None, None\n    try:\n        curr_pts, prev_pts = _get_sparse_flow(\n            self.gray_next,\n            self.gray_prvs,\n            self.prev_pts,\n            self.max_points,\n            self.min_distance,\n            self.block_size,\n            self.prev_mask,\n            quality_level=self.quality_level,\n        )\n        if self.draw_flow:\n            for (curr, prev) in zip(curr_pts, prev_pts):\n                c = tuple(curr.astype(int).ravel())\n                p = tuple(prev.astype(int).ravel())\n                cv2.line(frame, c, p, self.flow_color, 2)\n                cv2.circle(frame, c, 3, self.flow_color, -1)\n    except Exception as e:\n        warning(e)\n\n    update_prvs, coord_transformations = True, None\n    try:\n        update_prvs, coord_transformations = self.transformations_getter(\n            curr_pts, prev_pts\n        )\n    except Exception as e:\n        warning(e)\n        del self.transformations_getter\n        self.transformations_getter = copy.deepcopy(\n            self.transformations_getter_copy\n        )\n\n    if update_prvs:\n        self.gray_prvs = self.gray_next\n        self.prev_pts = None\n        self.prev_mask = mask\n    else:\n        self.prev_pts = prev_pts\n\n    return coord_transformations\n
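
A sketch of building a mask that hides the tracked objects while estimating motion; it assumes frame, detections and motion_estimator come from a loop like the one in the example above, and that each detection exposes bounding-box-style points in (x, y) order, which is a simplification:

>>> import numpy as np
>>> mask = np.ones(frame.shape[:2], frame.dtype)
>>> for detection in detections:
>>>    x0, y0 = detection.points.min(axis=0).astype(int)
>>>    x1, y1 = detection.points.max(axis=0).astype(int)
>>>    mask[y0:y1, x0:x1] = 0
>>> coord_transformations = motion_estimator.update(frame, mask)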
    "},{"location":"reference/distances/","title":"Distances","text":"

    Predefined distances

    "},{"location":"reference/distances/#norfair.distances.frobenius","title":"frobenius(detection, tracked_object)","text":"

Frobenius norm on the difference of the points in detection and the estimates in tracked_object.

    The Frobenius distance and norm are given by:

    \\[ d_f(a, b) = ||a - b||_F \\] \\[ ||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2} \\]

    Parameters:

    Name Type Description Default detection Detection

    A detection.

    required tracked_object TrackedObject

    A tracked object.

    required

    Returns:

    Type Description float

    The distance.

    See Also

    np.linalg.norm

    Source code in norfair/distances.py
    def frobenius(detection: \"Detection\", tracked_object: \"TrackedObject\") -> float:\n    \"\"\"\n    Frobernius norm on the difference of the points in detection and the estimates in tracked_object.\n\n    The Frobenius distance and norm are given by:\n\n    $$\n    d_f(a, b) = ||a - b||_F\n    $$\n\n    $$\n    ||A||_F = [\\\\sum_{i,j} abs(a_{i,j})^2]^{1/2}\n    $$\n\n    Parameters\n    ----------\n    detection : Detection\n        A detection.\n    tracked_object : TrackedObject\n        A tracked object.\n\n    Returns\n    -------\n    float\n        The distance.\n\n    See Also\n    --------\n    [`np.linalg.norm`](https://numpy.org/doc/stable/reference/generated/numpy.linalg.norm.html)\n    \"\"\"\n    return np.linalg.norm(detection.points - tracked_object.estimate)\n
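
    A minimal usage sketch: the function can be passed as the distance_function of a Tracker. The distance_threshold value below is only illustrative and should be tuned to your setup.

    >>> from norfair import Tracker
    >>> from norfair.distances import frobenius
    >>> tracker = Tracker(distance_function=frobenius, distance_threshold=100)  # threshold in pixels, illustrative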
    "},{"location":"reference/distances/#norfair.distances.mean_euclidean","title":"mean_euclidean(detection, tracked_object)","text":"

    Average euclidean distance between the points in detection and estimates in tracked_object.

    \\[ d(a, b) = \\frac{\\sum_{i=0}^N ||a_i - b_i||_2}{N} \\]

    Parameters:

    Name Type Description Default detection Detection

    A detection.

    required tracked_object TrackedObject

    A tracked object.

    required

    Returns:

    Type Description float

    The distance.

    See Also

    np.linalg.norm

    Source code in norfair/distances.py
    def mean_euclidean(detection: \"Detection\", tracked_object: \"TrackedObject\") -> float:\n    \"\"\"\n    Average euclidean distance between the points in detection and estimates in tracked_object.\n\n    $$\n    d(a, b) = \\\\frac{\\\\sum_{i=0}^N ||a_i - b_i||_2}{N}\n    $$\n\n    Parameters\n    ----------\n    detection : Detection\n        A detection.\n    tracked_object : TrackedObject\n        A tracked object\n\n    Returns\n    -------\n    float\n        The distance.\n\n    See Also\n    --------\n    [`np.linalg.norm`](https://numpy.org/doc/stable/reference/generated/numpy.linalg.norm.html)\n    \"\"\"\n    return np.linalg.norm(detection.points - tracked_object.estimate, axis=1).mean()\n
    "},{"location":"reference/distances/#norfair.distances.mean_manhattan","title":"mean_manhattan(detection, tracked_object)","text":"

    Average manhattan distance between the points in detection and the estimates in tracked_object.

    Given by:

    \\[ d(a, b) = \\frac{\\sum_{i=0}^N ||a_i - b_i||_1}{N} \\]

    Where \\(||a||_1\\) is the manhattan norm.

    Parameters:

    Name Type Description Default detection Detection

    A detection.

    required tracked_object TrackedObject

    A tracked object.

    required

    Returns:

    Type Description float

    The distance.

    See Also

    np.linalg.norm

    Source code in norfair/distances.py
    def mean_manhattan(detection: \"Detection\", tracked_object: \"TrackedObject\") -> float:\n    \"\"\"\n    Average manhattan distance between the points in detection and the estimates in tracked_object\n\n    Given by:\n\n    $$\n    d(a, b) = \\\\frac{\\\\sum_{i=0}^N ||a_i - b_i||_1}{N}\n    $$\n\n    Where $||a||_1$ is the manhattan norm.\n\n    Parameters\n    ----------\n    detection : Detection\n        A detection.\n    tracked_object : TrackedObject\n        a tracked object.\n\n    Returns\n    -------\n    float\n        The distance.\n\n    See Also\n    --------\n    [`np.linalg.norm`](https://numpy.org/doc/stable/reference/generated/numpy.linalg.norm.html)\n    \"\"\"\n    return np.linalg.norm(\n        detection.points - tracked_object.estimate, ord=1, axis=1\n    ).mean()\n
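
    A small worked comparison of the three scalar distances above on a toy difference of two points, computed with np.linalg.norm exactly as the functions do:

    >>> import numpy as np
    >>> diff = np.array([[0.0, 0.0], [3.0, 4.0]])              # detection.points - tracked_object.estimate
    >>> frob = np.linalg.norm(diff)                            # frobenius: sqrt(3**2 + 4**2) = 5.0
    >>> mean_euc = np.linalg.norm(diff, axis=1).mean()         # mean_euclidean: (0 + 5) / 2 = 2.5
    >>> mean_man = np.linalg.norm(diff, ord=1, axis=1).mean()  # mean_manhattan: (0 + 7) / 2 = 3.5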
    "},{"location":"reference/distances/#norfair.distances.iou","title":"iou(candidates, objects)","text":"

    Calculate IoU between two sets of bounding boxes. Both sets of boxes are expected to be in [x_min, y_min, x_max, y_max] format.

    Normal IoU is 1 when the boxes are the same and 0 when they don't overlap; to turn that into a distance that makes sense, we return 1 - iou.

    Parameters:

    Name Type Description Default candidates ndarray

    (N, 4) numpy.ndarray containing candidates bounding boxes.

    required objects ndarray

    (K, 4) numpy.ndarray containing objects bounding boxes.

    required

    Returns:

    Type Description ndarray

    (N, K) numpy.ndarray of 1 - iou between candidates and objects.

    Source code in norfair/distances.py
    def iou(candidates: np.ndarray, objects: np.ndarray) -> np.ndarray:\n    \"\"\"\n    Calculate IoU between two sets of bounding boxes. Both sets of boxes are expected\n    to be in `[x_min, y_min, x_max, y_max]` format.\n\n    Normal IoU is 1 when the boxes are the same and 0 when they don't overlap,\n    to transform that into a distance that makes sense we return `1 - iou`.\n\n    Parameters\n    ----------\n    candidates : numpy.ndarray\n        (N, 4) numpy.ndarray containing candidates bounding boxes.\n    objects : numpy.ndarray\n        (K, 4) numpy.ndarray containing objects bounding boxes.\n\n    Returns\n    -------\n    numpy.ndarray\n        (N, K) numpy.ndarray of `1 - iou` between candidates and objects.\n    \"\"\"\n    _validate_bboxes(candidates)\n\n    area_candidates = _boxes_area(candidates.T)\n    area_objects = _boxes_area(objects.T)\n\n    top_left = np.maximum(candidates[:, None, :2], objects[:, :2])\n    bottom_right = np.minimum(candidates[:, None, 2:], objects[:, 2:])\n\n    area_intersection = np.prod(\n        np.clip(bottom_right - top_left, a_min=0, a_max=None), 2\n    )\n    return 1 - area_intersection / (\n        area_candidates[:, None] + area_objects - area_intersection\n    )\n
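
    A minimal sketch on two toy boxes in [x_min, y_min, x_max, y_max] format; the expected values in the comments follow directly from the formula above.

    >>> import numpy as np
    >>> from norfair.distances import iou
    >>> candidates = np.array([[0, 0, 10, 10]])               # one candidate box
    >>> objects = np.array([[0, 0, 10, 10], [5, 5, 15, 15]])  # two object boxes
    >>> distances = iou(candidates, objects)
    >>> # identical boxes -> distance 0.0
    >>> # second box: intersection 25, union 175 -> distance 1 - 25/175 = 0.857...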
    "},{"location":"reference/distances/#norfair.distances.get_distance_by_name","title":"get_distance_by_name(name)","text":"

    Select a distance by name.

    Parameters:

    Name Type Description Default name str

    A string defining the metric to get.

    required

    Returns:

    Type Description Distance

    The distance object.

    Source code in norfair/distances.py
    def get_distance_by_name(name: str) -> Distance:\n    \"\"\"\n    Select a distance by name.\n\n    Parameters\n    ----------\n    name : str\n        A string defining the metric to get.\n\n    Returns\n    -------\n    Distance\n        The distance object.\n    \"\"\"\n\n    if name in _SCALAR_DISTANCE_FUNCTIONS:\n        warning(\n            \"You are using a scalar distance function. If you want to speed up the\"\n            \" tracking process please consider using a vectorized distance function\"\n            f\" such as {AVAILABLE_VECTORIZED_DISTANCES}.\"\n        )\n        distance = _SCALAR_DISTANCE_FUNCTIONS[name]\n        distance_function = ScalarDistance(distance)\n    elif name in _SCIPY_DISTANCE_FUNCTIONS:\n        distance_function = ScipyDistance(name)\n    elif name in _VECTORIZED_DISTANCE_FUNCTIONS:\n        if name == \"iou_opt\":\n            warning(\"iou_opt is deprecated, use iou instead\")\n        distance = _VECTORIZED_DISTANCE_FUNCTIONS[name]\n        distance_function = VectorizedDistance(distance)\n    else:\n        raise ValueError(\n            f\"Invalid distance '{name}', expecting one of\"\n            f\" {list(_SCALAR_DISTANCE_FUNCTIONS.keys()) + AVAILABLE_VECTORIZED_DISTANCES}\"\n        )\n\n    return distance_function\n
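
    In practice the Tracker resolves metric names for you when you pass a string as its distance_function; a minimal sketch (the threshold value is illustrative):

    >>> from norfair import Tracker
    >>> from norfair.distances import get_distance_by_name
    >>> distance = get_distance_by_name('euclidean')  # any of the registered metric names
    >>> tracker = Tracker(distance_function='euclidean', distance_threshold=30)  # the Tracker also accepts the name directly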
    "},{"location":"reference/distances/#norfair.distances.create_keypoints_voting_distance","title":"create_keypoints_voting_distance(keypoint_distance_threshold, detection_threshold)","text":"

    Construct a keypoint voting distance function configured with the thresholds.

    Count how many points in a detection match those of a tracked_object. A match is counted when the distance between the points is < keypoint_distance_threshold and the score of the last_detection of the tracked_object is > detection_threshold. Note that if multiple points are tracked, the ith point in the detection can only match the ith point in the tracked object.

    Distance is 1 if no point matches and approaches 0 as more points are matched.

    Parameters:

    Name Type Description Default keypoint_distance_threshold float

    Points closer than this threshold are considered a match.

    required detection_threshold float

    Detections and objects with score lower than this threshold are ignored.

    required

    Returns:

    Type Description Callable

    The distance function that must be passed to the Tracker.

    Source code in norfair/distances.py
    def create_keypoints_voting_distance(\n    keypoint_distance_threshold: float, detection_threshold: float\n) -> Callable[[\"Detection\", \"TrackedObject\"], float]:\n    \"\"\"\n    Construct a keypoint voting distance function configured with the thresholds.\n\n    Count how many points in a detection match the with a tracked_object.\n    A match is considered when distance between the points is < `keypoint_distance_threshold`\n    and the score of the last_detection of the tracked_object is > `detection_threshold`.\n    Notice the if multiple points are tracked, the ith point in detection can only match the ith\n    point in the tracked object.\n\n    Distance is 1 if no point matches and approximates 0 as more points are matched.\n\n    Parameters\n    ----------\n    keypoint_distance_threshold: float\n        Points closer than this threshold are considered a match.\n    detection_threshold: float\n        Detections and objects with score lower than this threshold are ignored.\n\n    Returns\n    -------\n    Callable\n        The distance funtion that must be passed to the Tracker.\n    \"\"\"\n\n    def keypoints_voting_distance(\n        detection: \"Detection\", tracked_object: \"TrackedObject\"\n    ) -> float:\n        distances = np.linalg.norm(detection.points - tracked_object.estimate, axis=1)\n        match_num = np.count_nonzero(\n            (distances < keypoint_distance_threshold)\n            * (detection.scores > detection_threshold)\n            * (tracked_object.last_detection.scores > detection_threshold)\n        )\n        return 1 / (1 + match_num)\n\n    return keypoints_voting_distance\n
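
    A minimal sketch, e.g. for a pose-estimation pipeline; both thresholds below are illustrative and should be tuned to your keypoint scale and detector scores.

    >>> from norfair import Tracker
    >>> from norfair.distances import create_keypoints_voting_distance
    >>> distance = create_keypoints_voting_distance(
    ...     keypoint_distance_threshold=15,  # pixels, illustrative
    ...     detection_threshold=0.2,         # ignore low-confidence keypoints, illustrative
    ... )
    >>> # the distance is 1 / (1 + matches), so more matched keypoints give smaller distances
    >>> tracker = Tracker(distance_function=distance, distance_threshold=0.5)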
    "},{"location":"reference/distances/#norfair.distances.create_normalized_mean_euclidean_distance","title":"create_normalized_mean_euclidean_distance(height, width)","text":"

    Construct a normalized mean euclidean distance function configured with the max height and width.

    The resulting distance is bound to [0, 1], where 1 indicates opposite corners of the image.

    Parameters:

    Name Type Description Default height int

    Height of the image.

    required width int

    Width of the image.

    required

    Returns:

    Type Description Callable

    The distance function that must be passed to the Tracker.

    Source code in norfair/distances.py
    def create_normalized_mean_euclidean_distance(\n    height: int, width: int\n) -> Callable[[\"Detection\", \"TrackedObject\"], float]:\n    \"\"\"\n    Construct a normalized mean euclidean distance function configured with the max height and width.\n\n    The result distance is bound to [0, 1] where 1 indicates oposite corners of the image.\n\n    Parameters\n    ----------\n    height: int\n        Height of the image.\n    width: int\n        Width of the image.\n\n    Returns\n    -------\n    Callable\n        The distance funtion that must be passed to the Tracker.\n    \"\"\"\n\n    def normalized__mean_euclidean_distance(\n        detection: \"Detection\", tracked_object: \"TrackedObject\"\n    ) -> float:\n        \"\"\"Normalized mean euclidean distance\"\"\"\n        # calculate distances and normalized it by width and height\n        difference = (detection.points - tracked_object.estimate).astype(float)\n        difference[:, 0] /= width\n        difference[:, 1] /= height\n\n        # calculate eucledean distance and average\n        return np.linalg.norm(difference, axis=1).mean()\n\n    return normalized__mean_euclidean_distance\n
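
    A minimal sketch; the frame size and distance_threshold below are illustrative.

    >>> from norfair import Tracker
    >>> from norfair.distances import create_normalized_mean_euclidean_distance
    >>> frame_height, frame_width = 720, 1280  # illustrative resolution
    >>> distance = create_normalized_mean_euclidean_distance(frame_height, frame_width)
    >>> tracker = Tracker(distance_function=distance, distance_threshold=0.1)  # threshold in normalized image units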
    "},{"location":"reference/drawing/","title":"Drawing","text":"

    Collection of drawing functions

    "},{"location":"reference/drawing/#norfair.drawing.draw_points","title":"draw_points","text":""},{"location":"reference/drawing/#norfair.drawing.draw_points.draw_points","title":"draw_points(frame, drawables=None, radius=None, thickness=None, color='by_id', color_by_label=None, draw_labels=True, text_size=None, draw_ids=True, draw_points=True, text_thickness=None, text_color=None, hide_dead_points=True, detections=None, label_size=None, draw_scores=False)","text":"

    Draw the points included in a list of Detections or TrackedObjects.

    Parameters:

    Name Type Description Default frame ndarray

    The OpenCV frame to draw on. Modified in place.

    required drawables Union[Sequence[Detection], Sequence[TrackedObject]]

    List of objects to draw; Detections and TrackedObjects are accepted.

    None radius Optional[int]

    Radius of the circles representing each point. By default a sensible value is picked considering the frame size.

    None thickness Optional[int]

    Thickness or width of the line.

    None color ColorLike

    This parameter can take:

    1. A color as a tuple of ints describing the BGR (0, 0, 255)
    2. A 6-digit hex string \"#FF0000\"
    3. One of the defined color names \"red\"
    4. A string defining the strategy to choose colors from the Palette:

      1. based on the id of the objects \"by_id\"
      2. based on the label of the objects \"by_label\"
      3. random choice \"random\"

    If using the by_id or by_label strategy but your objects don't have that field defined (Detections never have ids), the selected color will be the same for all objects (the Palette's default Color).

    'by_id' color_by_label bool

    Deprecated. Set color=\"by_label\".

    None draw_labels bool

    If set to True, the label is added to a title that is drawn on top of the box. If an object doesn't have a label this parameter is ignored.

    True draw_scores bool

    If set to True, the score is added to a title that is drawn on top of the box. If an object doesn't have a label this parameter is ignored.

    False text_size Optional[int]

    Size of the title; the value is used as a multiplier of the base size of the font. By default the size is scaled automatically based on the frame size.

    None draw_ids bool

    If set to True, the id is added to a title that is drawn on top of the box. If an object doesn't have an id this parameter is ignored.

    True draw_points bool

    Set to False to hide the points and just draw the text.

    True text_thickness Optional[int]

    Thickness of the font. By default it's scaled with the text_size.

    None text_color Optional[ColorLike]

    Color of the text. By default the same color as the box is used.

    None hide_dead_points bool

    Set this param to False to always draw all points, even the ones considered \"dead\". A point is \"dead\" when the corresponding value of TrackedObject.live_points is set to False. If all of an object's points are dead, the object is not drawn. All points of a detection are considered to be alive.

    True detections Sequence[Detection]

    Deprecated. Use drawables.

    None label_size Optional[int]

    Deprecated. Use text_size.

    None

    Returns:

    Type Description ndarray

    The resulting frame.

    Source code in norfair/drawing/draw_points.py
    def draw_points(\n    frame: np.ndarray,\n    drawables: Union[Sequence[Detection], Sequence[TrackedObject]] = None,\n    radius: Optional[int] = None,\n    thickness: Optional[int] = None,\n    color: ColorLike = \"by_id\",\n    color_by_label: bool = None,  # deprecated\n    draw_labels: bool = True,\n    text_size: Optional[int] = None,\n    draw_ids: bool = True,\n    draw_points: bool = True,  # pylint: disable=redefined-outer-name\n    text_thickness: Optional[int] = None,\n    text_color: Optional[ColorLike] = None,\n    hide_dead_points: bool = True,\n    detections: Sequence[\"Detection\"] = None,  # deprecated\n    label_size: Optional[int] = None,  # deprecated\n    draw_scores: bool = False,\n) -> np.ndarray:\n    \"\"\"\n    Draw the points included in a list of Detections or TrackedObjects.\n\n    Parameters\n    ----------\n    frame : np.ndarray\n        The OpenCV frame to draw on. Modified in place.\n    drawables : Union[Sequence[Detection], Sequence[TrackedObject]], optional\n        List of objects to draw, Detections and TrackedObjects are accepted.\n    radius : Optional[int], optional\n        Radius of the circles representing each point.\n        By default a sensible value is picked considering the frame size.\n    thickness : Optional[int], optional\n        Thickness or width of the line.\n    color : ColorLike, optional\n        This parameter can take:\n\n        1. A color as a tuple of ints describing the BGR `(0, 0, 255)`\n        2. A 6-digit hex string `\"#FF0000\"`\n        3. One of the defined color names `\"red\"`\n        4. A string defining the strategy to choose colors from the Palette:\n\n            1. based on the id of the objects `\"by_id\"`\n            2. based on the label of the objects `\"by_label\"`\n            3. random choice `\"random\"`\n\n        If using `by_id` or `by_label` strategy but your objects don't\n        have that field defined (Detections never have ids) the\n        selected color will be the same for all objects (Palette's default Color).\n    color_by_label : bool, optional\n        **Deprecated**. set `color=\"by_label\"`.\n    draw_labels : bool, optional\n        If set to True, the label is added to a title that is drawn on top of the box.\n        If an object doesn't have a label this parameter is ignored.\n    draw_scores : bool, optional\n        If set to True, the score is added to a title that is drawn on top of the box.\n        If an object doesn't have a label this parameter is ignored.\n    text_size : Optional[int], optional\n        Size of the title, the value is used as a multiplier of the base size of the font.\n        By default the size is scaled automatically based on the frame size.\n    draw_ids : bool, optional\n        If set to True, the id is added to a title that is drawn on top of the box.\n        If an object doesn't have an id this parameter is ignored.\n    draw_points : bool, optional\n        Set to False to hide the points and just draw the text.\n    text_thickness : Optional[int], optional\n        Thickness of the font. By default it's scaled with the `text_size`.\n    text_color : Optional[ColorLike], optional\n        Color of the text. By default the same color as the box is used.\n    hide_dead_points : bool, optional\n        Set this param to False to always draw all points, even the ones considered \"dead\".\n        A point is \"dead\" when the corresponding value of `TrackedObject.live_points`\n        is set to False. 
If all objects are dead the object is not drawn.\n        All points of a detection are considered to be alive.\n    detections : Sequence[Detection], optional\n        **Deprecated**. use drawables.\n    label_size : Optional[int], optional\n        **Deprecated**. text_size.\n\n    Returns\n    -------\n    np.ndarray\n        The resulting frame.\n    \"\"\"\n    #\n    # handle deprecated parameters\n    #\n    if color_by_label is not None:\n        warn_once(\n            'Parameter \"color_by_label\" on function draw_points is deprecated, set `color=\"by_label\"` instead'\n        )\n        color = \"by_label\"\n    if detections is not None:\n        warn_once(\n            \"Parameter 'detections' on function draw_points is deprecated, use 'drawables' instead\"\n        )\n        drawables = detections\n    if label_size is not None:\n        warn_once(\n            \"Parameter 'label_size' on function draw_points is deprecated, use 'text_size' instead\"\n        )\n        text_size = label_size\n    # end\n\n    if drawables is None:\n        return\n\n    if text_color is not None:\n        text_color = parse_color(text_color)\n\n    if color is None:\n        color = \"by_id\"\n    if thickness is None:\n        thickness = -1\n    if radius is None:\n        radius = int(round(max(max(frame.shape) * 0.002, 1)))\n\n    for o in drawables:\n        if not isinstance(o, Drawable):\n            d = Drawable(o)\n        else:\n            d = o\n\n        if hide_dead_points and not d.live_points.any():\n            continue\n\n        if color == \"by_id\":\n            obj_color = Palette.choose_color(d.id)\n        elif color == \"by_label\":\n            obj_color = Palette.choose_color(d.label)\n        elif color == \"random\":\n            obj_color = Palette.choose_color(np.random.rand())\n        else:\n            obj_color = parse_color(color)\n\n        if text_color is None:\n            obj_text_color = obj_color\n        else:\n            obj_text_color = text_color\n\n        if draw_points:\n            for point, live in zip(d.points, d.live_points):\n                if live or not hide_dead_points:\n                    Drawer.circle(\n                        frame,\n                        tuple(point.astype(int)),\n                        radius=radius,\n                        color=obj_color,\n                        thickness=thickness,\n                    )\n\n        if draw_labels or draw_ids or draw_scores:\n            position = d.points[d.live_points].mean(axis=0)\n            position -= radius\n            text = _build_text(\n                d, draw_labels=draw_labels, draw_ids=draw_ids, draw_scores=draw_scores\n            )\n\n            Drawer.text(\n                frame,\n                text,\n                tuple(position.astype(int)),\n                size=text_size,\n                color=obj_text_color,\n                thickness=text_thickness,\n            )\n\n    return frame\n
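
    A minimal usage sketch, assuming frame comes from your video loop and tracked_objects from a previous tracker.update call:

    >>> from norfair import draw_points
    >>> frame = draw_points(
    ...     frame,
    ...     drawables=tracked_objects,
    ...     color='by_id',
    ...     draw_labels=True,
    ...     draw_ids=True,
    ... )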
    "},{"location":"reference/drawing/#norfair.drawing.draw_points.draw_tracked_objects","title":"draw_tracked_objects(frame, objects, radius=None, color=None, id_size=None, id_thickness=None, draw_points=True, color_by_label=False, draw_labels=False, label_size=None)","text":"

    Deprecated. Use draw_points.

    Source code in norfair/drawing/draw_points.py
    def draw_tracked_objects(\n    frame: np.ndarray,\n    objects: Sequence[\"TrackedObject\"],\n    radius: Optional[int] = None,\n    color: Optional[ColorLike] = None,\n    id_size: Optional[float] = None,\n    id_thickness: Optional[int] = None,\n    draw_points: bool = True,  # pylint: disable=redefined-outer-name\n    color_by_label: bool = False,\n    draw_labels: bool = False,\n    label_size: Optional[int] = None,\n):\n    \"\"\"\n    **Deprecated** use [`draw_points`][norfair.drawing.draw_points.draw_points]\n    \"\"\"\n    warn_once(\"draw_tracked_objects is deprecated, use draw_points instead\")\n\n    frame_scale = frame.shape[0] / 100\n    if radius is None:\n        radius = int(frame_scale * 0.5)\n    if id_size is None:\n        id_size = frame_scale / 10\n    if id_thickness is None:\n        id_thickness = int(frame_scale / 5)\n    if label_size is None:\n        label_size = int(max(frame_scale / 100, 1))\n\n    _draw_points_alias(\n        frame=frame,\n        drawables=objects,\n        color=\"by_label\" if color_by_label else color,\n        radius=radius,\n        thickness=None,\n        draw_labels=draw_labels,\n        draw_ids=id_size is not None and id_size > 0,\n        draw_points=draw_points,\n        text_size=label_size or id_size,\n        text_thickness=id_thickness,\n        text_color=None,\n        hide_dead_points=True,\n    )\n
    "},{"location":"reference/drawing/#norfair.drawing.draw_boxes","title":"draw_boxes","text":""},{"location":"reference/drawing/#norfair.drawing.draw_boxes.draw_boxes","title":"draw_boxes(frame, drawables=None, color='by_id', thickness=None, random_color=None, color_by_label=None, draw_labels=False, text_size=None, draw_ids=True, text_color=None, text_thickness=None, draw_box=True, detections=None, line_color=None, line_width=None, label_size=None, draw_scores=False)","text":"

    Draw bounding boxes corresponding to Detections or TrackedObjects.

    Parameters:

    Name Type Description Default frame ndarray

    The OpenCV frame to draw on. Modified in place.

    required drawables Union[Sequence[Detection], Sequence[TrackedObject]]

    List of objects to draw; Detections and TrackedObjects are accepted. These objects are assumed to contain 2 bi-dimensional points defining the bounding box as [[x0, y0], [x1, y1]].

    None color ColorLike

    This parameter can take:

    1. A color as a tuple of ints describing the BGR (0, 0, 255)
    2. A 6-digit hex string \"#FF0000\"
    3. One of the defined color names \"red\"
    4. A string defining the strategy to choose colors from the Palette:

      1. based on the id of the objects \"by_id\"
      2. based on the label of the objects \"by_label\"
      3. random choice \"random\"

    If using the by_id or by_label strategy but your objects don't have that field defined (Detections never have ids), the selected color will be the same for all objects (the Palette's default Color).

    'by_id' thickness Optional[int]

    Thickness or width of the line.

    None random_color bool

    Deprecated. Set color=\"random\".

    None color_by_label bool

    Deprecated. Set color=\"by_label\".

    None draw_labels bool

    If set to True, the label is added to a title that is drawn on top of the box. If an object doesn't have a label this parameter is ignored.

    False draw_scores bool

    If set to True, the score is added to a title that is drawn on top of the box. If an object doesn't have a label this parameter is ignored.

    False text_size Optional[float]

    Size of the title; the value is used as a multiplier of the base size of the font. By default the size is scaled automatically based on the frame size.

    None draw_ids bool

    If set to True, the id is added to a title that is drawn on top of the box. If an object doesn't have an id this parameter is ignored.

    True text_color Optional[ColorLike]

    Color of the text. By default the same color as the box is used.

    None text_thickness Optional[int]

    Thickness of the font. By default it's scaled with the text_size.

    None draw_box bool

    Set to False to hide the box and just draw the text.

    True detections Sequence[Detection]

    Deprecated. Use drawables.

    None line_color Optional[ColorLike]

    Deprecated. Use color.

    None line_width Optional[int]

    Deprecated. Use thickness.

    None label_size Optional[int]

    Deprecated. Use text_size.

    None

    Returns:

    Type Description ndarray

    The resulting frame.

    Source code in norfair/drawing/draw_boxes.py
    def draw_boxes(\n    frame: np.ndarray,\n    drawables: Union[Sequence[Detection], Sequence[TrackedObject]] = None,\n    color: ColorLike = \"by_id\",\n    thickness: Optional[int] = None,\n    random_color: bool = None,  # Deprecated\n    color_by_label: bool = None,  # Deprecated\n    draw_labels: bool = False,\n    text_size: Optional[float] = None,\n    draw_ids: bool = True,\n    text_color: Optional[ColorLike] = None,\n    text_thickness: Optional[int] = None,\n    draw_box: bool = True,\n    detections: Sequence[\"Detection\"] = None,  # Deprecated\n    line_color: Optional[ColorLike] = None,  # Deprecated\n    line_width: Optional[int] = None,  # Deprecated\n    label_size: Optional[int] = None,  # Deprecated\u00b4\n    draw_scores: bool = False,\n) -> np.ndarray:\n    \"\"\"\n    Draw bounding boxes corresponding to Detections or TrackedObjects.\n\n    Parameters\n    ----------\n    frame : np.ndarray\n        The OpenCV frame to draw on. Modified in place.\n    drawables : Union[Sequence[Detection], Sequence[TrackedObject]], optional\n        List of objects to draw, Detections and TrackedObjects are accepted.\n        This objects are assumed to contain 2 bi-dimensional points defining\n        the bounding box as `[[x0, y0], [x1, y1]]`.\n    color : ColorLike, optional\n        This parameter can take:\n\n        1. A color as a tuple of ints describing the BGR `(0, 0, 255)`\n        2. A 6-digit hex string `\"#FF0000\"`\n        3. One of the defined color names `\"red\"`\n        4. A string defining the strategy to choose colors from the Palette:\n\n            1. based on the id of the objects `\"by_id\"`\n            2. based on the label of the objects `\"by_label\"`\n            3. random choice `\"random\"`\n\n        If using `by_id` or `by_label` strategy but your objects don't\n        have that field defined (Detections never have ids) the\n        selected color will be the same for all objects (Palette's default Color).\n    thickness : Optional[int], optional\n        Thickness or width of the line.\n    random_color : bool, optional\n        **Deprecated**. Set color=\"random\".\n    color_by_label : bool, optional\n        **Deprecated**. Set color=\"by_label\".\n    draw_labels : bool, optional\n        If set to True, the label is added to a title that is drawn on top of the box.\n        If an object doesn't have a label this parameter is ignored.\n    draw_scores : bool, optional\n        If set to True, the score is added to a title that is drawn on top of the box.\n        If an object doesn't have a label this parameter is ignored.\n    text_size : Optional[float], optional\n        Size of the title, the value is used as a multiplier of the base size of the font.\n        By default the size is scaled automatically based on the frame size.\n    draw_ids : bool, optional\n        If set to True, the id is added to a title that is drawn on top of the box.\n        If an object doesn't have an id this parameter is ignored.\n    text_color : Optional[ColorLike], optional\n        Color of the text. By default the same color as the box is used.\n    text_thickness : Optional[int], optional\n        Thickness of the font. By default it's scaled with the `text_size`.\n    draw_box : bool, optional\n        Set to False to hide the box and just draw the text.\n    detections : Sequence[Detection], optional\n        **Deprecated**. Use drawables.\n    line_color: Optional[ColorLike], optional\n        **Deprecated**. 
Use color.\n    line_width: Optional[int], optional\n        **Deprecated**. Use thickness.\n    label_size: Optional[int], optional\n        **Deprecated**. Use text_size.\n\n    Returns\n    -------\n    np.ndarray\n        The resulting frame.\n    \"\"\"\n    #\n    # handle deprecated parameters\n    #\n    if random_color is not None:\n        warn_once(\n            'Parameter \"random_color\" is deprecated, set `color=\"random\"` instead'\n        )\n        color = \"random\"\n    if color_by_label is not None:\n        warn_once(\n            'Parameter \"color_by_label\" is deprecated, set `color=\"by_label\"` instead'\n        )\n        color = \"by_label\"\n    if detections is not None:\n        warn_once('Parameter \"detections\" is deprecated, use \"drawables\" instead')\n        drawables = detections\n    if line_color is not None:\n        warn_once('Parameter \"line_color\" is deprecated, use \"color\" instead')\n        color = line_color\n    if line_width is not None:\n        warn_once('Parameter \"line_width\" is deprecated, use \"thickness\" instead')\n        thickness = line_width\n    if label_size is not None:\n        warn_once('Parameter \"label_size\" is deprecated, use \"text_size\" instead')\n        text_size = label_size\n    # end\n\n    if color is None:\n        color = \"by_id\"\n    if thickness is None:\n        thickness = int(max(frame.shape) / 500)\n\n    if drawables is None:\n        return frame\n\n    if text_color is not None:\n        text_color = parse_color(text_color)\n\n    for obj in drawables:\n        if not isinstance(obj, Drawable):\n            d = Drawable(obj)\n        else:\n            d = obj\n\n        if color == \"by_id\":\n            obj_color = Palette.choose_color(d.id)\n        elif color == \"by_label\":\n            obj_color = Palette.choose_color(d.label)\n        elif color == \"random\":\n            obj_color = Palette.choose_color(np.random.rand())\n        else:\n            obj_color = parse_color(color)\n\n        points = d.points.astype(int)\n        if draw_box:\n            Drawer.rectangle(\n                frame,\n                tuple(points),\n                color=obj_color,\n                thickness=thickness,\n            )\n\n        text = _build_text(\n            d, draw_labels=draw_labels, draw_ids=draw_ids, draw_scores=draw_scores\n        )\n        if text:\n            if text_color is None:\n                obj_text_color = obj_color\n            else:\n                obj_text_color = text_color\n            # the anchor will become the bottom-left of the text,\n            # we select-top left of the bbox compensating for the thickness of the box\n            text_anchor = (\n                points[0, 0] - thickness // 2,\n                points[0, 1] - thickness // 2 - 1,\n            )\n            frame = Drawer.text(\n                frame,\n                text,\n                position=text_anchor,\n                size=text_size,\n                color=obj_text_color,\n                thickness=text_thickness,\n            )\n\n    return frame\n
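
    A minimal usage sketch, assuming frame, detections and tracked_objects come from your own pipeline:

    >>> from norfair import draw_boxes
    >>> frame = draw_boxes(frame, drawables=detections, color='green', draw_scores=True)
    >>> frame = draw_boxes(frame, drawables=tracked_objects, color='by_id', draw_ids=True)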
    "},{"location":"reference/drawing/#norfair.drawing.draw_boxes.draw_tracked_boxes","title":"draw_tracked_boxes(frame, objects, border_colors=None, border_width=None, id_size=None, id_thickness=None, draw_box=True, color_by_label=False, draw_labels=False, label_size=None, label_width=None)","text":"

    Deprecated. Use draw_boxes.

    Source code in norfair/drawing/draw_boxes.py
    def draw_tracked_boxes(\n    frame: np.ndarray,\n    objects: Sequence[\"TrackedObject\"],\n    border_colors: Optional[Tuple[int, int, int]] = None,\n    border_width: Optional[int] = None,\n    id_size: Optional[int] = None,\n    id_thickness: Optional[int] = None,\n    draw_box: bool = True,\n    color_by_label: bool = False,\n    draw_labels: bool = False,\n    label_size: Optional[int] = None,\n    label_width: Optional[int] = None,\n) -> np.array:\n    \"**Deprecated**. Use [`draw_box`][norfair.drawing.draw_boxes.draw_boxes]\"\n    warn_once(\"draw_tracked_boxes is deprecated, use draw_box instead\")\n    return draw_boxes(\n        frame=frame,\n        drawables=objects,\n        color=\"by_label\" if color_by_label else border_colors,\n        thickness=border_width,\n        text_size=label_size or id_size,\n        text_thickness=id_thickness or label_width,\n        draw_labels=draw_labels,\n        draw_ids=id_size is not None and id_size > 0,\n        draw_box=draw_box,\n    )\n
    "},{"location":"reference/drawing/#norfair.drawing.color","title":"color","text":""},{"location":"reference/drawing/#norfair.drawing.color.Color","title":"Color","text":"

    Contains predefined colors.

    Colors are defined as a Tuple of integers between 0 and 255 expressing the values in BGR; this is the format OpenCV uses.

    Source code in norfair/drawing/color.py
    class Color:\n    \"\"\"\n    Contains predefined colors.\n\n    Colors are defined as a Tuple of integers between 0 and 255 expressing the values in BGR\n    This is the format opencv uses.\n    \"\"\"\n\n    # from PIL.ImageColors.colormap\n    aliceblue = hex_to_bgr(\"#f0f8ff\")\n    antiquewhite = hex_to_bgr(\"#faebd7\")\n    aqua = hex_to_bgr(\"#00ffff\")\n    aquamarine = hex_to_bgr(\"#7fffd4\")\n    azure = hex_to_bgr(\"#f0ffff\")\n    beige = hex_to_bgr(\"#f5f5dc\")\n    bisque = hex_to_bgr(\"#ffe4c4\")\n    black = hex_to_bgr(\"#000000\")\n    blanchedalmond = hex_to_bgr(\"#ffebcd\")\n    blue = hex_to_bgr(\"#0000ff\")\n    blueviolet = hex_to_bgr(\"#8a2be2\")\n    brown = hex_to_bgr(\"#a52a2a\")\n    burlywood = hex_to_bgr(\"#deb887\")\n    cadetblue = hex_to_bgr(\"#5f9ea0\")\n    chartreuse = hex_to_bgr(\"#7fff00\")\n    chocolate = hex_to_bgr(\"#d2691e\")\n    coral = hex_to_bgr(\"#ff7f50\")\n    cornflowerblue = hex_to_bgr(\"#6495ed\")\n    cornsilk = hex_to_bgr(\"#fff8dc\")\n    crimson = hex_to_bgr(\"#dc143c\")\n    cyan = hex_to_bgr(\"#00ffff\")\n    darkblue = hex_to_bgr(\"#00008b\")\n    darkcyan = hex_to_bgr(\"#008b8b\")\n    darkgoldenrod = hex_to_bgr(\"#b8860b\")\n    darkgray = hex_to_bgr(\"#a9a9a9\")\n    darkgrey = hex_to_bgr(\"#a9a9a9\")\n    darkgreen = hex_to_bgr(\"#006400\")\n    darkkhaki = hex_to_bgr(\"#bdb76b\")\n    darkmagenta = hex_to_bgr(\"#8b008b\")\n    darkolivegreen = hex_to_bgr(\"#556b2f\")\n    darkorange = hex_to_bgr(\"#ff8c00\")\n    darkorchid = hex_to_bgr(\"#9932cc\")\n    darkred = hex_to_bgr(\"#8b0000\")\n    darksalmon = hex_to_bgr(\"#e9967a\")\n    darkseagreen = hex_to_bgr(\"#8fbc8f\")\n    darkslateblue = hex_to_bgr(\"#483d8b\")\n    darkslategray = hex_to_bgr(\"#2f4f4f\")\n    darkslategrey = hex_to_bgr(\"#2f4f4f\")\n    darkturquoise = hex_to_bgr(\"#00ced1\")\n    darkviolet = hex_to_bgr(\"#9400d3\")\n    deeppink = hex_to_bgr(\"#ff1493\")\n    deepskyblue = hex_to_bgr(\"#00bfff\")\n    dimgray = hex_to_bgr(\"#696969\")\n    dimgrey = hex_to_bgr(\"#696969\")\n    dodgerblue = hex_to_bgr(\"#1e90ff\")\n    firebrick = hex_to_bgr(\"#b22222\")\n    floralwhite = hex_to_bgr(\"#fffaf0\")\n    forestgreen = hex_to_bgr(\"#228b22\")\n    fuchsia = hex_to_bgr(\"#ff00ff\")\n    gainsboro = hex_to_bgr(\"#dcdcdc\")\n    ghostwhite = hex_to_bgr(\"#f8f8ff\")\n    gold = hex_to_bgr(\"#ffd700\")\n    goldenrod = hex_to_bgr(\"#daa520\")\n    gray = hex_to_bgr(\"#808080\")\n    grey = hex_to_bgr(\"#808080\")\n    green = (0, 128, 0)\n    greenyellow = hex_to_bgr(\"#adff2f\")\n    honeydew = hex_to_bgr(\"#f0fff0\")\n    hotpink = hex_to_bgr(\"#ff69b4\")\n    indianred = hex_to_bgr(\"#cd5c5c\")\n    indigo = hex_to_bgr(\"#4b0082\")\n    ivory = hex_to_bgr(\"#fffff0\")\n    khaki = hex_to_bgr(\"#f0e68c\")\n    lavender = hex_to_bgr(\"#e6e6fa\")\n    lavenderblush = hex_to_bgr(\"#fff0f5\")\n    lawngreen = hex_to_bgr(\"#7cfc00\")\n    lemonchiffon = hex_to_bgr(\"#fffacd\")\n    lightblue = hex_to_bgr(\"#add8e6\")\n    lightcoral = hex_to_bgr(\"#f08080\")\n    lightcyan = hex_to_bgr(\"#e0ffff\")\n    lightgoldenrodyellow = hex_to_bgr(\"#fafad2\")\n    lightgreen = hex_to_bgr(\"#90ee90\")\n    lightgray = hex_to_bgr(\"#d3d3d3\")\n    lightgrey = hex_to_bgr(\"#d3d3d3\")\n    lightpink = hex_to_bgr(\"#ffb6c1\")\n    lightsalmon = hex_to_bgr(\"#ffa07a\")\n    lightseagreen = hex_to_bgr(\"#20b2aa\")\n    lightskyblue = hex_to_bgr(\"#87cefa\")\n    lightslategray = hex_to_bgr(\"#778899\")\n    lightslategrey = hex_to_bgr(\"#778899\")\n    lightsteelblue = 
hex_to_bgr(\"#b0c4de\")\n    lightyellow = hex_to_bgr(\"#ffffe0\")\n    lime = hex_to_bgr(\"#00ff00\")\n    limegreen = hex_to_bgr(\"#32cd32\")\n    linen = hex_to_bgr(\"#faf0e6\")\n    magenta = hex_to_bgr(\"#ff00ff\")\n    maroon = hex_to_bgr(\"#800000\")\n    mediumaquamarine = hex_to_bgr(\"#66cdaa\")\n    mediumblue = hex_to_bgr(\"#0000cd\")\n    mediumorchid = hex_to_bgr(\"#ba55d3\")\n    mediumpurple = hex_to_bgr(\"#9370db\")\n    mediumseagreen = hex_to_bgr(\"#3cb371\")\n    mediumslateblue = hex_to_bgr(\"#7b68ee\")\n    mediumspringgreen = hex_to_bgr(\"#00fa9a\")\n    mediumturquoise = hex_to_bgr(\"#48d1cc\")\n    mediumvioletred = hex_to_bgr(\"#c71585\")\n    midnightblue = hex_to_bgr(\"#191970\")\n    mintcream = hex_to_bgr(\"#f5fffa\")\n    mistyrose = hex_to_bgr(\"#ffe4e1\")\n    moccasin = hex_to_bgr(\"#ffe4b5\")\n    navajowhite = hex_to_bgr(\"#ffdead\")\n    navy = hex_to_bgr(\"#000080\")\n    oldlace = hex_to_bgr(\"#fdf5e6\")\n    olive = hex_to_bgr(\"#808000\")\n    olivedrab = hex_to_bgr(\"#6b8e23\")\n    orange = hex_to_bgr(\"#ffa500\")\n    orangered = hex_to_bgr(\"#ff4500\")\n    orchid = hex_to_bgr(\"#da70d6\")\n    palegoldenrod = hex_to_bgr(\"#eee8aa\")\n    palegreen = hex_to_bgr(\"#98fb98\")\n    paleturquoise = hex_to_bgr(\"#afeeee\")\n    palevioletred = hex_to_bgr(\"#db7093\")\n    papayawhip = hex_to_bgr(\"#ffefd5\")\n    peachpuff = hex_to_bgr(\"#ffdab9\")\n    peru = hex_to_bgr(\"#cd853f\")\n    pink = hex_to_bgr(\"#ffc0cb\")\n    plum = hex_to_bgr(\"#dda0dd\")\n    powderblue = hex_to_bgr(\"#b0e0e6\")\n    purple = hex_to_bgr(\"#800080\")\n    rebeccapurple = hex_to_bgr(\"#663399\")\n    red = hex_to_bgr(\"#ff0000\")\n    rosybrown = hex_to_bgr(\"#bc8f8f\")\n    royalblue = hex_to_bgr(\"#4169e1\")\n    saddlebrown = hex_to_bgr(\"#8b4513\")\n    salmon = hex_to_bgr(\"#fa8072\")\n    sandybrown = hex_to_bgr(\"#f4a460\")\n    seagreen = hex_to_bgr(\"#2e8b57\")\n    seashell = hex_to_bgr(\"#fff5ee\")\n    sienna = hex_to_bgr(\"#a0522d\")\n    silver = hex_to_bgr(\"#c0c0c0\")\n    skyblue = hex_to_bgr(\"#87ceeb\")\n    slateblue = hex_to_bgr(\"#6a5acd\")\n    slategray = hex_to_bgr(\"#708090\")\n    slategrey = hex_to_bgr(\"#708090\")\n    snow = hex_to_bgr(\"#fffafa\")\n    springgreen = hex_to_bgr(\"#00ff7f\")\n    steelblue = hex_to_bgr(\"#4682b4\")\n    tan = hex_to_bgr(\"#d2b48c\")\n    teal = hex_to_bgr(\"#008080\")\n    thistle = hex_to_bgr(\"#d8bfd8\")\n    tomato = hex_to_bgr(\"#ff6347\")\n    turquoise = hex_to_bgr(\"#40e0d0\")\n    violet = hex_to_bgr(\"#ee82ee\")\n    wheat = hex_to_bgr(\"#f5deb3\")\n    white = hex_to_bgr(\"#ffffff\")\n    whitesmoke = hex_to_bgr(\"#f5f5f5\")\n    yellow = hex_to_bgr(\"#ffff00\")\n    yellowgreen = hex_to_bgr(\"#9acd32\")\n\n    # seaborn tab20 colors\n    tab1 = hex_to_bgr(\"#1f77b4\")\n    tab2 = hex_to_bgr(\"#aec7e8\")\n    tab3 = hex_to_bgr(\"#ff7f0e\")\n    tab4 = hex_to_bgr(\"#ffbb78\")\n    tab5 = hex_to_bgr(\"#2ca02c\")\n    tab6 = hex_to_bgr(\"#98df8a\")\n    tab7 = hex_to_bgr(\"#d62728\")\n    tab8 = hex_to_bgr(\"#ff9896\")\n    tab9 = hex_to_bgr(\"#9467bd\")\n    tab10 = hex_to_bgr(\"#c5b0d5\")\n    tab11 = hex_to_bgr(\"#8c564b\")\n    tab12 = hex_to_bgr(\"#c49c94\")\n    tab13 = hex_to_bgr(\"#e377c2\")\n    tab14 = hex_to_bgr(\"#f7b6d2\")\n    tab15 = hex_to_bgr(\"#7f7f7f\")\n    tab16 = hex_to_bgr(\"#c7c7c7\")\n    tab17 = hex_to_bgr(\"#bcbd22\")\n    tab18 = hex_to_bgr(\"#dbdb8d\")\n    tab19 = hex_to_bgr(\"#17becf\")\n    tab20 = hex_to_bgr(\"#9edae5\")\n    # seaborn colorblind\n    cb1 = 
hex_to_bgr(\"#0173b2\")\n    cb2 = hex_to_bgr(\"#de8f05\")\n    cb3 = hex_to_bgr(\"#029e73\")\n    cb4 = hex_to_bgr(\"#d55e00\")\n    cb5 = hex_to_bgr(\"#cc78bc\")\n    cb6 = hex_to_bgr(\"#ca9161\")\n    cb7 = hex_to_bgr(\"#fbafe4\")\n    cb8 = hex_to_bgr(\"#949494\")\n    cb9 = hex_to_bgr(\"#ece133\")\n    cb10 = hex_to_bgr(\"#56b4e9\")\n
    "},{"location":"reference/drawing/#norfair.drawing.color.Palette","title":"Palette","text":"

    Class to control the color palette for drawing.

    Examples:

    Change palette:

    >>> from norfair import Palette\n>>> Palette.set(\"colorblind\")\n>>> # or a custom palette\n>>> from norfair import Color\n>>> Palette.set([Color.red, Color.blue, \"#ffeeff\"])\n
    Source code in norfair/drawing/color.py
    class Palette:\n    \"\"\"\n    Class to control the color pallete for drawing.\n\n    Examples\n    --------\n    Change palette:\n    >>> from norfair import Palette\n    >>> Palette.set(\"colorblind\")\n    >>> # or a custom palette\n    >>> from norfair import Color\n    >>> Palette.set([Color.red, Color.blue, \"#ffeeff\"])\n    \"\"\"\n\n    _colors = PALETTES[\"tab10\"]\n    _default_color = Color.black\n\n    @classmethod\n    def set(cls, palette: Union[str, Iterable[ColorLike]]):\n        \"\"\"\n        Selects a color palette.\n\n        Parameters\n        ----------\n        palette : Union[str, Iterable[ColorLike]]\n            can be either\n            - the name of one of the predefined palettes `tab10`, `tab20`, or `colorblind`\n            - a list of ColorLike objects that can be parsed by [`parse_color`][norfair.drawing.color.parse_color]\n        \"\"\"\n        if isinstance(palette, str):\n            try:\n                cls._colors = PALETTES[palette]\n            except KeyError as e:\n                raise ValueError(\n                    f\"Invalid palette name '{palette}', valid values are {PALETTES.keys()}\"\n                ) from e\n        else:\n            colors = []\n            for c in palette:\n                colors.append(parse_color(c))\n\n            cls._colors = colors\n\n    @classmethod\n    def set_default_color(cls, color: ColorLike):\n        \"\"\"\n        Selects the default color of `choose_color` when hashable is None.\n\n        Parameters\n        ----------\n        color : ColorLike\n            The new default color.\n        \"\"\"\n        cls._default_color = parse_color(color)\n\n    @classmethod\n    def choose_color(cls, hashable: Hashable) -> ColorType:\n        if hashable is None:\n            return cls._default_color\n        return cls._colors[abs(hash(hashable)) % len(cls._colors)]\n
    "},{"location":"reference/drawing/#norfair.drawing.color.Palette.set","title":"set(palette) classmethod","text":"

    Selects a color palette.

    Parameters:

    Name Type Description Default palette Union[str, Iterable[ColorLike]]

    Can be either:

    - the name of one of the predefined palettes tab10, tab20, or colorblind
    - a list of ColorLike objects that can be parsed by parse_color

    required Source code in norfair/drawing/color.py
    @classmethod\ndef set(cls, palette: Union[str, Iterable[ColorLike]]):\n    \"\"\"\n    Selects a color palette.\n\n    Parameters\n    ----------\n    palette : Union[str, Iterable[ColorLike]]\n        can be either\n        - the name of one of the predefined palettes `tab10`, `tab20`, or `colorblind`\n        - a list of ColorLike objects that can be parsed by [`parse_color`][norfair.drawing.color.parse_color]\n    \"\"\"\n    if isinstance(palette, str):\n        try:\n            cls._colors = PALETTES[palette]\n        except KeyError as e:\n            raise ValueError(\n                f\"Invalid palette name '{palette}', valid values are {PALETTES.keys()}\"\n            ) from e\n    else:\n        colors = []\n        for c in palette:\n            colors.append(parse_color(c))\n\n        cls._colors = colors\n
    "},{"location":"reference/drawing/#norfair.drawing.color.Palette.set_default_color","title":"set_default_color(color) classmethod","text":"

    Selects the default color of choose_color when hashable is None.

    Parameters:

    Name Type Description Default color ColorLike

    The new default color.

    required Source code in norfair/drawing/color.py
    @classmethod\ndef set_default_color(cls, color: ColorLike):\n    \"\"\"\n    Selects the default color of `choose_color` when hashable is None.\n\n    Parameters\n    ----------\n    color : ColorLike\n        The new default color.\n    \"\"\"\n    cls._default_color = parse_color(color)\n
    "},{"location":"reference/drawing/#norfair.drawing.color.hex_to_bgr","title":"hex_to_bgr(hex_value)","text":"

    Converts conventional 6-digit hex colors to BGR tuples.

    Parameters:

    Name Type Description Default hex_value str

    Hex value with a leading #, for instance \"#ff0000\".

    required

    Returns:

    Type Description Tuple[int, int, int]

    BGR values

    Raises:

    Type Description ValueError

    if the string is invalid

    Source code in norfair/drawing/color.py
    def hex_to_bgr(hex_value: str) -> ColorType:\n    \"\"\"Converts conventional 6 digits hex colors to BGR tuples\n\n    Parameters\n    ----------\n    hex_value : str\n        hex value with leading `#` for instance `\"#ff0000\"`\n\n    Returns\n    -------\n    Tuple[int, int, int]\n        BGR values\n\n    Raises\n    ------\n    ValueError\n        if the string is invalid\n    \"\"\"\n    if re.match(\"#[a-f0-9]{6}$\", hex_value):\n        return (\n            int(hex_value[5:7], 16),\n            int(hex_value[3:5], 16),\n            int(hex_value[1:3], 16),\n        )\n\n    if re.match(\"#[a-f0-9]{3}$\", hex_value):\n        return (\n            int(hex_value[3] * 2, 16),\n            int(hex_value[2] * 2, 16),\n            int(hex_value[1] * 2, 16),\n        )\n    raise ValueError(f\"'{hex_value}' is not a valid color\")\n
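
    A couple of illustrative conversions (note the BGR ordering of the result):

    >>> from norfair.drawing.color import hex_to_bgr
    >>> hex_to_bgr('#ff0000')  # red
    (0, 0, 255)
    >>> hex_to_bgr('#0f0')     # 3-digit shorthand is also accepted
    (0, 255, 0)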
    "},{"location":"reference/drawing/#norfair.drawing.color.parse_color","title":"parse_color(color_like)","text":"

    Makes a best effort to parse the given value into a Color.

    Parameters:

    Name Type Description Default color_like ColorLike

    Can be one of:

    1. a string with a 6-digit hex value (\"#ff0000\")
    2. a string with one of the names defined in Colors (\"red\")
    3. a BGR tuple ((0, 0, 255))
    required

    Returns:

    Type Description Color

    The BGR tuple.

    Source code in norfair/drawing/color.py
    def parse_color(color_like: ColorLike) -> ColorType:\n    \"\"\"Makes best effort to parse the given value to a Color\n\n    Parameters\n    ----------\n    color_like : ColorLike\n        Can be one of:\n\n        1. a string with the 6 digits hex value (`\"#ff0000\"`)\n        2. a string with one of the names defined in Colors (`\"red\"`)\n        3. a BGR tuple (`(0, 0, 255)`)\n\n    Returns\n    -------\n    Color\n        The BGR tuple.\n    \"\"\"\n    if isinstance(color_like, str):\n        if color_like.startswith(\"#\"):\n            return hex_to_bgr(color_like)\n        else:\n            return getattr(Color, color_like)\n    # TODO: validate?\n    return tuple([int(v) for v in color_like])\n
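
    A few illustrative calls covering the three accepted forms:

    >>> from norfair.drawing.color import parse_color
    >>> parse_color('#ff0000')    # hex string
    (0, 0, 255)
    >>> parse_color('red')        # name defined in Color
    (0, 0, 255)
    >>> parse_color((255, 0, 0))  # already a BGR tuple, cast to ints
    (255, 0, 0)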
    "},{"location":"reference/drawing/#norfair.drawing.path","title":"path","text":""},{"location":"reference/drawing/#norfair.drawing.path.Paths","title":"Paths","text":"

    Class that draws the paths taken by a set of points of interest defined from the coordinates of each tracker estimation.

    Parameters:

    Name Type Description Default get_points_to_draw Optional[Callable[[array], array]]

    Function that takes a list of points (the .estimate attribute of a TrackedObject) and returns a list of points for which we want to draw their paths.

    By default it is the mean point of all the points in the tracker.

    None thickness Optional[int]

    Thickness of the circles representing the paths of interest.

    None color Optional[Tuple[int, int, int]]

    Color of the circles representing the paths of interest.

    None radius Optional[int]

    Radius of the circles representing the paths of interest.

    None attenuation float

    A float in [0, 1] that dictates the speed at which the path is erased. If it is 0, the path is never erased.

    0.01

    Examples:

    >>> from norfair import Tracker, Video, Path\n>>> video = Video(\"video.mp4\")\n>>> tracker = Tracker(...)\n>>> path_drawer = Path()\n>>> for frame in video:\n>>>    detections = get_detections(frame)  # runs detector and returns Detections\n>>>    tracked_objects = tracker.update(detections)\n>>>    frame = path_drawer.draw(frame, tracked_objects)\n>>>    video.write(frame)\n
    Source code in norfair/drawing/path.py
    class Paths:\n    \"\"\"\n    Class that draws the paths taken by a set of points of interest defined from the coordinates of each tracker estimation.\n\n    Parameters\n    ----------\n    get_points_to_draw : Optional[Callable[[np.array], np.array]], optional\n        Function that takes a list of points (the `.estimate` attribute of a [`TrackedObject`][norfair.tracker.TrackedObject])\n        and returns a list of points for which we want to draw their paths.\n\n        By default it is the mean point of all the points in the tracker.\n    thickness : Optional[int], optional\n        Thickness of the circles representing the paths of interest.\n    color : Optional[Tuple[int, int, int]], optional\n        [Color][norfair.drawing.Color] of the circles representing the paths of interest.\n    radius : Optional[int], optional\n        Radius of the circles representing the paths of interest.\n    attenuation : float, optional\n        A float number in [0, 1] that dictates the speed at which the path is erased.\n        if it is `0` then the path is never erased.\n\n    Examples\n    --------\n    >>> from norfair import Tracker, Video, Path\n    >>> video = Video(\"video.mp4\")\n    >>> tracker = Tracker(...)\n    >>> path_drawer = Path()\n    >>> for frame in video:\n    >>>    detections = get_detections(frame)  # runs detector and returns Detections\n    >>>    tracked_objects = tracker.update(detections)\n    >>>    frame = path_drawer.draw(frame, tracked_objects)\n    >>>    video.write(frame)\n    \"\"\"\n\n    def __init__(\n        self,\n        get_points_to_draw: Optional[Callable[[np.array], np.array]] = None,\n        thickness: Optional[int] = None,\n        color: Optional[Tuple[int, int, int]] = None,\n        radius: Optional[int] = None,\n        attenuation: float = 0.01,\n    ):\n        if get_points_to_draw is None:\n\n            def get_points_to_draw(points):\n                return [np.mean(np.array(points), axis=0)]\n\n        self.get_points_to_draw = get_points_to_draw\n\n        self.radius = radius\n        self.thickness = thickness\n        self.color = color\n        self.mask = None\n        self.attenuation_factor = 1 - attenuation\n\n    def draw(\n        self, frame: np.ndarray, tracked_objects: Sequence[TrackedObject]\n    ) -> np.array:\n        \"\"\"\n        Draw the paths of the points interest on a frame.\n\n        !!! 
warning\n            This method does **not** draw frames in place as other drawers do, the resulting frame is returned.\n\n        Parameters\n        ----------\n        frame : np.ndarray\n            The OpenCV frame to draw on.\n        tracked_objects : Sequence[TrackedObject]\n            List of [`TrackedObject`][norfair.tracker.TrackedObject] to get the points of interest in order to update the paths.\n\n        Returns\n        -------\n        np.array\n            The resulting frame.\n        \"\"\"\n        if self.mask is None:\n            frame_scale = frame.shape[0] / 100\n\n            if self.radius is None:\n                self.radius = int(max(frame_scale * 0.7, 1))\n            if self.thickness is None:\n                self.thickness = int(max(frame_scale / 7, 1))\n\n            self.mask = np.zeros(frame.shape, np.uint8)\n\n        self.mask = (self.mask * self.attenuation_factor).astype(\"uint8\")\n\n        for obj in tracked_objects:\n            if obj.abs_to_rel is not None:\n                warn_once(\n                    \"It seems that your using the Path drawer together with MotionEstimator. This is not fully supported and the results will not be what's expected\"\n                )\n\n            if self.color is None:\n                color = Palette.choose_color(obj.id)\n            else:\n                color = self.color\n\n            points_to_draw = self.get_points_to_draw(obj.estimate)\n\n            for point in points_to_draw:\n                self.mask = Drawer.circle(\n                    self.mask,\n                    position=tuple(point.astype(int)),\n                    radius=self.radius,\n                    color=color,\n                    thickness=self.thickness,\n                )\n\n        return Drawer.alpha_blend(self.mask, frame, alpha=1, beta=1)\n
    "},{"location":"reference/drawing/#norfair.drawing.path.Paths.draw","title":"draw(frame, tracked_objects)","text":"

    Draw the paths of the points of interest on a frame.

    Warning

    This method does not draw frames in place as other drawers do; the resulting frame is returned.

    Parameters:

    Name Type Description Default frame ndarray

    The OpenCV frame to draw on.

    required tracked_objects Sequence[TrackedObject]

    List of TrackedObject to get the points of interest in order to update the paths.

    required

    Returns:

    Type Description array

    The resulting frame.

    Source code in norfair/drawing/path.py
    def draw(\n    self, frame: np.ndarray, tracked_objects: Sequence[TrackedObject]\n) -> np.array:\n    \"\"\"\n    Draw the paths of the points interest on a frame.\n\n    !!! warning\n        This method does **not** draw frames in place as other drawers do, the resulting frame is returned.\n\n    Parameters\n    ----------\n    frame : np.ndarray\n        The OpenCV frame to draw on.\n    tracked_objects : Sequence[TrackedObject]\n        List of [`TrackedObject`][norfair.tracker.TrackedObject] to get the points of interest in order to update the paths.\n\n    Returns\n    -------\n    np.array\n        The resulting frame.\n    \"\"\"\n    if self.mask is None:\n        frame_scale = frame.shape[0] / 100\n\n        if self.radius is None:\n            self.radius = int(max(frame_scale * 0.7, 1))\n        if self.thickness is None:\n            self.thickness = int(max(frame_scale / 7, 1))\n\n        self.mask = np.zeros(frame.shape, np.uint8)\n\n    self.mask = (self.mask * self.attenuation_factor).astype(\"uint8\")\n\n    for obj in tracked_objects:\n        if obj.abs_to_rel is not None:\n            warn_once(\n                \"It seems that your using the Path drawer together with MotionEstimator. This is not fully supported and the results will not be what's expected\"\n            )\n\n        if self.color is None:\n            color = Palette.choose_color(obj.id)\n        else:\n            color = self.color\n\n        points_to_draw = self.get_points_to_draw(obj.estimate)\n\n        for point in points_to_draw:\n            self.mask = Drawer.circle(\n                self.mask,\n                position=tuple(point.astype(int)),\n                radius=self.radius,\n                color=color,\n                thickness=self.thickness,\n            )\n\n    return Drawer.alpha_blend(self.mask, frame, alpha=1, beta=1)\n
    "},{"location":"reference/drawing/#norfair.drawing.path.AbsolutePaths","title":"AbsolutePaths","text":"

    Class that draws the absolute paths taken by a set of points.

    Works just like Paths but supports camera motion.

    Warning

    This drawer is not optimized, so it can be extremely slow. Performance degrades linearly with max_history * number_of_tracked_objects.

    Parameters:

    Name Type Description Default get_points_to_draw Optional[Callable[[array], array]]

    Function that takes a list of points (the .estimate attribute of a TrackedObject) and returns a list of points for which we want to draw their paths.

    By default it is the mean point of all the points in the tracker.

    None thickness Optional[int]

    Thickness of the circles representing the paths of interest.

    None color Optional[Tuple[int, int, int]]

    Color of the circles representing the paths of interest.

    None radius Optional[int]

    Radius of the circles representing the paths of interest.

    None max_history int

    Number of past points to include in the path. High values make the drawing slower.

    20

    Examples:

    >>> from norfair import Tracker, Video, Path\n>>> video = Video(\"video.mp4\")\n>>> tracker = Tracker(...)\n>>> path_drawer = Path()\n>>> for frame in video:\n>>>    detections = get_detections(frame)  # runs detector and returns Detections\n>>>    tracked_objects = tracker.update(detections)\n>>>    frame = path_drawer.draw(frame, tracked_objects)\n>>>    video.write(frame)\n
    Source code in norfair/drawing/path.py
    class AbsolutePaths:\n    \"\"\"\n    Class that draws the absolute paths taken by a set of points.\n\n    Works just like [`Paths`][norfair.drawing.Paths] but supports camera motion.\n\n    !!! warning\n        This drawer is not optimized so it can be stremely slow. Performance degrades linearly with\n        `max_history * number_of_tracked_objects`.\n\n    Parameters\n    ----------\n    get_points_to_draw : Optional[Callable[[np.array], np.array]], optional\n        Function that takes a list of points (the `.estimate` attribute of a [`TrackedObject`][norfair.tracker.TrackedObject])\n        and returns a list of points for which we want to draw their paths.\n\n        By default it is the mean point of all the points in the tracker.\n    thickness : Optional[int], optional\n        Thickness of the circles representing the paths of interest.\n    color : Optional[Tuple[int, int, int]], optional\n        [Color][norfair.drawing.Color] of the circles representing the paths of interest.\n    radius : Optional[int], optional\n        Radius of the circles representing the paths of interest.\n    max_history : int, optional\n        Number of past points to include in the path. High values make the drawing slower\n\n    Examples\n    --------\n    >>> from norfair import Tracker, Video, Path\n    >>> video = Video(\"video.mp4\")\n    >>> tracker = Tracker(...)\n    >>> path_drawer = Path()\n    >>> for frame in video:\n    >>>    detections = get_detections(frame)  # runs detector and returns Detections\n    >>>    tracked_objects = tracker.update(detections)\n    >>>    frame = path_drawer.draw(frame, tracked_objects)\n    >>>    video.write(frame)\n    \"\"\"\n\n    def __init__(\n        self,\n        get_points_to_draw: Optional[Callable[[np.array], np.array]] = None,\n        thickness: Optional[int] = None,\n        color: Optional[Tuple[int, int, int]] = None,\n        radius: Optional[int] = None,\n        max_history=20,\n    ):\n\n        if get_points_to_draw is None:\n\n            def get_points_to_draw(points):\n                return [np.mean(np.array(points), axis=0)]\n\n        self.get_points_to_draw = get_points_to_draw\n\n        self.radius = radius\n        self.thickness = thickness\n        self.color = color\n        self.past_points = defaultdict(lambda: [])\n        self.max_history = max_history\n        self.alphas = np.linspace(0.99, 0.01, max_history)\n\n    def draw(self, frame, tracked_objects, coord_transform=None):\n        frame_scale = frame.shape[0] / 100\n\n        if self.radius is None:\n            self.radius = int(max(frame_scale * 0.7, 1))\n        if self.thickness is None:\n            self.thickness = int(max(frame_scale / 7, 1))\n        for obj in tracked_objects:\n            if not obj.live_points.any():\n                continue\n\n            if self.color is None:\n                color = Palette.choose_color(obj.id)\n            else:\n                color = self.color\n\n            points_to_draw = self.get_points_to_draw(obj.get_estimate(absolute=True))\n\n            for point in coord_transform.abs_to_rel(points_to_draw):\n                Drawer.circle(\n                    frame,\n                    position=tuple(point.astype(int)),\n                    radius=self.radius,\n                    color=color,\n                    thickness=self.thickness,\n                )\n\n            last = points_to_draw\n            for i, past_points in enumerate(self.past_points[obj.id]):\n                overlay = frame.copy()\n  
              last = coord_transform.abs_to_rel(last)\n                for j, point in enumerate(coord_transform.abs_to_rel(past_points)):\n                    Drawer.line(\n                        overlay,\n                        tuple(last[j].astype(int)),\n                        tuple(point.astype(int)),\n                        color=color,\n                        thickness=self.thickness,\n                    )\n                last = past_points\n\n                alpha = self.alphas[i]\n                frame = Drawer.alpha_blend(overlay, frame, alpha=alpha)\n            self.past_points[obj.id].insert(0, points_to_draw)\n            self.past_points[obj.id] = self.past_points[obj.id][: self.max_history]\n        return frame\n
    "},{"location":"reference/drawing/#norfair.drawing.fixed_camera","title":"fixed_camera","text":""},{"location":"reference/drawing/#norfair.drawing.fixed_camera.FixedCamera","title":"FixedCamera","text":"

    Class used to stabilize video based on the camera motion.

    Starts with a larger frame, where the original frame is drawn on top of a black background. As the camera moves, the smaller frame moves in the opposite direction, stabilizing the objects in it.

    Useful for debugging or demoing the camera motion.

    Warning

    This only works with TranslationTransformation, using HomographyTransformation will result in unexpected behaviour.

    Warning

    If using other drawers, always apply this one last. Using other drawers on the scaled up frame will not work as expected.

    Note

    Sometimes the camera moves so far from the original point that the result won't fit in the scaled-up frame. In this case, a warning will be logged and the frames will be cropped to avoid errors.

    Parameters:

    Name Type Description Default scale float

    The resulting video will have a resolution of scale * (H, W) where HxW is the resolution of the original video. Use a bigger scale if the camera is moving too much.

    2 attenuation float

    Controls how fast the older frames fade to black.

    0.05

    Examples:

    >>> # setup\n>>> tracker = Tracker(\"frobenious\", 100)\n>>> motion_estimator = MotionEstimator()\n>>> video = Video(input_path=\"video.mp4\")\n>>> fixed_camera = FixedCamera()\n>>> # process video\n>>> for frame in video:\n>>>     coord_transformations = motion_estimator.update(frame)\n>>>     detections = get_detections(frame)\n>>>     tracked_objects = tracker.update(detections, coord_transformations)\n>>>     draw_tracked_objects(frame, tracked_objects)  # fixed_camera should always be the last drawer\n>>>     bigger_frame = fixed_camera.adjust_frame(frame, coord_transformations)\n>>>     video.write(bigger_frame)\n
    Source code in norfair/drawing/fixed_camera.py
    class FixedCamera:\n    \"\"\"\n    Class used to stabilize video based on the camera motion.\n\n    Starts with a larger frame, where the original frame is drawn on top of a black background.\n    As the camera moves, the smaller frame moves in the opposite direction, stabilizing the objects in it.\n\n    Useful for debugging or demoing the camera motion.\n    ![Example GIF](../../videos/camera_stabilization.gif)\n\n    !!! Warning\n        This only works with [`TranslationTransformation`][norfair.camera_motion.TranslationTransformation],\n        using [`HomographyTransformation`][norfair.camera_motion.HomographyTransformation] will result in\n        unexpected behaviour.\n\n    !!! Warning\n        If using other drawers, always apply this one last. Using other drawers on the scaled up frame will not work as expected.\n\n    !!! Note\n        Sometimes the camera moves so far from the original point that the result won't fit in the scaled-up frame.\n        In this case, a warning will be logged and the frames will be cropped to avoid errors.\n\n    Parameters\n    ----------\n    scale : float, optional\n        The resulting video will have a resolution of `scale * (H, W)` where HxW is the resolution of the original video.\n        Use a bigger scale if the camera is moving too much.\n    attenuation : float, optional\n        Controls how fast the older frames fade to black.\n\n    Examples\n    --------\n    >>> # setup\n    >>> tracker = Tracker(\"frobenious\", 100)\n    >>> motion_estimator = MotionEstimator()\n    >>> video = Video(input_path=\"video.mp4\")\n    >>> fixed_camera = FixedCamera()\n    >>> # process video\n    >>> for frame in video:\n    >>>     coord_transformations = motion_estimator.update(frame)\n    >>>     detections = get_detections(frame)\n    >>>     tracked_objects = tracker.update(detections, coord_transformations)\n    >>>     draw_tracked_objects(frame, tracked_objects)  # fixed_camera should always be the last drawer\n    >>>     bigger_frame = fixed_camera.adjust_frame(frame, coord_transformations)\n    >>>     video.write(bigger_frame)\n    \"\"\"\n\n    def __init__(self, scale: float = 2, attenuation: float = 0.05):\n        self.scale = scale\n        self._background = None\n        self._attenuation_factor = 1 - attenuation\n\n    def adjust_frame(\n        self, frame: np.ndarray, coord_transformation: TranslationTransformation\n    ) -> np.ndarray:\n        \"\"\"\n        Render scaled up frame.\n\n        Parameters\n        ----------\n        frame : np.ndarray\n            The OpenCV frame.\n        coord_transformation : TranslationTransformation\n            The coordinate transformation as returned by the [`MotionEstimator`][norfair.camera_motion.MotionEstimator]\n\n        Returns\n        -------\n        np.ndarray\n            The new bigger frame with the original frame drawn on it.\n        \"\"\"\n\n        # initialize background if necessary\n        if self._background is None:\n            original_size = (\n                frame.shape[1],\n                frame.shape[0],\n            )  # OpenCV format is (width, height)\n\n            scaled_size = tuple(\n                (np.array(original_size) * np.array(self.scale)).round().astype(int)\n            )\n            self._background = np.zeros(\n                [scaled_size[1], scaled_size[0], frame.shape[-1]],\n                frame.dtype,\n            )\n        else:\n            self._background = (self._background * self._attenuation_factor).astype(\n          
      frame.dtype\n            )\n\n        # top_left is the anchor coordinate from where we start drawing the fame on top of the background\n        # aim to draw it in the center of the background but transformations will move this point\n        top_left = (\n            np.array(self._background.shape[:2]) // 2 - np.array(frame.shape[:2]) // 2\n        )\n        top_left = (\n            coord_transformation.rel_to_abs(top_left[::-1]).round().astype(int)[::-1]\n        )\n        # box of the background that will be updated and the limits of it\n        background_y0, background_y1 = (top_left[0], top_left[0] + frame.shape[0])\n        background_x0, background_x1 = (top_left[1], top_left[1] + frame.shape[1])\n        background_size_y, background_size_x = self._background.shape[:2]\n\n        # define box of the frame that will be used\n        # if the scale is not enough to support the movement, warn the user but keep drawing\n        # cropping the frame so that the operation doesn't fail\n        frame_y0, frame_y1, frame_x0, frame_x1 = (0, frame.shape[0], 0, frame.shape[1])\n        if (\n            background_y0 < 0\n            or background_x0 < 0\n            or background_y1 > background_size_y\n            or background_x1 > background_size_x\n        ):\n            warn_once(\n                \"moving_camera_scale is not enough to cover the range of camera movement, frame will be cropped\"\n            )\n            # crop left or top of the frame if necessary\n            frame_y0 = max(-background_y0, 0)\n            frame_x0 = max(-background_x0, 0)\n            # crop right or bottom of the frame if necessary\n            frame_y1 = max(\n                min(background_size_y - background_y0, background_y1 - background_y0), 0\n            )\n            frame_x1 = max(\n                min(background_size_x - background_x0, background_x1 - background_x0), 0\n            )\n            # handle cases where the limits of the background become negative which numpy will interpret incorrectly\n            background_y0 = max(background_y0, 0)\n            background_x0 = max(background_x0, 0)\n            background_y1 = max(background_y1, 0)\n            background_x1 = max(background_x1, 0)\n        self._background[\n            background_y0:background_y1, background_x0:background_x1, :\n        ] = frame[frame_y0:frame_y1, frame_x0:frame_x1, :]\n        return self._background\n
    "},{"location":"reference/drawing/#norfair.drawing.fixed_camera.FixedCamera.adjust_frame","title":"adjust_frame(frame, coord_transformation)","text":"

    Render scaled up frame.

    Parameters:

    Name Type Description Default frame ndarray

    The OpenCV frame.

    required coord_transformation TranslationTransformation

    The coordinate transformation as returned by the MotionEstimator

    required

    Returns:

    Type Description ndarray

    The new bigger frame with the original frame drawn on it.

    Source code in norfair/drawing/fixed_camera.py
    def adjust_frame(\n    self, frame: np.ndarray, coord_transformation: TranslationTransformation\n) -> np.ndarray:\n    \"\"\"\n    Render scaled up frame.\n\n    Parameters\n    ----------\n    frame : np.ndarray\n        The OpenCV frame.\n    coord_transformation : TranslationTransformation\n        The coordinate transformation as returned by the [`MotionEstimator`][norfair.camera_motion.MotionEstimator]\n\n    Returns\n    -------\n    np.ndarray\n        The new bigger frame with the original frame drawn on it.\n    \"\"\"\n\n    # initialize background if necessary\n    if self._background is None:\n        original_size = (\n            frame.shape[1],\n            frame.shape[0],\n        )  # OpenCV format is (width, height)\n\n        scaled_size = tuple(\n            (np.array(original_size) * np.array(self.scale)).round().astype(int)\n        )\n        self._background = np.zeros(\n            [scaled_size[1], scaled_size[0], frame.shape[-1]],\n            frame.dtype,\n        )\n    else:\n        self._background = (self._background * self._attenuation_factor).astype(\n            frame.dtype\n        )\n\n    # top_left is the anchor coordinate from where we start drawing the fame on top of the background\n    # aim to draw it in the center of the background but transformations will move this point\n    top_left = (\n        np.array(self._background.shape[:2]) // 2 - np.array(frame.shape[:2]) // 2\n    )\n    top_left = (\n        coord_transformation.rel_to_abs(top_left[::-1]).round().astype(int)[::-1]\n    )\n    # box of the background that will be updated and the limits of it\n    background_y0, background_y1 = (top_left[0], top_left[0] + frame.shape[0])\n    background_x0, background_x1 = (top_left[1], top_left[1] + frame.shape[1])\n    background_size_y, background_size_x = self._background.shape[:2]\n\n    # define box of the frame that will be used\n    # if the scale is not enough to support the movement, warn the user but keep drawing\n    # cropping the frame so that the operation doesn't fail\n    frame_y0, frame_y1, frame_x0, frame_x1 = (0, frame.shape[0], 0, frame.shape[1])\n    if (\n        background_y0 < 0\n        or background_x0 < 0\n        or background_y1 > background_size_y\n        or background_x1 > background_size_x\n    ):\n        warn_once(\n            \"moving_camera_scale is not enough to cover the range of camera movement, frame will be cropped\"\n        )\n        # crop left or top of the frame if necessary\n        frame_y0 = max(-background_y0, 0)\n        frame_x0 = max(-background_x0, 0)\n        # crop right or bottom of the frame if necessary\n        frame_y1 = max(\n            min(background_size_y - background_y0, background_y1 - background_y0), 0\n        )\n        frame_x1 = max(\n            min(background_size_x - background_x0, background_x1 - background_x0), 0\n        )\n        # handle cases where the limits of the background become negative which numpy will interpret incorrectly\n        background_y0 = max(background_y0, 0)\n        background_x0 = max(background_x0, 0)\n        background_y1 = max(background_y1, 0)\n        background_x1 = max(background_x1, 0)\n    self._background[\n        background_y0:background_y1, background_x0:background_x1, :\n    ] = frame[frame_y0:frame_y1, frame_x0:frame_x1, :]\n    return self._background\n
    "},{"location":"reference/drawing/#norfair.drawing.absolute_grid","title":"absolute_grid","text":""},{"location":"reference/drawing/#norfair.drawing.absolute_grid.draw_absolute_grid","title":"draw_absolute_grid(frame, coord_transformations, grid_size=20, radius=2, thickness=1, color=Color.black, polar=False)","text":"

    Draw a grid of points in absolute coordinates.

    Useful for debugging camera motion.

    The points are drawn as if the camera were in the center of a sphere and points are drawn in the intersection of latitude and longitude lines over the surface of the sphere.

    Parameters:

    Name Type Description Default frame ndarray

    The OpenCV frame to draw on.

    required coord_transformations CoordinatesTransformation

    The coordinate transformation as returned by the MotionEstimator

    required grid_size int

    How many points to draw.

    20 radius int

    Size of each point.

    2 thickness int

    Thickness of each point

    1 color ColorType

    Color of the points.

    black polar Bool

    If True, the points on the first frame are drawn as if the camera were pointing to a pole (viewed from the center of the earth). By default, False is used which means the points are drawn as if the camera were pointing to the Equator.

    False

    Source code in norfair/drawing/absolute_grid.py
    def draw_absolute_grid(\n    frame: np.ndarray,\n    coord_transformations: CoordinatesTransformation,\n    grid_size: int = 20,\n    radius: int = 2,\n    thickness: int = 1,\n    color: ColorType = Color.black,\n    polar: bool = False,\n):\n    \"\"\"\n    Draw a grid of points in absolute coordinates.\n\n    Useful for debugging camera motion.\n\n    The points are drawn as if the camera were in the center of a sphere and points are drawn in the intersection\n    of latitude and longitude lines over the surface of the sphere.\n\n    Parameters\n    ----------\n    frame : np.ndarray\n        The OpenCV frame to draw on.\n    coord_transformations : CoordinatesTransformation\n        The coordinate transformation as returned by the [`MotionEstimator`][norfair.camera_motion.MotionEstimator]\n    grid_size : int, optional\n        How many points to draw.\n    radius : int, optional\n        Size of each point.\n    thickness : int, optional\n        Thickness of each point\n    color : ColorType, optional\n        Color of the points.\n    polar : Bool, optional\n        If True, the points on the first frame are drawn as if the camera were pointing to a pole (viewed from the center of the earth).\n        By default, False is used which means the points are drawn as if the camera were pointing to the Equator.\n    \"\"\"\n    h, w, _ = frame.shape\n\n    # get absolute points grid\n    points = _get_grid(grid_size, w, h, polar=polar)\n\n    # transform the points to relative coordinates\n    if coord_transformations is None:\n        points_transformed = points\n    else:\n        points_transformed = coord_transformations.abs_to_rel(points)\n\n    # filter points that are not visible\n    visible_points = points_transformed[\n        (points_transformed <= np.array([w, h])).all(axis=1)\n        & (points_transformed >= 0).all(axis=1)\n    ]\n    for point in visible_points:\n        Drawer.cross(\n            frame, point.astype(int), radius=radius, thickness=thickness, color=color\n        )\n
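
    A minimal usage sketch of draw_absolute_grid for debugging camera motion; the video path and the frame loop are illustrative placeholders:

    >>> from norfair import Video\n>>> from norfair.camera_motion import MotionEstimator\n>>> from norfair.drawing.absolute_grid import draw_absolute_grid\n>>> video = Video(input_path=\"video.mp4\")\n>>> motion_estimator = MotionEstimator()\n>>> for frame in video:\n>>>     coord_transformations = motion_estimator.update(frame)\n>>>     draw_absolute_grid(frame, coord_transformations)  # draws the grid in place on the frame\n>>>     video.write(frame)\n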
    "},{"location":"reference/filter/","title":"Filter","text":""},{"location":"reference/filter/#norfair.filter.FilterPyKalmanFilterFactory","title":"FilterPyKalmanFilterFactory","text":"

    Bases: FilterFactory

    This class can be used either to change some parameters of the KalmanFilter that the tracker uses, or to fully customize the predictive filter implementation to use (as long as the methods and properties are compatible).

    The former case only requires changing the default parameters upon tracker creation: tracker = Tracker(..., filter_factory=FilterPyKalmanFilterFactory(R=100)), while the latter requires creating your own class extending FilterPyKalmanFilterFactory, and rewriting its create_filter method to return your own customized filter.

    Parameters:

    Name Type Description Default R float

    Multiplier for the sensor measurement noise matrix, by default 4.0

    4.0 Q float

    Multiplier for the process uncertainty, by default 0.1

    0.1 P float

    Multiplier for the initial covariance matrix estimation, only in the entries that correspond to position (not speed) variables, by default 10.0

    10.0

    See Also

    filterpy.KalmanFilter.
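
    Examples:

    A sketch of the second approach, subclassing the factory to tweak the filter it returns; the extra scaling of Q is an arbitrary illustration, not a recommended value:

    >>> import numpy as np\n>>> from norfair import Tracker\n>>> from norfair.filter import FilterPyKalmanFilterFactory\n>>> class MyFilterFactory(FilterPyKalmanFilterFactory):\n>>>     def create_filter(self, initial_detection: np.ndarray):\n>>>         kalman_filter = super().create_filter(initial_detection)\n>>>         kalman_filter.Q *= 0.5  # arbitrary extra tweak of the process uncertainty\n>>>         return kalman_filter\n>>> tracker = Tracker(\"euclidean\", distance_threshold=50, filter_factory=MyFilterFactory(R=100))\n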

    Source code in norfair/filter.py
    class FilterPyKalmanFilterFactory(FilterFactory):\n    \"\"\"\n    This class can be used either to change some parameters of the [KalmanFilter](https://filterpy.readthedocs.io/en/latest/kalman/KalmanFilter.html)\n    that the tracker uses, or to fully customize the predictive filter implementation to use (as long as the methods and properties are compatible).\n\n    The former case only requires changing the default parameters upon tracker creation: `tracker = Tracker(..., filter_factory=FilterPyKalmanFilterFactory(R=100))`,\n    while the latter requires creating your own class extending `FilterPyKalmanFilterFactory`, and rewriting its `create_filter` method to return your own customized filter.\n\n    Parameters\n    ----------\n    R : float, optional\n        Multiplier for the sensor measurement noise matrix, by default 4.0\n    Q : float, optional\n        Multiplier for the process uncertainty, by default 0.1\n    P : float, optional\n        Multiplier for the initial covariance matrix estimation, only in the entries that correspond to position (not speed) variables, by default 10.0\n\n    See Also\n    --------\n    [`filterpy.KalmanFilter`](https://filterpy.readthedocs.io/en/latest/kalman/KalmanFilter.html).\n    \"\"\"\n\n    def __init__(self, R: float = 4.0, Q: float = 0.1, P: float = 10.0):\n        self.R = R\n        self.Q = Q\n        self.P = P\n\n    def create_filter(self, initial_detection: np.ndarray) -> KalmanFilter:\n        \"\"\"\n        This method returns a new predictive filter instance with the current setup, to be used by each new [`TrackedObject`][norfair.tracker.TrackedObject] that is created.\n        This predictive filter will be used to estimate speed and future positions of the object, to better match the detections during its trajectory.\n\n        Parameters\n        ----------\n        initial_detection : np.ndarray\n            numpy array of shape `(number of points per object, 2)`, corresponding to the [`Detection.points`][norfair.tracker.Detection] of the tracked object being born,\n            which shall be used as initial position estimation for it.\n\n        Returns\n        -------\n        KalmanFilter\n            The kalman filter\n        \"\"\"\n        num_points = initial_detection.shape[0]\n        dim_points = initial_detection.shape[1]\n        dim_z = dim_points * num_points\n        dim_x = 2 * dim_z  # We need to accommodate for velocities\n\n        filter = KalmanFilter(dim_x=dim_x, dim_z=dim_z)\n\n        # State transition matrix (models physics): numpy.array()\n        filter.F = np.eye(dim_x)\n        dt = 1  # At each step we update pos with v * dt\n\n        filter.F[:dim_z, dim_z:] = dt * np.eye(dim_z)\n\n        # Measurement function: numpy.array(dim_z, dim_x)\n        filter.H = np.eye(\n            dim_z,\n            dim_x,\n        )\n\n        # Measurement uncertainty (sensor noise): numpy.array(dim_z, dim_z)\n        filter.R *= self.R\n\n        # Process uncertainty: numpy.array(dim_x, dim_x)\n        # Don't decrease it too much or trackers pay too little attention to detections\n        filter.Q[dim_z:, dim_z:] *= self.Q\n\n        # Initial state: numpy.array(dim_x, 1)\n        filter.x[:dim_z] = np.expand_dims(initial_detection.flatten(), 0).T\n        filter.x[dim_z:] = 0\n\n        # Estimation uncertainty: numpy.array(dim_x, dim_x)\n        filter.P[dim_z:, dim_z:] *= self.P\n\n        return filter\n
    "},{"location":"reference/filter/#norfair.filter.FilterPyKalmanFilterFactory.create_filter","title":"create_filter(initial_detection)","text":"

    This method returns a new predictive filter instance with the current setup, to be used by each new TrackedObject that is created. This predictive filter will be used to estimate speed and future positions of the object, to better match the detections during its trajectory.

    Parameters:

    Name Type Description Default initial_detection ndarray

    numpy array of shape (number of points per object, 2), corresponding to the Detection.points of the tracked object being born, which shall be used as initial position estimation for it.

    required

    Returns:

    Type Description KalmanFilter

    The kalman filter

    Source code in norfair/filter.py
    def create_filter(self, initial_detection: np.ndarray) -> KalmanFilter:\n    \"\"\"\n    This method returns a new predictive filter instance with the current setup, to be used by each new [`TrackedObject`][norfair.tracker.TrackedObject] that is created.\n    This predictive filter will be used to estimate speed and future positions of the object, to better match the detections during its trajectory.\n\n    Parameters\n    ----------\n    initial_detection : np.ndarray\n        numpy array of shape `(number of points per object, 2)`, corresponding to the [`Detection.points`][norfair.tracker.Detection] of the tracked object being born,\n        which shall be used as initial position estimation for it.\n\n    Returns\n    -------\n    KalmanFilter\n        The kalman filter\n    \"\"\"\n    num_points = initial_detection.shape[0]\n    dim_points = initial_detection.shape[1]\n    dim_z = dim_points * num_points\n    dim_x = 2 * dim_z  # We need to accommodate for velocities\n\n    filter = KalmanFilter(dim_x=dim_x, dim_z=dim_z)\n\n    # State transition matrix (models physics): numpy.array()\n    filter.F = np.eye(dim_x)\n    dt = 1  # At each step we update pos with v * dt\n\n    filter.F[:dim_z, dim_z:] = dt * np.eye(dim_z)\n\n    # Measurement function: numpy.array(dim_z, dim_x)\n    filter.H = np.eye(\n        dim_z,\n        dim_x,\n    )\n\n    # Measurement uncertainty (sensor noise): numpy.array(dim_z, dim_z)\n    filter.R *= self.R\n\n    # Process uncertainty: numpy.array(dim_x, dim_x)\n    # Don't decrease it too much or trackers pay too little attention to detections\n    filter.Q[dim_z:, dim_z:] *= self.Q\n\n    # Initial state: numpy.array(dim_x, 1)\n    filter.x[:dim_z] = np.expand_dims(initial_detection.flatten(), 0).T\n    filter.x[dim_z:] = 0\n\n    # Estimation uncertainty: numpy.array(dim_x, dim_x)\n    filter.P[dim_z:, dim_z:] *= self.P\n\n    return filter\n
    "},{"location":"reference/filter/#norfair.filter.OptimizedKalmanFilterFactory","title":"OptimizedKalmanFilterFactory","text":"

    Bases: FilterFactory

    Creates faster Filters than FilterPyKalmanFilterFactory.

    It allows the user to create a Kalman Filter optimized for tracking and to set its parameters.

    Parameters:

    Name Type Description Default R float

    Multiplier for the sensor measurement noise matrix.

    4.0 Q float

    Multiplier for the process uncertainty.

    0.1 pos_variance float

    Multiplier for the initial covariance matrix estimation, only in the entries that correspond to position (not speed) variables.

    10 pos_vel_covariance float

    Multiplier for the initial covariance matrix estimation, only in the entries that correspond to the covariance between position and speed.

    0 vel_variance float

    Multiplier for the initial covariance matrix estimation, only in the entries that correspond to velocity (not position) variables.

    1

    Source code in norfair/filter.py
    class OptimizedKalmanFilterFactory(FilterFactory):\n    \"\"\"\n    Creates faster Filters than [`FilterPyKalmanFilterFactory`][norfair.filter.FilterPyKalmanFilterFactory].\n\n    It allows the user to create Kalman Filter optimized for tracking and set its parameters.\n\n    Parameters\n    ----------\n    R : float, optional\n        Multiplier for the sensor measurement noise matrix.\n    Q : float, optional\n        Multiplier for the process uncertainty.\n    pos_variance : float, optional\n        Multiplier for the initial covariance matrix estimation, only in the entries that correspond to position (not speed) variables.\n    pos_vel_covariance : float, optional\n        Multiplier for the initial covariance matrix estimation, only in the entries that correspond to the covariance between position and speed.\n    vel_variance : float, optional\n        Multiplier for the initial covariance matrix estimation, only in the entries that correspond to velocity (not position) variables.\n    \"\"\"\n\n    def __init__(\n        self,\n        R: float = 4.0,\n        Q: float = 0.1,\n        pos_variance: float = 10,\n        pos_vel_covariance: float = 0,\n        vel_variance: float = 1,\n    ):\n        self.R = R\n        self.Q = Q\n\n        # entrances P matrix of KF\n        self.pos_variance = pos_variance\n        self.pos_vel_covariance = pos_vel_covariance\n        self.vel_variance = vel_variance\n\n    def create_filter(self, initial_detection: np.ndarray):\n        num_points = initial_detection.shape[0]\n        dim_points = initial_detection.shape[1]\n        dim_z = dim_points * num_points  # flattened positions\n        dim_x = 2 * dim_z  # We need to accommodate for velocities\n\n        custom_filter = OptimizedKalmanFilter(\n            dim_x,\n            dim_z,\n            pos_variance=self.pos_variance,\n            pos_vel_covariance=self.pos_vel_covariance,\n            vel_variance=self.vel_variance,\n            q=self.Q,\n            r=self.R,\n        )\n        custom_filter.x[:dim_z] = np.expand_dims(initial_detection.flatten(), 0).T\n\n        return custom_filter\n
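
    A usage sketch; since this factory is already the tracker's default, passing it explicitly is only needed to change its parameters (the values below are illustrative):

    >>> from norfair import Tracker\n>>> from norfair.filter import OptimizedKalmanFilterFactory\n>>> tracker = Tracker(distance_function=\"euclidean\", distance_threshold=50, filter_factory=OptimizedKalmanFilterFactory(R=4.0, Q=0.1))\n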
    "},{"location":"reference/metrics/","title":"Metrics","text":""},{"location":"reference/metrics/#norfair.metrics.PredictionsTextFile","title":"PredictionsTextFile","text":"

    Generates a text file with your predicted tracked objects, in the MOTChallenge format. It needs the 'input_path', which is the path to the sequence being processed, the 'save_path', and optionally the 'information_file' (if you don't provide an 'information_file', it is assumed there is one in the input_path folder).
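
    Examples:

    A minimal usage sketch; the sequence folder, the detection source, and the tracker are hypothetical placeholders, and the folder is expected to contain a seqinfo.ini file:

    >>> from norfair.metrics import PredictionsTextFile\n>>> predictions_file = PredictionsTextFile(input_path=\"MOT17/train/MOT17-02\", save_path=\".\")\n>>> for detections in detection_parser:  # e.g. a DetectionFileParser over the same sequence\n>>>     tracked_objects = tracker.update(detections)\n>>>     predictions_file.update(tracked_objects)\n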

    Source code in norfair/metrics.py
    class PredictionsTextFile:\n    \"\"\"Generates a text file with your predicted tracked objects, in the MOTChallenge format.\n    It needs the 'input_path', which is the path to the sequence being processed,\n    the 'save_path', and optionally the 'information_file' (in case you don't give an\n    'information_file', is assumed there is one in the input_path folder).\n    \"\"\"\n\n    def __init__(self, input_path, save_path=\".\", information_file=None):\n\n        file_name = os.path.split(input_path)[1]\n\n        if information_file is None:\n            seqinfo_path = os.path.join(input_path, \"seqinfo.ini\")\n            information_file = InformationFile(file_path=seqinfo_path)\n\n        self.length = information_file.search(variable_name=\"seqLength\")\n\n        predictions_folder = os.path.join(save_path, \"predictions\")\n        if not os.path.exists(predictions_folder):\n            os.makedirs(predictions_folder)\n\n        out_file_name = os.path.join(predictions_folder, file_name + \".txt\")\n        self.text_file = open(out_file_name, \"w+\")\n\n        self.frame_number = 1\n\n    def update(self, predictions, frame_number=None):\n        if frame_number is None:\n            frame_number = self.frame_number\n        \"\"\"\n        Write tracked object information in the output file (for this frame), in the format\n        frame_number, id, bb_left, bb_top, bb_width, bb_height, -1, -1, -1, -1\n        \"\"\"\n        for obj in predictions:\n            frame_str = str(int(frame_number))\n            id_str = str(int(obj.id))\n            bb_left_str = str((obj.estimate[0, 0]))\n            bb_top_str = str((obj.estimate[0, 1]))  # [0,1]\n            bb_width_str = str((obj.estimate[1, 0] - obj.estimate[0, 0]))\n            bb_height_str = str((obj.estimate[1, 1] - obj.estimate[0, 1]))\n            row_text_out = (\n                frame_str\n                + \",\"\n                + id_str\n                + \",\"\n                + bb_left_str\n                + \",\"\n                + bb_top_str\n                + \",\"\n                + bb_width_str\n                + \",\"\n                + bb_height_str\n                + \",-1,-1,-1,-1\"\n            )\n            self.text_file.write(row_text_out)\n            self.text_file.write(\"\\n\")\n\n        self.frame_number += 1\n\n        if self.frame_number > self.length:\n            self.text_file.close()\n
    "},{"location":"reference/metrics/#norfair.metrics.DetectionFileParser","title":"DetectionFileParser","text":"

    Get Norfair detections from MOTChallenge text files containing detections
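
    Examples:

    A minimal usage sketch; the sequence folder is a hypothetical MOTChallenge sequence containing det/det.txt and seqinfo.ini:

    >>> from norfair import Tracker\n>>> from norfair.metrics import DetectionFileParser\n>>> parser = DetectionFileParser(input_path=\"MOT17/train/MOT17-02\")\n>>> tracker = Tracker(distance_function=\"euclidean\", distance_threshold=50)\n>>> for detections in parser:\n>>>     tracked_objects = tracker.update(detections)\n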

    Source code in norfair/metrics.py
    class DetectionFileParser:\n    \"\"\"Get Norfair detections from MOTChallenge text files containing detections\"\"\"\n\n    def __init__(self, input_path, information_file=None):\n        self.frame_number = 1\n\n        # Get detecions matrix data with rows corresponding to:\n        # frame, id, bb_left, bb_top, bb_right, bb_down, conf, x, y, z\n        detections_path = os.path.join(input_path, \"det/det.txt\")\n\n        self.matrix_detections = np.loadtxt(detections_path, dtype=\"f\", delimiter=\",\")\n        row_order = np.argsort(self.matrix_detections[:, 0])\n        self.matrix_detections = self.matrix_detections[row_order]\n        # Coordinates refer to box corners\n        self.matrix_detections[:, 4] = (\n            self.matrix_detections[:, 2] + self.matrix_detections[:, 4]\n        )\n        self.matrix_detections[:, 5] = (\n            self.matrix_detections[:, 3] + self.matrix_detections[:, 5]\n        )\n\n        if information_file is None:\n            seqinfo_path = os.path.join(input_path, \"seqinfo.ini\")\n            information_file = InformationFile(file_path=seqinfo_path)\n        self.length = information_file.search(variable_name=\"seqLength\")\n\n        self.sorted_by_frame = []\n        for frame_number in range(1, self.length + 1):\n            self.sorted_by_frame.append(self.get_dets_from_frame(frame_number))\n\n    def get_dets_from_frame(self, frame_number):\n        \"\"\"this function returns a list of norfair Detections class, corresponding to frame=frame_number\"\"\"\n\n        indexes = np.argwhere(self.matrix_detections[:, 0] == frame_number)\n        detections = []\n        if len(indexes) > 0:\n            actual_det = self.matrix_detections[indexes]\n            actual_det.shape = [actual_det.shape[0], actual_det.shape[2]]\n            for det in actual_det:\n                points = np.array([[det[2], det[3]], [det[4], det[5]]])\n                conf = det[6]\n                new_detection = Detection(points, np.array([conf, conf]))\n                detections.append(new_detection)\n        self.actual_detections = detections\n        return detections\n\n    def __iter__(self):\n        self.frame_number = 1\n        return self\n\n    def __next__(self):\n        if self.frame_number <= self.length:\n            self.frame_number += 1\n            # Frame_number is always 1 unit bigger than the corresponding index in self.sorted_by_frame, and\n            # also we just incremented the frame_number, so now is 2 units bigger than the corresponding index\n            return self.sorted_by_frame[self.frame_number - 2]\n\n        raise StopIteration()\n
    "},{"location":"reference/metrics/#norfair.metrics.DetectionFileParser.get_dets_from_frame","title":"get_dets_from_frame(frame_number)","text":"

    This function returns a list of norfair Detection objects corresponding to frame=frame_number.

    Source code in norfair/metrics.py
    def get_dets_from_frame(self, frame_number):\n    \"\"\"this function returns a list of norfair Detections class, corresponding to frame=frame_number\"\"\"\n\n    indexes = np.argwhere(self.matrix_detections[:, 0] == frame_number)\n    detections = []\n    if len(indexes) > 0:\n        actual_det = self.matrix_detections[indexes]\n        actual_det.shape = [actual_det.shape[0], actual_det.shape[2]]\n        for det in actual_det:\n            points = np.array([[det[2], det[3]], [det[4], det[5]]])\n            conf = det[6]\n            new_detection = Detection(points, np.array([conf, conf]))\n            detections.append(new_detection)\n    self.actual_detections = detections\n    return detections\n
    "},{"location":"reference/metrics/#norfair.metrics.load_motchallenge","title":"load_motchallenge(matrix_data, min_confidence=-1)","text":"

    Load MOT challenge data.

    This is a modification of the function load_motchallenge from the py-motmetrics library, defined in io.py. In this version, the pandas dataframe is generated from a numpy array (matrix_data) instead of a text file.

    Params

    matrix_data : array of float that has [frame, id, X, Y, width, height, conf, classId, visibility, unused] in each row (the trailing column is discarded), for each prediction on a particular video

    min_confidence : float Rows with confidence less than this threshold are removed. Defaults to -1. You should set this to 1 when loading ground truth MOTChallenge data, so that invalid rectangles in the ground truth are not considered during matching.

    Returns:

    Name Type Description df DataFrame

    The returned dataframe has the following columns: 'X', 'Y', 'Width', 'Height', 'Confidence', 'ClassId', 'Visibility'. The dataframe is indexed by ('FrameId', 'Id').
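
    Examples:

    A minimal sketch with a hand-built matrix; each row carries ten values, the last one being the trailing column that the function discards:

    >>> import numpy as np\n>>> from norfair.metrics import load_motchallenge\n>>> # columns: frame, id, X, Y, width, height, conf, classId, visibility, unused\n>>> matrix = np.array([[1, 1, 100.0, 50.0, 20.0, 40.0, 1.0, 1, 1.0, -1], [2, 1, 102.0, 51.0, 20.0, 40.0, 1.0, 1, 1.0, -1]])\n>>> df = load_motchallenge(matrix)\n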

    Source code in norfair/metrics.py
    def load_motchallenge(matrix_data, min_confidence=-1):\n    \"\"\"Load MOT challenge data.\n\n    This is a modification of the function load_motchallenge from the py-motmetrics library, defined in io.py\n    In this version, the pandas dataframe is generated from a numpy array (matrix_data) instead of a text file.\n\n    Params\n    ------\n    matrix_data : array  of float that has [frame, id, X, Y, width, height, conf, cassId, visibility] in each row, for each prediction on a particular video\n\n    min_confidence : float\n        Rows with confidence less than this threshold are removed.\n        Defaults to -1. You should set this to 1 when loading\n        ground truth MOTChallenge data, so that invalid rectangles in\n        the ground truth are not considered during matching.\n\n    Returns\n    ------\n    df : pandas.DataFrame\n        The returned dataframe has the following columns\n            'X', 'Y', 'Width', 'Height', 'Confidence', 'ClassId', 'Visibility'\n        The dataframe is indexed by ('FrameId', 'Id')\n    \"\"\"\n\n    df = pd.DataFrame(\n        data=matrix_data,\n        columns=[\n            \"FrameId\",\n            \"Id\",\n            \"X\",\n            \"Y\",\n            \"Width\",\n            \"Height\",\n            \"Confidence\",\n            \"ClassId\",\n            \"Visibility\",\n            \"unused\",\n        ],\n    )\n    df = df.set_index([\"FrameId\", \"Id\"])\n    # Account for matlab convention.\n    df[[\"X\", \"Y\"]] -= (1, 1)\n\n    # Removed trailing column\n    del df[\"unused\"]\n\n    # Remove all rows without sufficient confidence\n    return df[df[\"Confidence\"] >= min_confidence]\n
    "},{"location":"reference/metrics/#norfair.metrics.compare_dataframes","title":"compare_dataframes(gts, ts)","text":"

    Builds accumulator for each sequence.

    Source code in norfair/metrics.py
    def compare_dataframes(gts, ts):\n    \"\"\"Builds accumulator for each sequence.\"\"\"\n    accs = []\n    names = []\n    for k, tsacc in ts.items():\n        print(\"Comparing \", k, \"...\")\n        if k in gts:\n            accs.append(\n                mm.utils.compare_to_groundtruth(gts[k], tsacc, \"iou\", distth=0.5)\n            )\n            names.append(k)\n\n    return accs, names\n
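
    A usage sketch within a typical evaluation flow; gt_dataframes and predicted_dataframes are hypothetical dicts mapping sequence names to dataframes in the format returned by load_motchallenge, and mm refers to the py-motmetrics package:

    >>> import motmetrics as mm\n>>> from norfair.metrics import compare_dataframes\n>>> accs, names = compare_dataframes(gt_dataframes, predicted_dataframes)\n>>> metrics_host = mm.metrics.create()\n>>> summary = metrics_host.compute_many(accs, names=names, metrics=mm.metrics.motchallenge_metrics)\n>>> print(mm.io.render_summary(summary))\n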
    "},{"location":"reference/tracker/","title":"Tracker","text":""},{"location":"reference/tracker/#norfair.tracker.Tracker","title":"Tracker","text":"

    The class in charge of performing the tracking of the detections produced by a detector.

    Parameters:

    Name Type Description Default distance_function Union[str, Callable[[Detection, TrackedObject], float]]

    Function used by the tracker to determine the distance between newly detected objects and the objects that are currently being tracked. This function should take 2 input arguments, the first being a Detection, and the second a TrackedObject. It has to return a float with the distance it calculates. Some common distances are implemented in distances; as a shortcut, the tracker accepts the name of these predefined distances. Scipy's predefined distances are also accepted: a str with one of the available metrics in scipy.spatial.distance.cdist.

    required distance_threshold float

    Defines what is the maximum distance that can constitute a match. Detections and tracked objects whose distances are above this threshold won't be matched by the tracker.

    required hit_counter_max int

    Each tracked object keeps an internal hit counter which tracks how often it's getting matched to a detection; each time it gets a match this counter goes up, and each time it doesn't it goes down.

    If it goes below 0 the object gets destroyed. This argument defines how large this inertia can grow, and therefore how long an object can live without getting matched to any detections before it is considered a dead object; if no ReID distance function is implemented it will be destroyed.

    15 initialization_delay Optional[int]

    Determines how large the object's hit counter must be in order to be considered as initialized, and get returned to the user as a real object. It must be smaller than hit_counter_max or otherwise the object would never be initialized.

    If set to 0, objects will get returned to the user as soon as they are detected for the first time, which can be problematic as this can result in objects appearing and immediately disappearing.

    Defaults to hit_counter_max / 2

    None pointwise_hit_counter_max int

    Each tracked object keeps track of how often the points it's tracking have been getting matched. Points that are getting matched (pointwise_hit_counter > 0) are said to be live, and points which aren't (pointwise_hit_counter = 0) are said to not be live.

    This is used to determine things like which individual points in a tracked object get drawn by draw_tracked_objects and which don't. This argument defines how large the inertia for each point of a tracker can grow.

    4 detection_threshold float

    Sets the score threshold: points in a detection fed into the tracker whose scores fall below this value are ignored by the tracker.

    0 filter_factory FilterFactory

    This parameter can be used to change what filter the TrackedObject instances created by the tracker will use. Defaults to OptimizedKalmanFilterFactory()

    OptimizedKalmanFilterFactory() past_detections_length int

    How many past detections to save for each tracked object. Norfair tries to distribute these past detections uniformly through the object's lifetime so they're more representative. Very useful if you want to add metric learning to your model, as you can associate an embedding to each detection and access them in your distance function.

    4 reid_distance_function Optional[Callable[[TrackedObject, TrackedObject], float]]

    Function used by the tracker to determine the ReID distance between newly detected tracked objects and tracked objects left unmatched by the distance function.

    This function should take 2 input arguments, the first being tracked objects in the initialization phase of type TrackedObject, and the second being tracked objects that have been unmatched of type TrackedObject. It returns a float with the distance it calculates.

    None reid_distance_threshold float

    Defines what is the maximum ReID distance that can constitute a match.

    Tracked objects whose distance is above this threshold won't be merged; if they are merged, the oldest tracked object is kept and takes the position of the new tracked object.

    0 reid_hit_counter_max Optional[int]

    Each tracked object keeps an internal ReID hit counter which tracks how often it's getting recognized by another tracker; each time it gets a match this counter goes up, and each time it doesn't it goes down. If it goes below 0 the object gets destroyed. If used, this argument (reid_hit_counter_max) defines how long an object can live without getting matched to any detections before it is destroyed.

    None

    Source code in norfair/tracker.py
    class Tracker:\n    \"\"\"\n    The class in charge of performing the tracking of the detections produced by a detector.\n\n    Parameters\n    ----------\n    distance_function : Union[str, Callable[[Detection, TrackedObject], float]]\n        Function used by the tracker to determine the distance between newly detected objects and the objects that are currently being tracked.\n        This function should take 2 input arguments, the first being a [Detection][norfair.tracker.Detection], and the second a [TrackedObject][norfair.tracker.TrackedObject].\n        It has to return a `float` with the distance it calculates.\n        Some common distances are implemented in [distances][], as a shortcut the tracker accepts the name of these [predefined distances][norfair.distances.get_distance_by_name].\n        Scipy's predefined distances are also accepted. A `str` with one of the available metrics in\n        [`scipy.spatial.distance.cdist`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html).\n    distance_threshold : float\n        Defines what is the maximum distance that can constitute a match.\n        Detections and tracked objects whose distances are above this threshold won't be matched by the tracker.\n    hit_counter_max : int, optional\n        Each tracked objects keeps an internal hit counter which tracks how often it's getting matched to a detection,\n        each time it gets a match this counter goes up, and each time it doesn't it goes down.\n\n        If it goes below 0 the object gets destroyed. This argument defines how large this inertia can grow,\n        and therefore defines how long an object can live without getting matched to any detections, before it is displaced as a dead object, if no ReID distance function is implemented it will be destroyed.\n    initialization_delay : Optional[int], optional\n         Determines how large the object's hit counter must be in order to be considered as initialized, and get returned to the user as a real object.\n         It must be smaller than `hit_counter_max` or otherwise the object would never be initialized.\n\n         If set to 0, objects will get returned to the user as soon as they are detected for the first time,\n         which can be problematic as this can result in objects appearing and immediately dissapearing.\n\n         Defaults to `hit_counter_max / 2`\n    pointwise_hit_counter_max : int, optional\n        Each tracked object keeps track of how often the points it's tracking have been getting matched.\n        Points that are getting matched (`pointwise_hit_counter > 0`) are said to be live, and points which aren't (`pointwise_hit_counter = 0`)\n        are said to not be live.\n\n        This is used to determine things like which individual points in a tracked object get drawn by [`draw_tracked_objects`][norfair.drawing.draw_tracked_objects] and which don't.\n        This argument defines how large the inertia for each point of a tracker can grow.\n    detection_threshold : float, optional\n        Sets the threshold at which the scores of the points in a detection being fed into the tracker must dip below to be ignored by the tracker.\n    filter_factory : FilterFactory, optional\n        This parameter can be used to change what filter the [`TrackedObject`][norfair.tracker.TrackedObject] instances created by the tracker will use.\n        Defaults to [`OptimizedKalmanFilterFactory()`][norfair.filter.OptimizedKalmanFilterFactory]\n    past_detections_length : int, 
optional\n        How many past detections to save for each tracked object.\n        Norfair tries to distribute these past detections uniformly through the object's lifetime so they're more representative.\n        Very useful if you want to add metric learning to your model, as you can associate an embedding to each detection and access them in your distance function.\n    reid_distance_function: Optional[Callable[[\"TrackedObject\", \"TrackedObject\"], float]]\n        Function used by the tracker to determine the ReID distance between newly detected trackers and unmatched trackers by the distance function.\n\n        This function should take 2 input arguments, the first being tracked objects in the initialization phase of type [`TrackedObject`][norfair.tracker.TrackedObject],\n        and the second being tracked objects that have been unmatched of type [`TrackedObject`][norfair.tracker.TrackedObject]. It returns a `float` with the distance it\n        calculates.\n    reid_distance_threshold: float\n        Defines what is the maximum ReID distance that can constitute a match.\n\n        Tracked objects whose distance is above this threshold won't be merged, if they are the oldest tracked object will be maintained\n        with the position of the new tracked object.\n    reid_hit_counter_max: Optional[int]\n        Each tracked object keeps an internal ReID hit counter which tracks how often it's getting recognized by another tracker,\n        each time it gets a match this counter goes up, and each time it doesn't it goes down. If it goes below 0 the object gets destroyed.\n        If used, this argument (`reid_hit_counter_max`) defines how long an object can live without getting matched to any detections, before it is destroyed.\n    \"\"\"\n\n    def __init__(\n        self,\n        distance_function: Union[str, Callable[[\"Detection\", \"TrackedObject\"], float]],\n        distance_threshold: float,\n        hit_counter_max: int = 15,\n        initialization_delay: Optional[int] = None,\n        pointwise_hit_counter_max: int = 4,\n        detection_threshold: float = 0,\n        filter_factory: FilterFactory = OptimizedKalmanFilterFactory(),\n        past_detections_length: int = 4,\n        reid_distance_function: Optional[\n            Callable[[\"TrackedObject\", \"TrackedObject\"], float]\n        ] = None,\n        reid_distance_threshold: float = 0,\n        reid_hit_counter_max: Optional[int] = None,\n    ):\n        self.tracked_objects: Sequence[\"TrackedObject\"] = []\n\n        if isinstance(distance_function, str):\n            distance_function = get_distance_by_name(distance_function)\n        elif isinstance(distance_function, Callable):\n            warning(\n                \"You are using a scalar distance function. 
If you want to speed up the\"\n                \" tracking process please consider using a vectorized distance\"\n                f\" function such as {AVAILABLE_VECTORIZED_DISTANCES}.\"\n            )\n            distance_function = ScalarDistance(distance_function)\n        else:\n            raise ValueError(\n                \"Argument `distance_function` should be a string or function but is\"\n                f\" {type(distance_function)} instead.\"\n            )\n        self.distance_function = distance_function\n\n        self.hit_counter_max = hit_counter_max\n        self.reid_hit_counter_max = reid_hit_counter_max\n        self.pointwise_hit_counter_max = pointwise_hit_counter_max\n        self.filter_factory = filter_factory\n        if past_detections_length >= 0:\n            self.past_detections_length = past_detections_length\n        else:\n            raise ValueError(\n                f\"Argument `past_detections_length` is {past_detections_length} and should be larger than 0.\"\n            )\n\n        if initialization_delay is None:\n            self.initialization_delay = int(self.hit_counter_max / 2)\n        elif initialization_delay < 0 or initialization_delay >= self.hit_counter_max:\n            raise ValueError(\n                f\"Argument 'initialization_delay' for 'Tracker' class should be an int between 0 and (hit_counter_max = {hit_counter_max}). The selected value is {initialization_delay}.\\n\"\n            )\n        else:\n            self.initialization_delay = initialization_delay\n\n        self.distance_threshold = distance_threshold\n        self.detection_threshold = detection_threshold\n        if reid_distance_function is not None:\n            self.reid_distance_function = ScalarDistance(reid_distance_function)\n        else:\n            self.reid_distance_function = reid_distance_function\n        self.reid_distance_threshold = reid_distance_threshold\n        self._obj_factory = _TrackedObjectFactory()\n\n    def update(\n        self,\n        detections: Optional[List[\"Detection\"]] = None,\n        period: int = 1,\n        coord_transformations: Optional[CoordinatesTransformation] = None,\n    ) -> List[\"TrackedObject\"]:\n        \"\"\"\n        Process detections found in each frame.\n\n        The detections can be matched to previous tracked objects or new ones will be created\n        according to the configuration of the Tracker.\n        The currently alive and initialized tracked objects are returned\n\n        Parameters\n        ----------\n        detections : Optional[List[Detection]], optional\n            A list of [`Detection`][norfair.tracker.Detection] which represent the detections found in the current frame being processed.\n\n            If no detections have been found in the current frame, or the user is purposely skipping frames to improve video processing time,\n            this argument should be set to None or ignored, as the update function is needed to advance the state of the Kalman Filters inside the tracker.\n        period : int, optional\n            The user can chose not to run their detector on all frames, so as to process video faster.\n            This parameter sets every how many frames the detector is getting ran,\n            so that the tracker is aware of this situation and can handle it properly.\n\n            This argument can be reset on each frame processed,\n            which is useful if the user is dynamically changing how many frames the detector is skipping on a video when 
working in real-time.\n        coord_transformations: Optional[CoordinatesTransformation]\n            The coordinate transformation calculated by the [MotionEstimator][norfair.camera_motion.MotionEstimator].\n\n        Returns\n        -------\n        List[TrackedObject]\n            The list of active tracked objects.\n        \"\"\"\n        if coord_transformations is not None:\n            for det in detections:\n                det.update_coordinate_transformation(coord_transformations)\n\n        # Remove stale trackers and make candidate object real if the hit counter is positive\n        alive_objects = []\n        dead_objects = []\n        if self.reid_hit_counter_max is None:\n            self.tracked_objects = [\n                o for o in self.tracked_objects if o.hit_counter_is_positive\n            ]\n            alive_objects = self.tracked_objects\n        else:\n            tracked_objects = []\n            for o in self.tracked_objects:\n                if o.reid_hit_counter_is_positive:\n                    tracked_objects.append(o)\n                    if o.hit_counter_is_positive:\n                        alive_objects.append(o)\n                    else:\n                        dead_objects.append(o)\n            self.tracked_objects = tracked_objects\n\n        # Update tracker\n        for obj in self.tracked_objects:\n            obj.tracker_step()\n            obj.update_coordinate_transformation(coord_transformations)\n\n        # Update initialized tracked objects with detections\n        (\n            unmatched_detections,\n            _,\n            unmatched_init_trackers,\n        ) = self._update_objects_in_place(\n            self.distance_function,\n            self.distance_threshold,\n            [o for o in alive_objects if not o.is_initializing],\n            detections,\n            period,\n        )\n\n        # Update not yet initialized tracked objects with yet unmatched detections\n        (\n            unmatched_detections,\n            matched_not_init_trackers,\n            _,\n        ) = self._update_objects_in_place(\n            self.distance_function,\n            self.distance_threshold,\n            [o for o in alive_objects if o.is_initializing],\n            unmatched_detections,\n            period,\n        )\n\n        if self.reid_distance_function is not None:\n            # Match unmatched initialized tracked objects with not yet initialized tracked objects\n            _, _, _ = self._update_objects_in_place(\n                self.reid_distance_function,\n                self.reid_distance_threshold,\n                unmatched_init_trackers + dead_objects,\n                matched_not_init_trackers,\n                period,\n            )\n\n        # Create new tracked objects from remaining unmatched detections\n        for detection in unmatched_detections:\n            self.tracked_objects.append(\n                self._obj_factory.create(\n                    initial_detection=detection,\n                    hit_counter_max=self.hit_counter_max,\n                    initialization_delay=self.initialization_delay,\n                    pointwise_hit_counter_max=self.pointwise_hit_counter_max,\n                    detection_threshold=self.detection_threshold,\n                    period=period,\n                    filter_factory=self.filter_factory,\n                    past_detections_length=self.past_detections_length,\n                    reid_hit_counter_max=self.reid_hit_counter_max,\n                    
coord_transformations=coord_transformations,\n                )\n            )\n\n        return self.get_active_objects()\n\n    @property\n    def current_object_count(self) -> int:\n        \"\"\"Number of active TrackedObjects\"\"\"\n        return len(self.get_active_objects())\n\n    @property\n    def total_object_count(self) -> int:\n        \"\"\"Total number of TrackedObjects initialized in the by this Tracker\"\"\"\n        return self._obj_factory.count\n\n    def get_active_objects(self) -> List[\"TrackedObject\"]:\n        \"\"\"Get the list of active objects\n\n        Returns\n        -------\n        List[\"TrackedObject\"]\n            The list of active objects\n        \"\"\"\n        return [\n            o\n            for o in self.tracked_objects\n            if not o.is_initializing and o.hit_counter_is_positive\n        ]\n\n    def _update_objects_in_place(\n        self,\n        distance_function,\n        distance_threshold,\n        objects: Sequence[\"TrackedObject\"],\n        candidates: Optional[Union[List[\"Detection\"], List[\"TrackedObject\"]]],\n        period: int,\n    ):\n        if candidates is not None and len(candidates) > 0:\n            distance_matrix = distance_function.get_distances(objects, candidates)\n            if np.isnan(distance_matrix).any():\n                raise ValueError(\n                    \"\\nReceived nan values from distance function, please check your distance function for errors!\"\n                )\n\n            # Used just for debugging distance function\n            if distance_matrix.any():\n                for i, minimum in enumerate(distance_matrix.min(axis=0)):\n                    objects[i].current_min_distance = (\n                        minimum if minimum < distance_threshold else None\n                    )\n\n            matched_cand_indices, matched_obj_indices = self.match_dets_and_objs(\n                distance_matrix, distance_threshold\n            )\n            if len(matched_cand_indices) > 0:\n                unmatched_candidates = [\n                    d for i, d in enumerate(candidates) if i not in matched_cand_indices\n                ]\n                unmatched_objects = [\n                    d for i, d in enumerate(objects) if i not in matched_obj_indices\n                ]\n                matched_objects = []\n\n                # Handle matched people/detections\n                for (match_cand_idx, match_obj_idx) in zip(\n                    matched_cand_indices, matched_obj_indices\n                ):\n                    match_distance = distance_matrix[match_cand_idx, match_obj_idx]\n                    matched_candidate = candidates[match_cand_idx]\n                    matched_object = objects[match_obj_idx]\n                    if match_distance < distance_threshold:\n                        if isinstance(matched_candidate, Detection):\n                            matched_object.hit(matched_candidate, period=period)\n                            matched_object.last_distance = match_distance\n                            matched_objects.append(matched_object)\n                        elif isinstance(matched_candidate, TrackedObject):\n                            # Merge new TrackedObject with the old one\n                            matched_object.merge(matched_candidate)\n                            # If we are matching TrackedObject instances we want to get rid of the\n                            # already matched candidate to avoid matching it again in future frames\n          
                  self.tracked_objects.remove(matched_candidate)\n                    else:\n                        unmatched_candidates.append(matched_candidate)\n                        unmatched_objects.append(matched_object)\n            else:\n                unmatched_candidates, matched_objects, unmatched_objects = (\n                    candidates,\n                    [],\n                    objects,\n                )\n        else:\n            unmatched_candidates, matched_objects, unmatched_objects = [], [], objects\n\n        return unmatched_candidates, matched_objects, unmatched_objects\n\n    def match_dets_and_objs(self, distance_matrix: np.ndarray, distance_threshold):\n        \"\"\"Matches detections with tracked_objects from a distance matrix\n\n        I used to match by minimizing the global distances, but found several\n        cases in which this was not optimal. So now I just match by starting\n        with the global minimum distance and matching the det-obj corresponding\n        to that distance, then taking the second minimum, and so on until we\n        reach the distance_threshold.\n\n        This avoids the the algorithm getting cute with us and matching things\n        that shouldn't be matching just for the sake of minimizing the global\n        distance, which is what used to happen\n        \"\"\"\n        # NOTE: This implementation is terribly inefficient, but it doesn't\n        #       seem to affect the fps at all.\n        distance_matrix = distance_matrix.copy()\n        if distance_matrix.size > 0:\n            det_idxs = []\n            obj_idxs = []\n            current_min = distance_matrix.min()\n\n            while current_min < distance_threshold:\n                flattened_arg_min = distance_matrix.argmin()\n                det_idx = flattened_arg_min // distance_matrix.shape[1]\n                obj_idx = flattened_arg_min % distance_matrix.shape[1]\n                det_idxs.append(det_idx)\n                obj_idxs.append(obj_idx)\n                distance_matrix[det_idx, :] = distance_threshold + 1\n                distance_matrix[:, obj_idx] = distance_threshold + 1\n                current_min = distance_matrix.min()\n\n            return det_idxs, obj_idxs\n        else:\n            return [], []\n
    "},{"location":"reference/tracker/#norfair.tracker.Tracker.current_object_count","title":"current_object_count: int property","text":"

    Number of active TrackedObjects

    "},{"location":"reference/tracker/#norfair.tracker.Tracker.total_object_count","title":"total_object_count: int property","text":"

    Total number of TrackedObjects initialized by this Tracker

    "},{"location":"reference/tracker/#norfair.tracker.Tracker.update","title":"update(detections=None, period=1, coord_transformations=None)","text":"

    Process detections found in each frame.

    Detections are matched to previously tracked objects, or new tracked objects are created, according to the configuration of the Tracker. The currently alive and initialized tracked objects are returned.

    Parameters:

    Name Type Description Default detections Optional[List[Detection]]

    A list of Detection which represent the detections found in the current frame being processed.

    If no detections have been found in the current frame, or the user is purposely skipping frames to improve video processing time, this argument should be set to None or ignored, as the update function is needed to advance the state of the Kalman Filters inside the tracker.

    None period int

    The user can choose not to run their detector on every frame, so as to process video faster. This parameter tells the tracker how many frames pass between detector runs, so that it is aware of the situation and can handle it properly.

    This argument can be reset on each frame processed, which is useful if the user is dynamically changing how many frames the detector is skipping on a video when working in real-time.

    1 coord_transformations Optional[CoordinatesTransformation]

    The coordinate transformation calculated by the MotionEstimator.

    None

    Returns:

    Type Description List[TrackedObject]

    The list of active tracked objects.

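    A minimal usage sketch: the detector helper below is hypothetical and stands for any code that converts a model's output into a list of Detection objects for the current frame.

    >>> tracker = Tracker(distance_function=\"euclidean\", distance_threshold=30)\n>>> for frame in video:\n>>>     detections = detector(frame)  # hypothetical: returns List[Detection]\n>>>     tracked_objects = tracker.update(detections=detections)\n>>>     for obj in tracked_objects:\n>>>         print(obj.id, obj.estimate)\n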
    Source code in norfair/tracker.py
    def update(\n    self,\n    detections: Optional[List[\"Detection\"]] = None,\n    period: int = 1,\n    coord_transformations: Optional[CoordinatesTransformation] = None,\n) -> List[\"TrackedObject\"]:\n    \"\"\"\n    Process detections found in each frame.\n\n    The detections can be matched to previous tracked objects or new ones will be created\n    according to the configuration of the Tracker.\n    The currently alive and initialized tracked objects are returned\n\n    Parameters\n    ----------\n    detections : Optional[List[Detection]], optional\n        A list of [`Detection`][norfair.tracker.Detection] which represent the detections found in the current frame being processed.\n\n        If no detections have been found in the current frame, or the user is purposely skipping frames to improve video processing time,\n        this argument should be set to None or ignored, as the update function is needed to advance the state of the Kalman Filters inside the tracker.\n    period : int, optional\n        The user can chose not to run their detector on all frames, so as to process video faster.\n        This parameter sets every how many frames the detector is getting ran,\n        so that the tracker is aware of this situation and can handle it properly.\n\n        This argument can be reset on each frame processed,\n        which is useful if the user is dynamically changing how many frames the detector is skipping on a video when working in real-time.\n    coord_transformations: Optional[CoordinatesTransformation]\n        The coordinate transformation calculated by the [MotionEstimator][norfair.camera_motion.MotionEstimator].\n\n    Returns\n    -------\n    List[TrackedObject]\n        The list of active tracked objects.\n    \"\"\"\n    if coord_transformations is not None:\n        for det in detections:\n            det.update_coordinate_transformation(coord_transformations)\n\n    # Remove stale trackers and make candidate object real if the hit counter is positive\n    alive_objects = []\n    dead_objects = []\n    if self.reid_hit_counter_max is None:\n        self.tracked_objects = [\n            o for o in self.tracked_objects if o.hit_counter_is_positive\n        ]\n        alive_objects = self.tracked_objects\n    else:\n        tracked_objects = []\n        for o in self.tracked_objects:\n            if o.reid_hit_counter_is_positive:\n                tracked_objects.append(o)\n                if o.hit_counter_is_positive:\n                    alive_objects.append(o)\n                else:\n                    dead_objects.append(o)\n        self.tracked_objects = tracked_objects\n\n    # Update tracker\n    for obj in self.tracked_objects:\n        obj.tracker_step()\n        obj.update_coordinate_transformation(coord_transformations)\n\n    # Update initialized tracked objects with detections\n    (\n        unmatched_detections,\n        _,\n        unmatched_init_trackers,\n    ) = self._update_objects_in_place(\n        self.distance_function,\n        self.distance_threshold,\n        [o for o in alive_objects if not o.is_initializing],\n        detections,\n        period,\n    )\n\n    # Update not yet initialized tracked objects with yet unmatched detections\n    (\n        unmatched_detections,\n        matched_not_init_trackers,\n        _,\n    ) = self._update_objects_in_place(\n        self.distance_function,\n        self.distance_threshold,\n        [o for o in alive_objects if o.is_initializing],\n        unmatched_detections,\n        period,\n 
   )\n\n    if self.reid_distance_function is not None:\n        # Match unmatched initialized tracked objects with not yet initialized tracked objects\n        _, _, _ = self._update_objects_in_place(\n            self.reid_distance_function,\n            self.reid_distance_threshold,\n            unmatched_init_trackers + dead_objects,\n            matched_not_init_trackers,\n            period,\n        )\n\n    # Create new tracked objects from remaining unmatched detections\n    for detection in unmatched_detections:\n        self.tracked_objects.append(\n            self._obj_factory.create(\n                initial_detection=detection,\n                hit_counter_max=self.hit_counter_max,\n                initialization_delay=self.initialization_delay,\n                pointwise_hit_counter_max=self.pointwise_hit_counter_max,\n                detection_threshold=self.detection_threshold,\n                period=period,\n                filter_factory=self.filter_factory,\n                past_detections_length=self.past_detections_length,\n                reid_hit_counter_max=self.reid_hit_counter_max,\n                coord_transformations=coord_transformations,\n            )\n        )\n\n    return self.get_active_objects()\n
    "},{"location":"reference/tracker/#norfair.tracker.Tracker.get_active_objects","title":"get_active_objects()","text":"

    Get the list of active objects

    Returns:

    Type Description List[TrackedObject]

    The list of active objects

    Source code in norfair/tracker.py
    def get_active_objects(self) -> List[\"TrackedObject\"]:\n    \"\"\"Get the list of active objects\n\n    Returns\n    -------\n    List[\"TrackedObject\"]\n        The list of active objects\n    \"\"\"\n    return [\n        o\n        for o in self.tracked_objects\n        if not o.is_initializing and o.hit_counter_is_positive\n    ]\n
    "},{"location":"reference/tracker/#norfair.tracker.Tracker.match_dets_and_objs","title":"match_dets_and_objs(distance_matrix, distance_threshold)","text":"

    Matches detections with tracked_objects from a distance matrix

    I used to match by minimizing the global distances, but found several cases in which this was not optimal. So now I just match by starting with the global minimum distance and matching the det-obj corresponding to that distance, then taking the second minimum, and so on until we reach the distance_threshold.

    This avoids the algorithm getting cute with us and matching things that shouldn't be matched just for the sake of minimizing the global distance, which is what used to happen.

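    For intuition, a small worked example of the greedy matching described above; the values are illustrative, with rows as detections and columns as tracked objects.

    >>> d = np.array([[1.0, 5.0], [2.0, 0.5]])\n>>> tracker.match_dets_and_objs(d, distance_threshold=3.0)\n([1, 0], [1, 0])\n
    Detection 1 is paired with object 1 first (distance 0.5), then detection 0 with object 0 (distance 1.0); once a row or column is used it is excluded from further matching.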
    Source code in norfair/tracker.py
    def match_dets_and_objs(self, distance_matrix: np.ndarray, distance_threshold):\n    \"\"\"Matches detections with tracked_objects from a distance matrix\n\n    I used to match by minimizing the global distances, but found several\n    cases in which this was not optimal. So now I just match by starting\n    with the global minimum distance and matching the det-obj corresponding\n    to that distance, then taking the second minimum, and so on until we\n    reach the distance_threshold.\n\n    This avoids the the algorithm getting cute with us and matching things\n    that shouldn't be matching just for the sake of minimizing the global\n    distance, which is what used to happen\n    \"\"\"\n    # NOTE: This implementation is terribly inefficient, but it doesn't\n    #       seem to affect the fps at all.\n    distance_matrix = distance_matrix.copy()\n    if distance_matrix.size > 0:\n        det_idxs = []\n        obj_idxs = []\n        current_min = distance_matrix.min()\n\n        while current_min < distance_threshold:\n            flattened_arg_min = distance_matrix.argmin()\n            det_idx = flattened_arg_min // distance_matrix.shape[1]\n            obj_idx = flattened_arg_min % distance_matrix.shape[1]\n            det_idxs.append(det_idx)\n            obj_idxs.append(obj_idx)\n            distance_matrix[det_idx, :] = distance_threshold + 1\n            distance_matrix[:, obj_idx] = distance_threshold + 1\n            current_min = distance_matrix.min()\n\n        return det_idxs, obj_idxs\n    else:\n        return [], []\n
    "},{"location":"reference/tracker/#norfair.tracker.TrackedObject","title":"TrackedObject","text":"

    The objects returned by the tracker's update function on each iteration.

    They represent the objects currently being tracked by the tracker.

    Users should not instantiate TrackedObjects manually; the Tracker will be in charge of creating them.

    Attributes:

    Name Type Description estimate ndarray

    Where the tracker predicts the point will be in the current frame based on past detections. A numpy array with the same shape as the detections being fed to the tracker that produced it.

    id Optional[int]

    The unique identifier assigned to this object by the tracker. Set to None if the object is initializing.

    global_id Optional[int]

    The globally unique identifier assigned to this object. Set to None if the object is initializing.

    last_detection Detection

    The last detection that matched with this tracked object. Useful if you are storing embeddings in your detections and want to do metric learning, or for debugging.

    last_distance Optional[float]

    The distance this object had to the last detection it matched with.

    age int

    The age of this object measured in number of frames.

    live_points

    A boolean mask with shape (n_points,). Points marked as True have recently been matched with detections. Points marked as False haven't been matched recently; they should be considered stale and ignored.

    Functions like draw_tracked_objects use this property to determine which points not to draw.

    initializing_id int

    On top of id, objects also have an initializing_id which is the id they are given internally by the Tracker; this id is used solely for debugging.

    Each new object created by the Tracker starts as an uninitialized TrackedObject, which needs to reach a certain match rate to be converted into a full-blown TrackedObject. initializing_id is the id temporarily assigned to TrackedObjects while they are getting initialized.

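    A rough sketch of how these attributes are typically consumed after each tracker update:

    >>> for obj in tracker.update(detections):\n>>>     print(obj.id, obj.label, obj.age, obj.last_distance)\n>>>     fresh_points = obj.estimate[obj.live_points]  # keep only recently matched points\n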
    Source code in norfair/tracker.py
    class TrackedObject:\n    \"\"\"\n    The objects returned by the tracker's `update` function on each iteration.\n\n    They represent the objects currently being tracked by the tracker.\n\n    Users should not instantiate TrackedObjects manually;\n    the Tracker will be in charge of creating them.\n\n    Attributes\n    ----------\n    estimate : np.ndarray\n        Where the tracker predicts the point will be in the current frame based on past detections.\n        A numpy array with the same shape as the detections being fed to the tracker that produced it.\n    id : Optional[int]\n        The unique identifier assigned to this object by the tracker. Set to `None` if the object is initializing.\n    global_id : Optional[int]\n        The globally unique identifier assigned to this object. Set to `None` if the object is initializing\n    last_detection : Detection\n        The last detection that matched with this tracked object.\n        Useful if you are storing embeddings in your detections and want to do metric learning, or for debugging.\n    last_distance : Optional[float]\n        The distance the tracker had with the last object it matched with.\n    age : int\n        The age of this object measured in number of frames.\n    live_points :\n        A boolean mask with shape `(n_points,)`. Points marked as `True` have recently been matched with detections.\n        Points marked as `False` haven't and are to be considered stale, and should be ignored.\n\n        Functions like [`draw_tracked_objects`][norfair.drawing.draw_tracked_objects] use this property to determine which points not to draw.\n    initializing_id : int\n        On top of `id`, objects also have an `initializing_id` which is the id they are given internally by the `Tracker`;\n        this id is used solely for debugging.\n\n        Each new object created by the `Tracker` starts as an uninitialized `TrackedObject`,\n        which needs to reach a certain match rate to be converted into a full blown `TrackedObject`.\n        `initializing_id` is the id temporarily assigned to `TrackedObject` while they are getting initialized.\n    \"\"\"\n\n    def __init__(\n        self,\n        obj_factory: _TrackedObjectFactory,\n        initial_detection: \"Detection\",\n        hit_counter_max: int,\n        initialization_delay: int,\n        pointwise_hit_counter_max: int,\n        detection_threshold: float,\n        period: int,\n        filter_factory: \"FilterFactory\",\n        past_detections_length: int,\n        reid_hit_counter_max: Optional[int],\n        coord_transformations: Optional[CoordinatesTransformation] = None,\n    ):\n        if not isinstance(initial_detection, Detection):\n            raise ValueError(\n                f\"\\n[red]ERROR[/red]: The detection list fed into `tracker.update()` should be composed of {Detection} objects not {type(initial_detection)}.\\n\"\n            )\n        self._obj_factory = obj_factory\n        self.dim_points = initial_detection.absolute_points.shape[1]\n        self.num_points = initial_detection.absolute_points.shape[0]\n        self.hit_counter_max: int = hit_counter_max\n        self.pointwise_hit_counter_max: int = max(pointwise_hit_counter_max, period)\n        self.initialization_delay = initialization_delay\n        self.detection_threshold: float = detection_threshold\n        self.initial_period: int = period\n        self.hit_counter: int = period\n        self.reid_hit_counter_max = reid_hit_counter_max\n        self.reid_hit_counter: 
Optional[int] = None\n        self.last_distance: Optional[float] = None\n        self.current_min_distance: Optional[float] = None\n        self.last_detection: \"Detection\" = initial_detection\n        self.age: int = 0\n        self.is_initializing: bool = self.hit_counter <= self.initialization_delay\n\n        self.initializing_id: Optional[int] = self._obj_factory.get_initializing_id()\n        self.id: Optional[int] = None\n        self.global_id: Optional[int] = None\n        if not self.is_initializing:\n            self._acquire_ids()\n\n        if initial_detection.scores is None:\n            self.detected_at_least_once_points = np.array([True] * self.num_points)\n        else:\n            self.detected_at_least_once_points = (\n                initial_detection.scores > self.detection_threshold\n            )\n        self.point_hit_counter: np.ndarray = self.detected_at_least_once_points.astype(\n            int\n        )\n        initial_detection.age = self.age\n        self.past_detections_length = past_detections_length\n        if past_detections_length > 0:\n            self.past_detections: Sequence[\"Detection\"] = [initial_detection]\n        else:\n            self.past_detections: Sequence[\"Detection\"] = []\n\n        # Create Kalman Filter\n        self.filter = filter_factory.create_filter(initial_detection.absolute_points)\n        self.dim_z = self.dim_points * self.num_points\n        self.label = initial_detection.label\n        self.abs_to_rel = None\n        if coord_transformations is not None:\n            self.update_coordinate_transformation(coord_transformations)\n\n    def tracker_step(self):\n        if self.reid_hit_counter is None:\n            if self.hit_counter <= 0:\n                self.reid_hit_counter = self.reid_hit_counter_max\n        else:\n            self.reid_hit_counter -= 1\n        self.hit_counter -= 1\n        self.point_hit_counter -= 1\n        self.age += 1\n        # Advances the tracker's state\n        self.filter.predict()\n\n    @property\n    def hit_counter_is_positive(self):\n        return self.hit_counter >= 0\n\n    @property\n    def reid_hit_counter_is_positive(self):\n        return self.reid_hit_counter is None or self.reid_hit_counter >= 0\n\n    @property\n    def estimate_velocity(self) -> np.ndarray:\n        \"\"\"Get the velocity estimate of the object from the Kalman filter. 
This velocity is in the absolute coordinate system.\n\n        Returns\n        -------\n        np.ndarray\n            An array of shape (self.num_points, self.dim_points) containing the velocity estimate of the object on each axis.\n        \"\"\"\n        return self.filter.x.T.flatten()[self.dim_z :].reshape(-1, self.dim_points)\n\n    @property\n    def estimate(self) -> np.ndarray:\n        \"\"\"Get the position estimate of the object from the Kalman filter.\n\n        Returns\n        -------\n        np.ndarray\n            An array of shape (self.num_points, self.dim_points) containing the position estimate of the object on each axis.\n        \"\"\"\n        return self.get_estimate()\n\n    def get_estimate(self, absolute=False) -> np.ndarray:\n        \"\"\"Get the position estimate of the object from the Kalman filter in an absolute or relative format.\n\n        Parameters\n        ----------\n        absolute : bool, optional\n            If true the coordinates are returned in absolute format, by default False, by default False.\n\n        Returns\n        -------\n        np.ndarray\n            An array of shape (self.num_points, self.dim_points) containing the position estimate of the object on each axis.\n\n        Raises\n        ------\n        ValueError\n            Alert if the coordinates are requested in absolute format but the tracker has no coordinate transformation.\n        \"\"\"\n        positions = self.filter.x.T.flatten()[: self.dim_z].reshape(-1, self.dim_points)\n        if self.abs_to_rel is None:\n            if not absolute:\n                return positions\n            else:\n                raise ValueError(\n                    \"You must provide 'coord_transformations' to the tracker to get absolute coordinates\"\n                )\n        else:\n            if absolute:\n                return positions\n            else:\n                return self.abs_to_rel(positions)\n\n    @property\n    def live_points(self):\n        return self.point_hit_counter > 0\n\n    def hit(self, detection: \"Detection\", period: int = 1):\n        \"\"\"Update tracked object with a new detection\n\n        Parameters\n        ----------\n        detection : Detection\n            the new detection matched to this tracked object\n        period : int, optional\n            frames corresponding to the period of time since last update.\n        \"\"\"\n        self._conditionally_add_to_past_detections(detection)\n\n        self.last_detection = detection\n        self.hit_counter = min(self.hit_counter + 2 * period, self.hit_counter_max)\n\n        if self.is_initializing and self.hit_counter > self.initialization_delay:\n            self.is_initializing = False\n            self._acquire_ids()\n\n        # We use a kalman filter in which we consider each coordinate on each point as a sensor.\n        # This is a hacky way to update only certain sensors (only x, y coordinates for\n        # points which were detected).\n        # TODO: Use keypoint confidence information to change R on each sensor instead?\n        if detection.scores is not None:\n            assert len(detection.scores.shape) == 1\n            points_over_threshold_mask = detection.scores > self.detection_threshold\n            matched_sensors_mask = np.array(\n                [(m,) * self.dim_points for m in points_over_threshold_mask]\n            ).flatten()\n            H_pos = np.diag(matched_sensors_mask).astype(\n                float\n            )  # We measure x, y positions\n     
       self.point_hit_counter[points_over_threshold_mask] += 2 * period\n        else:\n            points_over_threshold_mask = np.array([True] * self.num_points)\n            H_pos = np.identity(self.num_points * self.dim_points)\n            self.point_hit_counter += 2 * period\n        self.point_hit_counter[\n            self.point_hit_counter >= self.pointwise_hit_counter_max\n        ] = self.pointwise_hit_counter_max\n        self.point_hit_counter[self.point_hit_counter < 0] = 0\n        H_vel = np.zeros(H_pos.shape)  # But we don't directly measure velocity\n        H = np.hstack([H_pos, H_vel])\n        self.filter.update(\n            np.expand_dims(detection.absolute_points.flatten(), 0).T, None, H\n        )\n\n        detected_at_least_once_mask = np.array(\n            [(m,) * self.dim_points for m in self.detected_at_least_once_points]\n        ).flatten()\n        now_detected_mask = np.hstack(\n            (points_over_threshold_mask,) * self.dim_points\n        ).flatten()\n        first_detection_mask = np.logical_and(\n            now_detected_mask, np.logical_not(detected_at_least_once_mask)\n        )\n\n        self.filter.x[: self.dim_z][first_detection_mask] = np.expand_dims(\n            detection.absolute_points.flatten(), 0\n        ).T[first_detection_mask]\n\n        # Force points being detected for the first time to have velocity = 0\n        # This is needed because some detectors (like OpenPose) set points with\n        # low confidence to coordinates (0, 0). And when they then get their first\n        # real detection this creates a huge velocity vector in our KalmanFilter\n        # and causes the tracker to start with wildly inaccurate estimations which\n        # eventually coverge to the real detections.\n        self.filter.x[self.dim_z :][np.logical_not(detected_at_least_once_mask)] = 0\n        self.detected_at_least_once_points = np.logical_or(\n            self.detected_at_least_once_points, points_over_threshold_mask\n        )\n\n    def __repr__(self):\n        if self.last_distance is None:\n            placeholder_text = \"\\033[1mObject_{}\\033[0m(age: {}, hit_counter: {}, last_distance: {}, init_id: {})\"\n        else:\n            placeholder_text = \"\\033[1mObject_{}\\033[0m(age: {}, hit_counter: {}, last_distance: {:.2f}, init_id: {})\"\n        return placeholder_text.format(\n            self.id,\n            self.age,\n            self.hit_counter,\n            self.last_distance,\n            self.initializing_id,\n        )\n\n    def _conditionally_add_to_past_detections(self, detection):\n        \"\"\"Adds detections into (and pops detections away) from `past_detections`\n\n        It does so by keeping a fixed amount of past detections saved into each\n        TrackedObject, while maintaining them distributed uniformly through the object's\n        lifetime.\n        \"\"\"\n        if self.past_detections_length == 0:\n            return\n        if len(self.past_detections) < self.past_detections_length:\n            detection.age = self.age\n            self.past_detections.append(detection)\n        elif self.age >= self.past_detections[0].age * self.past_detections_length:\n            self.past_detections.pop(0)\n            detection.age = self.age\n            self.past_detections.append(detection)\n\n    def merge(self, tracked_object):\n        \"\"\"Merge with a not yet initialized TrackedObject instance\"\"\"\n        self.reid_hit_counter = None\n        self.hit_counter = self.initial_period * 2\n        
self.point_hit_counter = tracked_object.point_hit_counter\n        self.last_distance = tracked_object.last_distance\n        self.current_min_distance = tracked_object.current_min_distance\n        self.last_detection = tracked_object.last_detection\n        self.detected_at_least_once_points = (\n            tracked_object.detected_at_least_once_points\n        )\n        self.filter = tracked_object.filter\n\n        for past_detection in tracked_object.past_detections:\n            self._conditionally_add_to_past_detections(past_detection)\n\n    def update_coordinate_transformation(\n        self, coordinate_transformation: CoordinatesTransformation\n    ):\n        if coordinate_transformation is not None:\n            self.abs_to_rel = coordinate_transformation.abs_to_rel\n\n    def _acquire_ids(self):\n        self.id, self.global_id = self._obj_factory.get_ids()\n
    "},{"location":"reference/tracker/#norfair.tracker.TrackedObject.estimate_velocity","title":"estimate_velocity: np.ndarray property","text":"

    Get the velocity estimate of the object from the Kalman filter. This velocity is in the absolute coordinate system.

    Returns:

    Type Description ndarray

    An array of shape (self.num_points, self.dim_points) containing the velocity estimate of the object on each axis.

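    For instance, a per-point speed (in the absolute coordinate system) can be derived from this property:

    >>> speeds = np.linalg.norm(obj.estimate_velocity, axis=1)  # one speed value per tracked point\n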
    "},{"location":"reference/tracker/#norfair.tracker.TrackedObject.estimate","title":"estimate: np.ndarray property","text":"

    Get the position estimate of the object from the Kalman filter.

    Returns:

    Type Description ndarray

    An array of shape (self.num_points, self.dim_points) containing the position estimate of the object on each axis.

    "},{"location":"reference/tracker/#norfair.tracker.TrackedObject.get_estimate","title":"get_estimate(absolute=False)","text":"

    Get the position estimate of the object from the Kalman filter in an absolute or relative format.

    Parameters:

    Name Type Description Default absolute bool

    If True, the coordinates are returned in absolute format; by default False.

    False

    Returns:

    Type Description ndarray

    An array of shape (self.num_points, self.dim_points) containing the position estimate of the object on each axis.

    Raises:

    Type Description ValueError

    Raised if the coordinates are requested in absolute format but the tracker has no coordinate transformation.

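    A brief sketch, assuming the tracker has been receiving coord_transformations from a MotionEstimator:

    >>> rel_points = obj.get_estimate()               # frame (relative) coordinates, same as obj.estimate\n>>> abs_points = obj.get_estimate(absolute=True)   # fixed-camera (absolute) coordinates\n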
    Source code in norfair/tracker.py
    def get_estimate(self, absolute=False) -> np.ndarray:\n    \"\"\"Get the position estimate of the object from the Kalman filter in an absolute or relative format.\n\n    Parameters\n    ----------\n    absolute : bool, optional\n        If true the coordinates are returned in absolute format, by default False, by default False.\n\n    Returns\n    -------\n    np.ndarray\n        An array of shape (self.num_points, self.dim_points) containing the position estimate of the object on each axis.\n\n    Raises\n    ------\n    ValueError\n        Alert if the coordinates are requested in absolute format but the tracker has no coordinate transformation.\n    \"\"\"\n    positions = self.filter.x.T.flatten()[: self.dim_z].reshape(-1, self.dim_points)\n    if self.abs_to_rel is None:\n        if not absolute:\n            return positions\n        else:\n            raise ValueError(\n                \"You must provide 'coord_transformations' to the tracker to get absolute coordinates\"\n            )\n    else:\n        if absolute:\n            return positions\n        else:\n            return self.abs_to_rel(positions)\n
    "},{"location":"reference/tracker/#norfair.tracker.TrackedObject.hit","title":"hit(detection, period=1)","text":"

    Update tracked object with a new detection

    Parameters:

    Name Type Description Default detection Detection

    The new detection matched to this tracked object.

    required period int

    Frames corresponding to the period of time since the last update.

    1 Source code in norfair/tracker.py
    def hit(self, detection: \"Detection\", period: int = 1):\n    \"\"\"Update tracked object with a new detection\n\n    Parameters\n    ----------\n    detection : Detection\n        the new detection matched to this tracked object\n    period : int, optional\n        frames corresponding to the period of time since last update.\n    \"\"\"\n    self._conditionally_add_to_past_detections(detection)\n\n    self.last_detection = detection\n    self.hit_counter = min(self.hit_counter + 2 * period, self.hit_counter_max)\n\n    if self.is_initializing and self.hit_counter > self.initialization_delay:\n        self.is_initializing = False\n        self._acquire_ids()\n\n    # We use a kalman filter in which we consider each coordinate on each point as a sensor.\n    # This is a hacky way to update only certain sensors (only x, y coordinates for\n    # points which were detected).\n    # TODO: Use keypoint confidence information to change R on each sensor instead?\n    if detection.scores is not None:\n        assert len(detection.scores.shape) == 1\n        points_over_threshold_mask = detection.scores > self.detection_threshold\n        matched_sensors_mask = np.array(\n            [(m,) * self.dim_points for m in points_over_threshold_mask]\n        ).flatten()\n        H_pos = np.diag(matched_sensors_mask).astype(\n            float\n        )  # We measure x, y positions\n        self.point_hit_counter[points_over_threshold_mask] += 2 * period\n    else:\n        points_over_threshold_mask = np.array([True] * self.num_points)\n        H_pos = np.identity(self.num_points * self.dim_points)\n        self.point_hit_counter += 2 * period\n    self.point_hit_counter[\n        self.point_hit_counter >= self.pointwise_hit_counter_max\n    ] = self.pointwise_hit_counter_max\n    self.point_hit_counter[self.point_hit_counter < 0] = 0\n    H_vel = np.zeros(H_pos.shape)  # But we don't directly measure velocity\n    H = np.hstack([H_pos, H_vel])\n    self.filter.update(\n        np.expand_dims(detection.absolute_points.flatten(), 0).T, None, H\n    )\n\n    detected_at_least_once_mask = np.array(\n        [(m,) * self.dim_points for m in self.detected_at_least_once_points]\n    ).flatten()\n    now_detected_mask = np.hstack(\n        (points_over_threshold_mask,) * self.dim_points\n    ).flatten()\n    first_detection_mask = np.logical_and(\n        now_detected_mask, np.logical_not(detected_at_least_once_mask)\n    )\n\n    self.filter.x[: self.dim_z][first_detection_mask] = np.expand_dims(\n        detection.absolute_points.flatten(), 0\n    ).T[first_detection_mask]\n\n    # Force points being detected for the first time to have velocity = 0\n    # This is needed because some detectors (like OpenPose) set points with\n    # low confidence to coordinates (0, 0). And when they then get their first\n    # real detection this creates a huge velocity vector in our KalmanFilter\n    # and causes the tracker to start with wildly inaccurate estimations which\n    # eventually coverge to the real detections.\n    self.filter.x[self.dim_z :][np.logical_not(detected_at_least_once_mask)] = 0\n    self.detected_at_least_once_points = np.logical_or(\n        self.detected_at_least_once_points, points_over_threshold_mask\n    )\n
    "},{"location":"reference/tracker/#norfair.tracker.TrackedObject.merge","title":"merge(tracked_object)","text":"

    Merge with a not yet initialized TrackedObject instance

    Source code in norfair/tracker.py
    def merge(self, tracked_object):\n    \"\"\"Merge with a not yet initialized TrackedObject instance\"\"\"\n    self.reid_hit_counter = None\n    self.hit_counter = self.initial_period * 2\n    self.point_hit_counter = tracked_object.point_hit_counter\n    self.last_distance = tracked_object.last_distance\n    self.current_min_distance = tracked_object.current_min_distance\n    self.last_detection = tracked_object.last_detection\n    self.detected_at_least_once_points = (\n        tracked_object.detected_at_least_once_points\n    )\n    self.filter = tracked_object.filter\n\n    for past_detection in tracked_object.past_detections:\n        self._conditionally_add_to_past_detections(past_detection)\n
    "},{"location":"reference/tracker/#norfair.tracker.Detection","title":"Detection","text":"

    Detections returned by the detector must be converted to a Detection object before being used by Norfair.

    Parameters:

    Name Type Description Default points ndarray

    Points detected. Must be a rank 2 array with shape (n_points, n_dimensions) where n_dimensions is 2 or 3.

    required scores ndarray

    An array of length n_points which assigns a score to each of the points defined in points.

    This is used to inform the tracker of which points to ignore; any point with a score below detection_threshold will be ignored.

    This is useful for cases in which detections don't always have every point present, as is often the case in pose estimators.

    None data Any

    The place to store any extra data which may be useful when calculating the distance function. Anything stored here will be available to use inside the distance function.

    This enables the development of more interesting trackers which can do things like assign an appearance embedding to each detection to aid in its tracking.

    None label Hashable

    When working with multiple classes the detection's label can be stored to be used as a matching condition when associating tracked objects with new detections. Label's type must be hashable for drawing purposes.

    None embedding Any

    The embedding for the reid_distance.

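    A sketch of converting a hypothetical detector output into Detection objects; the (x1, y1, x2, y2, conf, class_id) tuple format of model_output is an assumption, not part of Norfair.

    >>> detections = [\n>>>     Detection(\n>>>         points=np.array([[x1, y1], [x2, y2]]),  # the two corners of a bounding box\n>>>         scores=np.array([conf, conf]),\n>>>         label=class_id,\n>>>     )\n>>>     for (x1, y1, x2, y2, conf, class_id) in model_output\n>>> ]\n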
    None Source code in norfair/tracker.py
    class Detection:\n    \"\"\"Detections returned by the detector must be converted to a `Detection` object before being used by Norfair.\n\n    Parameters\n    ----------\n    points : np.ndarray\n        Points detected. Must be a rank 2 array with shape `(n_points, n_dimensions)` where n_dimensions is 2 or 3.\n    scores : np.ndarray, optional\n        An array of length `n_points` which assigns a score to each of the points defined in `points`.\n\n        This is used to inform the tracker of which points to ignore;\n        any point with a score below `detection_threshold` will be ignored.\n\n        This useful for cases in which detections don't always have every point present, as is often the case in pose estimators.\n    data : Any, optional\n        The place to store any extra data which may be useful when calculating the distance function.\n        Anything stored here will be available to use inside the distance function.\n\n        This enables the development of more interesting trackers which can do things like assign an appearance embedding to each\n        detection to aid in its tracking.\n    label : Hashable, optional\n        When working with multiple classes the detection's label can be stored to be used as a matching condition when associating\n        tracked objects with new detections. Label's type must be hashable for drawing purposes.\n    embedding : Any, optional\n        The embedding for the reid_distance.\n    \"\"\"\n\n    def __init__(\n        self,\n        points: np.ndarray,\n        scores: np.ndarray = None,\n        data: Any = None,\n        label: Hashable = None,\n        embedding=None,\n    ):\n        self.points = validate_points(points)\n        self.scores = scores\n        self.data = data\n        self.label = label\n        self.absolute_points = self.points.copy()\n        self.embedding = embedding\n        self.age = None\n\n    def update_coordinate_transformation(\n        self, coordinate_transformation: CoordinatesTransformation\n    ):\n        if coordinate_transformation is not None:\n            self.absolute_points = coordinate_transformation.rel_to_abs(\n                self.absolute_points\n            )\n
    "},{"location":"reference/utils/","title":"Utils","text":""},{"location":"reference/utils/#norfair.utils.print_objects_as_table","title":"print_objects_as_table(tracked_objects)","text":"

    Helper used for debugging; prints the given tracked objects as a table.

    Source code in norfair/utils.py
    def print_objects_as_table(tracked_objects: Sequence):\n    \"\"\"Used for helping in debugging\"\"\"\n    print()\n    console = Console()\n    table = Table(show_header=True, header_style=\"bold magenta\")\n    table.add_column(\"Id\", style=\"yellow\", justify=\"center\")\n    table.add_column(\"Age\", justify=\"right\")\n    table.add_column(\"Hit Counter\", justify=\"right\")\n    table.add_column(\"Last distance\", justify=\"right\")\n    table.add_column(\"Init Id\", justify=\"center\")\n    for obj in tracked_objects:\n        table.add_row(\n            str(obj.id),\n            str(obj.age),\n            str(obj.hit_counter),\n            f\"{obj.last_distance:.4f}\",\n            str(obj.initializing_id),\n        )\n    console.print(table)\n
    "},{"location":"reference/utils/#norfair.utils.get_cutout","title":"get_cutout(points, image)","text":"

    Returns a rectangular cut-out from a set of points on an image

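    For example, cropping the image region spanned by a tracked object's estimated points:

    >>> crop = get_cutout(obj.estimate, frame)  # rectangular crop bounded by the points\n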
    Source code in norfair/utils.py
    def get_cutout(points, image):\n    \"\"\"Returns a rectangular cut-out from a set of points on an image\"\"\"\n    max_x = int(max(points[:, 0]))\n    min_x = int(min(points[:, 0]))\n    max_y = int(max(points[:, 1]))\n    min_y = int(min(points[:, 1]))\n    return image[min_y:max_y, min_x:max_x]\n
    "},{"location":"reference/utils/#norfair.utils.warn_once","title":"warn_once(message) cached","text":"

    Write a warning message only once.

    Source code in norfair/utils.py
    @lru_cache(maxsize=None)\ndef warn_once(message):\n    \"\"\"\n    Write a warning message only once.\n    \"\"\"\n    warn(message)\n
    "},{"location":"reference/video/","title":"Video","text":""},{"location":"reference/video/#norfair.video.Video","title":"Video","text":"

    Class that provides a simple and pythonic way to interact with video.

    It returns regular OpenCV frames, which enables the use of the huge number of tools OpenCV provides to modify images.

    Parameters:

    Name Type Description Default camera Optional[int]

    An integer representing the device id of the camera to be used as the video source.

    Webcams tend to have an id of 0. Arguments camera and input_path can't be used at the same time; exactly one must be chosen.

    None input_path Optional[str]

    A string consisting of the path to the video file to be used as the video source.

    Arguments camera and input_path can't be used at the same time; exactly one must be chosen.

    None output_path str

    The path to the output video to be generated. Can be a folder where the file will be created or a full path with a file name.

    '.' output_fps Optional[float]

    The frames per second at which to encode the output video file.

    If not provided it is set to be equal to the input video source's fps. This argument is useful when using live video cameras as a video source, where the user may know the input fps, but where the frames are being fed to the output video at a rate that is lower than the video source's fps, due to the latency added by the detector.

    None label str

    Label to add to the progress bar that appears when processing the current video.

    '' output_fourcc Optional[str]

    OpenCV encoding for the output video file. By default we use mp4v for .mp4 and XVID for .avi. This is a combination that works on most systems, but it results in larger files. To get smaller files use avc1 or H264 if available. Notice that some fourcc codes are not compatible with some extensions.

    None output_extension str

    File extension used for the output video. Ignored if output_path is not a folder.

    'mp4'

    Examples:

    >>> video = Video(input_path=\"video.mp4\")\n>>> for frame in video:\n>>>     # << Your modifications to the frame would go here >>\n>>>     video.write(frame)\n
    Source code in norfair/video.py
    class Video:\n    \"\"\"\n    Class that provides a simple and pythonic way to interact with video.\n\n    It returns regular OpenCV frames which enables the usage of the huge number of tools OpenCV provides to modify images.\n\n    Parameters\n    ----------\n    camera : Optional[int], optional\n        An integer representing the device id of the camera to be used as the video source.\n\n        Webcams tend to have an id of `0`. Arguments `camera` and `input_path` can't be used at the same time, one must be chosen.\n    input_path : Optional[str], optional\n        A string consisting of the path to the video file to be used as the video source.\n\n        Arguments `camera` and `input_path` can't be used at the same time, one must be chosen.\n    output_path : str, optional\n        The path to the output video to be generated.\n        Can be a folder were the file will be created or a full path with a file name.\n    output_fps : Optional[float], optional\n        The frames per second at which to encode the output video file.\n\n        If not provided it is set to be equal to the input video source's fps.\n        This argument is useful when using live video cameras as a video source,\n        where the user may know the input fps,\n        but where the frames are being fed to the output video at a rate that is lower than the video source's fps,\n        due to the latency added by the detector.\n    label : str, optional\n        Label to add to the progress bar that appears when processing the current video.\n    output_fourcc : Optional[str], optional\n        OpenCV encoding for output video file.\n        By default we use `mp4v` for `.mp4` and `XVID` for `.avi`. This is a combination that works on most systems but\n        it results in larger files. To get smaller files use `avc1` or `H264` if available.\n        Notice that some fourcc are not compatible with some extensions.\n    output_extension : str, optional\n        File extension used for the output video. Ignored if `output_path` is not a folder.\n\n    Examples\n    --------\n    >>> video = Video(input_path=\"video.mp4\")\n    >>> for frame in video:\n    >>>     # << Your modifications to the frame would go here >>\n    >>>     video.write(frame)\n    \"\"\"\n\n    def __init__(\n        self,\n        camera: Optional[int] = None,\n        input_path: Optional[str] = None,\n        output_path: str = \".\",\n        output_fps: Optional[float] = None,\n        label: str = \"\",\n        output_fourcc: Optional[str] = None,\n        output_extension: str = \"mp4\",\n    ):\n        self.camera = camera\n        self.input_path = input_path\n        self.output_path = output_path\n        self.label = label\n        self.output_fourcc = output_fourcc\n        self.output_extension = output_extension\n        self.output_video: Optional[cv2.VideoWriter] = None\n\n        # Input validation\n        if (input_path is None and camera is None) or (\n            input_path is not None and camera is not None\n        ):\n            raise ValueError(\n                \"You must set either 'camera' or 'input_path' arguments when setting 'Video' class\"\n            )\n        if camera is not None and type(camera) is not int:\n            raise ValueError(\n                \"Argument 'camera' refers to the device-id of your camera, and must be an int. 
Setting it to 0 usually works if you don't know the id.\"\n            )\n\n        # Read Input Video\n        if self.input_path is not None:\n            if \"~\" in self.input_path:\n                self.input_path = os.path.expanduser(self.input_path)\n            if not os.path.isfile(self.input_path):\n                self._fail(\n                    f\"[bold red]Error:[/bold red] File '{self.input_path}' does not exist.\"\n                )\n            self.video_capture = cv2.VideoCapture(self.input_path)\n            total_frames = int(self.video_capture.get(cv2.CAP_PROP_FRAME_COUNT))\n            if total_frames == 0:\n                self._fail(\n                    f\"[bold red]Error:[/bold red] '{self.input_path}' does not seem to be a video file supported by OpenCV. If the video file is not the problem, please check that your OpenCV installation is working correctly.\"\n                )\n            description = os.path.basename(self.input_path)\n        else:\n            self.video_capture = cv2.VideoCapture(self.camera)\n            total_frames = 0\n            description = f\"Camera({self.camera})\"\n        self.output_fps = (\n            output_fps\n            if output_fps is not None\n            else self.video_capture.get(cv2.CAP_PROP_FPS)\n        )\n        self.input_height = self.video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)\n        self.input_width = self.video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)\n        self.frame_counter = 0\n\n        # Setup progressbar\n        if self.label:\n            description += f\" | {self.label}\"\n        progress_bar_fields: List[Union[str, ProgressColumn]] = [\n            \"[progress.description]{task.description}\",\n            BarColumn(),\n            \"[yellow]{task.fields[process_fps]:.2f}fps[/yellow]\",\n        ]\n        if self.input_path is not None:\n            progress_bar_fields.insert(\n                2, \"[progress.percentage]{task.percentage:>3.0f}%\"\n            )\n            progress_bar_fields.insert(\n                3,\n                TimeRemainingColumn(),\n            )\n        self.progress_bar = Progress(\n            *progress_bar_fields,\n            auto_refresh=False,\n            redirect_stdout=False,\n            redirect_stderr=False,\n        )\n        self.task = self.progress_bar.add_task(\n            self.abbreviate_description(description),\n            total=total_frames,\n            start=self.input_path is not None,\n            process_fps=0,\n        )\n\n    # This is a generator, note the yield keyword below.\n    def __iter__(self):\n        with self.progress_bar as progress_bar:\n            start = time.time()\n\n            # Iterate over video\n            while True:\n                self.frame_counter += 1\n                ret, frame = self.video_capture.read()\n                if ret is False or frame is None:\n                    break\n                process_fps = self.frame_counter / (time.time() - start)\n                progress_bar.update(\n                    self.task, advance=1, refresh=True, process_fps=process_fps\n                )\n                yield frame\n\n        # Cleanup\n        if self.output_video is not None:\n            self.output_video.release()\n            print(\n                f\"[white]Output video file saved to: {self.get_output_file_path()}[/white]\"\n            )\n        self.video_capture.release()\n        cv2.destroyAllWindows()\n\n    def _fail(self, msg: str):\n        raise RuntimeError(msg)\n\n    def 
write(self, frame: np.ndarray) -> int:\n        \"\"\"\n        Write one frame to the output video.\n\n        Parameters\n        ----------\n        frame : np.ndarray\n            The OpenCV frame to write to file.\n\n        Returns\n        -------\n        int\n            _description_\n        \"\"\"\n        if self.output_video is None:\n            # The user may need to access the output file path on their code\n            output_file_path = self.get_output_file_path()\n            fourcc = cv2.VideoWriter_fourcc(*self.get_codec_fourcc(output_file_path))\n            # Set on first frame write in case the user resizes the frame in some way\n            output_size = (\n                frame.shape[1],\n                frame.shape[0],\n            )  # OpenCV format is (width, height)\n            self.output_video = cv2.VideoWriter(\n                output_file_path,\n                fourcc,\n                self.output_fps,\n                output_size,\n            )\n\n        self.output_video.write(frame)\n        return cv2.waitKey(1)\n\n    def show(self, frame: np.ndarray, downsample_ratio: float = 1.0) -> int:\n        \"\"\"\n        Display a frame through a GUI. Usually used inside a video inference loop to show the output video.\n\n        Parameters\n        ----------\n        frame : np.ndarray\n            The OpenCV frame to be displayed.\n        downsample_ratio : float, optional\n            How much to downsample the frame being show.\n\n            Useful when streaming the GUI video display through a slow internet connection using something like X11 forwarding on an ssh connection.\n\n        Returns\n        -------\n        int\n            _description_\n        \"\"\"\n        # Resize to lower resolution for faster streaming over slow connections\n        if downsample_ratio != 1.0:\n            frame = cv2.resize(\n                frame,\n                (\n                    frame.shape[1] // downsample_ratio,\n                    frame.shape[0] // downsample_ratio,\n                ),\n            )\n        cv2.imshow(\"Output\", frame)\n        return cv2.waitKey(1)\n\n    def get_output_file_path(self) -> str:\n        \"\"\"\n        Calculate the output path being used in case you are writing your frames to a video file.\n\n        Useful if you didn't set `output_path`, and want to know what the autogenerated output file path by Norfair will be.\n\n        Returns\n        -------\n        str\n            The path to the file.\n        \"\"\"\n        if not os.path.isdir(self.output_path):\n            return self.output_path\n\n        if self.input_path is not None:\n            file_name = self.input_path.split(\"/\")[-1].split(\".\")[0]\n        else:\n            file_name = \"camera_{self.camera}\"\n        file_name = f\"{file_name}_out.{self.output_extension}\"\n\n        return os.path.join(self.output_path, file_name)\n\n    def get_codec_fourcc(self, filename: str) -> Optional[str]:\n        if self.output_fourcc is not None:\n            return self.output_fourcc\n\n        # Default codecs for each extension\n        extension = filename[-3:].lower()\n        if \"avi\" == extension:\n            return \"XVID\"\n        elif \"mp4\" == extension:\n            return \"mp4v\"  # When available, \"avc1\" is better\n        else:\n            self._fail(\n                f\"[bold red]Could not determine video codec for the provided output filename[/bold red]: \"\n                f\"[yellow]{filename}[/yellow]\\n\"\n           
     f\"Please use '.mp4', '.avi', or provide a custom OpenCV fourcc codec name.\"\n            )\n            return (\n                None  # Had to add this return to make mypya happy. I don't like this.\n            )\n\n    def abbreviate_description(self, description: str) -> str:\n        \"\"\"Conditionally abbreviate description so that progress bar fits in small terminals\"\"\"\n        terminal_columns, _ = get_terminal_size()\n        space_for_description = (\n            int(terminal_columns) - 25\n        )  # Leave 25 space for progressbar\n        if len(description) < space_for_description:\n            return description\n        else:\n            return \"{} ... {}\".format(\n                description[: space_for_description // 2 - 3],\n                description[-space_for_description // 2 + 3 :],\n            )\n
    "},{"location":"reference/video/#norfair.video.Video.write","title":"write(frame)","text":"

    Write one frame to the output video.

    Parameters:

    Name | Type | Description | Default
    frame | ndarray | The OpenCV frame to write to file. | required

    Returns:

    Type | Description
    int | The pressed key code returned by cv2.waitKey(1), or -1 if no key was pressed.

    Source code in norfair/video.py
    def write(self, frame: np.ndarray) -> int:\n    \"\"\"\n    Write one frame to the output video.\n\n    Parameters\n    ----------\n    frame : np.ndarray\n        The OpenCV frame to write to file.\n\n    Returns\n    -------\n    int\n        The pressed key code returned by cv2.waitKey(1), or -1 if no key was pressed.\n    \"\"\"\n    if self.output_video is None:\n        # The user may need to access the output file path in their code\n        output_file_path = self.get_output_file_path()\n        fourcc = cv2.VideoWriter_fourcc(*self.get_codec_fourcc(output_file_path))\n        # Set on first frame write in case the user resizes the frame in some way\n        output_size = (\n            frame.shape[1],\n            frame.shape[0],\n        )  # OpenCV format is (width, height)\n        self.output_video = cv2.VideoWriter(\n            output_file_path,\n            fourcc,\n            self.output_fps,\n            output_size,\n        )\n\n    self.output_video.write(frame)\n    return cv2.waitKey(1)\n
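    Below is a minimal usage sketch of write(), assuming a local input file named "video.mp4" and an existing "./output" directory (both placeholders): each frame read from the input video is written to the autogenerated output file.

    from norfair import Video

    video = Video(input_path="video.mp4", output_path="./output")
    for frame in video:
        # Any drawing or tracking on the frame would happen here.
        video.write(frame)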
    "},{"location":"reference/video/#norfair.video.Video.show","title":"show(frame, downsample_ratio=1.0)","text":"

    Display a frame through a GUI. Usually used inside a video inference loop to show the output video.

    Parameters:

    Name | Type | Description | Default
    frame | ndarray | The OpenCV frame to be displayed. | required
    downsample_ratio | float | How much to downsample the frame being shown. Useful when streaming the GUI video display through a slow internet connection using something like X11 forwarding on an SSH connection. | 1.0

    Returns:

    Type | Description
    int | The pressed key code returned by cv2.waitKey(1), or -1 if no key was pressed.

    Source code in norfair/video.py
    def show(self, frame: np.ndarray, downsample_ratio: float = 1.0) -> int:\n    \"\"\"\n    Display a frame through a GUI. Usually used inside a video inference loop to show the output video.\n\n    Parameters\n    ----------\n    frame : np.ndarray\n        The OpenCV frame to be displayed.\n    downsample_ratio : float, optional\n        How much to downsample the frame being shown.\n\n        Useful when streaming the GUI video display through a slow internet connection using something like X11 forwarding on an SSH connection.\n\n    Returns\n    -------\n    int\n        The pressed key code returned by cv2.waitKey(1), or -1 if no key was pressed.\n    \"\"\"\n    # Resize to lower resolution for faster streaming over slow connections\n    if downsample_ratio != 1.0:\n        frame = cv2.resize(\n            frame,\n            (\n                int(frame.shape[1] // downsample_ratio),\n                int(frame.shape[0] // downsample_ratio),\n            ),\n        )\n    cv2.imshow(\"Output\", frame)\n    return cv2.waitKey(1)\n
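    Below is a minimal usage sketch of show(), assuming a local input file named "video.mp4" (a placeholder) and a machine with a display available: each frame is shown downsampled by 2, and the loop stops when the "q" key is pressed, using the key code that show() returns from cv2.waitKey.

    from norfair import Video

    video = Video(input_path="video.mp4")
    for frame in video:
        key = video.show(frame, downsample_ratio=2)
        if key == ord("q"):
            break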
    "},{"location":"reference/video/#norfair.video.Video.get_output_file_path","title":"get_output_file_path()","text":"

    Calculate the output path being used in case you are writing your frames to a video file.

    Useful if you didn't set output_path, and want to know which output file path Norfair will autogenerate.

    Returns:

    Type | Description
    str | The path to the file.

    Source code in norfair/video.py
    def get_output_file_path(self) -> str:\n    \"\"\"\n    Calculate the output path being used in case you are writing your frames to a video file.\n\n    Useful if you didn't set `output_path`, and want to know which output file path Norfair will autogenerate.\n\n    Returns\n    -------\n    str\n        The path to the file.\n    \"\"\"\n    if not os.path.isdir(self.output_path):\n        return self.output_path\n\n    if self.input_path is not None:\n        file_name = self.input_path.split(\"/\")[-1].split(\".\")[0]\n    else:\n        file_name = f\"camera_{self.camera}\"\n    file_name = f\"{file_name}_out.{self.output_extension}\"\n\n    return os.path.join(self.output_path, file_name)\n
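    As a sketch of the naming rule above, assuming the placeholder input "video.mp4", an existing "./output" directory, and the default "mp4" output extension, the autogenerated path would be "./output/video_out.mp4":

    from norfair import Video

    video = Video(input_path="video.mp4", output_path="./output")
    print(video.get_output_file_path())  # expected: ./output/video_out.mp4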
    "},{"location":"reference/video/#norfair.video.Video.abbreviate_description","title":"abbreviate_description(description)","text":"

    Conditionally abbreviate description so that the progress bar fits in small terminals

    Source code in norfair/video.py
    def abbreviate_description(self, description: str) -> str:\n    \"\"\"Conditionally abbreviate description so that the progress bar fits in small terminals\"\"\"\n    terminal_columns, _ = get_terminal_size()\n    space_for_description = (\n        int(terminal_columns) - 25\n    )  # Leave 25 columns for the progress bar\n    if len(description) < space_for_description:\n        return description\n    else:\n        return \"{} ... {}\".format(\n            description[: space_for_description // 2 - 3],\n            description[-space_for_description // 2 + 3 :],\n        )\n
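    As a standalone sketch of the same truncation rule, assuming an 80-column terminal: 80 - 25 = 55 characters remain for the description, so longer strings keep their start and end around a " ... " separator.

    description = "Output video: /very/long/path/to/some/deeply/nested/output_file.mp4"
    space_for_description = 80 - 25  # terminal width minus room for the progress bar
    if len(description) >= space_for_description:
        description = "{} ... {}".format(
            description[: space_for_description // 2 - 3],
            description[-space_for_description // 2 + 3 :],
        )
    print(description)  # start and end of the original string joined by " ... "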
    "}]} \ No newline at end of file diff --git a/dev/sitemap.xml.gz b/dev/sitemap.xml.gz index 9ac9bc0827b45b0801175727fcc28334557bb4fa..ccc8366bd19087e379bb078f5814251014d97095 100644 GIT binary patch delta 14 Vcmb=g=aBE_;P@%CG;JbBIRGP|1n~d> delta 14 Vcmb=g=aBE_;Bd6wnL3f98~`7(1Zw~Q