Convert directory fbcode/kats to use the Ruff Formatter
Summary:
Converts the specified directory to use the Ruff formatter in pyfmt.

ruff_dog
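pyfmt is Meta's internal formatting entry point; this diff switches its backend for kats to Ruff's formatter. In an open-source checkout, the same kind of conversion can be previewed and applied with Ruff's CLI directly (illustrative usage only; the internal pyfmt configuration is not part of this diff):

    ruff format --diff kats/    # preview the rewrite as a diff, like the hunks below
    ruff format kats/           # apply the reformatting in place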

If this diff causes merge conflicts when rebasing, please run
`hg status -n -0 --change . -I '**/*.{py,pyi}' | xargs -0 arc pyfmt`
on your diff, and amend any changes before rebasing onto latest.
That should help reduce or eliminate any merge conflicts.
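For context: the hg command above emits the NUL-separated paths of all *.py/*.pyi files touched by the current commit and feeds them to `arc pyfmt`, Meta's formatting wrapper. A rough equivalent for a plain git checkout, assuming Ruff is installed (`ruff format` standing in for the internal `arc pyfmt`):

    git diff -z --name-only HEAD~1 HEAD -- '*.py' '*.pyi' | xargs -0 ruff format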

allow-large-files

Reviewed By: amyreese

Differential Revision: D66250543

fbshipit-source-id: 1b9ae94cb20f7a1b66468ec76dcf9a1a6735ad89
Thomas Polasek authored and facebook-github-bot committed Nov 20, 2024
1 parent e595c8d commit 61f0651
Showing 53 changed files with 105 additions and 183 deletions.
2 changes: 0 additions & 2 deletions kats/detectors/bocpd.py
@@ -1201,7 +1201,6 @@ class _NormalKnownPrec(_PredictiveModel):
     _data_shape: Union[int, Tuple[int, int]]
 
     def __init__(self, data: TimeSeriesData, parameters: NormalKnownParameters) -> None:
-
         # \mu \sim N(\mu0, \frac{1}{\lambda0})
         # x \sim N(\mu,\frac{1}{\lambda})
 
@@ -1560,7 +1559,6 @@ def _sample_bayesian_linreg(
     b_n: float,
     num_samples: int,
 ) -> Tuple[npt.NDArray, npt.NDArray]:
-
     # this is to make sure the results are consistent
     # and tests don't break randomly
     seed_value = 100
34 changes: 17 additions & 17 deletions kats/detectors/cusum_detection.py
@@ -780,7 +780,6 @@ def detector(self, **kwargs: Any) -> List[CUSUMChangePoint]:
         # multivariate detection. We keep using change_direction = "increase"
         # here to be consistent with the univariate detector.
         for change_direction in ["increase"]:
-
             change_meta = self._get_change_point(
                 ts,
                 max_iter=max_iter,
@@ -816,23 +815,25 @@ def _get_llr(
         sigma0: Optional[float],
         sigma1: Optional[float],
     ) -> float:
-
         mu_tilde = np.mean(ts, axis=0)
         sigma_pooled = np.cov(ts, rowvar=False)
-        llr = -2 * (
-            self._log_llr_multi(
-                ts[: (changepoint + 1)],
-                mu_tilde,
-                sigma_pooled,
-                mu0,
-                sigma0,  # pyre-fixme
-            )
-            - self._log_llr_multi(
-                ts[(changepoint + 1) :],
-                mu_tilde,
-                sigma_pooled,
-                mu1,
-                sigma1,  # pyre-fixme
+        llr = (
+            -2
+            * (
+                self._log_llr_multi(
+                    ts[: (changepoint + 1)],
+                    mu_tilde,
+                    sigma_pooled,
+                    mu0,
+                    sigma0,  # pyre-fixme
+                )
+                - self._log_llr_multi(
+                    ts[(changepoint + 1) :],
+                    mu_tilde,
+                    sigma_pooled,
+                    mu1,
+                    sigma1,  # pyre-fixme
+                )
             )
         )
         return llr
@@ -868,7 +869,6 @@ def _get_change_point(
         start_point: int,
         change_direction: str = "increase",
     ) -> CUSUMChangePointVal:
-
         # locate the change point using cusum method
         changepoint_func = np.argmin
         n = 0
1 change: 0 additions & 1 deletion kats/detectors/cusum_model.py
@@ -861,7 +861,6 @@ def fit_predict(
             change_tsd.extend(change_tsd_vec, validate=False)
 
         else:
-
             for start_time in pd.date_range(
                 anomaly_start_time,
                 min(
3 changes: 1 addition & 2 deletions kats/detectors/detector_consts.py
@@ -518,7 +518,6 @@ class ConfidenceBand:
 
 
 class AnomalyResponse:
-
     key_mapping: List[str]
     num_series: int
 
@@ -625,7 +624,7 @@ def inplace_update(
         self._inplace_update_ts(self.scores, time, score)
         cb = self.confidence_band
         if cb is not None:
-            self._inplace_update_ts(cb.lower, time, ci_lower),
+            (self._inplace_update_ts(cb.lower, time, ci_lower),)
             self._inplace_update_ts(cb.upper, time, ci_upper)
 
         if self.predicted_ts is not None:
1 change: 0 additions & 1 deletion kats/detectors/distribution_distance_model.py
@@ -87,7 +87,6 @@ def _percentile_to_prob(
 
     # if all values in l2 are lager than l1
     if n_idx_at2 == 0:
-
         # use distance from l2[0] to l2[j] as base
         j = 1
         while j < len(l2_merge) and l2_merge[j] == l2_merge[0]:
2 changes: 0 additions & 2 deletions kats/detectors/dtwcpd.py
@@ -112,7 +112,6 @@ def __init__(
         min_value: float = 1e-9,  # Controls when a value is considered to be zero
         match_against_same_time_series: bool = False,  # Whether to allow historical matches in the same time series.
     ) -> None:
-
         self.data = data
         self.sliding_window_size = sliding_window_size
         self.MIN_TS_VALUE = min_value
@@ -263,7 +262,6 @@ def LB_Keogh(s1: List[float], s2: List[float], w: int) -> float:
     # TODO: vectorize
     LB_sum = 0.0
     for ind, i in enumerate(s1):
-
         # Rolling min/max
         lower_bound = min(s2[(ind - w if ind - w >= 0 else 0) : (ind + w)])
         upper_bound = max(s2[(ind - w if ind - w >= 0 else 0) : (ind + w)])
@@ -64,7 +64,6 @@ def metadata_detect_preprocessor(
     rawdata: pd.DataFrame,
     params_to_scale_down: Set[str] = PARAMS_TO_SCALE_DOWN,
 ) -> List[Dict[str, Any]]:
-
     rawdata["features"] = rawdata["features"].map(change_str_to_dict)
     rawdata["features"] = rawdata["features"].map(change_dtype)
     rawdata["hpt_res"] = rawdata["hpt_res"].map(change_str_to_dict)
1 change: 1 addition & 0 deletions kats/detectors/multivariate_detector.py
@@ -9,6 +9,7 @@
 """
 This module implements the multivariate Outlier Detection algorithm as a Detector Model.
 """
+
 import json
 from typing import Any, Optional
 
1 change: 1 addition & 0 deletions kats/detectors/outlier_detector.py
@@ -9,6 +9,7 @@
 """
 This module implements the univariate Outlier Detection algorithm as a Detector Model.
 """
+
 import json
 from typing import Any, Optional
 
10 changes: 4 additions & 6 deletions kats/detectors/prophet_detector.py
@@ -59,7 +59,6 @@ class SilentStdoutStderr(object):
     stdout, stderr = sys.__stdout__.fileno(), sys.__stderr__.fileno()  # type: ignore
 
     def __enter__(self) -> None:
-
         # pyre-fixme typing # type: ignore
         self.devnull = os.open(os.devnull, os.O_RDWR)
         # pyre-fixme typing
@@ -206,9 +205,8 @@ def seasonalities_to_dict(
         List[SeasonalityTypes],
         List[str],
         Dict[SeasonalityTypes, Union[bool, str]],
-    ]
+    ],
 ) -> Dict[SeasonalityTypes, Union[bool, str]]:
-
     if isinstance(seasonalities, SeasonalityTypes):
         seasonalities = {seasonalities: True}
     elif isinstance(seasonalities, list):
@@ -599,9 +597,9 @@ def predict(
             )
             if holidays_df is not None:
                 scores_ts = pd.Series(list(scores.value), index=data.time)
-                scores_ts.loc[
-                    scores_ts.index.floor("d").isin(holidays_df)
-                ] *= self.holiday_multiplier
+                scores_ts.loc[scores_ts.index.floor("d").isin(holidays_df)] *= (
+                    self.holiday_multiplier
+                )
                 scores = TimeSeriesData(
                     time=pd.Series(scores_ts.index), value=scores_ts
                 )
1 change: 1 addition & 0 deletions kats/detectors/residual_translation.py
@@ -14,6 +14,7 @@
 asymmetric). This module “learns” the distribution of the residual (using kernel
 density estimation), and outputs a false-alarm probability based on it.
 """
+
 from __future__ import annotations
 
 from typing import Optional
3 changes: 0 additions & 3 deletions kats/detectors/stat_sig_detector.py
@@ -101,7 +101,6 @@ def __init__(
         anomaly_scores_only: bool = False,
         min_perc_change: float = 0.0,
     ) -> None:
-
         if serialized_model:
             model_dict = json.loads(serialized_model)
             self.n_test: int = model_dict["n_test"]
@@ -999,7 +998,6 @@ def __init__(
         use_corrected_scores: bool = False,
         min_perc_change: float = 0.0,
     ) -> None:
-
         StatSigDetectorModel.__init__(
             self,
             n_control=n_control,
@@ -1180,7 +1178,6 @@ def fit_predict(
         return self.response.get_last_n(self.last_N)
 
     def _init_response(self, data: TimeSeriesData) -> None:
-
         zeros_df = pd.DataFrame(
             {
                 **{"time": data.time},
38 changes: 20 additions & 18 deletions kats/models/bayesian_var.py
@@ -6,14 +6,14 @@
 # pyre-strict
 
 """
-Bayesian estimation of Vector Autoregressive Model using
-Minnesota prior on the coefficient matrix. This version is
-useful for regularization when they are too many coefficients
-to be estimated.
-Implementation inspired by the following two articles/papers:
-https://www.mathworks.com/help/econ/normalbvarm.html#mw_4a1ab118-9ef3-4380-8c5a-12b848254117
-http://apps.eui.eu/Personal/Canova/Articles/ch10.pdf (page 5)
+Bayesian estimation of Vector Autoregressive Model using
+Minnesota prior on the coefficient matrix. This version is
+useful for regularization when they are too many coefficients
+to be estimated.
+Implementation inspired by the following two articles/papers:
+https://www.mathworks.com/help/econ/normalbvarm.html#mw_4a1ab118-9ef3-4380-8c5a-12b848254117
+http://apps.eui.eu/Personal/Canova/Articles/ch10.pdf (page 5)
 """
 
 import logging
@@ -208,11 +208,12 @@ def fit(self) -> None:
         ]  # shape: [m * (m * p + r + 1)] x 1
 
         assert (
-            num_mu,
-            num_mu,
-        ) == z_sum_term.shape, (
-            f"Expected {(num_mu, num_mu)}, got {z_sum_term.shape}"
-        )
+            (
+                num_mu,
+                num_mu,
+            )
+            == z_sum_term.shape
+        ), f"Expected {(num_mu, num_mu)}, got {z_sum_term.shape}"
         assert (
             num_mu,
         ) == y_sum_term.shape, f"Expected {(num_mu,)}, got {y_sum_term.shape}"
@@ -258,11 +259,12 @@ def _construct_Zt(self, X: npt.NDArray, Y: npt.NDArray, t: int) -> npt.NDArray:
         Z_t = block_diag(*([z] * self.m))
 
         assert (
-            self.m,
-            self.num_mu_coefficients,
-        ) == Z_t.shape, (
-            f"Expected {(self.m, self.num_mu_coefficients)}, got {Z_t.shape}"
-        )
+            (
+                self.m,
+                self.num_mu_coefficients,
+            )
+            == Z_t.shape
+        ), f"Expected {(self.m, self.num_mu_coefficients)}, got {Z_t.shape}"
 
         return Z_t  # shape: m x [m * (m * p + m + 1)]
 
5 changes: 3 additions & 2 deletions kats/models/ensemble/median_ensemble.py
@@ -59,8 +59,9 @@ def predict(self, steps: int, **kwargs) -> pd.DataFrame:
         """
 
         logging.debug(
-            "Call predict() with parameters. "
-            "steps:{steps}, kwargs:{kwargs}".format(steps=steps, kwargs=kwargs)
+            "Call predict() with parameters. " "steps:{steps}, kwargs:{kwargs}".format(
+                steps=steps, kwargs=kwargs
+            )
         )
         # Keep freq in the parameters passed to _predict_all()
         self.freq = freq = kwargs.get("freq", "D")
1 change: 1 addition & 0 deletions kats/models/ensemble/weighted_avg_ensemble.py
@@ -12,6 +12,7 @@
 back testing results, i.e., model with better performance should have higher
 weight.
 """
+
 import logging
 import sys
 from multiprocessing import cpu_count, Pool
1 change: 0 additions & 1 deletion kats/models/globalmodel/backtester.py
@@ -69,7 +69,6 @@ def __init__(
         earliest_timestamp: Union[str, pd.Timestamp, None] = None,
         max_core: Optional[int] = None,
     ) -> None:
-
         if not isinstance(gmparam, GMParam):
             msg = f"gmparam should be GMParam object but receives {type(gmparam)}."
             logging.error(msg)
1 change: 0 additions & 1 deletion kats/models/globalmodel/data_processor.py
@@ -207,7 +207,6 @@ def __init__(
         ] = None,
         mode: str = "train",
     ) -> None:
-
         if not isinstance(params, GMParam):
             msg = f"params should be a GMParam object but receives {type(params)}."
             logging.error(msg)
2 changes: 0 additions & 2 deletions kats/models/globalmodel/ensemble.py
@@ -60,7 +60,6 @@ def __init__(
         multi: bool = False,
         max_core: Optional[int] = None,
     ) -> None:
-
         if not isinstance(gmparam, GMParam):
             msg = f"gmparam should be GMParam object but receives {type(gmparam)}."
             logging.error(msg)
@@ -210,7 +209,6 @@ def train(
         split_data = split(self.splits, self.overlap, train_TSs, valid_TSs)
         # multi processing
         if self.multi:
-
             t0 = time.time()
             rds = np.random.randint(1, int(10000 * self.model_num), self.model_num)
             model_params = [
6 changes: 0 additions & 6 deletions kats/models/globalmodel/model.py
@@ -55,7 +55,6 @@ class GMModel:
 
     # pyre-fixme[3]: Return type must be annotated.
     def __init__(self, params: GMParam):
-
         if not isinstance(params, GMParam):
             msg = f"params should be a GMParam object but receives {type(params)}."
             logging.error(msg)
@@ -321,7 +320,6 @@ def _process(
         x_t = xi_t / anchor_level
 
         if period > 1:
-
             input_season = torch.cat(seasonality[idx - input_window : idx], dim=1)
             x_t = x_t / input_season
 
@@ -544,7 +542,6 @@ def _format_fcst(
         actual = np.column_stack(fcst_store["actual"])[:, :steps]
 
         for i, idx in enumerate(ids):
-
             df = pd.DataFrame(
                 fcst[i].transpose()[:steps,],
                 columns=cols,
@@ -607,7 +604,6 @@ def predict(
         self._set_nn_status("test")
         fcst_collects = {}
         for i in range(m):
-
             self._reset_nn_states()
 
             ids = dl.get_batch(batch_size)
@@ -1083,7 +1079,6 @@ def _single_pass_s2s(
         prev_idx = 0
 
         while cur_step < total_step_num:
-
             cur_idx = batch.indices[cur_step]
             is_valid = cur_idx >= first_valid_idx
 
@@ -1125,7 +1120,6 @@ def _single_pass_s2s(
             self._valid_tensor(tmp_encode, "encoder_output")
 
             if training_mode or is_valid:
-
                 # pyre-fixme[29]: `Union[Module, Tensor]` is not a function.
                 encoder.prepare_decoder(decoder)
                 encoder_step = (
1 change: 0 additions & 1 deletion kats/models/globalmodel/serialize.py
@@ -77,7 +77,6 @@ def global_model_to_json(gme: Union[GMModel, GMEnsemble]) -> str:
 
 
 def load_global_model_from_json(json_str: str) -> Union[GMModel, GMEnsemble]:
-
     param_dict = json.loads(json_str)
 
     # string for GMEnsemble