diff --git a/chainladder/core/base.py b/chainladder/core/base.py
index 3660c4c1..8a05fcbc 100644
--- a/chainladder/core/base.py
+++ b/chainladder/core/base.py
@@ -176,7 +176,7 @@ def __init__(
         self.key_labels = index
         self.is_cumulative = cumulative
         self.valuation_date = data_agg["development"].max()
-        if not AUTO_SPARSE or array_backend == 'cupy':
+        if not AUTO_SPARSE or array_backend == "cupy":
             self.set_backend(array_backend, inplace=True)
         else:
             self = self._auto_sparse()
diff --git a/chainladder/core/slice.py b/chainladder/core/slice.py
index ebbc3598..3abf2eb8 100644
--- a/chainladder/core/slice.py
+++ b/chainladder/core/slice.py
@@ -53,11 +53,30 @@ def _contig_slice(arr):
         min_arr, max_arr = max_arr - 1, min_arr - 1 if min_arr else min_arr
         return slice(min_arr, max_arr, step)
 
+    def __setitem__(self, key, values):
+        if isinstance(values, TriangleSlicer):
+            if values.array_backend == "sparse":
+                values = values.set_backend("numpy").values
+            else:
+                values = values.values
+        key = tuple(
+            [slice(item, item + 1) if type(item) is int else item for item in key]
+        )
+        if self.obj.array_backend == "sparse":
+            self.obj.set_backend("numpy", inplace=True)
+            self.obj.values.__setitem__(normalize_index(key, self.obj.shape), values)
+            self.obj.set_backend("sparse", inplace=True)
+        else:
+            self.obj.values.__setitem__(normalize_index(key, self.obj.shape), values)
+
 
 class Location(_LocBase):
     """ class to generate .loc[] functionality """
 
     def __getitem__(self, key):
+        return self.get_idx(*self.format_key(key))
+
+    def format_key(self, key):
         key = (key,) if type(key) is not tuple else key
         key_mask = tuple([i if i is Ellipsis else 0 for i in key])
         if len(key_mask) < len(self.obj.shape) and Ellipsis not in key_mask:
@@ -93,7 +112,11 @@ def normalize(key, idx):
             return out
 
         key = [key[0]] + [normalize(key, 1), normalize(key, 2), normalize(key, 3)]
-        return self.get_idx(key, filter_idx)
+        return key, filter_idx
+
+    def __setitem__(self, key, values):
+        key = self.format_key(key)[0]
+        super().__setitem__(key, values)
 
 
 class Ilocation(_LocBase):
@@ -102,6 +125,10 @@ class Ilocation(_LocBase):
     def __getitem__(self, key):
         return self.get_idx(normalize_index(key, self.obj.shape))
 
+    def __setitem__(self, key, values):
+        key = normalize_index(key, self.obj.shape)
+        super().__setitem__(key, values)
+
 
 class TriangleSlicer:
     def __getitem__(self, key):
diff --git a/chainladder/core/tests/test_triangle.py b/chainladder/core/tests/test_triangle.py
index a94b4b1c..8254fe75 100644
--- a/chainladder/core/tests/test_triangle.py
+++ b/chainladder/core/tests/test_triangle.py
@@ -638,11 +638,14 @@ def test_init_vector():
     assert np.all(a.valuation == b.valuation)
     assert a.valuation_date == b.valuation_date
 
+
 def test_loc_ellipsis():
     assert tri == tri_gt
-    assert tri.loc['Aegis Grp'] == tri.loc['Adriatic Ins Co':'Aegis Grp'].loc['Aegis Grp']
-    assert tri.loc['Aegis Grp', ... , :] == tri.loc['Aegis Grp']
+    assert (
+        tri.loc["Aegis Grp"] == tri.loc["Adriatic Ins Co":"Aegis Grp"].loc["Aegis Grp"]
+    )
+    assert tri.loc["Aegis Grp", ..., :] == tri.loc["Aegis Grp"]
     assert tri.loc[..., 24:] == tri.loc[..., :, 24:]
     assert tri.loc[:, ..., 24:] == tri.loc[..., :, 24:]
-    assert tri.loc[:, 'CumPaidLoss'] == tri.loc[:, 'CumPaidLoss', ...]
-    assert tri.loc[..., 'CumPaidLoss', :, :] == tri.loc[:, 'CumPaidLoss', :, :]
+    assert tri.loc[:, "CumPaidLoss"] == tri.loc[:, "CumPaidLoss", ...]
+    assert tri.loc[..., "CumPaidLoss", :, :] == tri.loc[:, "CumPaidLoss", :, :]
diff --git a/chainladder/development/base.py b/chainladder/development/base.py
index db85b4cd..73436992 100644
--- a/chainladder/development/base.py
+++ b/chainladder/development/base.py
@@ -22,9 +22,11 @@ class Development(DevelopmentBase):
     n_periods : integer, optional (default=-1)
         number of origin periods to be used in the ldf average calculation.
         For all origin periods, set n_periods=-1
-    average : string, optional (default='volume')
+    average : string or float, optional (default='volume')
         type of averaging to use for ldf average calculation.  Options include
-        'volume', 'simple', and 'regression'
+        'volume', 'simple', and 'regression'. If numeric values are supplied,
+        then (2-average) in the style of Zehnwirth & Barnett is used
+        for the exponent of the regression weights.
     sigma_interpolation : string optional (default='log-linear')
         Options include 'log-linear' and 'mack'
     drop : tuple or list of tuples
@@ -260,7 +262,7 @@ def fit(self, X, y=None, sample_weight=None):
         weight_dict = {"regression": 0, "volume": 1, "simple": 2}
         x, y = tri_array[..., :-1], tri_array[..., 1:]
         val = xp.nan_to_num(
-            xp.array([weight_dict.get(item.lower(), 1) for item in self.average_])[
+            xp.array([weight_dict.get(item, item) for item in self.average_])[
                 None, None, None
             ]
             * (y * 0 + 1)
diff --git a/chainladder/methods/mack.py b/chainladder/methods/mack.py
index b5e295bd..0882b7df 100644
--- a/chainladder/methods/mack.py
+++ b/chainladder/methods/mack.py
@@ -103,7 +103,7 @@ def _get_full_std_err_(self, X=None):
         lxp = X.ldf_.get_array_module()
         full = getattr(X, "_full_triangle_", self.full_triangle_)
         avg = {"regression": 0, "volume": 1, "simple": 2}
-        avg = [avg[item] for item in X.average_]
+        avg = [avg.get(item, item) for item in X.average_]
         val = xp.broadcast_to(xp.array(avg + [avg[-1]]), X.shape)
         weight = xp.sqrt(full.values[..., : len(X.ddims)] ** (2 - val))
         obj.values = X.sigma_.values / num_to_nan(weight)
@@ -165,8 +165,8 @@ def total_mack_std_err_(self):
 
     def _get_total_mack_std_err_(self, obj):
         obj = obj.total_process_risk_ ** 2 + obj.total_parameter_risk_ ** 2
-        if obj.array_backend == 'sparse':
-            out = obj.set_backend('numpy').sqrt().values[..., 0, -1]
+        if obj.array_backend == "sparse":
+            out = obj.set_backend("numpy").sqrt().values[..., 0, -1]
         else:
             out = obj.sqrt().values[..., 0, -1]
         return pd.DataFrame(out, index=obj.index, columns=obj.columns)
diff --git a/chainladder/tails/base.py b/chainladder/tails/base.py
index 172b55f2..7fcbb5ef 100644
--- a/chainladder/tails/base.py
+++ b/chainladder/tails/base.py
@@ -57,7 +57,7 @@ def fit(self, X, y=None, sample_weight=None):
         self.sigma_.ddims = self.std_err_.ddims = np.concatenate(
             (obj.ldf_.ddims, np.array(["{}-9999".format(int(obj.ddims[-1]))]))
         )
-        if hasattr(obj, 'average_'):
+        if hasattr(obj, "average_"):
             self.average_ = obj.average_
         else:
             self.average_ = None
diff --git a/chainladder/tails/bondy.py b/chainladder/tails/bondy.py
index 8c176557..be05ac5c 100644
--- a/chainladder/tails/bondy.py
+++ b/chainladder/tails/bondy.py
@@ -74,7 +74,7 @@ def fit(self, X, y=None, sample_weight=None):
         backend = X.array_backend
         if X.array_backend == "cupy":
             X = X.set_backend("numpy", deep=True)
-        else :
+        else:
             X = X.set_backend("numpy")
         xp = X.get_array_module()
         super().fit(X, y, sample_weight)