From fac794e4907411398a4d05005fde05247762b61b Mon Sep 17 00:00:00 2001
From: huskydoge
Date: Sun, 29 Oct 2023 15:06:21 +0800
Subject: [PATCH] reproduced the middle plot of fig 2 in the paper, using a
 logistic regression model trained on 10-class MNIST. For convenience, I have
 included all the files used in a single directory, so that you can directly
 run `leave_one_retraining.py` to see the result.

---
 fig2_linear_approx/LICENSE                         |   1 +
 fig2_linear_approx/__init__.py                     |  13 +
 .../influence_functions_toolkits/hvp_grad.py       | 260 ++++++++
 .../influence_functions.py                         | 554 ++++++++++++++++++
 .../influence_functions_toolkits/utils.py          | 274 +++++++++
 fig2_linear_approx/leave_one_retraining.py         | 166 ++++++
 fig2_linear_approx/model.py                        |  40 ++
 fig2_linear_approx/result.png                      | Bin 0 -> 29665 bytes
 fig2_linear_approx/utils.py                        |  68 +++
 9 files changed, 1376 insertions(+)
 create mode 100644 fig2_linear_approx/LICENSE
 create mode 100644 fig2_linear_approx/__init__.py
 create mode 100644 fig2_linear_approx/influence_functions_toolkits/hvp_grad.py
 create mode 100644 fig2_linear_approx/influence_functions_toolkits/influence_functions.py
 create mode 100644 fig2_linear_approx/influence_functions_toolkits/utils.py
 create mode 100644 fig2_linear_approx/leave_one_retraining.py
 create mode 100644 fig2_linear_approx/model.py
 create mode 100644 fig2_linear_approx/result.png
 create mode 100644 fig2_linear_approx/utils.py

diff --git a/fig2_linear_approx/LICENSE b/fig2_linear_approx/LICENSE
new file mode 100644
index 0000000..657aa68
--- /dev/null
+++ b/fig2_linear_approx/LICENSE
@@ -0,0 +1 @@
+GPL-2 or later
\ No newline at end of file
diff --git a/fig2_linear_approx/__init__.py b/fig2_linear_approx/__init__.py
new file mode 100644
index 0000000..9cbfbdc
--- /dev/null
+++ b/fig2_linear_approx/__init__.py
@@ -0,0 +1,13 @@
+# __init__.py
+
+from .influence_functions_toolkits.influence_functions import (
+    calc_img_wise,
+    calc_all_grad_then_test,
+    calc_influence_single,
+    s_test_sample,
+)
+from .influence_functions_toolkits.utils import (
+    init_logging,
+    display_progress,
+    get_default_config
+)
diff --git a/fig2_linear_approx/influence_functions_toolkits/hvp_grad.py b/fig2_linear_approx/influence_functions_toolkits/hvp_grad.py
new file mode 100644
index 0000000..bd52698
--- /dev/null
+++ b/fig2_linear_approx/influence_functions_toolkits/hvp_grad.py
@@ -0,0 +1,260 @@
+#! /usr/bin/env python3
+import torch
+import torch.nn.functional as F
+from torch.nn.utils import parameters_to_vector
+from torch.autograd import grad
+from torch.autograd.functional import vhp
+from torch.utils.data import DataLoader
+from tqdm import tqdm
+
+from fig2_linear_approx.influence_functions_toolkits.utils import (
+    conjugate_gradient,
+    load_weights,
+    make_functional,
+    tensor_to_tuple,
+)
+
+
+def s_test_cg(x_test, y_test, model, train_loader, damp, gpu=-1, verbose=True, loss_func="cross_entropy"):
+
+    if gpu > 0:
+        x_test, y_test = x_test.cuda(), y_test.cuda()
+
+    v_flat = parameters_to_vector(grad_z(x_test, y_test, model, gpu, loss_func=loss_func))
+
+    def hvp_fn(x):
+
+        x_tensor = torch.tensor(x, requires_grad=False)
+        if gpu > 0:
+            x_tensor = x_tensor.cuda()
+
+        params, names = make_functional(model)
+        # Make params regular Tensors instead of nn.Parameter
+        params = tuple(p.detach().requires_grad_() for p in params)
+        flat_params = parameters_to_vector(params)
+
+        hvp = torch.zeros_like(flat_params)
+
+        for x_train, y_train in train_loader:
+
+            if gpu > 0:
+                x_train, y_train = x_train.cuda(), y_train.cuda()
+
+            def f(flat_params_):
+                split_params = tensor_to_tuple(flat_params_, params)
+                load_weights(model, names, split_params)
+                out = model(x_train)
+                loss = calc_loss(out, y_train, loss_func=loss_func)
+                return loss
+
+            batch_hvp = vhp(f, flat_params, x_tensor, strict=True)[1]
+
+            hvp += batch_hvp / float(len(train_loader))
+
+        with torch.no_grad():
+            load_weights(model, names, params, as_params=True)
+            # Damping acts on the current iterate, so hvp_fn represents
+            # (H + damp * I) @ x for the conjugate-gradient solve
+            damped_hvp = hvp + damp * x_tensor
+
+        return damped_hvp.cpu().numpy()
+
+    def print_function_value(_, f_linear, f_quadratic):
+        print(
+            f"Conjugate function value: {f_linear + f_quadratic}, lin: {f_linear}, quad: {f_quadratic}"
+        )
+
+    debug_callback = print_function_value if verbose else None
+
+    result = conjugate_gradient(
+        hvp_fn,
+        v_flat.cpu().numpy(),
+        debug_callback=debug_callback,
+        avextol=1e-8,
+        maxiter=100,
+    )
+
+    result = torch.tensor(result)
+    if gpu > 0:
+        result = result.cuda()
+
+    return result
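+
+
+# What s_test_cg computes, roughly: it solves the damped linear system
+#     (H + damp * I) s = v,    where v = grad_theta L(z_test, theta),
+# with scipy's Newton-CG, using only Hessian-vector products H @ x
+# (hvp_fn averages per-batch vhp() calls over train_loader) instead of
+# ever forming H explicitly.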
+
+
+def s_test(x_test, y_test, model, i, samples_loader, gpu=-1, damp=0.01, scale=25.0, loss_func="cross_entropy"):
+    """s_test can be precomputed for each test point of interest, and then
+    multiplied with grad_z to get the desired value for each training point.
+    Here, stochastic estimation is used to calculate s_test. s_test is the
+    Inverse Hessian Vector Product.
+
+    Arguments:
+        x_test: torch tensor, test data points, such as test images
+        y_test: torch tensor, contains all test data labels
+        model: torch NN, model used to evaluate the dataset
+        i: the sample number
+        samples_loader: torch DataLoader, can load the training dataset
+        gpu: int, use the GPU with this id if > 0; otherwise run on CPU
+        damp: float, dampening factor
+        scale: float, scaling factor
+
+    Returns:
+        h_estimate: list of torch tensors, s_test"""
+
+    v = grad_z(x_test, y_test, model, gpu, loss_func=loss_func)
+    h_estimate = v
+
+    params, names = make_functional(model)
+    # Make params regular Tensors instead of nn.Parameter
+    params = tuple(p.detach().requires_grad_() for p in params)
+
+    # TODO: Dynamically set the recursion depth so that iterations stop once h_estimate stabilises
+    progress_bar = tqdm(samples_loader, desc=f"IHVP sample {i}")
+    for step, (x_train, y_train) in enumerate(progress_bar):
+
+        if gpu > 0:
+            x_train, y_train = x_train.cuda(), y_train.cuda()
+
+        def f(*new_params):
+            load_weights(model, names, new_params)
+            out = model(x_train)
+            loss = calc_loss(out, y_train, loss_func=loss_func)
+            return loss
+
+        hv = vhp(f, params, tuple(h_estimate), strict=True)[1]
+
+        # Recursively update h_estimate
+        with torch.no_grad():
+            h_estimate = [
+                _v + (1 - damp) * _h_e - _hv / scale
+                for _v, _h_e, _hv in zip(v, h_estimate, hv)
+            ]
+
+            if step % 100 == 0:
+                norm = sum([h_.norm() for h_ in h_estimate])
+                progress_bar.set_postfix({"est_norm": norm.item()})
+
+    with torch.no_grad():
+        load_weights(model, names, params, as_params=True)
+
+    return h_estimate
+
+
+def calc_loss(logits, labels, loss_func="cross_entropy"):
+    """Calculates the loss
+
+    Arguments:
+        logits: torch tensor, input with size (minibatch, nr_of_classes)
+        labels: torch tensor, target labels with values in [0, nr_of_classes-1]
+        loss_func: str, specify loss function name
+
+    Returns:
+        loss: scalar, the loss"""
+
+    if loss_func == "cross_entropy":
+        if logits.shape[-1] == 1:
+            loss = F.binary_cross_entropy_with_logits(logits, labels.type(torch.float))
+        else:
+            criterion = torch.nn.CrossEntropyLoss()
+            loss = criterion(logits, labels)
+    elif loss_func == "mean":
+        loss = torch.mean(logits)
+    else:
+        raise ValueError("{} is not a valid value for loss_func".format(loss_func))
+
+    return loss
+
+
+def grad_z(x, y, model, gpu=-1, loss_func="cross_entropy"):
+    """Calculates grad_z, the gradient of the loss w.r.t. the model
+    parameters. One grad_z should be computed for each training sample.
+
+    Arguments:
+        x: torch tensor, training data points
+            e.g. an image sample (batch_size, 3, 256, 256)
+        y: torch tensor, training data labels
+        model: torch NN, model used to evaluate the dataset
+        gpu: int, use the GPU with this id if > 0; otherwise run on CPU
+
+    Returns:
+        grad_z: list of torch tensor, containing the gradients
+            from model parameters to loss"""
+    model.eval()
+
+    # initialize
+    if gpu > 0:
+        x, y = x.cuda(), y.cuda()
+
+    prediction = model(x)
+
+    loss = calc_loss(prediction, y, loss_func=loss_func)
+
+    # Compute sum of gradients from model parameters to loss
+    return grad(loss, model.parameters())
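+
+
+# The stochastic estimation in s_test() above follows a LiSSA-style
+# recursion (sketch): with v = grad_theta L(z_test) and H_t the Hessian of
+# the loss on the batch sampled at step t,
+#     h_0 = v
+#     h_t = v + (1 - damp) * h_{t-1} - (H_t / scale) @ h_{t-1}
+# At convergence, h_t / scale estimates (H + damp * scale * I)^{-1} v,
+# which is why s_test_sample() below divides each estimate by `scale`.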
+
+
+def s_test_sample(
+    model,
+    x_test,
+    y_test,
+    train_loader,
+    gpu=-1,
+    damp=0.01,
+    scale=25,
+    recursion_depth=5000,
+    r=1,
+    loss_func="cross_entropy",
+):
+    """Calculates s_test for a single test image taking into account the whole
+    training dataset. s_test = invHessian * nabla(Loss(test_img, model params))
+
+    Arguments:
+        model: pytorch model, for which s_test should be calculated
+        x_test: test image
+        y_test: test image label
+        train_loader: pytorch dataloader, which can load the train data
+        gpu: int, use the GPU with this id if > 0; otherwise run on CPU (default)
+        damp: float, influence function damping factor
+        scale: float, influence calculation scaling factor, chosen to keep the
+            scaled Hessian <= I (the paper's code uses 25)
+        recursion_depth: int, number of recursion steps per s_test estimate;
+            more steps increase accuracy
+        r: int, number of s_test estimates to average over;
+            r * recursion_depth should equal the training dataset size
+        loss_func: str, loss function to use (default: "cross_entropy")
+
+    Returns:
+        s_test_vec: torch tensor, contains s_test for a single test image
+    """
+    # Initialize inverse_hvp as a list of zero tensors: the first s_test
+    # estimate described in the paper, H_0^{-1} v = v
+    inverse_hvp = [
+        torch.zeros_like(params, dtype=torch.float) for params in model.parameters()
+    ]
+
+    for i in range(r):  # repeat r times and average
+
+        hessian_loader = DataLoader(
+            train_loader.dataset,
+            sampler=torch.utils.data.RandomSampler(
+                train_loader.dataset, True, num_samples=recursion_depth  # as mentioned in the paper, use enough samples
+            ),
+            batch_size=1,
+            # num_workers=4,
+        )
+
+        cur_estimate = s_test(
+            x_test, y_test, model, i, hessian_loader, gpu=gpu, damp=damp, scale=scale, loss_func=loss_func,
+        )
+
+        with torch.no_grad():
+            inverse_hvp = [
+                old + (cur / scale) for old, cur in zip(inverse_hvp, cur_estimate)  # accumulate the new estimate
+            ]
+
+    with torch.no_grad():
+        inverse_hvp = [component / r for component in inverse_hvp]
+
+    return inverse_hvp
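+
+
+# Minimal usage sketch (names are illustrative; assumes a trained `model`,
+# a single test pair and a train DataLoader already exist):
+#
+#     ihvp = s_test_sample(model, x_test, y_test, train_loader,
+#                          recursion_depth=1000, r=10)
+#     # -> list of tensors, one per parameter tensor of the model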
diff --git a/fig2_linear_approx/influence_functions_toolkits/influence_functions.py b/fig2_linear_approx/influence_functions_toolkits/influence_functions.py
new file mode 100644
index 0000000..f47b7da
--- /dev/null
+++ b/fig2_linear_approx/influence_functions_toolkits/influence_functions.py
@@ -0,0 +1,554 @@
+#! /usr/bin/env python3
+
+import torch
+import time
+import datetime
+import numpy as np
+import copy
+import logging
+from tqdm import tqdm
+
+from pathlib import Path
+
+from fig2_linear_approx.influence_functions_toolkits.hvp_grad import (
+    grad_z,
+    s_test_sample,
+)
+from fig2_linear_approx.influence_functions_toolkits.utils import (
+    save_json,
+    display_progress,
+)
+
+
+def calc_s_test(
+    model,
+    test_loader,
+    train_loader,
+    save=False,
+    gpu=-1,
+    damp=0.01,
+    scale=25,
+    recursion_depth=5000,
+    r=1,
+    start=0,
+):
+    """Calculates s_test for the whole test dataset taking into account all
+    training data images.
+
+    Arguments:
+        model: pytorch model, for which s_test should be calculated
+        test_loader: pytorch dataloader, which can load the test data
+        train_loader: pytorch dataloader, which can load the train data
+        save: Path, path where to save the s_test files if desired. Omitting
+            this argument will skip saving
+        gpu: int, use the GPU with this id if > 0; otherwise run on CPU (default)
+        damp: float, influence function damping factor
+        scale: float, influence calculation scaling factor
+        recursion_depth: int, number of recursion steps per s_test estimate;
+            more steps increase accuracy
+        r: int, number of s_test estimates to average over;
+            r * recursion_depth should equal the training dataset size
+        start: int, index of the first test sample to use. default is 0
+
+    Returns:
+        s_tests: list of torch vectors, contains all s_test for the whole
+            dataset. Can be huge.
+        save: Path, path to the folder where the s_test files were saved to or
+            False if they were not saved."""
+    if save and not isinstance(save, Path):
+        save = Path(save)
+    if not save:
+        logging.info("ATTENTION: not saving s_test files.")
+
+    s_tests = []
+    for i in range(start, len(test_loader.dataset)):
+        z_test, t_test = test_loader.dataset[i]
+        z_test = test_loader.collate_fn([z_test])
+        t_test = test_loader.collate_fn([t_test])
+
+        s_test_vec = s_test_sample(
+            model, z_test, t_test, train_loader, gpu, damp, scale, recursion_depth, r
+        )
+
+        if save:
+            s_test_vec = [s.cpu() for s in s_test_vec]
+            torch.save(
+                s_test_vec, save.joinpath(f"{i}_recdep{recursion_depth}_r{r}.s_test")
+            )
+        else:
+            s_tests.append(s_test_vec)
+        display_progress(
+            "Calc. z_test (s_test): ", i - start, len(test_loader.dataset) - start
+        )
+
+    return s_tests, save
+
+
+def calc_grad_z(model, train_loader, save_pth=False, gpu=-1, start=0):
+    """Calculates grad_z and can save the output to files. One grad_z should
+    be computed for each training data sample.
+
+    Arguments:
+        model: pytorch model, for which grad_z should be calculated
+        train_loader: pytorch dataloader, which can load the train data
+        save_pth: Path, path where to save the grad_z files if desired.
+            Omitting this argument will skip saving
+        gpu: int, use the GPU with this id if > 0; otherwise run on CPU (default)
+        start: int, index of the first training sample to use. default is 0
+
+    Returns:
+        grad_zs: list of torch tensors, contains the grad_z tensors
+        save_pth: Path, path where grad_z files were saved to or
+            False if they were not saved."""
+    if save_pth and isinstance(save_pth, str):
+        save_pth = Path(save_pth)
+    if not save_pth:
+        logging.info("ATTENTION: Not saving grad_z files!")
+
+    grad_zs = []
+    for i in range(start, len(train_loader.dataset)):
+        z, t = train_loader.dataset[i]
+        z = train_loader.collate_fn([z])
+        t = train_loader.collate_fn([t])
+        grad_z_vec = grad_z(z, t, model, gpu=gpu)
+        if save_pth:
+            grad_z_vec = [g.cpu() for g in grad_z_vec]
+            torch.save(grad_z_vec, save_pth.joinpath(f"{i}.grad_z"))
+        else:
+            grad_zs.append(grad_z_vec)
+        display_progress("Calc. grad_z: ", i - start, len(train_loader.dataset) - start)
+
+    return grad_zs, save_pth
+
+
+def load_s_test(
+    s_test_dir=Path("./s_test/"), s_test_id=0, r_sample_size=10, train_dataset_size=-1
+):
+    """Loads all s_test data required to calculate the influence function
+    and returns a list of it.
+
+    Arguments:
+        s_test_dir: Path, folder containing files storing the s_test values
+        s_test_id: int, number of the test data sample s_test was calculated
+            for
+        r_sample_size: int, number of s_tests precalculated
+            per test dataset point
+        train_dataset_size: int, number of total samples in dataset;
+            -1 indicates to use all available grad_z files
+
+    Returns:
+        e_s_test: list of torch vectors, contains all e_s_tests for the whole
+            dataset.
+        s_test: list of torch vectors, contain all s_test for the whole
+            dataset. Can be huge."""
+    if isinstance(s_test_dir, str):
+        s_test_dir = Path(s_test_dir)
+
+    s_test = []
+    logging.info(f"Loading s_test from: {s_test_dir} ...")
+    # glob() returns a generator, so materialize it before taking len()
+    num_s_test_files = len(list(s_test_dir.glob("*.s_test")))
+    if num_s_test_files != r_sample_size:
+        logging.warning(
+            "Load Influence Data: number of s_test sample files"
+            " mismatches the available samples"
+        )
+    ########################
+    # TODO: should prob. not hardcode the file name, use natsort+glob
+    ########################
+    for i in range(num_s_test_files):
+        s_test.append(torch.load(s_test_dir / f"{s_test_id}_{i}.s_test"))
+        display_progress("s_test files loaded: ", i, r_sample_size)
+
+    # Average the s_test estimates: start from the first one, add the
+    # remaining ones, then divide by the number of estimates
+    e_s_test = s_test[0]
+    for k in range(1, len(s_test)):
+        e_s_test = [i + j for i, j in zip(e_s_test, s_test[k])]
+
+    e_s_test = [i / len(s_test) for i in e_s_test]
+
+    return e_s_test, s_test
+
+
+def load_grad_z(grad_z_dir=Path("./grad_z/"), train_dataset_size=-1):
+    """Loads all grad_z data required to calculate the influence function and
+    returns it.
+
+    Arguments:
+        grad_z_dir: Path, folder containing files storing the grad_z values
+        train_dataset_size: int, number of total samples in dataset;
+            -1 indicates to use all available grad_z files
+
+    Returns:
+        grad_z_vecs: list of torch tensors, contains the grad_z tensors"""
+    if isinstance(grad_z_dir, str):
+        grad_z_dir = Path(grad_z_dir)
+
+    grad_z_vecs = []
+    logging.info(f"Loading grad_z from: {grad_z_dir} ...")
+    # glob() returns a generator, so materialize it before taking len()
+    available_grad_z_files = len(list(grad_z_dir.glob("*.grad_z")))
+    if available_grad_z_files != train_dataset_size:
+        logging.warning(
+            "Load Influence Data: number of grad_z files mismatches the dataset size"
+        )
+    if -1 == train_dataset_size:
+        train_dataset_size = available_grad_z_files
+    for i in range(train_dataset_size):
+        grad_z_vecs.append(torch.load(grad_z_dir / f"{i}.grad_z"))
+        display_progress("grad_z files loaded: ", i, train_dataset_size)
+
+    return grad_z_vecs
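+
+
+# The quantity computed below is the influence of a training point z on the
+# test loss (Koh & Liang 2017, eq. 2), up to the 1/n normalization applied
+# in the code:
+#     I_up,loss(z, z_test) = - grad_theta L(z)^T  H^{-1}  grad_theta L(z_test)
+# where s_test = H^{-1} grad_theta L(z_test) is the precomputed IHVP.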
+
+
+def calc_influence_function(train_dataset_size, grad_z_vecs=None, e_s_test=None):
+    """Calculates the influence function
+
+    Arguments:
+        train_dataset_size: int, total train dataset size
+        grad_z_vecs: list of torch tensor, containing the gradients
+            from model parameters to loss
+        e_s_test: list of torch tensor, contains s_test vectors
+
+    Returns:
+        influence: list of float, influences of all training data samples
+            for one test sample
+        harmful: list of int, training sample indices sorted by harmfulness
+        helpful: list of int, training sample indices sorted by helpfulness"""
+    if not grad_z_vecs and not e_s_test:
+        grad_z_vecs = load_grad_z()
+        e_s_test, _ = load_s_test(train_dataset_size=train_dataset_size)
+
+    if len(grad_z_vecs) != train_dataset_size:
+        logging.warning(
+            "Training data size and the number of grad_z files are inconsistent."
+        )
+        train_dataset_size = len(grad_z_vecs)
+
+    influences = []
+    for i in range(train_dataset_size):
+        tmp_influence = (
+            -sum(
+                [
+                    ###################################
+                    # TODO: verify if computation really needs to be done
+                    # on the CPU or if GPU would work, too
+                    ###################################
+                    torch.sum(k * j).data.cpu().numpy()
+                    for k, j in zip(grad_z_vecs[i], e_s_test)
+                    ###################################
+                    # Originally with [i] because each grad_z contained
+                    # a list of tensors as long as e_s_test list
+                    # There is one grad_z per training data sample
+                    ###################################
+                ]
+            )
+            / train_dataset_size
+        )
+        influences.append(tmp_influence)
+        # display_progress("Calc. influence function: ", i, train_dataset_size)
+
+    harmful = np.argsort(influences)
+    helpful = harmful[::-1]
+
+    return influences, harmful.tolist(), helpful.tolist()
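+
+
+# Connection to leave-one-out retraining (what fig 2 plots), as a sketch:
+# removing a training point z and retraining changes the test loss by
+# approximately
+#     L(z_test, theta_{-z}) - L(z_test, theta) ~= - influence
+# where `influence` is the -sum(grad_z * s_test) / n value returned below;
+# leave_one_retraining.py negates it (loss_diff_approx) and compares it
+# against the true retraining loss difference.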
+def calc_influence_single(
+    model,
+    train_loader,
+    test_loader,
+    test_id_num,
+    recursion_depth,
+    r,
+    gpu=0,
+    damp=0.01,
+    scale=25,
+    s_test_vec=None,
+    time_logging=False,
+    loss_func="cross_entropy",
+):
+    """Calculates the influences of all training data points on a single
+    test dataset image.
+
+    Arguments:
+        model: pytorch model
+        train_loader: DataLoader, loads the training dataset
+        test_loader: DataLoader, loads the test dataset
+        test_id_num: int, id of the test sample for which to calculate the
+            influence function
+        recursion_depth: int, number of recursion steps per s_test estimate;
+            more steps increase accuracy (the paper uses 5000)
+        r: int, number of repetitions of the h_estimate calculation to average
+            over (the paper uses 10); r * recursion_depth should be at most
+            the training dataset size
+        gpu: int, use the GPU with this id if > 0; 0 or -1 for CPU
+        s_test_vec: list of torch tensor, contains s_test vectors. If left
+            empty it will also be calculated
+
+    Returns:
+        influence: list of float, influences of all training data samples
+            for one test sample
+        harmful: list of int, training sample indices sorted by harmfulness
+        helpful: list of int, training sample indices sorted by helpfulness
+        test_id_num: int, the number of the test dataset point
+            the influence was calculated for"""
+    # Calculate the s_test vectors if not provided
+    if s_test_vec is None:
+        z_test, t_test = test_loader.dataset[test_id_num]  # image, label
+        z_test = test_loader.collate_fn([z_test])  # collate_fn takes a list of dataset samples and collates them into a batch
+        t_test = test_loader.collate_fn([t_test])
+        s_test_vec = s_test_sample(
+            model,
+            z_test,
+            t_test,
+            train_loader,
+            gpu,
+            recursion_depth=recursion_depth,
+            r=r,
+            damp=damp,
+            scale=scale,
+            loss_func=loss_func,
+        )
+
+    # Calculate the influence function
+    train_dataset_size = len(train_loader.dataset)
+    influences = []
+    for i in tqdm(range(train_dataset_size)):
+        z, t = train_loader.dataset[i]
+        z = train_loader.collate_fn([z])
+        t = train_loader.collate_fn([t])
+
+        if time_logging:
+            time_a = datetime.datetime.now()
+
+        grad_z_vec = grad_z(z, t, model, gpu=gpu)
+
+        if time_logging:
+            time_b = datetime.datetime.now()
+            time_delta = time_b - time_a
+            logging.info(f"Time for grad_z iter: {time_delta.total_seconds() * 1000}")
+        with torch.no_grad():
+            tmp_influence = (
+                -sum(
+                    [
+                        torch.sum(k * j).data
+                        for k, j in zip(grad_z_vec, s_test_vec)
+                    ]
+                )
+                / train_dataset_size
+            )
+
+        influences.append(tmp_influence)
+
+    harmful = np.argsort(influences)
+    helpful = harmful[::-1]
+
+    return influences, harmful.tolist(), helpful.tolist(), test_id_num
+
+
+def get_dataset_sample_ids_per_class(class_id, num_samples, test_loader, start_index=0):
+    """Gets the first num_samples from class class_id, starting from
+    start_index. Returns a list with the indices which can be passed to
+    test_loader.dataset[X] to retrieve the actual data.
+
+    Arguments:
+        class_id: int, name or id of the class label
+        num_samples: int, number of samples per class to process
+        test_loader: DataLoader, can load the test dataset.
+        start_index: int, number of occurrences of the class to skip before
+            collecting; e.g. if 3, collection starts at the 4th occurrence of
+            an item with the label class_id.
+
+    Returns:
+        sample_list: list of int, contains indices of the relevant samples"""
+    sample_list = []
+    img_count = 0
+    for i in range(len(test_loader.dataset)):
+        _, t = test_loader.dataset[i]
+        if class_id == t:
+            img_count += 1
+            if (img_count > start_index) and (img_count <= start_index + num_samples):
+                sample_list.append(i)
+            elif img_count > start_index + num_samples:
+                break
+
+    return sample_list
+
+
+def get_dataset_sample_ids(num_samples, test_loader, num_classes=None, start_index=0):
+    """Gets the first num_samples indices of all classes, starting from
+    start_index per class. Returns a list and a dict containing the indices.
+
+    Arguments:
+        num_samples: int, number of samples of each class to return
+        test_loader: DataLoader, can load the test dataset
+        num_classes: int, number of classes contained in the dataset
+        start_index: int, number of occurrences of each class to skip before
+            collecting; e.g. if 3, collection starts at the 4th occurrence of
+            an item with the label class_nr.
+
+    Returns:
+        sample_dict: dict, containing dict[class] = list_of_indices
+        sample_list: list, containing a flat list of indices"""
+    sample_dict = {}
+    sample_list = []
+    if not num_classes:
+        num_classes = len(np.unique(test_loader.dataset.targets))
+    for i in range(num_classes):
+        sample_dict[str(i)] = get_dataset_sample_ids_per_class(
+            i, num_samples, test_loader, start_index
+        )
+        # Append the new list on the same level as the old list
+        # Avoids having a list of lists
+        sample_list[len(sample_list):len(sample_list)] = sample_dict[str(i)]
+    return sample_dict, sample_list
+
+
+def calc_img_wise(config, model, train_loader, test_loader, loss_func="cross_entropy"):
+    """Calculates the influence function one test point at a time. Calculates
+    the `s_test` and `grad_z` values on the fly and discards them afterwards.
+
+    Arguments:
+        config: dict, contains the configuration from cli params"""
+    influences_meta = copy.deepcopy(config)
+    test_sample_num = config["test_sample_num"]
+    test_start_index = config["test_start_index"]
+    outdir = Path(config["outdir"])
+
+    # If calculating the influence for a subset of the whole dataset,
+    # calculate it evenly for the same number of samples from all classes.
+    # `test_start_index` is `False` when it hasn't been set by the user. It can
+    # also be set to `0`.
+    if test_sample_num and test_start_index is not False:
+        test_dataset_iter_len = test_sample_num * config["num_classes"]
+        _, sample_list = get_dataset_sample_ids(
+            test_sample_num, test_loader, config["num_classes"], test_start_index
+        )
+    else:
+        test_dataset_iter_len = len(test_loader.dataset)
+        sample_list = list(range(test_dataset_iter_len))  # iterate the test set in order
+
+    # Set up logging and save the metadata conf file
+    logging.info(f"Running on: {test_sample_num} images per class.")
+    logging.info(f"Starting at img number: {test_start_index} per class.")
+    influences_meta["test_sample_index_list"] = sample_list
+    influences_meta_fn = (
+        f"influences_results_meta_{test_start_index}-{test_sample_num}.json"
+    )
+    influences_meta_path = outdir.joinpath(influences_meta_fn)
+    save_json(influences_meta, influences_meta_path)
+
+    influences = {}
+    # Main loop for calculating the influence function one test sample per
+    # iteration.
+    for j in range(test_dataset_iter_len):
+        # If we calculate evenly per class, choose the test img indices
+        # from the sample_list instead
+        if test_sample_num and test_start_index is not False:
+            if j >= len(sample_list):
+                logging.warning(
+                    "ERROR: the test sample id is out of index of the"
+                    " defined test set. Jumping to next test sample."
+                )
+                continue
+            i = sample_list[j]
+        else:
+            i = j
+
+        start_time = time.time()
+        influence, harmful, helpful, _ = calc_influence_single(
+            model,
+            train_loader,
+            test_loader,
+            test_id_num=i,
+            gpu=config["gpu"],
+            recursion_depth=config["recursion_depth"],
+            r=config["r_averaging"],
+            loss_func=loss_func,
+        )
+        end_time = time.time()
+
+        ###########
+        # Different from `influence` above
+        ###########
+        influences[str(i)] = {}
+        _, label = test_loader.dataset[i]
+        influences[str(i)]["label"] = label
+        influences[str(i)]["num_in_dataset"] = j
+        influences[str(i)]["time_calc_influence_s"] = end_time - start_time
+        infl = [x.cpu().numpy().tolist() for x in influence]
+        influences[str(i)]["influence"] = infl
+        influences[str(i)]["harmful"] = harmful[:500]
+        influences[str(i)]["helpful"] = helpful[:500]
+
+        tmp_influences_path = outdir.joinpath(
+            f"influence_results_tmp_"
+            f"{test_start_index}_"
+            f"{test_sample_num}"
+            f"_last-i_{i}.json"
+        )
+        save_json(influences, tmp_influences_path)
+        display_progress("Test samples processed: ", j, test_dataset_iter_len)
+
+        logging.info("The results for this run are:")
+        logging.info("Influences: ")
+        logging.info(influence[:3])
+        logging.info("Most harmful img IDs: ")
+        logging.info(harmful[:3])
+        logging.info("Most helpful img IDs: ")
+        logging.info(helpful[:3])
+
+    influences_path = outdir.joinpath(
+        f"influence_results_{test_start_index}_{test_sample_num}.json"
+    )
+    save_json(influences, influences_path)
+
+
+def calc_all_grad_then_test(config, model, train_loader, test_loader):
+    """Calculates the influence function by first calculating
+    all grad_z, all s_test and then loading them to calc the influence"""
+
+    outdir = Path(config["outdir"])
+    s_test_outdir = outdir.joinpath("s_test/")
+    if not s_test_outdir.exists():
+        s_test_outdir.mkdir()
+    grad_z_outdir = outdir.joinpath("grad_z/")
+    if not grad_z_outdir.exists():
+        grad_z_outdir.mkdir()
+
+    influence_results = {}
+
+    calc_s_test(
+        model,
+        test_loader,
+        train_loader,
+        s_test_outdir,
+        config["gpu"],
+        config["damp"],
+        config["scale"],
+        config["recursion_depth"],
+        config["r_averaging"],
+        config["test_start_index"],
+    )
+    calc_grad_z(
+        model, train_loader, grad_z_outdir, config["gpu"], config["test_start_index"]
+    )
+
+    train_dataset_len = len(train_loader.dataset)
+    influences, harmful, helpful = calc_influence_function(train_dataset_len)
+
+    influence_results["influences"] = influences
+    influence_results["harmful"] = harmful
+    influence_results["helpful"] = helpful
+    influences_path = outdir.joinpath("influence_results.json")
+    save_json(influence_results, influences_path)
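+
+
+# Typical entry point, as a sketch (loaders are user-provided; names are
+# illustrative only):
+#
+#     config = get_default_config()
+#     model, train_loader, test_loader = ...  # user-provided
+#     calc_img_wise(config, model, train_loader, test_loader)
+#
+# get_default_config() lives in utils.py below.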
diff --git a/fig2_linear_approx/influence_functions_toolkits/utils.py b/fig2_linear_approx/influence_functions_toolkits/utils.py
new file mode 100644
index 0000000..bc8c1eb
--- /dev/null
+++ b/fig2_linear_approx/influence_functions_toolkits/utils.py
@@ -0,0 +1,274 @@
+import sys
+import json
+import logging
+from pathlib import Path
+from datetime import datetime as dt
+from typing import Sequence
+
+import numpy as np
+import torch
+from scipy.optimize import fmin_ncg
+
+
+def save_json(
+    json_obj,
+    json_path,
+    append_if_exists=False,
+    overwrite_if_exists=False,
+    unique_fn_if_exists=True,
+):
+    """Saves a json file
+
+    Arguments:
+        json_obj: json, json object
+        json_path: Path, path including the file name where the json object
+            should be saved to
+        append_if_exists: bool, append to the existing json file with the same
+            name if it exists (keep the json structure intact)
+        overwrite_if_exists: bool, mutually exclusive with append_if_exists;
+            overwrites any existing target file
+        unique_fn_if_exists: bool, appends the current date and time to the
+            file name if the target file exists already.
+    """
+    if isinstance(json_path, str):
+        json_path = Path(json_path)
+
+    if overwrite_if_exists:
+        append_if_exists = False
+        unique_fn_if_exists = False
+
+    if unique_fn_if_exists:
+        overwrite_if_exists = False
+        append_if_exists = False
+        if json_path.exists():
+            time = dt.now().strftime("%Y-%m-%d-%H-%M-%S")
+            json_path = (
+                json_path.parents[0] / f"{str(json_path.stem)}_{time}"
+                f"{str(json_path.suffix)}"
+            )
+
+    if overwrite_if_exists:
+        append_if_exists = False
+        with open(json_path, "w+") as fout:
+            json.dump(json_obj, fout, indent=2)
+        return
+
+    if append_if_exists:
+        if json_path.exists():
+            with open(json_path, "r") as fin:
+                read_file = json.load(fin)
+            read_file.update(json_obj)
+            with open(json_path, "w+") as fout:
+                json.dump(read_file, fout, indent=2)
+            return
+
+    with open(json_path, "w+") as fout:
+        json.dump(json_obj, fout, indent=2)
+
+
+def display_progress(text, current_step, last_step, enabled=True, fix_zero_start=True):
+    """Draws a progress indicator on the screen with the text preceding the
+    progress
+
+    Arguments:
+        text: str, text displayed to describe the task being executed
+        current_step: int, current step of the iteration
+        last_step: int, last possible step of the iteration
+        enabled: bool, if false this function will not execute. This is
+            for running silently without stdout output.
+        fix_zero_start: bool, if true adds 1 to each current step so that the
+            display starts at 1 instead of 0, which it would for most loops
+            otherwise.
+    """
+    if not enabled:
+        return
+
+    # Fix display for most loops which start with 0, otherwise looks weird
+    if fix_zero_start:
+        current_step = current_step + 1
+
+    term_line_len = 80
+    final_chars = [":", ";", " ", ".", ","]
+    if text[-1:] not in final_chars:
+        text = text + " "
+    if len(text) < term_line_len:
+        bar_len = term_line_len - (
+            len(text) + len(str(current_step)) + len(str(last_step)) + len(" / ")
+        )
+    else:
+        bar_len = 30
+    filled_len = int(round(bar_len * current_step / float(last_step)))
+    bar = "=" * filled_len + "." * (bar_len - filled_len)
+
+    bar = f"{text}[{bar:s}] {current_step:d} / {last_step:d}"
+    if current_step < last_step - 1:
+        # Erase to end of line and print
+        sys.stdout.write("\033[K" + bar + "\r")
+    else:
+        sys.stdout.write(bar + "\n")
+
+    sys.stdout.flush()
+
+
+def init_logging(filename=None):
+    """Initialises log/stdout output
+
+    Arguments:
+        filename: str, a filename can be set to output the log information to
+            a file instead of stdout"""
+    log_lvl = logging.INFO
+    log_format = "%(asctime)s: %(message)s"
+    if filename:
+        logging.basicConfig(
+            handlers=[logging.FileHandler(filename), logging.StreamHandler(sys.stdout)],
+            level=log_lvl,
+            format=log_format,
+        )
+    else:
+        logging.basicConfig(stream=sys.stdout, level=log_lvl, format=log_format)
+
+
+def get_default_config():
+    """Returns a default config file"""
+    config = {
+        "outdir": "outdir",
+        "seed": 42,
+        "gpu": 0,
+        "dataset": "CIFAR10",
+        "num_classes": 10,
+        "test_sample_num": 1,
+        "test_start_index": 0,
+        "recursion_depth": 1,
+        "r_averaging": 1,
+        "scale": None,
+        "damp": None,
+        "calc_method": "img_wise",
+        "log_filename": None,
+    }
+
+    return config
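+
+
+# conjugate_gradient below relies on the standard identity: for a symmetric
+# positive-definite A, the minimizer of
+#     f(x) = 0.5 * x^T A x - b^T x
+# satisfies grad f(x) = A x - b = 0, i.e. it solves A x = b, so fmin_ncg
+# can be driven with Hessian-vector products alone.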
+
+
+def conjugate_gradient(ax_fn, b, debug_callback=None, avextol=None, maxiter=None):
+    """Computes the solution to Ax - b = 0 by minimizing the conjugate objective
+    f(x) = x^T A x / 2 - b^T x. This does not require evaluating the matrix A
+    explicitly, only the matrix-vector product Ax.
+
+    From https://github.com/kohpangwei/group-influence-release/blob/master/influence/conjugate.py.
+
+    Args:
+        ax_fn: A function that returns Ax given x.
+        b: The vector b.
+        debug_callback: An optional debugging function that reports the current optimization function. Takes two
+            parameters: the current solution and a helper function that evaluates the quadratic and linear parts of the
+            conjugate objective separately. (Default value = None)
+        avextol: (Default value = None)
+        maxiter: (Default value = None)
+
+    Returns:
+        The conjugate optimization solution.
+
+    """
+
+    cg_callback = None
+    if debug_callback:
+        cg_callback = lambda x: debug_callback(
+            x, -np.dot(b, x), 0.5 * np.dot(x, ax_fn(x))
+        )
+
+    result = fmin_ncg(
+        f=lambda x: 0.5 * np.dot(x, ax_fn(x)) - np.dot(b, x),
+        x0=np.zeros_like(b),
+        fprime=lambda x: ax_fn(x) - b,
+        fhess_p=lambda x, p: ax_fn(p),
+        callback=cg_callback,
+        avextol=avextol,
+        maxiter=maxiter,
+    )
+
+    return result
+
+
+def del_attr(obj, names):
+    if len(names) == 1:
+        delattr(obj, names[0])
+    else:
+        del_attr(getattr(obj, names[0]), names[1:])
+
+
+def set_attr(obj, names, val):
+    if len(names) == 1:
+        setattr(obj, names[0], val)
+    else:
+        set_attr(getattr(obj, names[0]), names[1:], val)
+
+
+def make_functional(model):
+    orig_params = tuple(model.parameters())
+    # Remove all the parameters from the model and remember their names
+    names = []
+    for name, p in list(model.named_parameters()):
+        del_attr(model, name.split("."))
+        names.append(name)
+
+    return orig_params, names
+
+
+def load_weights(model, names, params, as_params=False):
+    for name, p in zip(names, params):
+        if not as_params:
+            set_attr(model, name.split("."), p)
+        else:
+            set_attr(model, name.split("."), torch.nn.Parameter(p))
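+
+
+# make_functional / load_weights implement a detach-and-reattach round trip
+# (sketch): make_functional() strips the nn.Parameter attributes off the
+# module (so torch.autograd.functional.vhp can treat the weights as plain
+# inputs), load_weights(..., as_params=False) writes candidate tensors back
+# for a forward pass, and load_weights(..., as_params=True) restores the
+# original nn.Parameter objects afterwards.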
+
+
+def tensor_to_tuple(vec, parameters):
+    r"""Convert one vector to the parameters
+
+    Adapted from
+    https://pytorch.org/docs/master/generated/torch.nn.utils.vector_to_parameters.html#torch.nn.utils.vector_to_parameters
+
+    Arguments:
+        vec (Tensor): a single vector that represents the parameters of a model.
+        parameters (Iterable[Tensor]): an iterator of Tensors that are the
+            parameters of a model.
+    """
+    if not isinstance(vec, torch.Tensor):
+        raise TypeError('expected torch.Tensor, but got: {}'
+                        .format(torch.typename(vec)))
+
+    # Pointer for slicing the vector for each parameter
+    pointer = 0
+
+    split_tensors = []
+    for param in parameters:
+
+        # The length of the parameter
+        num_param = param.numel()
+        # Slice the vector and reshape it to the shape of the parameter
+        split_tensors.append(vec[pointer:pointer + num_param].view_as(param))
+
+        # Increment the pointer
+        pointer += num_param
+
+    return tuple(split_tensors)
+
+
+def parameters_to_vector(parameters):
+    r"""Convert parameters to one vector
+
+    Arguments:
+        parameters (Iterable[Tensor]): an iterator of Tensors that are the
+            parameters of a model.
+
+    Returns:
+        The parameters represented by a single vector
+    """
+    vec = []
+    for param in parameters:
+        vec.append(param.view(-1))
+
+    return torch.cat(vec)
diff --git a/fig2_linear_approx/leave_one_retraining.py b/fig2_linear_approx/leave_one_retraining.py
new file mode 100644
index 0000000..e5fd25f
--- /dev/null
+++ b/fig2_linear_approx/leave_one_retraining.py
@@ -0,0 +1,166 @@
+"""
+Reproduce the middle plot of fig 2 in the paper: remove one training sample
+at a time and retrain the logistic regression model on 10-class MNIST.
+2023-10-29
+"""
+
+import torch
+from sklearn import linear_model
+import numpy as np
+from tqdm import tqdm
+import pickle
+from utils import get_mnist_data, visualize_result
+from model import LogisticRegression as LR
+
+from influence_functions_toolkits.influence_functions import (
+    calc_influence_single,
+)
+
+# HYPERPARAMETERS
+EPOCH = 10
+BATCH_SIZE = 100
+CLASS_A, CLASS_B = 1, 7
+TEST_INDEX = 5
+WEIGHT_DECAY = 0.01  # same as the original paper
+OUTPUT_DIR = '../results'
+SAMPLE_NUM = 100
+RECURSION_DEPTH = 1000
+R = 10
+SEED = 17
+
+# set seed
+np.random.seed(SEED)
+torch.manual_seed(SEED)
+torch.cuda.manual_seed(SEED)
+torch.cuda.manual_seed_all(SEED)
+
+
+class DataSet:
+    def __init__(self, data, targets):
+        self.data = data
+        self.targets = targets
+
+    def __len__(self):
+        return len(self.data)
+
+    def __getitem__(self, idx):
+        out_data = self.data[idx]
+        out_label = self.targets[idx]
+
+        return out_data, out_label
+
+
+def get_accuracy(model, test_loader):
+    """
+    Check that the weights transferred from the sklearn model to the pytorch
+    model are correct.
+    """
+    correct = 0
+    total = 0
+
+    with torch.no_grad():
+        for data in tqdm(test_loader):
+            images, labels = data
+
+            outputs = model(images)
+            _, predicted = torch.max(outputs.data, 1)
+
+            total += labels.size(0)
+            correct += (predicted == labels).sum().item()
+    print('Accuracy of the model on the test images: %d %%' % (100 * correct / total))
+    return correct / total
+
+
+def leave_one_out():
+    (x_train, y_train), (x_test, y_test) = get_mnist_data()
+    # print(x_train.shape, y_train.shape, x_test.shape, y_test.shape)
+    train_sample_num = len(x_train)
+    print("len(x_train):", len(x_train))
+
+    train_data = DataSet(x_train, y_train)
+    test_data = DataSet(x_test, y_test)
+    train_loader = torch.utils.data.DataLoader(train_data, batch_size=1, shuffle=False)
+    test_loader = torch.utils.data.DataLoader(test_data, batch_size=1, shuffle=False)
+
+    # prepare the sklearn model to train w, as in the original paper code
+    C = 1.0 / (train_sample_num * WEIGHT_DECAY)
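+    # Why C = 1 / (n * weight_decay), as a sketch: sklearn's LogisticRegression
+    # minimizes 0.5 * ||w||^2 + C * sum_i loss_i, while the paper's objective is
+    # the mean loss plus (weight_decay / 2) * ||w||^2; scaling the latter by n
+    # and matching the two regularizers gives C = 1 / (n * weight_decay).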
+    sklearn_model = linear_model.LogisticRegression(C=C, solver='lbfgs', tol=1e-8, fit_intercept=False,
+                                                    multi_class='multinomial', warm_start=True)
+
+    # prepare the pytorch model to compute the influence function
+    torch_model = LR(weight_decay=WEIGHT_DECAY, is_multi=True)
+
+    # train
+    sklearn_model.fit(x_train, y_train.ravel())
+    print('LBFGS training took %s iter.' % sklearn_model.n_iter_)
+
+    # assign W into the pytorch model
+    w_opt = sklearn_model.coef_
+    with torch.no_grad():
+        torch_model.w = torch.nn.Parameter(
+            torch.tensor(w_opt, dtype=torch.float)  # torch.Size([10, 784])
+        )
+    get_accuracy(torch_model, test_loader)
+
+    # calculate the original loss
+    x_test_input = torch.FloatTensor(x_test[TEST_INDEX: TEST_INDEX + 1])
+    y_test_input = torch.LongTensor(y_test[TEST_INDEX: TEST_INDEX + 1])
+
+    test_data = DataSet(x_test[TEST_INDEX: TEST_INDEX + 1], y_test[TEST_INDEX: TEST_INDEX + 1])
+    test_loader = torch.utils.data.DataLoader(test_data, batch_size=1, shuffle=True)
+
+    test_loss_ori = torch_model.loss(torch_model(x_test_input), y_test_input, train=False).detach().cpu().numpy()
+
+    print('Original loss :{}'.format(test_loss_ori))
+
+    loss_diff_approx, _, _, _ = calc_influence_single(torch_model, train_loader, test_loader, test_id_num=0,
+                                                      recursion_depth=RECURSION_DEPTH, r=R, damp=0, scale=25)
+    loss_diff_approx = -torch.FloatTensor(loss_diff_approx).cpu().numpy()
+
+    # take the highest and lowest predicted loss diffs, to check stability at both ends
+    sorted_indices = np.argsort(loss_diff_approx)
+    sample_indices = np.concatenate([sorted_indices[-int(SAMPLE_NUM / 2):], sorted_indices[:int(SAMPLE_NUM / 2)]])
+
+    # calculate the true loss diff
+    loss_diff_true = np.zeros(SAMPLE_NUM)
+    for i, index in zip(range(SAMPLE_NUM), sample_indices):
+        print('[{}/{}]'.format(i + 1, SAMPLE_NUM))
+
+        # get the leave-one-out dataset
+        x_train_minus_one = np.delete(x_train, index, axis=0)
+        y_train_minus_one = np.delete(y_train, index, axis=0)
+
+        # retrain
+        C = 1.0 / ((train_sample_num - 1) * WEIGHT_DECAY)
+        sklearn_model_minus_one = linear_model.LogisticRegression(C=C, fit_intercept=False, tol=1e-8, solver='lbfgs')
+        sklearn_model_minus_one.fit(x_train_minus_one, y_train_minus_one.ravel())
+        print('LBFGS training took {} iter.'.format(sklearn_model_minus_one.n_iter_))
+
+        # assign w into the pytorch model
+        w_retrain = sklearn_model_minus_one.coef_
+        with torch.no_grad():
+            torch_model.w = torch.nn.Parameter(
+                torch.tensor(w_retrain, dtype=torch.float)
+            )
+
+        # get the retrain loss
+        test_loss_retrain = torch_model.loss(torch_model(x_test_input), y_test_input,
+                                             train=False).detach().cpu().numpy()
+
+        # get the true loss diff
+        loss_diff_true[i] = test_loss_retrain - test_loss_ori
+
+        print('Original loss :{}'.format(test_loss_ori))
+        print('Retrain loss :{}'.format(test_loss_retrain))
+        print('True loss diff :{}'.format(loss_diff_true[i]))
+        print('Estimated loss diff :{}'.format(loss_diff_approx[index]))
+
+    pickle.dump(loss_diff_true, open('loss_diff_true.pkl', 'wb'))
+    pickle.dump(loss_diff_approx[sample_indices], open('loss_diff_approx.pkl', 'wb'))
+    r2_score = visualize_result(loss_diff_true, loss_diff_approx[sample_indices], OUTPUT_DIR)
+
+
+if __name__ == "__main__":
+    leave_one_out()
+    loss_diff_true = pickle.load(open('loss_diff_true.pkl', 'rb'))
+    loss_diff_approx = pickle.load(open('loss_diff_approx.pkl', 'rb'))
+    visualize_result(loss_diff_true, loss_diff_approx, OUTPUT_DIR)
diff --git a/fig2_linear_approx/model.py b/fig2_linear_approx/model.py
new file mode 100644
index 0000000..62b3785
--- /dev/null
+++ b/fig2_linear_approx/model.py
@@ -0,0 +1,40 @@
+import torch
+import numpy as np
+
+
+def log_clip(x):
+    return torch.log(torch.clamp(x, 1e-10, None))
+
+
+class LogisticRegression(torch.nn.Module):
+    def __init__(self, weight_decay, is_multi=False):
+        super(LogisticRegression, self).__init__()
+        self.is_multi = is_multi
+        # self.wd = torch.FloatTensor([weight_decay]).cuda()
+        if self.is_multi:
+            self.w = torch.nn.Parameter(torch.zeros([10, 784], requires_grad=True))
+        else:
+            self.w = torch.nn.Parameter(torch.zeros([784], requires_grad=True))
+
+    def forward(self, x):
+        if self.is_multi:
+            logits = torch.matmul(x, self.w.T)
+        else:
+            logits = torch.matmul(x, torch.reshape(self.w, [-1, 1]))
+        return logits
+
+    def loss(self, logits, y, train=True):
+        if self.is_multi:
+            criterion = torch.nn.CrossEntropyLoss()
+            loss = criterion(logits, y.long())  # targets must be int64 class indices
+        else:
+            preds = torch.sigmoid(logits)
+
+            if train:
+                loss = -torch.mean(
+                    y * log_clip(preds) + (1 - y) * log_clip(1 - preds))  # + torch.norm(self.w, 2) * self.wd
+            else:
+                loss = -torch.mean(y * log_clip(preds) + (1 - y) * log_clip(1 - preds))
+
+        return loss
diff --git a/fig2_linear_approx/result.png b/fig2_linear_approx/result.png
new file mode 100644
index 0000000000000000000000000000000000000000..4e4cbecc1f959d17d12222fa4f94c654b31c6a43
GIT binary patch
literal 29665
[base85-encoded image data omitted: result.png, the reproduced plot]