From 0e5d1170f99a8c77409de1e58a9375fee38c3000 Mon Sep 17 00:00:00 2001
From: Bethany Lusch
Date: Mon, 16 Apr 2018 16:42:30 -0700
Subject: [PATCH] add version of experiments that uses the parameters chosen
 in the paper instead of doing a parameter search

---
 ...reteSpectrumExampleExperimentBestParams.py | 58 +++++++++++++++++
 FluidFlowBoxExperimentBestParams.py           | 63 +++++++++++++++++++
 FluidFlowOnAttractorExperimentBestParams.py   | 59 +++++++++++++++++
 PendulumExperimentBestParams.py               | 63 +++++++++++++++++++
 4 files changed, 243 insertions(+)
 create mode 100644 DiscreteSpectrumExampleExperimentBestParams.py
 create mode 100644 FluidFlowBoxExperimentBestParams.py
 create mode 100644 FluidFlowOnAttractorExperimentBestParams.py
 create mode 100644 PendulumExperimentBestParams.py

diff --git a/DiscreteSpectrumExampleExperimentBestParams.py b/DiscreteSpectrumExampleExperimentBestParams.py
new file mode 100644
index 0000000..53aa6e4
--- /dev/null
+++ b/DiscreteSpectrumExampleExperimentBestParams.py
@@ -0,0 +1,58 @@
+import copy
+
+import training
+
+params = {}
+
+# settings related to dataset
+params['data_name'] = 'DiscreteSpectrumExample'
+params['data_train_len'] = 1
+params['len_time'] = 51
+n = 2  # dimension of system (and input layer)
+num_initial_conditions = 5000  # per training file
+params['delta_t'] = 0.02
+
+# settings related to saving results
+params['folder_name'] = 'exp1_best'
+
+# settings related to network architecture
+params['num_real'] = 2
+params['num_complex_pairs'] = 0
+params['num_evals'] = 2
+k = params['num_evals']  # dimension of y-coordinates
+w = 30
+params['widths'] = [2, w, w, k, k, w, w, 2]
+wo = 10
+params['hidden_widths_omega'] = [wo, wo, wo]
+
+# settings related to loss function
+params['num_shifts'] = 30
+params['num_shifts_middle'] = params['len_time'] - 1
+max_shifts = max(params['num_shifts'], params['num_shifts_middle'])
+num_examples = num_initial_conditions * (params['len_time'] - max_shifts)
+params['recon_lam'] = .1
+params['Linf_lam'] = 10 ** (-7)
+params['L1_lam'] = 0.0
+params['L2_lam'] = 10 ** (-15)
+params['auto_first'] = 0
+
+# settings related to the training
+params['num_passes_per_file'] = 15 * 6 * 10
+params['num_steps_per_batch'] = 2
+params['learning_rate'] = 10 ** (-3)
+params['batch_size'] = 256
+steps_to_see_all = num_examples / params['batch_size']
+params['num_steps_per_file_pass'] = (int(steps_to_see_all) + 1) * params['num_steps_per_batch']
+
+# settings related to the timing
+params['max_time'] = 4 * 60 * 60  # 4 hours
+params['min_5min'] = .5
+params['min_20min'] = .0004
+params['min_40min'] = .00008
+params['min_1hr'] = .00003
+params['min_2hr'] = .00001
+params['min_3hr'] = .000006
+params['min_halfway'] = .000006
+
+for count in range(200):  # loop to repeat the experiment with these fixed parameters
+    training.main_exp(copy.deepcopy(params))
diff --git a/FluidFlowBoxExperimentBestParams.py b/FluidFlowBoxExperimentBestParams.py
new file mode 100644
index 0000000..275fe2d
--- /dev/null
+++ b/FluidFlowBoxExperimentBestParams.py
@@ -0,0 +1,63 @@
+import copy
+
+import training
+
+params = {}
+
+# settings related to dataset
+params['data_name'] = 'FluidFlowBox'
+params['data_train_len'] = 4
+params['len_time'] = 101
+n = 3  # dimension of system (and input layer)
+num_initial_conditions = 5000  # per training file
+params['delta_t'] = 0.01
+
+# settings related to saving results
+params['folder_name'] = 'exp4_best'
+
+# settings related to network architecture
+params['num_real'] = 1
+params['num_complex_pairs'] = 1
+params['num_evals'] = 3
+k = params['num_evals']  # dimension of y-coordinates
+w = 130
+params['widths'] = [3, w, k, k, w, 3]
+wo = 20
+params['hidden_widths_omega'] = [wo, wo]
+
+# defaults related to initialization of parameters
+params['dist_weights'] = 'dl'
+params['dist_weights_omega'] = 'dl'
+
+# settings related to loss function
+params['num_shifts'] = 30
+params['num_shifts_middle'] = params['len_time'] - 1
+max_shifts = max(params['num_shifts'], params['num_shifts_middle'])
+num_examples = num_initial_conditions * (params['len_time'] - max_shifts)
+params['recon_lam'] = .1
+params['Linf_lam'] = 10 ** (-9)
+params['L1_lam'] = 0.0
+params['L2_lam'] = 10 ** (-13)
+params['auto_first'] = 1
+
+# settings related to training
+params['num_passes_per_file'] = 15 * 6 * 10
+params['num_steps_per_batch'] = 2
+params['learning_rate'] = 10 ** (-3)
+params['batch_size'] = 128
+steps_to_see_all = num_examples / params['batch_size']
+params['num_steps_per_file_pass'] = (int(steps_to_see_all) + 1) * params['num_steps_per_batch']
+
+# settings related to timing
+params['max_time'] = 6 * 60 * 60  # 6 hours
+params['min_5min'] = .45
+params['min_20min'] = .005
+params['min_40min'] = .0005
+params['min_1hr'] = .00025
+params['min_2hr'] = .00005
+params['min_3hr'] = .000007
+params['min_4hr'] = .000005
+params['min_halfway'] = 1
+
+for count in range(200):  # loop to repeat the experiment with these fixed parameters
+    training.main_exp(copy.deepcopy(params))
diff --git a/FluidFlowOnAttractorExperimentBestParams.py b/FluidFlowOnAttractorExperimentBestParams.py
new file mode 100644
index 0000000..ec55108
--- /dev/null
+++ b/FluidFlowOnAttractorExperimentBestParams.py
@@ -0,0 +1,59 @@
+import copy
+
+import training
+
+params = {}
+
+# settings related to dataset
+params['data_name'] = 'FluidFlowOnAttractor'
+params['data_train_len'] = 3
+params['len_time'] = 121
+n = 3  # dimension of system (and input layer)
+num_initial_conditions = 5000  # per training file
+params['delta_t'] = 0.05
+
+# settings related to saving results
+params['folder_name'] = 'exp3_best'
+
+# settings related to network architecture
+params['num_real'] = 0
+params['num_complex_pairs'] = 1
+params['num_evals'] = 2
+k = params['num_evals']  # dimension of y-coordinates
+w = 105
+params['widths'] = [3, w, k, k, w, 3]
+wo = 300
+params['hidden_widths_omega'] = [wo, ]
+
+# settings related to loss function
+params['num_shifts'] = 30
+params['num_shifts_middle'] = params['len_time'] - 1
+max_shifts = max(params['num_shifts'], params['num_shifts_middle'])
+num_examples = num_initial_conditions * (params['len_time'] - max_shifts)
+params['recon_lam'] = .1
+params['Linf_lam'] = 10 ** (-7)
+params['L1_lam'] = 0.0
+params['L2_lam'] = 10 ** (-13)
+params['auto_first'] = 1
+
+# settings related to training
+params['num_passes_per_file'] = 15 * 6 * 10
+params['num_steps_per_batch'] = 2
+params['learning_rate'] = 10 ** (-3)
+params['batch_size'] = 256
+steps_to_see_all = num_examples / params['batch_size']
+params['num_steps_per_file_pass'] = (int(steps_to_see_all) + 1) * params['num_steps_per_batch']
+
+# settings related to timing
+params['max_time'] = 6 * 60 * 60  # 6 hours
+params['min_5min'] = .45
+params['min_20min'] = .001
+params['min_40min'] = .0005
+params['min_1hr'] = .00025
+params['min_2hr'] = .00005
+params['min_3hr'] = .000005
+params['min_4hr'] = .0000007
+params['min_halfway'] = 1
+
+for count in range(200):  # loop to repeat the experiment with these fixed parameters
+    training.main_exp(copy.deepcopy(params))
diff --git a/PendulumExperimentBestParams.py b/PendulumExperimentBestParams.py
new file mode 100644
index 0000000..851cf5d
--- /dev/null
+++ b/PendulumExperimentBestParams.py
@@ -0,0 +1,63 @@
+import copy
+
+import training
+
+params = {}
+
+# settings related to dataset
+params['data_name'] = 'Pendulum'
+params['data_train_len'] = 3
+params['len_time'] = 51
+n = 2  # dimension of system (and input layer)
+num_initial_conditions = 5000  # per training file
+params['delta_t'] = 0.02
+
+# settings related to saving results
+params['folder_name'] = 'exp2_best'
+
+# settings related to network architecture
+params['num_real'] = 0
+params['num_complex_pairs'] = 1
+params['num_evals'] = 2
+k = params['num_evals']  # dimension of y-coordinates
+w = 80
+params['widths'] = [2, w, w, k, k, w, w, 2]
+wo = 170
+params['hidden_widths_omega'] = [wo, ]
+
+# defaults related to initialization of parameters
+params['dist_weights'] = 'dl'
+params['dist_weights_omega'] = 'dl'
+
+# settings related to loss function
+params['num_shifts'] = 30
+params['num_shifts_middle'] = params['len_time'] - 1
+max_shifts = max(params['num_shifts'], params['num_shifts_middle'])
+num_examples = num_initial_conditions * (params['len_time'] - max_shifts)
+params['recon_lam'] = .001
+params['Linf_lam'] = 10 ** (-9)
+params['L1_lam'] = 0.0
+params['L2_lam'] = 10 ** (-14)
+params['auto_first'] = 1
+
+# settings related to training
+params['num_passes_per_file'] = 15 * 6 * 50
+params['num_steps_per_batch'] = 2
+params['learning_rate'] = 10 ** (-3)
+params['batch_size'] = 128
+steps_to_see_all = num_examples / params['batch_size']
+params['num_steps_per_file_pass'] = (int(steps_to_see_all) + 1) * params['num_steps_per_batch']
+
+# settings related to timing
+params['max_time'] = 6 * 60 * 60  # 6 hours
+params['min_5min'] = .25
+params['min_20min'] = .02
+params['min_40min'] = .002
+params['min_1hr'] = .0002
+params['min_2hr'] = .00002
+params['min_3hr'] = .000004
+params['min_4hr'] = .0000005
+params['min_halfway'] = 1
+
+for count in range(200):  # loop to repeat the experiment with these fixed parameters
+    training.main_exp(copy.deepcopy(params))
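
For reference, the derived training quantities in these scripts follow directly from the dataset settings. Below is a minimal standalone sketch (plain Python, no repo dependencies; all values copied from DiscreteSpectrumExampleExperimentBestParams.py above) checking the arithmetic for that configuration:

    # Standalone check of the derived quantities in
    # DiscreteSpectrumExampleExperimentBestParams.py (values copied from the patch).
    len_time = 51                  # snapshots per trajectory
    num_initial_conditions = 5000  # trajectories per training file
    num_shifts = 30
    num_shifts_middle = len_time - 1  # 50
    batch_size = 256
    num_steps_per_batch = 2

    max_shifts = max(num_shifts, num_shifts_middle)                  # 50
    num_examples = num_initial_conditions * (len_time - max_shifts)  # 5000 * 1 = 5000
    steps_to_see_all = num_examples / batch_size                     # 5000 / 256 ~= 19.5
    num_steps_per_file_pass = (int(steps_to_see_all) + 1) * num_steps_per_batch  # 20 * 2 = 40

    print(num_examples, num_steps_per_file_pass)  # 5000 40

So with num_shifts_middle dominating max_shifts, only one example is drawn per initial condition, and each file pass runs 40 optimizer steps (enough batches to see all 5000 examples once, times num_steps_per_batch).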