Skip to content

Commit

Permalink
upload four experiments from v2 of paper
Browse the repository at this point in the history
  • Loading branch information
BethanyL committed Apr 16, 2018
1 parent deac2b8 commit bec93c7
Show file tree
Hide file tree
Showing 4 changed files with 340 additions and 107 deletions.
150 changes: 101 additions & 49 deletions DiscreteSpectrumExampleExperiment.py
Original file line number Diff line number Diff line change
@@ -1,49 +1,101 @@
import copy
import random as r

import training

# Random hyperparameter search for the DiscreteSpectrumExample dataset:
# draw 200 random configurations and launch one training run for each.

params = {}

# dataset
params['data_name'] = 'DiscreteSpectrumExample'
params['len_time'] = 51
n = 2  # dimension of the system (and of the input layer)
params['data_train_len'] = 10
num_initial_conditions = 29400  # per training file (10 training data files)
params['delta_t'] = 0.02

# where results are saved
params['folder_name'] = 'exp1'

# network architecture
w = 100  # width of hidden layers in encoder and decoder
k = 2  # dimension of y-coordinates
params['widths'] = [n, w, w, w, k, k, w, w, w, n]
params['widths_omega'] = [2, w, w, w, 1]

# loss function
params['num_shifts'] = 3
params['Linf_lam'] = 10 ** (-6)

# training
params['num_passes_per_file'] = 15 * 4
params['num_steps_per_batch'] = 2
params['learning_rate'] = 10 ** (-3)

# timing
params['max_time'] = 4 * 60 * 60  # 4 hours
params['min_halfway'] = 10 ** (-5)

# loop to do random experiments
for trial in range(200):
    # number of middle shifts is sampled per run
    params['num_shifts_middle'] = r.randint(5, params['len_time'] - 1)
    max_shifts = max(params['num_shifts'], params['num_shifts_middle'])
    num_examples = num_initial_conditions * (params['len_time'] - max_shifts)
    params['batch_size'] = int(2 ** (r.uniform(6, 9)))
    steps_to_see_all = num_examples / params['batch_size']
    params['num_steps_per_file_pass'] = (int(steps_to_see_all) + 1) * params['num_steps_per_batch']

    # regularization weights, sampled on a log scale
    params['L2_lam'] = 10 ** (-r.uniform(12, 18))
    params['L1_lam'] = 10 ** (-r.uniform(14, 18))

    # deep copy so the experiment cannot mutate this loop's search state
    training.main_exp(copy.deepcopy(params))
import copy
import random as r

import numpy as np

import training

# Random hyperparameter search for the DiscreteSpectrumExample dataset:
# draw 200 random configurations and launch one training run for each.

params = {}

# dataset
params['data_name'] = 'DiscreteSpectrumExample'
params['len_time'] = 51
n = 2  # dimension of the system (and of the input layer)
num_initial_conditions = 5000  # per training file
params['delta_t'] = 0.02

# where results are saved
params['folder_name'] = 'exp1'

# network architecture
params['num_real'] = 2
params['num_complex_pairs'] = 0
params['num_evals'] = 2
k = params['num_evals']  # dimension of y-coordinates

# loss function
params['num_shifts'] = 30
params['num_shifts_middle'] = params['len_time'] - 1
max_shifts = max(params['num_shifts'], params['num_shifts_middle'])
num_examples = num_initial_conditions * (params['len_time'] - max_shifts)
params['recon_lam'] = .1
params['L1_lam'] = 0.0

# training
params['num_passes_per_file'] = 15 * 6 * 10
params['num_steps_per_batch'] = 2
params['learning_rate'] = 10 ** (-3)

# timing
params['max_time'] = 4 * 60 * 60  # 4 hours
params['min_5min'] = .5
params['min_20min'] = .0004
params['min_40min'] = .00008
params['min_1hr'] = .00003
params['min_2hr'] = .00001
params['min_3hr'] = .000006
params['min_halfway'] = .000006

# candidate hidden-layer width ranges, keyed by network depth:
# depth -> (start, stop, step) arguments for np.arange
MAIN_WIDTH_RANGES = {1: (50, 160, 10), 2: (15, 45, 5), 3: (10, 25, 5), 4: (10, 20, 5)}
OMEGA_WIDTH_RANGES = {1: (20, 110, 10), 2: (10, 25, 5), 3: (5, 20, 5), 4: (5, 15, 5)}

# loop to do random experiments
for trial in range(200):
    params['data_train_len'] = r.randint(1, 3)
    params['batch_size'] = int(2 ** (r.randint(7, 9)))
    steps_to_see_all = num_examples / params['batch_size']
    params['num_steps_per_file_pass'] = (int(steps_to_see_all) + 1) * params['num_steps_per_batch']
    params['auto_first'] = 1 if r.random() < .5 else 0

    params['L2_lam'] = 10 ** (-r.randint(13, 15))
    # half of the runs disable the L-infinity penalty entirely
    params['Linf_lam'] = 0.0 if r.random() < .5 else 10 ** (-r.randint(6, 10))

    # main network: d hidden layers of width w on each side of the [k, k] middle
    d = r.randint(1, 4)
    wopts = np.arange(*MAIN_WIDTH_RANGES[d])
    w = wopts[r.randint(0, len(wopts) - 1)]
    params['widths'] = [2] + [w] * d + [k, k] + [w] * d + [2]

    # omega network: do hidden layers, all of width wo
    do = r.randint(1, 4)
    wopts = np.arange(*OMEGA_WIDTH_RANGES[do])
    wo = wopts[r.randint(0, len(wopts) - 1)]
    params['hidden_widths_omega'] = [wo] * do

    # deep copy so the experiment cannot mutate this loop's search state
    training.main_exp(copy.deepcopy(params))
78 changes: 78 additions & 0 deletions FluidFlowBoxExperiment.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,78 @@
import copy
import random as r

import numpy as np

import training

# Random hyperparameter search for the FluidFlowBox dataset: draw 200
# random configurations and hand a deep copy of the params dict to
# training.main_exp, which runs a single experiment.

params = {}

# settings related to dataset
params['data_name'] = 'FluidFlowBox'
params['len_time'] = 101
n = 3  # dimension of system (and input layer)
num_initial_conditions = 5000  # per training file
params['delta_t'] = 0.01

# settings related to saving results
params['folder_name'] = 'exp4'

# settings related to network architecture
params['num_real'] = 1
params['num_complex_pairs'] = 1
params['num_evals'] = 3
k = params['num_evals']  # dimension of y-coordinates

# defaults related to initialization of parameters
params['dist_weights'] = 'dl'
params['dist_weights_omega'] = 'dl'

# settings related to loss function
params['num_shifts'] = 30
params['num_shifts_middle'] = params['len_time'] - 1
max_shifts = max(params['num_shifts'], params['num_shifts_middle'])
num_examples = num_initial_conditions * (params['len_time'] - max_shifts)
params['recon_lam'] = .1
params['L1_lam'] = 0.0
params['auto_first'] = 1

# settings related to training
params['num_passes_per_file'] = 15 * 6 * 10
params['num_steps_per_batch'] = 2
params['learning_rate'] = 10 ** (-3)

# settings related to timing: a wall-clock budget plus per-checkpoint
# thresholds (presumably loss cutoffs used by training.py to abandon
# poor runs early -- confirm against training.py)
params['max_time'] = 6 * 60 * 60  # 6 hours
params['min_5min'] = .45
params['min_20min'] = .005
params['min_40min'] = .0005
params['min_1hr'] = .00025
params['min_2hr'] = .00005
params['min_3hr'] = .000007
params['min_4hr'] = .000005
params['min_halfway'] = 1

for count in range(200):  # loop to do random experiments
    params['data_train_len'] = r.randint(1, 4)
    params['batch_size'] = int(2 ** (r.randint(7, 8)))
    steps_to_see_all = num_examples / params['batch_size']
    params['num_steps_per_file_pass'] = (int(steps_to_see_all) + 1) * params['num_steps_per_batch']
    # regularization weights, sampled on a log scale
    params['L2_lam'] = 10 ** (-r.randint(13, 14))
    params['Linf_lam'] = 10 ** (-r.randint(7, 9))

    # main network: one hidden layer of width w on each side of [k, k]
    # (removed dead assignment `d = 1` -- the variable was never read)
    wopts = np.arange(50, 200, 5)
    w = wopts[r.randint(0, len(wopts) - 1)]
    params['widths'] = [n, w, k, k, w, n]

    # omega network: one or two hidden layers of width wo
    do = r.randint(1, 2)
    if do == 1:
        wopts = np.arange(20, 135, 5)
        wo = wopts[r.randint(0, len(wopts) - 1)]
        params['hidden_widths_omega'] = [wo, ]
    elif do == 2:
        wopts = np.arange(10, 30, 5)
        wo = wopts[r.randint(0, len(wopts) - 1)]
        params['hidden_widths_omega'] = [wo, wo]

    # deep copy so the experiment cannot mutate this loop's search state
    training.main_exp(copy.deepcopy(params))
78 changes: 78 additions & 0 deletions FluidFlowOnAttractorExperiment.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,78 @@
import copy
import random as r

import numpy as np

import training

# Random hyperparameter search for the FluidFlowOnAttractor dataset:
# draw 200 random configurations and launch one training run for each.

params = {}

# dataset
params['data_name'] = 'FluidFlowOnAttractor'
params['len_time'] = 121
n = 3  # dimension of the system (and of the input layer)
num_initial_conditions = 5000  # per training file
params['delta_t'] = 0.05

# where results are saved
params['folder_name'] = 'exp3'

# network architecture
params['num_real'] = 0
params['num_complex_pairs'] = 1
params['num_evals'] = 2
k = params['num_evals']  # dimension of y-coordinates

# loss function
params['num_shifts'] = 30
params['num_shifts_middle'] = params['len_time'] - 1
max_shifts = max(params['num_shifts'], params['num_shifts_middle'])
num_examples = num_initial_conditions * (params['len_time'] - max_shifts)
params['recon_lam'] = .1
params['L1_lam'] = 0.0
params['auto_first'] = 1

# training
params['num_passes_per_file'] = 15 * 6 * 10
params['num_steps_per_batch'] = 2
params['learning_rate'] = 10 ** (-3)

# timing
params['max_time'] = 6 * 60 * 60  # 6 hours
params['min_5min'] = .45
params['min_20min'] = .001
params['min_40min'] = .0005
params['min_1hr'] = .00025
params['min_2hr'] = .00005
params['min_3hr'] = .000005
params['min_4hr'] = .0000007
params['min_halfway'] = 1

# candidate hidden-layer width ranges, keyed by network depth:
# depth -> (start, stop, step) arguments for np.arange
MAIN_WIDTH_RANGES = {1: (70, 135, 5), 2: (15, 30, 5)}
OMEGA_WIDTH_RANGES = {1: (230, 450, 10), 2: (25, 40, 5)}

# loop to do random experiments
for trial in range(200):
    params['data_train_len'] = r.randint(1, 3)
    params['batch_size'] = int(2 ** (r.randint(7, 8)))
    steps_to_see_all = num_examples / params['batch_size']
    params['num_steps_per_file_pass'] = (int(steps_to_see_all) + 1) * params['num_steps_per_batch']
    params['L2_lam'] = 10 ** (-r.randint(13, 14))
    params['Linf_lam'] = 10 ** (-r.randint(7, 10))

    # main network: d hidden layers of width w on each side of the [k, k] middle
    d = r.randint(1, 2)
    wopts = np.arange(*MAIN_WIDTH_RANGES[d])
    w = wopts[r.randint(0, len(wopts) - 1)]
    params['widths'] = [n] + [w] * d + [k, k] + [w] * d + [n]

    # omega network: do hidden layers, all of width wo
    do = r.randint(1, 2)
    wopts = np.arange(*OMEGA_WIDTH_RANGES[do])
    wo = wopts[r.randint(0, len(wopts) - 1)]
    params['hidden_widths_omega'] = [wo] * do

    # deep copy so the experiment cannot mutate this loop's search state
    training.main_exp(copy.deepcopy(params))
Loading

0 comments on commit bec93c7

Please sign in to comment.