Commit
update model training to include phi; train separate models for density and vorticity
akrause2014 committed May 17, 2024
1 parent d3b16b5 commit 9d94c7f
Showing 3 changed files with 27 additions and 36 deletions.
23 changes: 8 additions & 15 deletions files/5-training/data_read.py
@@ -11,23 +11,19 @@ def extract_array_data(file_path: str, args) -> np.ndarray:
     # number of ghost cells in x dimension
     gx = 2
     # extract vorticity and density without ghost cells and remove unit y direction
-    vort_array = np.squeeze(dataset.variables['vort'][:,gx:-gx,:,:])
-    dens_array = np.squeeze(dataset.variables['n'][:,gx:-gx,:,:])
+    var_arrays = []
+    for var in args.variables:
+        var_array = np.squeeze(dataset.variables[var][:,gx:-gx,:,:])
+        var_arrays.append(var_array)
     dataset.close()

-    if args.vort_only:
-        flow_image = np.stack([vort_array], axis=-1)
-    elif args.dens_only:
-        flow_image = np.stack([dens_array], axis=-1)
-    else:
-        flow_image = np.stack([vort_array, dens_array], axis=-1)
+    flow_image = np.stack(var_arrays, axis=-1)
     return flow_image

 def translate_augmentation(fields: Dict[str, tf.Tensor]) -> Dict[str, tf.Tensor]:
     coarse_image, error_image = fields['coarse'], fields['error']
-    #commented out for testing
-    #if coarse_image.shape != error_image.shape:
-    #    raise ValueError(f"Coarse grained data and error should be same shape (got {coarse_image.shape} and {error_image.shape} respectively).")
+    if coarse_image.shape != error_image.shape:
+        raise ValueError(f"Coarse grained data and error should be same shape (got {coarse_image.shape} and {error_image.shape} respectively).")
     shape = tf.shape(coarse_image)
     nx, nz = shape[0], shape[1]
     shift_x = tf.random.uniform(shape=[], minval=0, maxval=nx-1, dtype=tf.int32)
@@ -57,10 +53,7 @@ def generate_augmented_dataset(
     coarse_grained_file_names: List[str],
     args,
 ) -> tf.data.Dataset:
-    if args.vort_only or args.dens_only:
-        channels = 1
-    else:
-        channels = 2
+    channels = len(args.variables)
     dataset = tf.data.Dataset.from_generator(
         lambda: data_generator(ground_truth_file_names, coarse_grained_file_names, args),
         output_signature={'coarse': tf.TensorSpec(shape=(None, None, channels), dtype=tf.float64),
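For orientation, here is a minimal sketch of how the reworked extract_array_data might now be driven. The module import, the argparse setup, and the file name 'trajectory-1.nc' are illustrative assumptions, not part of the commit:

    # Hypothetical driver: args.variables selects which netCDF variables
    # are stacked into the channel axis of the returned array.
    import argparse

    from data_read import extract_array_data  # assumed module name

    parser = argparse.ArgumentParser()
    parser.add_argument('-v', '--variables', nargs='+')
    args = parser.parse_args(['--variables', 'vort', 'n'])

    # 'trajectory-1.nc' is a placeholder for any netCDF output file
    # containing the requested variables.
    flow_image = extract_array_data('trajectory-1.nc', args)
    print(flow_image.shape)  # typically (time, nx, nz, len(args.variables))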
11 changes: 10 additions & 1 deletion files/5-training/submit-training.sh
@@ -9,6 +9,7 @@ CUDNN_VERSION=8.6.0-cuda-${CUDA_VERSION}
 TENSORRT_VERSION=8.4.3.1-u2

 export WORK=${HOME/home/work}
+SIMLINT_HOME=${WORK}/SiMLInt

 eval "$(${WORK}/miniconda3/bin/conda shell.bash hook)"
 conda activate boutsmartsim
@@ -21,4 +22,12 @@ module load nvidia/nvhpc
 cd ${WORK}/data/training

 # choose appropriate parameters here
-python ${SIMLINT_HOME}/files/5-training/training.py --epochs 100 --batch-size 32 --learning-rate 0.0001
+# create a model each for vorticity and density error correction
+
+# model the vorticity error
+python ${SIMLINT_HOME}/files/5-training/training.py --epochs 100 --batch-size 32 --learning-rate 0.0001 \
+    --trajectories 10 --data-directory ${WORK}/data/training/ --variables vort --task-id vort
+
+# model the density error
+python ${SIMLINT_HOME}/files/5-training/training.py --epochs 100 --batch-size 32 --learning-rate 0.0001 \
+    --trajectories 10 --data-directory ${WORK}/data/training/ --variables n --task-id n
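The two runs differ only in the variable name and task id, so the same pattern extends to further variables (phi is named in the commit message but not exercised here). A hedged Python sketch of that loop; the placeholder paths stand in for ${WORK} and are not part of the commit:

    # Illustrative only: one training run per variable, mirroring the
    # two commands in submit-training.sh above.
    import subprocess

    SIMLINT_HOME = '/path/to/SiMLInt'     # placeholder for ${WORK}/SiMLInt
    DATA_DIR = '/path/to/data/training/'  # placeholder for ${WORK}/data/training/

    for var in ['vort', 'n']:
        subprocess.run(
            ['python', f'{SIMLINT_HOME}/files/5-training/training.py',
             '--epochs', '100', '--batch-size', '32', '--learning-rate', '0.0001',
             '--trajectories', '10', '--data-directory', DATA_DIR,
             '--variables', var, '--task-id', var],
            check=True)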
29 changes: 9 additions & 20 deletions files/5-training/training.py
@@ -14,31 +14,25 @@
 tf.keras.utils.disable_interactive_logging()

 parser = argparse.ArgumentParser(description='Model training')
+parser.add_argument('-d', '--data-directory', required=True)
+parser.add_argument('-t', '--trajectories', type=int, required=True)
 parser.add_argument('-lr', '--learning-rate', type=float, required=True)
 parser.add_argument('-b', '--batch-size', type=int, required=True)
 parser.add_argument('-ep', '--epochs', type=int, required=True)
 parser.add_argument('-id', '--task-id', default='')
-parser.add_argument('--vort-only', action='store_true',)
-parser.add_argument('--dens-only', action='store_true',)
+parser.add_argument('-v', '--variables', nargs='+')
 args = parser.parse_args()

 # problem size
 Nx = 256
 Nz = 256
-if args.vort_only or args.dens_only:
-    channels = 1
-else:
-    channels = 2
+channels = len(args.variables)
 val_frac = 0.2

 samples_per_file = 1000 # to estimate train/val split
-data_location = '/scratch/space1/d175/amy/resize_again/training_data/'
-file_nums = list(range(1,33)) # [ x+1 for x in range(32) ]
+data_location = args.data_directory
+file_nums = list(range(1, args.trajectories))

 # training protocol
-#learning_rate = 1e-3
-#epochs = 2
-#batch_size = 32
 learning_rate = args.learning_rate
 epochs = args.epochs
 batch_size = args.batch_size
@@ -54,15 +48,10 @@
 print(f'epochs: {epochs}')
 print(f'batch size: {batch_size}')
 print(f'training run: {trun_label}')
 print(f'data location: {data_location}')
 print(f'data files: {file_nums}')
-print(f'channels: {channels}', end='')
-if args.vort_only:
-    print(' (vort)')
-elif args.dens_only:
-    print(' (dens)')
-else:
-    print('')
-print('****************************************************')
+print(f'channels: {channels} {args.variables}')
+print('****************************************************', flush=True)

 # compile model
 model = kochkov_cnn((Nx, Nz, channels))
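Taken together, the new options make each training.py run self-contained per variable. A condensed, runnable sketch of the reworked argument handling; the parse_args list mirrors the vorticity command from submit-training.sh and is supplied inline only for illustration:

    # Condensed sketch of the new configuration flow in training.py.
    import argparse

    parser = argparse.ArgumentParser(description='Model training')
    parser.add_argument('-d', '--data-directory', required=True)
    parser.add_argument('-t', '--trajectories', type=int, required=True)
    parser.add_argument('-lr', '--learning-rate', type=float, required=True)
    parser.add_argument('-b', '--batch-size', type=int, required=True)
    parser.add_argument('-ep', '--epochs', type=int, required=True)
    parser.add_argument('-id', '--task-id', default='')
    parser.add_argument('-v', '--variables', nargs='+')

    # example mirroring the vorticity run in submit-training.sh
    args = parser.parse_args(
        '--epochs 100 --batch-size 32 --learning-rate 0.0001 '
        '--trajectories 10 --data-directory data/training/ '
        '--variables vort --task-id vort'.split())

    channels = len(args.variables)                 # one channel per variable
    file_nums = list(range(1, args.trajectories))  # files 1..9 for t=10
    print(channels, file_nums)

Note that range(1, args.trajectories) yields trajectories-1 file numbers (here 1 through 9), whereas the previous hard-coded range(1, 33) covered 32 files.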
