
Commit

updates to scripts
davidBelanger committed Jan 5, 2017
1 parent 3448608 commit ec9eced
Showing 3 changed files with 39 additions and 16 deletions.
denoise_cmd.sh: 4 changes, 2 additions & 2 deletions
@@ -31,8 +31,8 @@ problem_options="-problem_config $problem_config"
#These are the only options that are specific to the problem domain and architecture
inverse_noise_variance=1.0 #the inverse variance of the assumed noise model. The local potentials are multiplied by this.
problem_options_str="-use_random_crops 1 -local_term_weight $inverse_noise_variance" #there are other options; they just use their default values for now
th flags/DenoiseOptions.lua $problem_options_str -serialize $problem_config
problem_options="$problem_options -problem Denoise -continuous_outputs 1 "
th flags/DepthOptions.lua $problem_options_str -serialize $problem_config
problem_options="$problem_options -problem Depth -continuous_outputs 1 "
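As an aside on the -local_term_weight flag used above: a minimal sketch, assuming the corruption is additive Gaussian noise with a known standard deviation on [0,1]-scaled pixels, of how the inverse variance could be computed rather than hard-coded. The sigma variable is hypothetical, introduced only for this illustration:

sigma=0.1                                  # assumed noise standard deviation (illustration only)
inverse_noise_variance=$(awk -v s="$sigma" 'BEGIN { printf "%g", 1.0 / (s * s) }')   # 1/sigma^2 = 100
problem_options_str="-use_random_crops 1 -local_term_weight $inverse_noise_variance"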

#There are many other hyperparameters that you may want to play with
inference_options="-init_at_local_prediction 1 -inference_learning_rate 0.1 -max_inference_iters 20 -inference_learning_rate_decay 0 -inference_momentum 0 -learn_inference_hyperparams 0 -unconstrained_iterates 1 -line_search 1 -entropy_weight 0"
mlc_cmd.sh: 2 changes, 1 addition & 1 deletion
@@ -1,5 +1,5 @@

GPUID=-1 #set this to something >= 0 to use the GPU
GPUID=0 #set this to something >= 0 to use the GPU

torch_gpu_id=0 #torch always thinks it's running on GPUID 0, but the CUDA_VISIBLE_DEVICES environment variable (set below) changes what that means.
if [ "$GPUID" == -1 ]; then
tag_cmd.sh: 49 changes, 36 additions & 13 deletions
@@ -1,39 +1,60 @@
mkdir -p runs
d=runs/`date | sed 's| |_|g'`

GPUID=0 #set this to something >= 0 to use the GPU

torch_gpu_id=0 #torch always thinks it's running on GPUID 0, but the CUDA_VISIBLE_DEVICES environment variable (set below) changes what that means.
if [ "$GPUID" == -1 ]; then
torch_gpu_id=-1
fi
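A brief illustration, separate from the diff, of why torch_gpu_id can stay fixed at 0: CUDA_VISIBLE_DEVICES restricts and renumbers the GPUs a process can see, so whichever physical GPU is selected shows up inside Torch as device 0. The GPU index and the check below are hypothetical examples, not lines from these scripts:

GPUID=2                                # hypothetical: select the third physical GPU
export CUDA_VISIBLE_DEVICES=$GPUID     # the process now sees only that GPU, renumbered as device 0
th -e "require 'cutorch'; print(cutorch.getDeviceCount())"   # should print 1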

d=tag-runs/`date | sed 's| |_|g'`
log=$d/log.txt
mkdir $d
mkdir -p $d
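For reference, the backticked date pipeline above just builds a timestamped run directory; a sketch of what $d typically resolves to, plus a sort-friendly alternative that is an assumption rather than what the script uses:

# e.g. d=tag-runs/Thu_Jan__5_14:22:07_EST_2017   (exact form is locale-dependent)
# alternative: d=tag-runs/$(date +%Y-%m-%d_%H-%M-%S)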

results_file_dir=$d/results
mkdir $results_file_dir


#Use the synthetic training data generated by test/test_data_serialization_and_loading.lua
echo ./data/sequence/crf-data.train > ./data/sequence/crf-data.train.list
echo ./data/sequence/crf-data.test > ./data/sequence/crf-data.test.list
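The two echo lines above just write single-entry list files; a sketch, assuming the loaders accept one dataset path per line (the extra shard path below is hypothetical):

cat ./data/sequence/crf-data.train.list      # currently one line: ./data/sequence/crf-data.train
# echo ./data/sequence/crf-data.train2 >> ./data/sequence/crf-data.train.list   # hypothetical second shard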

data_options="-train_list ./data/sequence/crf-data.train.list -test_list ./data/sequence/crf-data.test.list -out_dir $results_file_dir -model_file $d/model- $iC"
system_options=" -batch_size 10 -gpuid -1 -profile 0"

#These are the only options that are specific to the problem domain and architecture
#These options are for restoring models and optimizer state from previous runs
#Restore the local classifier (features and 'local potentials')
#init_classifier= # for example, $previous_run_dir/model--pretrain_unaries.50.classifier
#iC="-init_classifier $init_classifier"

#Restore a full unrolled network for gradient-based prediction
#init_full_net= # for example, $previous_run_dir/model--update_all.50.predictor
#iF="-init_full_net $init_full_net"

#Restore the optimization state of a previous run
#init_opt_state= # for example, $previous_run_dir/model--update_all.50.opt_state
#iO="-init_opt_state $init_opt_state"

data_options="-train_list ./data/sequence/crf-data.train.list -test_list ./data/sequence/crf-data.test.list -out_dir $results_file_dir -model_file $d/model- $iC $iF $iO"
system_options=" -batch_size 10 -gpuid $torch_gpu_id -profile 0 "
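A hedged example of resuming from an earlier run via the three restore hooks commented out above; the run directory and epoch number are placeholders patterned on those hints, not paths produced by this commit:

previous_run_dir=tag-runs/some_earlier_run        # placeholder
iC="-init_classifier $previous_run_dir/model--pretrain_unaries.50.classifier"
iF="-init_full_net $previous_run_dir/model--update_all.50.predictor"
iO="-init_opt_state $previous_run_dir/model--update_all.50.opt_state"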

problem_config=$d/problem-config
problem_options="-problem_config $problem_config"


problem_options_str=""
#These are the only options that are specific to the problem domain and architecture
problem_options_str="" #just using default values for now
th flags/SequenceTaggingOptions.lua $problem_options_str -serialize $problem_config
problem_options="$problem_options -problem SequenceTagging"


inference_options="-init_at_local_prediction 1 -inference_learning_rate 0.1 -max_inference_iters 20 -inference_learning_rate_decay 0 -inference_momentum 0.5 -learn_inference_hyperparams 1 -unconstrained_iterates 1"
inference_options="-init_at_local_prediction 0 -inference_learning_rate 0.1 -max_inference_iters 20 -inference_learning_rate_decay 0 -inference_momentum 0.5 -learn_inference_hyperparams 1 -unconstrained_iterates 1"


general_training_options="-training_method E2E"
base_training_config="-gradient_clip 1.0 -optim_method adam -evaluation_frequency 25 -save_frequency 50 -adam_epsilon 1e-8 -gradient_noise_scale 0 \
-batches_per_epoch 100 -learning_rate_decay 0.0 -learning_rate_decay_start 20 -l2 0 "
base_training_config="-gradient_clip 1.0 -optim_method adam -evaluation_frequency 25 -save_frequency 25 -adam_epsilon 1e-8 -batches_per_epoch 100 -learning_rate_decay 0.0"


pretrain_configs="$base_training_config -learning_rate 0.001 -num_epochs 100 -training_mode pretrain_unaries"
first_pass_configs="$base_training_config -learning_rate 0.001 -num_epochs 100 -training_mode clamp_features"
second_pass_configs="$base_training_config -learning_rate 0.0005 -num_epochs 500 -training_mode update_all"
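The three configs above define a staged schedule, which the mode names suggest corresponds to pretraining the local classifier, then training with features clamped, then updating everything end to end. For a quick smoke test one could shrink the epoch counts, for example as below; the numbers are arbitrary choices for illustration, not values from the repo:

# pretrain_configs="$base_training_config -learning_rate 0.001 -num_epochs 5 -training_mode pretrain_unaries"
# first_pass_configs="$base_training_config -learning_rate 0.001 -num_epochs 5 -training_mode clamp_features"
# second_pass_configs="$base_training_config -learning_rate 0.0005 -num_epochs 10 -training_mode update_all"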

#This packages up Lua tables for the options that are specific to the different stages of training.
training_config=$d/train-config
echo $pretrain_configs
th flags/TrainingOptions.lua $pretrain_configs -serialize $training_config.0
@@ -44,7 +65,9 @@ training_options="-training_configs $training_config"

cmd="th main.lua $data_options $system_options $problem_options $inference_options $training_options $general_training_options"

echo echo running in $d > $d/cmd.sh
echo export CUDA_VISIBLE_DEVICES=$GPUID >> $d/cmd.sh

echo $cmd >> $d/cmd.sh
sh $d/cmd.sh 2>&1 | tee $log | tee latest-run.log
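For reference, the generated $d/cmd.sh ends up holding three lines along these rough lines; the run directory, GPU id, and elided options are illustrative:

# echo running in tag-runs/Thu_Jan__5_14:22:07_EST_2017
# export CUDA_VISIBLE_DEVICES=0
# th main.lua -train_list ./data/sequence/crf-data.train.list ... <all options assembled above>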
