diff --git a/reproduce/ace_run_table6.sh b/reproduce/ace_run_table6.sh
index d674bee..cad30f6 100755
--- a/reproduce/ace_run_table6.sh
+++ b/reproduce/ace_run_table6.sh
@@ -1,16 +1,16 @@
 #!/bin/bash
 cd "$(dirname "$0")"
-bash table6/ace_run_expert_vgg11_cut1.sh
-bash table6/ace_run_expert_vgg11_cut2.sh
-bash table6/ace_run_expert_vgg11_cut3.sh
+# bash table6/ace_run_expert_vgg11_cut1.sh
+# bash table6/ace_run_expert_vgg11_cut2.sh
+# bash table6/ace_run_expert_vgg11_cut3.sh
 bash table6/ace_run_finetune_lowLRlite_vgg.sh
 
-bash table6/ace_run_expert_resnet20_cut2.sh
-bash table6/ace_run_expert_resnet20_cut3.sh
-bash table6/ace_run_expert_resnet20_cut4.sh
-bash table6/ace_run_finetune_lowLRlite_resnet.sh
+# bash table6/ace_run_expert_resnet20_cut2.sh
+# bash table6/ace_run_expert_resnet20_cut3.sh
+# bash table6/ace_run_expert_resnet20_cut4.sh
+# bash table6/ace_run_finetune_lowLRlite_resnet.sh
 
-bash table6/ace_run_expert_mobilenet_cut2.sh
-bash table6/ace_run_expert_mobilenet_cut3.sh
-bash table6/ace_run_expert_mobilenet_cut4.sh
-bash table6/ace_run_finetune_lowLRlite_mobilenetv2.sh
\ No newline at end of file
+# bash table6/ace_run_expert_mobilenet_cut2.sh
+# bash table6/ace_run_expert_mobilenet_cut3.sh
+# bash table6/ace_run_expert_mobilenet_cut4.sh
+# bash table6/ace_run_finetune_lowLRlite_mobilenetv2.sh
\ No newline at end of file
diff --git a/reproduce/supplementary/MIA_optim/ace_run_MIAoptim_vgg11_cifar100.sh b/reproduce/supplementary/MIA_optim/ace_run_MIAoptim_vgg11_cifar100.sh
index 87e2074..63a15f4 100755
--- a/reproduce/supplementary/MIA_optim/ace_run_MIAoptim_vgg11_cifar100.sh
+++ b/reproduce/supplementary/MIA_optim/ace_run_MIAoptim_vgg11_cifar100.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 cd "$(dirname "$0")"
-cd ../../
+cd ../../../
 GPU_id=0
 arch=vgg11_bn
 batch_size=128
@@ -19,29 +19,33 @@ ssim_threshold=0.5
 train_gan_AE_type=conv_normN0C16
 bc_list="8"
 gan_loss_type=SSIM
-folder_name="supple_saves/MIA_optim"
-for bc in $bc_list; do
-    bottleneck_option=noRELU_C${bc}S1
-    for dataset in $dataset_list; do
-        for random_seed in $random_seed_list; do
-            for regularization_strength in $regularization_strength_list; do
-                for cutlayer in $cutlayer_list; do
-                    for num_client in $num_client_list; do
-                        filename=ace_${scheme}_${arch}_cutlayer_${cutlayer}_client_${num_client}_seed${random_seed}_dataset_${dataset}_lr_${learning_rate}_${regularization}_both_${train_gan_AE_type}_${regularization_strength}_${num_epochs}epoch_bottleneck_${bottleneck_option}_ssim_${ssim_threshold}_advtrain
+
+folder_name="new_saves/finetune_lite"
+local_lr_list="0.005"
+bottleneck_option=norelu_C8S1
+interval=5
+transfer_source_task=cifar10
+for random_seed in $random_seed_list; do
+    for regularization_strength in $regularization_strength_list; do
+        for cutlayer in $cutlayer_list; do
+            for num_client in $num_client_list; do
+                for local_lr in $local_lr_list; do
+                    for dataset in $dataset_list; do
+                        filename=ace_${scheme}_${arch}_cutlayer_${cutlayer}_client_${num_client}_seed${random_seed}_dataset_${dataset}_lr_${learning_rate}_${regularization}_both_${train_gan_AE_type}_${regularization_strength}_${num_epochs}epoch_bottleneck_${bottleneck_option}_servertune_${local_lr}_loadserver_source_${transfer_source_task}
                         CUDA_VISIBLE_DEVICES=${GPU_id} python main_MIA.py --arch=${arch} --cutlayer=$cutlayer --batch_size=${batch_size} \
                             --filename=$filename --num_client=$num_client --num_epochs=$num_epochs --save_more_checkpoints\
                             --dataset=$dataset --scheme=$scheme --regularization=${regularization} --regularization_strength=${regularization_strength}\
                             --random_seed=$random_seed --learning_rate=$learning_rate --gan_AE_type ${train_gan_AE_type} --gan_loss_type ${gan_loss_type}\
-                            --local_lr $local_lr --bottleneck_option ${bottleneck_option} --folder ${folder_name} --ssim_threshold ${ssim_threshold}
+                            --load_from_checkpoint --local_lr $local_lr --bottleneck_option ${bottleneck_option} --folder ${folder_name} --ssim_threshold ${ssim_threshold} \
+                            --load_from_checkpoint_server --transfer_source_task ${transfer_source_task} --optimize_computation ${interval}
                         target_client=0
                         attack_scheme=MIA_mf
                         attack_epochs=50
                         average_time=1
-
-
+
                         attack_from_later_layer_list="-1"
                         num_epochs_list="0 1 2 5 10 20 50 100 200"
                         for attack_from_later_layer in ${attack_from_later_layer_list}; do
@@ -65,29 +69,29 @@ for bc in $bc_list; do
     done
 done
-local_lr_list="0.005"
-bottleneck_option=norelu_C8S1
-interval=5
-transfer_source_task=cifar10
-for random_seed in $random_seed_list; do
-    for regularization_strength in $regularization_strength_list; do
-        for cutlayer in $cutlayer_list; do
-            for num_client in $num_client_list; do
-                for local_lr in $local_lr_list; do
-                    for dataset in $dataset_list; do
-                        filename=ace_${scheme}_${arch}_cutlayer_${cutlayer}_client_${num_client}_seed${random_seed}_dataset_${dataset}_lr_${learning_rate}_${regularization}_both_${train_gan_AE_type}_${regularization_strength}_${num_epochs}epoch_bottleneck_${bottleneck_option}_servertune_${local_lr}_loadserver_source_${transfer_source_task}
+folder_name="supple_saves/MIA_optim"
+
+
+for bc in $bc_list; do
+    bottleneck_option=noRELU_C${bc}S1
+    for dataset in $dataset_list; do
+        for random_seed in $random_seed_list; do
+            for regularization_strength in $regularization_strength_list; do
+                for cutlayer in $cutlayer_list; do
+                    for num_client in $num_client_list; do
+                        filename=ace_${scheme}_${arch}_cutlayer_${cutlayer}_client_${num_client}_seed${random_seed}_dataset_${dataset}_lr_${learning_rate}_${regularization}_both_${train_gan_AE_type}_${regularization_strength}_${num_epochs}epoch_bottleneck_${bottleneck_option}_ssim_${ssim_threshold}_advtrain
                         CUDA_VISIBLE_DEVICES=${GPU_id} python main_MIA.py --arch=${arch} --cutlayer=$cutlayer --batch_size=${batch_size} \
                             --filename=$filename --num_client=$num_client --num_epochs=$num_epochs --save_more_checkpoints\
                             --dataset=$dataset --scheme=$scheme --regularization=${regularization} --regularization_strength=${regularization_strength}\
                             --random_seed=$random_seed --learning_rate=$learning_rate --gan_AE_type ${train_gan_AE_type} --gan_loss_type ${gan_loss_type}\
-                            --load_from_checkpoint --local_lr $local_lr --bottleneck_option ${bottleneck_option} --folder ${folder_name} --ssim_threshold ${ssim_threshold} \
-                            --load_from_checkpoint_server --transfer_source_task ${transfer_source_task} --optimize_computation ${interval}
+                            --local_lr $local_lr --bottleneck_option ${bottleneck_option} --folder ${folder_name} --ssim_threshold ${ssim_threshold}
                         target_client=0
                         attack_scheme=MIA_mf
                         attack_epochs=50
                         average_time=1
-
+
+
                         attack_from_later_layer_list="-1"
                         num_epochs_list="0 1 2 5 10 20 50 100 200"
                         for attack_from_later_layer in ${attack_from_later_layer_list}; do
@@ -110,5 +114,3 @@ for random_seed in $random_seed_list; do
         done
     done
 done
-
-
diff --git a/reproduce/supplementary/other_evidence/ace_run_expert_vgg11_cut4.sh b/reproduce/supplementary/other_evidence/ace_run_expert_vgg11_cut4.sh
index 49118fc..90c70ac 100755
--- a/reproduce/supplementary/other_evidence/ace_run_expert_vgg11_cut4.sh
+++ b/reproduce/supplementary/other_evidence/ace_run_expert_vgg11_cut4.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 cd "$(dirname "$0")"
-cd ../../
+cd ../../../
 GPU_id=0
 arch=vgg11_bn
 batch_size=128
diff --git a/reproduce/supplementary/other_evidence/ace_run_finetune_lowLRlite_source_svhn.sh b/reproduce/supplementary/other_evidence/ace_run_finetune_lowLRlite_source_svhn.sh
index 6acd07f..d59c63c 100755
--- a/reproduce/supplementary/other_evidence/ace_run_finetune_lowLRlite_source_svhn.sh
+++ b/reproduce/supplementary/other_evidence/ace_run_finetune_lowLRlite_source_svhn.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 cd "$(dirname "$0")"
-cd ../../
+cd ../../../
 GPU_id=0
 arch=vgg11_bn
 batch_size=128
diff --git a/reproduce/table6/ace_run_expert_vgg11_cut1.sh b/reproduce/table6/ace_run_expert_vgg11_cut1.sh
index ab6f55f..b40bf81 100755
--- a/reproduce/table6/ace_run_expert_vgg11_cut1.sh
+++ b/reproduce/table6/ace_run_expert_vgg11_cut1.sh
@@ -7,7 +7,7 @@ batch_size=128
 num_client=2
 num_epochs=200
-dataset_list="cifar10"
+dataset_list="cifar10 cifar100"
 scheme=V2_epoch
 random_seed_list="125"
 #Extra argement (store_true): --collude_use_public, --initialize_different --collude_not_regularize --collude_not_regularize --num_client_regularize ${num_client_regularize}
diff --git a/reproduce/table6/ace_run_expert_vgg11_cut3.sh b/reproduce/table6/ace_run_expert_vgg11_cut3.sh
index 614dda6..f06156b 100755
--- a/reproduce/table6/ace_run_expert_vgg11_cut3.sh
+++ b/reproduce/table6/ace_run_expert_vgg11_cut3.sh
@@ -7,7 +7,7 @@ batch_size=128
 num_client=2
 num_epochs=200
-dataset_list="cifar10"
+dataset_list="cifar10 cifar100"
 scheme=V2_epoch
 random_seed_list="125"
 #Extra argement (store_true): --collude_use_public, --initialize_different --collude_not_regularize --collude_not_regularize --num_client_regularize ${num_client_regularize}
diff --git a/reproduce/table6/ace_run_finetune_lowLRlite_vgg.sh b/reproduce/table6/ace_run_finetune_lowLRlite_vgg.sh
index 80b2a23..1ffc043 100755
--- a/reproduce/table6/ace_run_finetune_lowLRlite_vgg.sh
+++ b/reproduce/table6/ace_run_finetune_lowLRlite_vgg.sh
@@ -24,7 +24,7 @@ gan_loss_type=SSIM
 transfer_source_task=cifar10
 dataset_list="cifar100"
-learning_rate=0.02
+learning_rate=0.05
 local_lr_list="0.005"
 bottleneck_option=norelu_C1S1