Skip to content

Commit

Permalink
Update references to old-style arguments (#5063)
Browse files Browse the repository at this point in the history
* Fixing references to old style arguments that were still in documentation and error messages.
  • Loading branch information
sersorrel authored and lbergelson committed Aug 14, 2018
1 parent 6e44c60 commit 1e98c6d
Show file tree
Hide file tree
Showing 22 changed files with 60 additions and 59 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -175,7 +175,7 @@ private Map<Path, String> loadGvcfToHeaderSampleMap() {

if ( ! gvcfPathsFromSampleNameMap.isEmpty() ) {
throw new SampleNameFixingCannotProceedException("Not all GVCF paths from the --" + GenomicsDBImport.SAMPLE_NAME_MAP_LONG_NAME +
" were found in the provided --gvcfToHeaderSampleMapFile");
" were found in the provided --gvcf-to-header-sample-map-file");
}

return mapping;
Expand Down Expand Up @@ -273,11 +273,11 @@ private void assertThatTheyReallyWantToProceed(){
"You should be certain you want to do this before proceeding.\n" +
"If the following description does not apply to your VCF then the newly generated vcf will be \n\n \t\tHORRIBLY CORRUPTED: by having its sample names shuffled so that the genotypes don't correspond to the correct samples\n\n" +
"1: your vcf was generated using a GenomicsDBImport released before gatk version 4.beta.6\n" +
"2: you set --batchSize != 0 when running GenomicsDBImport\n" +
"2: you set --batch-size != 0 when running GenomicsDBImport\n" +
"3: your callset was imported in multiple batches, i.e. your number of samples > --batch-size\n" +
"4: you supplied the exact same --sample-name-map file and batch size you used in the initial GenomicsDBImport\n" +
"or:\n" +
"1. you ran GenomicsDBImport with --readerThreads > 1, and at least one sample name as declared\n" +
"1. you ran GenomicsDBImport with --reader-threads > 1, and at least one sample name as declared\n" +
" in a GVCF header did not match the sample name specified for that file in the sample name map file\n" +
" provided to GenomicsDBImport\n\n" +

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@
*
* <p>This is an implementation of {@link HaplotypeCaller} using spark to distribute the computation.
* It is still in an early stage of development and does not yet support all the options that the non-spark version does.
* Specifically it does not support the --dbsnp, --comp, and --bamOutput options.</p>
* Specifically it does not support the --dbsnp, --comp, and --bam-output options.</p>
*
* <h3>Usage Example</h3>
* <pre>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,8 @@
* gatk ApplyBQSRSpark \
* -I gs://my-gcs-bucket/input.bam \
* -bqsr gs://my-gcs-bucket/recalibration.table \
* -SQQ 10 -SQQ 20 -SQQ 30 -SQQ 40 \
* --static-quantized-quals 10 --static-quantized-quals 20 \
* --static-quantized-quals 30 --static-quantized-quals 40 \
* -O gs://my-gcs-bucket/output.bam \
* -- \
* --sparkRunner GCS \
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -291,10 +291,10 @@ public void doReadFilterArgumentWarnings(final GATKReadFilterPluginDescriptor pl
for (final ReadFilter filter : readFilters) {
if (filter.getClass().isAssignableFrom(AmbiguousBaseReadFilter.class)) {
logger.warn("Detected the use of AmbiguousBaseReadFilter, which is applied before the PathSeq " +
"base masking steps. Did you mean to use --maxMaskedBases, which is applied after masking?");
"base masking steps. Did you mean to use --max-masked-bases, which is applied after masking?");
} else if (filter.getClass().isAssignableFrom(ReadLengthReadFilter.class)) {
logger.warn("Detected the use of ReadLengthReadFilter, which is applied before the PathSeq " +
"clipping steps. Did you mean to use --minClippedReadLength, which is applied after clipping?");
"clipping steps. Did you mean to use --min-clipped-read-length, which is applied after clipping?");
}
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -107,7 +107,7 @@ static JavaRDD<Iterable<GATKRead>> groupReadsIntoPairs(final JavaRDD<GATKRead> p
} else if (unpairedReads != null) {
groupedReads = unpairedReads.map(Collections::singletonList);
} else {
throw new UserException.BadInput("No reads were loaded. Ensure --pairedInput and/or --unpairedInput are set and valid.");
throw new UserException.BadInput("No reads were loaded. Ensure --paired-input and/or --unpaired-input are set and valid.");
}
return groupedReads;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,7 @@ public static int pathseqGetRecommendedNumReducers(final String inputPath, final

/**
* Returns a deep copy of the input header with an empty sequence dictionary, and logs warnings if the input may
* be aligned but --isHostAligned was not set to true (or vice versa).
* be aligned but --is-host-aligned was not set to true (or vice versa).
*/
public static SAMFileHeader checkAndClearHeaderSequences(final SAMFileHeader inputHeader, final PSFilterArgumentCollection filterArgs, final Logger logger) {

Expand All @@ -79,10 +79,10 @@ public static SAMFileHeader checkAndClearHeaderSequences(final SAMFileHeader inp
final SAMFileHeader header = inputHeader.clone();

if (filterArgs.alignedInput && (header.getSequenceDictionary() == null || header.getSequenceDictionary().isEmpty())) {
logger.warn("--isHostAligned is true but the BAM header contains no sequences");
logger.warn("--is-host-aligned is true but the BAM header contains no sequences");
}
if (!filterArgs.alignedInput && header.getSequenceDictionary() != null && !header.getSequenceDictionary().isEmpty()) {
logger.warn("--isHostAligned is false but there are one or more sequences in the BAM header");
logger.warn("--is-host-aligned is false but there are one or more sequences in the BAM header");
}

//Clear header sequences
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -122,7 +122,7 @@ public class PathSeqBuildReferenceTaxonomy extends CommandLineProgram {
public Object doWork() {

if (refseqCatalogPath == null && genbankCatalogPath == null) {
throw new UserException.BadInput("At least one of --refseqCatalogPath or --genbankCatalogPath must be specified");
throw new UserException.BadInput("At least one of --refseq-catalog or --genbank-catalog must be specified");
}

logger.info("Parsing reference and files... (this may take a few minutes)");
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -212,15 +212,15 @@ private boolean alignBam(final String inputBamPath, final PSBwaAlignerSpark alig
protected void runTool(final JavaSparkContext ctx) {

if (!readArguments.getReadFiles().isEmpty()) {
throw new UserException.BadInput("Please use --pairedInput or --unpairedInput instead of --input");
throw new UserException.BadInput("Please use --paired-input or --unpaired-input instead of --input");
}
final ReadsSparkSource readsSource = new ReadsSparkSource(ctx, readArguments.getReadValidationStringency());

final PSBwaAlignerSpark aligner = new PSBwaAlignerSpark(ctx, bwaArgs);
boolean bPairedSuccess = alignBam(inputPaired, aligner, true, ctx, readsSource);
boolean bUnpairedSuccess = alignBam(inputUnpaired, aligner, false, ctx, readsSource);
if (!bPairedSuccess && !bUnpairedSuccess) {
throw new UserException.BadInput("No reads were loaded. Ensure --pairedInput and/or --unpairedInput are set and valid.");
throw new UserException.BadInput("No reads were loaded. Ensure --paired-input and/or --unpaired-input are set and valid.");
}
aligner.close();
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,7 @@
* <h4>Local mode:</h4>
*
* <pre>
* gatk PathSeqFilterSpark \
* gatk PathSeqPipelineSpark \
* --input input_reads.bam \
* --kmer-file host_kmers.bfi \
* --filter-bwa-image host_reference.img \
Expand All @@ -112,7 +112,7 @@
* <h4>Spark cluster on Google Cloud DataProc with 6 16-core / 208GB memory worker nodes:</h4>
*
* <pre>
* gatk PathSeqFilterSpark \
* gatk PathSeqPipelineSpark \
* --input gs://my-gcs-bucket/input_reads.bam \
* --kmer-file hdfs://my-cluster-m:8020//host_kmers.bfi \
* --filter-bwa-image /references/host_reference.img \
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -186,7 +186,7 @@ static SAMFileHeader joinBamHeaders(final SAMFileHeader pairedHeader, final SAMF
protected void runTool(final JavaSparkContext ctx) {

if (!readArguments.getReadFiles().isEmpty()) {
throw new UserException.BadInput("Please use --pairedInput or --unpairedInput instead of --input");
throw new UserException.BadInput("Please use --paired-input or --unpaired-input instead of --input");
}

final ReadsSparkSource readsSource = new ReadsSparkSource(ctx, readArguments.getReadValidationStringency());
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,7 @@ public final class CombineGVCFs extends MultiVariantWalkerGroupedOnStart {
* span across a given genomic position (e.g. when scatter-gathering jobs across a compute farm). The option below enables users to break bands at
* pre-defined positions. For example, a value of 10,000 would mean that we would ensure that no bands span across chr1:10000, chr1:20000, etc.
*
* Note that the --convertToBasePairResolution argument is just a special case of this argument with a value of 1.
* Note that the --convert-to-base-pair-resolution argument is just a special case of this argument with a value of 1.
*/
@Argument(fullName=BREAK_BANDS_LONG_NAME, doc = "If > 0, reference bands will be broken up at genomic positions that are multiples of this number", optional=true)
protected int multipleAtWhichToBreakBands = 0;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -87,9 +87,9 @@ public final class VariantFiltration extends VariantWalker {

/**
* Any variant which overlaps entries from the provided mask file will be filtered. If the user wants logic to be reversed,
* i.e. filter variants that do not overlap with provided mask, then argument -filterNotInMask can be used.
* i.e. filter variants that do not overlap with provided mask, then argument --filter-not-in-mask can be used.
* Note that it is up to the user to adapt the name of the mask to make it clear that the reverse logic was used
* (e.g. if masking against Hapmap, use -maskName=hapmap for the normal masking and -maskName=not_hapmap for the reverse masking).
* (e.g. if masking against Hapmap, use --mask-name=hapmap for the normal masking and --mask-name=not_hapmap for the reverse masking).
*/
@Argument(fullName="mask", shortName="mask", doc="Input mask", optional=true)
public FeatureInput<Feature> mask;
Expand Down Expand Up @@ -142,20 +142,20 @@ public final class VariantFiltration extends VariantWalker {
public Integer maskExtension = 0;

/**
* When using the -mask argument, the maskName will be annotated in the variant record.
* Note that when using the -filter-not-in-mask argument to reverse the masking logic,
* When using the --mask argument, the mask-name will be annotated in the variant record.
* Note that when using the --filter-not-in-mask argument to reverse the masking logic,
* it is up to the user to adapt the name of the mask to make it clear that the reverse logic was used
* (e.g. if masking against Hapmap, use -mask-name=hapmap for the normal masking and -mask-name=not_hapmap for the reverse masking).
* (e.g. if masking against Hapmap, use --mask-name=hapmap for the normal masking and --mask-name=not_hapmap for the reverse masking).
*/
@Argument(fullName=MASK_NAME_LONG_NAME, doc="The text to put in the FILTER field if a 'mask' is provided and overlaps with a variant call", optional=true)
public String maskName = "Mask";

/**
* By default, if the -mask argument is used, any variant falling in a mask will be filtered.
* By default, if the --mask argument is used, any variant falling in a mask will be filtered.
* If this argument is used, logic is reversed, and variants falling outside a given mask will be filtered.
* Use case is, for example, if we have an interval list or BED file with "good" sites.
* Note that it is up to the user to adapt the name of the mask to make it clear that the reverse logic was used
* (e.g. if masking against Hapmap, use -mask-name=hapmap for the normal masking and -mask-name=not_hapmap for the reverse masking).
* (e.g. if masking against Hapmap, use --mask-name=hapmap for the normal masking and --mask-name=not_hapmap for the reverse masking).
*/
@Argument(fullName=FILTER_NOT_IN_MASK_LONG_NAME, doc="Filter records NOT in given input mask.", optional=true)
public boolean filterRecordsNotInMask = false;
Expand Down Expand Up @@ -215,7 +215,7 @@ private static boolean invertLogic(final boolean logic, final boolean invert){
}

/**
* Prepend inverse phrase to description if --invertFilterExpression
* Prepend inverse phrase to description if --invert-filter-expression
*
* @param description the description
* @return the description with inverse prepended if --invert-filter-expression
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -162,7 +162,7 @@ public GenotypeCalculationArgumentCollection( final GenotypeCalculationArgumentC
* f) If user-defined values add to more than one, an error will be produced.
*
* If user wants completely flat priors, then user should specify the same value (=1/(2*N+1)) 2*N times,e.g.
* -inputPrior 0.33 -inputPrior 0.33
* --input-prior 0.33 --input-prior 0.33
* for the single-sample diploid case.
*/
@Advanced
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -103,7 +103,7 @@ public class HaplotypeCallerArgumentCollection extends AssemblyBasedCallerArgume
* If set, certain "early exit" optimizations in HaplotypeCaller, which aim to save compute and time by skipping
* calculations if an ActiveRegion is determined to contain no variants, will be disabled. This is most likely to be useful if
* you're using the -bamout argument to examine the placement of reads following reassembly and are interested in seeing the mapping of
* reads in regions with no variations. Setting the -forceActive and -dontTrimActiveRegions flags may also be necessary.
* reads in regions with no variations. Setting the --force-active and --dont-trim-active-regions flags may also be necessary.
*/
@Advanced
@Argument(fullName = "disable-optimizations", doc="Don't skip calculations in ActiveRegions with no variants",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -268,7 +268,7 @@ private void validateAndInitializeArgs() {
Utils.validateArg(hcArgs.likelihoodArgs.BASE_QUALITY_SCORE_THRESHOLD >= QualityUtils.MIN_USABLE_Q_SCORE, "BASE_QUALITY_SCORE_THRESHOLD must be greater than or equal to " + QualityUtils.MIN_USABLE_Q_SCORE + " (QualityUtils.MIN_USABLE_Q_SCORE)");

if ( emitReferenceConfidence() && samplesList.numberOfSamples() != 1 ) {
throw new CommandLineException.BadArgumentValue("--emitRefConfidence", "Can only be used in single sample mode currently. Use the sample_name argument to run on a single sample out of a multi-sample BAM file.");
throw new CommandLineException.BadArgumentValue("--emit-ref-confidence", "Can only be used in single sample mode currently. Use the --sample-name argument to run on a single sample out of a multi-sample BAM file.");
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ public final class ReadThreadingAssemblerArgumentCollection implements Serializa
// -----------------------------------------------------------------------------------------------

/**
* Multiple kmer sizes can be specified, using e.g. `-kmerSize 10 -kmerSize 25`.
* Multiple kmer sizes can be specified, using e.g. `--kmer-size 10 --kmer-size 25`.
*/
@Advanced
@Argument(fullName="kmer-size", doc="Kmer size to use in the read threading assembler", optional = true)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@
* gatk --java-options "-Xmx4g" AnnotateVcfWithExpectedAlleleFraction \
* -V input.vcf \
* -O output.vcf \
* -mixingFractions mixingFractions.table
* --mixing-fractions mixingFractions.table
* </pre>
*
* Created by David Benjamin on 1/31/17.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -75,8 +75,8 @@
* By default, priors will be applied to each variant separately, provided each variant features data from at least
* 10 called samples (no-calls do not count). SNP sites in the input callset that have a SNP at the matching site in
* the supporting VCF will have priors applied based on the AC from the supporting samples and the input callset
* unless the --ignoreInputSamples flag is used. If a site is not called in the supporting VCF, priors will be
* applied using the discovered AC from the input samples unless the --discoveredACpriorsOff flag is used.
* unless the --ignore-input-samples flag is used. If a site is not called in the supporting VCF, priors will be
* applied using the discovered AC from the input samples unless the --discovered-allele-count-priors-off flag is used.
* For any non-SNP sites in the input callset, flat priors are applied.
* </p>
*
Expand All @@ -103,7 +103,7 @@
* -V input.vcf.gz \
* -O output.vcf.gz \
* -ped family.ped \
* --skipPopulationPriors
* --skip-population-priors
* </pre>
*
* <h4>Apply frequency and HWE-based priors to the genotypes of a family without including the family allele counts
Expand All @@ -112,7 +112,7 @@
* gatk --java-options "-Xmx4g" CalculateGenotypePosteriors \
* -V input.vcf.gz \
* -O output.vcf.gz \
* --ignoreInputSamples
* --ignore-input-samples
* </pre>
*
* <h4>Calculate the posterior genotypes of a callset, and impose that a variant *not seen* in the external panel
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -171,7 +171,7 @@ public class ApplyVQSR extends MultiVariantWalker {
protected Double VQSLOD_CUTOFF = null;

/**
* For this to work properly, the -ignoreFilter argument should also be applied to the VariantRecalibration command.
* For this to work properly, the --ignore-filter argument should also be applied to the VariantRecalibration command.
*/
@Argument(fullName="ignore-filter", doc="If specified, the recalibration will be applied to variants marked as filtered by the specified filter name in the input VCF file", optional=true)
private List<String> IGNORE_INPUT_FILTERS = new ArrayList<>();
Expand Down Expand Up @@ -246,7 +246,7 @@ public void onTraversalStart() {
if( TS_FILTER_LEVEL != null ) {
// if the user specifies both ts_filter_level and lodCutoff then throw a user error
if( VQSLOD_CUTOFF != null ) {
throw new UserException("Arguments --ts_filter_level and --lodCutoff are mutually exclusive. Please only specify one option.");
throw new UserException("Arguments --truth-sensitivity-filter-level and --lod-score-cutoff are mutually exclusive. Please only specify one option.");
}

if( tranches.size() >= 2 ) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@
* <h3>Inputs</h3>
* <ul>
* <li>data-dir The training data created by {@link CNNVariantWriteTensors}.</li>
* <li>The tensor-name argument determines what types of tensors the model will expect.
* <li>The --tensor-type argument determines what types of tensors the model will expect.
* Set it to "reference" for 1D tensors or "read_tensor" for 2D tensors.</li>
* </ul>
*
Expand All @@ -37,17 +37,17 @@
* <h4>Train a 1D CNN on Reference Tensors</h4>
* <pre>
* gatk CNNVariantTrain \
* -tensor-type reference \
* -input-tensors-dir my_tensor_folder \
* -model-name my_1d_model
* --tensor-type reference \
* --input-tensor-dir my_tensor_folder \
* --model-name my_1d_model
* </pre>
*
* <h4>Train a 2D CNN on Read Tensors</h4>
* <pre>
* gatk CNNVariantTrain \
* -input-tensors-dir my_tensor_folder \
* -tensor-type read-tensor \
* -model-name my_2d_model
* --input-tensor-dir my_tensor_folder \
* --tensor-type read-tensor \
* --model-name my_2d_model
* </pre>
*
*/
Expand Down
Loading

0 comments on commit 1e98c6d

Please sign in to comment.