Reformat source code for consistent use of whitespace. #153

Closed · wants to merge 2 commits
32 changes: 32 additions & 0 deletions etc/IntelliJ IDEA Code Style.xml
@@ -0,0 +1,32 @@
+<code_scheme name="Default (1)">
+  <option name="CLASS_COUNT_TO_USE_IMPORT_ON_DEMAND" value="99"/>
+  <option name="NAMES_COUNT_TO_USE_IMPORT_ON_DEMAND" value="99"/>
+  <option name="IMPORT_LAYOUT_TABLE">
+    <value>
+      <package name="java" withSubpackages="true" static="false"/>
+      <package name="javax" withSubpackages="true" static="false"/>
+      <package name="" withSubpackages="true" static="false"/>
+      <emptyLine/>
+      <emptyLine/>
+      <package name="" withSubpackages="true" static="true"/>
+    </value>
+  </option>
+  <option name="JD_ALIGN_PARAM_COMMENTS" value="false"/>
+  <option name="JD_ALIGN_EXCEPTION_COMMENTS" value="false"/>
+  <ScalaCodeStyleSettings>
+    <option name="USE_SCALADOC2_FORMATTING" value="false"/>
+    <option name="ENABLE_SCALADOC_FORMATTING" value="false"/>
+  </ScalaCodeStyleSettings>
+  <codeStyleSettings language="JAVA">
+    <option name="ELSE_ON_NEW_LINE" value="true"/>
+    <option name="WHILE_ON_NEW_LINE" value="true"/>
+    <option name="CATCH_ON_NEW_LINE" value="true"/>
+    <option name="FINALLY_ON_NEW_LINE" value="true"/>
+    <option name="SPACE_WITHIN_ARRAY_INITIALIZER_BRACES" value="true"/>
+    <option name="SPACE_BEFORE_ARRAY_INITIALIZER_LBRACE" value="true"/>
+    <option name="IF_BRACE_FORCE" value="3"/>
+    <option name="DOWHILE_BRACE_FORCE" value="3"/>
+    <option name="WHILE_BRACE_FORCE" value="3"/>
+    <option name="FOR_BRACE_FORCE" value="3"/>
+  </codeStyleSettings>
+</code_scheme>
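
A note for reviewers on the scheme's numeric values, which the XML itself does not explain: as far as I can tell from IntelliJ's settings model, a *_BRACE_FORCE value of 3 corresponds to "Force braces: Always", and the *_ON_NEW_LINE flags move the keyword onto its own line. A minimal sketch of the Java shape this scheme should produce (illustrative only, not part of the PR):

public class BraceStyleDemo {
    static int classify(int x) {
        if (x > 0) {        // IF_BRACE_FORCE = 3: braces forced even for one-line bodies
            return 1;
        }
        else {              // ELSE_ON_NEW_LINE = true: `else` starts a new line
            return -1;
        }
    }
}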
5 changes: 3 additions & 2 deletions examples/pom.xml
@@ -21,8 +21,9 @@
  ~ IN THE SOFTWARE.
 -->

-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">

     <modelVersion>4.0.0</modelVersion>
     <groupId>org.seqdoop</groupId>
     <artifactId>hadoop-bam-examples</artifactId>
102 changes: 49 additions & 53 deletions examples/src/main/java/org/seqdoop/hadoop_bam/examples/TestBAM.java
@@ -22,7 +22,7 @@
 import java.io.IOException;
 import java.util.Iterator;
-
+import htsjdk.samtools.SAMRecord;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.Path;
@@ -34,94 +34,90 @@
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-
-import htsjdk.samtools.SAMRecord;
-
 import org.seqdoop.hadoop_bam.AnySAMInputFormat;
 import org.seqdoop.hadoop_bam.KeyIgnoringBAMOutputFormat;
 import org.seqdoop.hadoop_bam.SAMRecordWritable;

 /**
  * Simple example that reads a BAM (or SAM) file, groups reads by their name and writes the
  * output again as BAM file. Note that both the file and its index must be present.
- *
+ * <p>
  * Usage: hadoop jar target/*-jar-with-dependencies.jar org.seqdoop.hadoop_bam.examples.TestBAM \
- * <input.bam> <output_directory>
+ * <input.bam> <output_directory>
  */
 public class TestBAM extends Configured implements Tool {

-    static class MyOutputFormat extends KeyIgnoringBAMOutputFormat<NullWritable> {
-        public final static String HEADER_FROM_FILE = "TestBAM.header";
+    static class MyOutputFormat extends KeyIgnoringBAMOutputFormat<NullWritable> {
+        public final static String HEADER_FROM_FILE = "TestBAM.header";

-        @Override
-        public RecordWriter<NullWritable, SAMRecordWritable> getRecordWriter(TaskAttemptContext ctx) throws IOException {
-            final Configuration conf = ctx.getConfiguration();
-            readSAMHeaderFrom(new Path(conf.get(HEADER_FROM_FILE)), conf);
-            return super.getRecordWriter(ctx);
-        }
-    }
+        @Override
+        public RecordWriter<NullWritable, SAMRecordWritable> getRecordWriter(TaskAttemptContext ctx) throws IOException {
+            final Configuration conf = ctx.getConfiguration();
+            readSAMHeaderFrom(new Path(conf.get(HEADER_FROM_FILE)), conf);
+            return super.getRecordWriter(ctx);
+        }
+    }

-    public int run(String[] args) throws Exception {
-        final Configuration conf = getConf();
+    public int run(String[] args) throws Exception {
+        final Configuration conf = getConf();

-        conf.set(MyOutputFormat.HEADER_FROM_FILE, args[0]);
+        conf.set(MyOutputFormat.HEADER_FROM_FILE, args[0]);

-        final Job job = new Job(conf);
+        final Job job = new Job(conf);

-        job.setJarByClass(TestBAM.class);
-        job.setMapperClass (TestBAMMapper.class);
-        job.setReducerClass(TestBAMReducer.class);
+        job.setJarByClass(TestBAM.class);
+        job.setMapperClass(TestBAMMapper.class);
+        job.setReducerClass(TestBAMReducer.class);

-        job.setMapOutputKeyClass(Text.class);
-        job.setMapOutputValueClass(SAMRecordWritable.class);
-        job.setOutputKeyClass(Text.class);
-        job.setOutputValueClass (SAMRecordWritable.class);
+        job.setMapOutputKeyClass(Text.class);
+        job.setMapOutputValueClass(SAMRecordWritable.class);
+        job.setOutputKeyClass(Text.class);
+        job.setOutputValueClass(SAMRecordWritable.class);

-        job.setInputFormatClass(AnySAMInputFormat.class);
-        job.setOutputFormatClass(TestBAM.MyOutputFormat.class);
+        job.setInputFormatClass(AnySAMInputFormat.class);
+        job.setOutputFormatClass(TestBAM.MyOutputFormat.class);

-        org.apache.hadoop.mapreduce.lib.input.FileInputFormat.addInputPath(job, new Path(args[0]));
+        org.apache.hadoop.mapreduce.lib.input.FileInputFormat.addInputPath(job, new Path(args[0]));

-        org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.setOutputPath(job, new Path(args[1]));
-        job.submit();
+        org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.setOutputPath(job, new Path(args[1]));
+        job.submit();

-        if (!job.waitForCompletion(true)) {
-            System.err.println("sort :: Job failed.");
-            return 1;
-        }
+        if (!job.waitForCompletion(true)) {
+            System.err.println("sort :: Job failed.");
+            return 1;
+        }

-        return 0;
-    }
-
-
-    public static void main(String[] args) throws Exception {
-        if (args.length != 2) {
-            System.out.printf("Usage: hadoop jar <name.jar> %s <input.bam> <output_directory>\n", TestBAM.class.getCanonicalName());
-            System.exit(0);
-        }
-
-        int res = ToolRunner.run(new Configuration(), new TestBAM(), args);
-        System.exit(res);
-    }
+        return 0;
+    }
+
+    public static void main(String[] args) throws Exception {
+        if (args.length != 2) {
+            System.out.printf("Usage: hadoop jar <name.jar> %s <input.bam> <output_directory>\n", TestBAM.class.getCanonicalName());
+            System.exit(0);
+        }
+
+        int res = ToolRunner.run(new Configuration(), new TestBAM(), args);
+        System.exit(res);
+    }
 }

 final class TestBAMMapper
-    extends org.apache.hadoop.mapreduce.Mapper<LongWritable,SAMRecordWritable, Text,SAMRecordWritable>
-{
-    @Override protected void map(
+        extends org.apache.hadoop.mapreduce.Mapper<LongWritable, SAMRecordWritable, Text, SAMRecordWritable> {
+    @Override
+    protected void map(
         LongWritable ignored, SAMRecordWritable wrec,
-        org.apache.hadoop.mapreduce.Mapper<LongWritable,SAMRecordWritable, Text,SAMRecordWritable>.Context
+        org.apache.hadoop.mapreduce.Mapper<LongWritable, SAMRecordWritable, Text, SAMRecordWritable>.Context
         ctx)
-        throws InterruptedException, IOException
-    {
+        throws InterruptedException, IOException {
         final SAMRecord record = wrec.get();
         System.out.println(record.toString());
         ctx.write(new Text(wrec.get().getReadName()), wrec);
     }
 }

 final class TestBAMReducer
-    extends org.apache.hadoop.mapreduce.Reducer<Text,SAMRecordWritable, Text,SAMRecordWritable> {
+        extends org.apache.hadoop.mapreduce.Reducer<Text, SAMRecordWritable, Text, SAMRecordWritable> {

     @Override
     protected void reduce(
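
A behavioral note on MyOutputFormat above: KeyIgnoringBAMOutputFormat cannot write records until it has a SAMFileHeader, which is why the header is re-read inside getRecordWriter() from a path stashed in the job configuration. A minimal sketch of that wiring (class name and path are hypothetical; assumes the same package as TestBAM):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class HeaderWiringSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Set before constructing the Job: Job copies the Configuration,
        // so changes made to `conf` afterwards are not seen by the job.
        conf.set(TestBAM.MyOutputFormat.HEADER_FROM_FILE, "input.bam");
        Job job = new Job(conf);
        job.setOutputFormatClass(TestBAM.MyOutputFormat.class);
    }
}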
27 changes: 12 additions & 15 deletions examples/src/main/java/org/seqdoop/hadoop_bam/examples/TestVCF.java
@@ -22,7 +22,7 @@
 import java.io.IOException;
 import java.util.Iterator;
-
+import htsjdk.variant.variantcontext.VariantContext;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.Path;
@@ -34,9 +34,6 @@
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-
-import htsjdk.variant.variantcontext.VariantContext;
-
 import org.seqdoop.hadoop_bam.KeyIgnoringVCFOutputFormat;
 import org.seqdoop.hadoop_bam.VCFInputFormat;
 import org.seqdoop.hadoop_bam.VCFOutputFormat;
@@ -45,9 +42,9 @@
 /**
  * Simple example that reads a VCF file, groups variants by their ID and writes the
  * output again as VCF file.
- *
+ * <p>
  * Usage: hadoop jar target/*-jar-with-dependencies.jar org.seqdoop.hadoop_bam.examples.TestVCF \
- * <input.vcf> <output_directory>
+ * <input.vcf> <output_directory>
  */
 public class TestVCF extends Configured implements Tool {

@@ -58,8 +55,9 @@ static class MyVCFOutputFormat
         private KeyIgnoringVCFOutputFormat<Text> baseOF;

         private void initBaseOF(Configuration conf) {
-            if (baseOF == null)
+            if (baseOF == null) {
                 baseOF = new KeyIgnoringVCFOutputFormat<Text>(conf);
+            }
         }

         @Override
@@ -87,7 +85,7 @@ public int run(String[] args) throws Exception {
         final Job job = new Job(conf);

         job.setJarByClass(TestVCF.class);
-        job.setMapperClass (TestVCFMapper.class);
+        job.setMapperClass(TestVCFMapper.class);
         job.setReducerClass(TestVCFReducer.class);

         job.setMapOutputKeyClass(Text.class);
@@ -123,22 +121,21 @@ public static void main(String[] args) throws Exception {
 }

 final class TestVCFMapper
-    extends org.apache.hadoop.mapreduce.Mapper<LongWritable,VariantContextWritable, Text, VariantContextWritable>
-{
-    @Override protected void map(
+        extends org.apache.hadoop.mapreduce.Mapper<LongWritable, VariantContextWritable, Text, VariantContextWritable> {
+    @Override
+    protected void map(
         LongWritable ignored, VariantContextWritable wrec,
         org.apache.hadoop.mapreduce.Mapper<LongWritable, VariantContextWritable, Text, VariantContextWritable>.Context
         ctx)
-        throws InterruptedException, IOException
-    {
+        throws InterruptedException, IOException {
         final VariantContext context = wrec.get();
         System.out.println(context.toString());
-        ctx.write(new Text(context.getChr()+":" + context.getID()), wrec);
+        ctx.write(new Text(context.getChr() + ":" + context.getID()), wrec);
     }
 }

 final class TestVCFReducer
-    extends org.apache.hadoop.mapreduce.Reducer<Text,VariantContextWritable, Text, VariantContextWritable> {
+        extends org.apache.hadoop.mapreduce.Reducer<Text, VariantContextWritable, Text, VariantContextWritable> {

     @Override
     protected void reduce(
18 changes: 9 additions & 9 deletions findbugs-exclude.xml
@@ -1,11 +1,11 @@
 <FindBugsFilter>
-    <Match>
-        <Bug pattern="EI_EXPOSE_REP" />
-    </Match>
-    <Match>
-        <Bug pattern="EI_EXPOSE_REP2" />
-    </Match>
-    <Match>
-        <Bug pattern="VA_FORMAT_STRING_USES_NEWLINE" />
-    </Match>
+    <Match>
+        <Bug pattern="EI_EXPOSE_REP"/>
+    </Match>
+    <Match>
+        <Bug pattern="EI_EXPOSE_REP2"/>
+    </Match>
+    <Match>
+        <Bug pattern="VA_FORMAT_STRING_USES_NEWLINE"/>
+    </Match>
 </FindBugsFilter>
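
For readers who don't know these FindBugs codes: EI_EXPOSE_REP and EI_EXPOSE_REP2 flag methods that return or store a reference to a mutable internal object, and VA_FORMAT_STRING_USES_NEWLINE flags format strings that use \n instead of the portable %n. A sketch of code that would trip the first pattern (hypothetical class, not from this repository):

public class Coords {
    private final int[] data = new int[16];

    // EI_EXPOSE_REP: hands out the internal mutable array, so callers can
    // mutate this object's state from outside.
    public int[] getData() {
        return data;
    }
}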
11 changes: 7 additions & 4 deletions pom.xml
@@ -21,12 +21,13 @@
  ~ IN THE SOFTWARE.
 -->

-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <parent>
         <groupId>org.sonatype.oss</groupId>
         <artifactId>oss-parent</artifactId>
         <version>7</version>
-        <relativePath />
+        <relativePath/>
     </parent>

     <modelVersion>4.0.0</modelVersion>
@@ -35,7 +36,9 @@
     <version>7.9.2-SNAPSHOT</version>
     <packaging>jar</packaging>
     <name>Hadoop-BAM</name>
-    <description>A Java library for the manipulation of files in common bioinformatics formats using the Hadoop MapReduce framework.</description>
+    <description>A Java library for the manipulation of files in common bioinformatics formats using the Hadoop
+        MapReduce framework.
+    </description>
     <url>http://sourceforge.net/projects/hadoop-bam/</url>

     <properties>
@@ -215,7 +218,7 @@
             <artifactId>maven-surefire-plugin</artifactId>
             <version>2.19.1</version>
             <configuration>
-                <argLine>-Xms1g -Xmx4g</argLine>
+                <argLine>-Xms1g -Xmx4g</argLine>
             </configuration>
         </plugin>
         <plugin>
17 changes: 7 additions & 10 deletions src/main/java/htsjdk/samtools/LinearBAMIndex.java
@@ -19,21 +19,18 @@
 // IN THE SOFTWARE.
 package htsjdk.samtools;

-import htsjdk.samtools.CachingBAMFileIndex;
-import htsjdk.samtools.LinearIndex;
-import htsjdk.samtools.SAMSequenceDictionary;
 import htsjdk.samtools.seekablestream.SeekableStream;

 /**
  * The htsjdk APIs for accessing the linear BAM index are private...
  */
 public class LinearBAMIndex extends CachingBAMFileIndex {

-    public LinearBAMIndex(SeekableStream stream, SAMSequenceDictionary dict) {
-        super(stream, dict);
-    }
-    public LinearIndex getLinearIndex(int idx) {
-        return getQueryResults(idx).getLinearIndex();
-    }
+    public LinearBAMIndex(SeekableStream stream, SAMSequenceDictionary dict) {
+        super(stream, dict);
+    }
+
+    public LinearIndex getLinearIndex(int idx) {
+        return getQueryResults(idx).getLinearIndex();
+    }
 }
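
On why this class sits in the htsjdk.samtools package at all: the relevant superclass constructor and getQueryResults() appear to be package-private, so a same-package subclass is the least invasive way to reach them. A hypothetical usage sketch (file names, and the exact htsjdk factory calls, are assumptions rather than part of this PR):

import java.io.File;
import htsjdk.samtools.LinearBAMIndex;
import htsjdk.samtools.LinearIndex;
import htsjdk.samtools.SAMSequenceDictionary;
import htsjdk.samtools.SamReaderFactory;
import htsjdk.samtools.seekablestream.SeekableFileStream;

public class LinearIndexDemo {
    public static void main(String[] args) throws Exception {
        // The sequence dictionary comes from the BAM header; the index is the .bai file.
        SAMSequenceDictionary dict = SamReaderFactory.makeDefault()
                .open(new File("input.bam"))
                .getFileHeader()
                .getSequenceDictionary();
        LinearBAMIndex index =
                new LinearBAMIndex(new SeekableFileStream(new File("input.bam.bai")), dict);
        LinearIndex linear = index.getLinearIndex(0); // linear index for reference sequence 0
        System.out.println("entries: " + linear.size());
    }
}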
6 changes: 3 additions & 3 deletions src/main/java/htsjdk/samtools/SAMRecordHelper.java
@@ -5,7 +5,7 @@
  * {@link SAMRecord#eagerDecode()} method in HTSJDK.
  */
 public class SAMRecordHelper {
-    public static void eagerDecode(SAMRecord record) {
-        record.eagerDecode();
-    }
+    public static void eagerDecode(SAMRecord record) {
+        record.eagerDecode();
+    }
 }
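
This is the same package-placement trick as LinearBAMIndex: SAMRecord.eagerDecode() is package-private, so this shim is how code outside htsjdk.samtools calls it. A hypothetical caller (sketch only; the record is assumed to come from elsewhere):

import htsjdk.samtools.SAMRecord;
import htsjdk.samtools.SAMRecordHelper;

public class EagerDecodeDemo {
    static void decodeNow(SAMRecord record) {
        // Force lazily-parsed fields to decode immediately, presumably so the
        // record's contents survive serialization between Hadoop tasks.
        SAMRecordHelper.eagerDecode(record);
    }
}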