Skip to content

Commit

Permalink
DEV: Integrate bgzipped file support in VCF import API (#237)
Browse files Browse the repository at this point in the history
* .bgz loader function implemented by Christina
  • Loading branch information
NickEdwards7502 committed Oct 17, 2024
1 parent 279bd5b commit 5ad8cc0
Show file tree
Hide file tree
Showing 3 changed files with 33 additions and 4 deletions.
3 changes: 2 additions & 1 deletion src/main/scala/au/csiro/variantspark/api/VSContext.scala
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,8 @@ class VSContext(val spark: SparkSession) extends SqlContextHolder {
*/
def importVCF(inputFile: String, sparkPar: Int = 0): FeatureSource = {
  // Delegates file loading to VCFSource, which routes through BGZLoader and
  // transparently supports .vcf, .vcf.gz, .vcf.bz2 and blocked-gzip .bgz input.
  // NOTE(review): `sparkPar` is retained for backward compatibility but is now
  // silently ignored — BGZLoader decides partitioning itself. Either honour it
  // for the plain-text path or deprecate the parameter explicitly.
  val vcfSource = VCFSource(sc, inputFile)
  VCFFeatureSource(vcfSource)
}

Expand Down
8 changes: 5 additions & 3 deletions src/main/scala/au/csiro/variantspark/input/VCFSource.scala
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ import htsjdk.variant.variantcontext.VariantContext
import htsjdk.variant.vcf.{VCFCodec, VCFHeader, VCFHeaderVersion}
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import au.csiro.variantspark.utils.BGZLoader

class DelegatingLineIterator(val it: Iterator[String])
extends AbstractIterator[String] with LineIterator {
Expand Down Expand Up @@ -37,12 +38,13 @@ class VCFSource(val lines: RDD[String], val headerLines: Int = 500) {
}

object VCFSource {

/** Builds a [[VCFSource]] from already-loaded VCF lines.
  *
  * @param lines       RDD of raw VCF text lines (header + records)
  * @param headerLines maximum number of leading lines scanned for the header
  */
def apply(lines: RDD[String], headerLines: Int = 500): VCFSource =
  new VCFSource(lines, headerLines)

/** Loads `fileName` via [[BGZLoader]] (handles plain, gzip, bzip2 and
  * blocked-gzip .bgz input) and scans up to `headerLines` lines for the header.
  */
def apply(sc: SparkContext, fileName: String, headerLines: Int): VCFSource =
  apply(BGZLoader.textFile(sc, fileName), headerLines)

/** Loads `fileName` via [[BGZLoader]] with the default header scan limit. */
def apply(sc: SparkContext, fileName: String): VCFSource =
  apply(BGZLoader.textFile(sc, fileName))

private def computeGenotypes(lines: RDD[String], headerAndVersion: HeaderAndVersion) = {
val br_headerAndVersion = lines.context.broadcast(headerAndVersion)
Expand Down
26 changes: 26 additions & 0 deletions src/main/scala/au/csiro/variantspark/utils/BGZLoader.scala
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
package au.csiro.variantspark.utils

import au.csiro.pbdava.ssparkle.spark.SparkApp
import org.apache.spark.rdd.RDD
import htsjdk.samtools.util.BlockCompressedInputStream
import org.apache.hadoop.fs.Path
import org.apache.spark.SparkContext

object BGZLoader {

  /** Loads a text file into an `RDD[String]`, transparently handling
    * BGZF (blocked-gzip, `.bgz`) input that Hadoop's standard codecs
    * cannot decompress.
    *
    * @param sc        active SparkContext used for loading/parallelizing
    * @param inputFile path to a .vcf, .vcf.gz, .vcf.bz2 or .bgz file
    * @return lines of the (decompressed) file as an RDD
    */
  def textFile(sc: SparkContext, inputFile: String): RDD[String] = {
    val isBGZ = FileUtils.isBGZFile(inputFile)
    println(inputFile + " is loading to spark RDD, isBGZFile: " + isBGZ)
    if (isBGZ) {
      // BGZF files are compressed as independent blocks; decode them with
      // htsjdk's BlockCompressedInputStream on the driver.
      val path = new Path(inputFile)
      val fs = path.getFileSystem(sc.hadoopConfiguration)
      val bgzInputStream = new BlockCompressedInputStream(fs.open(path))
      try {
        // Iterator (not the memoizing, deprecated Stream) to read line by line.
        // NOTE(review): the whole decompressed file is still materialized in
        // driver memory before parallelize() — acceptable for modest VCFs but
        // not scalable; a splittable Hadoop input format would be needed to
        // truly read blocks in parallel.
        val lines =
          Iterator.continually(bgzInputStream.readLine()).takeWhile(_ != null).toList
        sc.parallelize(lines)
      } finally {
        // Always release the underlying (HDFS) stream — previously leaked.
        bgzInputStream.close()
      }
    } else {
      // The standard Hadoop codecs handle files compressed as a whole:
      // load .vcf, .vcf.gz or .vcf.bz2 directly to an RDD.
      sc.textFile(inputFile)
    }
  }
}

0 comments on commit 5ad8cc0

Please sign in to comment.