From 6b5d864c719b3ead15154b9909299ea9bfba8616 Mon Sep 17 00:00:00 2001
From: guibletwm
Date: Fri, 19 May 2023 11:06:58 -0400
Subject: [PATCH 1/3] Transcriptome mapping

---
 config/snakemake_config.yaml |  5 ++-
 workflow/Snakefile           | 86 +++++++++++++++++++++++++++++++-----
 2 files changed, 79 insertions(+), 12 deletions(-)

diff --git a/config/snakemake_config.yaml b/config/snakemake_config.yaml
index ce53a00..cddb13a 100644
--- a/config/snakemake_config.yaml
+++ b/config/snakemake_config.yaml
@@ -22,7 +22,8 @@ contrastManifest: "OUTPUT_DIR/manifests/contrasts.tsv"
 multiplexflag: "Y" #whether samples are multiplexed ["Y","N"]
 umiSeparator: "rbc:" #required for nondemultiplexed samples to determine delimiter for deduplication [":", "_", "rbc:"]
 mismatch: 1 #required for multiplexed samples, number of bp mismatches allowed in demultiplexing [1,2,3]
-barcode_qc_flag: "PROCESS" #barcodes will undergo QC to ensure uniformity within samples; if set to IGNORE this qc step will be skipped
+barcode_qc_flag: "PROCESS" #barcodes will undergo QC to ensure uniformity within samples; ["PROCESS", "IGNORE"]
+min_reads_mapped: 0.5 #minimum fraction of reads that must be mapped; e.g. 0.5 requires 50% of all reads to be mapped [0.5]
 reference: "" #reference organism ["hg38","mm10"]
 filterlength: 20 #minimum read length to include in analysis [any int >20]
 phredQuality: 20 #minimum quality score for 3’ end trimming
@@ -50,6 +51,7 @@ alignSJDBoverhangMin: 3 # minimum overhang value for annotated spliced junctions
 alignSJoverhangMin: 5 # minimum overhang value for non-cannonical splied junctions
 alignTranscriptsPerReadNmax: 10000 #max number of different alignments per read to consider [int>0]
 alignWindowsPerReadNmax: 10000 #max number of windows per read [int>0]
+limitOutSJcollapsed: 1000000 # max number of collapsed junctions [int>0]
 outFilterMatchNmin: 15 # alignment will be output only if the number of matched bases is higher than or equal to this value.
 outFilterMatchNminOverLread: 0.9 #alignment will be output only if the number of matched bases is >= to value; normalized to sum of mates’ lengths for paired-end reads
 outFilterMismatchNmax: 999 #alignment will be output only if it has no more mismatches than this value.
@@ -69,6 +71,7 @@ seedPerReadNmax: 10000 #max number of seeds per read
 seedPerWindowNmax: 500 #max number of seeds per window
 sjdbScore: 2 #extra alignment score for alignmets that cross database junctions
 winAnchorMultimapNmax: 500 #max number of loci anchors are allowed to map to
+quantmod: 'TranscriptomeSAM' #additional alignment to the transcriptome
 
 #########################################################################################
 # modules, container parameters
diff --git a/workflow/Snakefile b/workflow/Snakefile
index b4a892e..29088e6 100644
--- a/workflow/Snakefile
+++ b/workflow/Snakefile
@@ -42,6 +42,7 @@ contrast_manifest = config['contrastManifest']
 multiplex_flag = config['multiplexflag'].capitalize()
 mismatch = config['mismatch']
 barcode_qc_flag = config['barcode_qc_flag']
+count_threshold = config['min_reads_mapped']
 species_ref = config['reference']
 filter_length = config['filterlength']
 phredQuality = config['phredQuality']
@@ -78,6 +79,7 @@ star_filt_score = config['outFilterScoreMin']
 star_filt_type = config['outFilterType']
 star_sam_att = config['outSAMattributes']
 star_sam_unmap = config['outSAMunmapped']
+star_SJcollapsed = config['limitOutSJcollapsed']
 star_filt_sjmin = config['outSJfilterCountTotalMin'].replace(",", " ")
 star_filt_overhang = config['outSJfilterOverhangMin'].replace(",", " ")
 star_filt_sjreads = config['outSJfilterReads']
@@ -87,6 +89,7 @@ star_seed_read = config['seedPerReadNmax']
 star_seed_wind = config['seedPerWindowNmax']
 star_sj = config['sjdbScore']
 star_win_anchor = config['winAnchorMultimapNmax']
+star_quantmod = config['quantmod']
 
 # modules, container
 cont_dir = config['containerDir']
@@ -147,6 +150,10 @@ else:
 #set strand ids
 strand_list=['P','N']
 
+# check that the mapped-read threshold is a decimal fraction between 0 and 1
+if (count_threshold > 1):
+    raise ValueError("min_reads_mapped must be a decimal value between 0 and 1, e.g. 0.5 to require 50% of reads mapped")
+
 ###############################################################
 # create sample lists
 ###############################################################
@@ -221,7 +228,6 @@ def rename_cmd(wildcards):
     return(cmd)
 
 def get_input_qc_troubleshoot(wildcards):
-    print("HERE")
     d=dict()
 
     l = "log_list"
@@ -229,17 +235,12 @@ def get_input_qc_troubleshoot(wildcards):
     p = "png_bc"
 
     if (multiplex_flag == 'Y'):
-        print("m flag on")
         d[l] = expand(join(out_dir,'log','STAR','{sp}.log'),sp=sp_list),
         d[t] = expand(join(out_dir,'{mp}', '01_qc_post','{mp}_barcode_passed.txt'),mp=samp_dict.keys())
         if (barcode_qc_flag == "PROCESS"):
-            print("b flag on")
            d[p] = expand(join(out_dir,'{mp}', '01_qc_post','{mp}_barcode.png'),mp=samp_dict.keys())
     else:
-        print("m flag off")
         d[l] = expand(join(out_dir,'log','STAR','{sp}.log'),sp=sp_list),
-    print(d)
-    d="YO"
     return (d)
 
 #read in contrast list for manorm
@@ -358,7 +359,7 @@ else:
     input_demethod_reports = [expand(join(out_dir,'04_annotation', '02_peaks','{sp}_' + peak_id + 'readPeaks_annotation_complete.txt'),sp=sp_list)]
 
 #local rules
-localrules: nondemux, rename_fastqs, multiqc
+localrules: nondemux, rename_fastqs, check_read_counts, multiqc
 
 #structure
 #mp
@@ -400,7 +401,8 @@ rule all:
         # index_stats
         expand(join(out_dir,'02_bam','01_merged','{sp}.si.bam'), sp=sp_list),
-
+        join(out_dir,'qc', '01_qc_post','qc_read_count_check_pass.txt'),
+
         # multiqc
         join(out_dir,'qc','multiqc_report.html'),
 
@@ -445,6 +447,9 @@ rule all:
         # # star
         # expand(join(out_dir,'01_preprocess','02_alignment','{sp}_Aligned.sortedByCoord.out.bam'),sp=sp_list),
+
+        # index_stats
+        # expand(join(out_dir, 'qc', '01_qc_post','{sp}_samstats.txt'),sp=sp_list),
 
         # create_beds_safs
         # expand(join(out_dir,'03_peaks','01_bed','{sp}_ALLreadPeaks.bed'),sp=sp_list),
 
@@ -757,6 +762,7 @@ rule star:
         s_ftype = star_filt_type,
         s_att = star_sam_att,
         s_unmap = star_sam_unmap,
+        s_sjcol = star_SJcollapsed,
         s_sjmin = star_filt_sjmin,
         s_overhang = star_filt_overhang,
         s_sjreads = star_filt_sjreads,
@@ -766,6 +772,7 @@ rule star:
         s_wind = star_seed_wind,
         s_sj = star_sj,
         s_anchor = star_win_anchor,
+        s_quantmod = star_quantmod,
         out_prefix = '{sp}_'
     envmodules:
         config['star'],
@@ -800,6 +807,7 @@ rule star:
         --alignTranscriptsPerReadNmax {params.s_transc} \
         --alignWindowsPerReadNmax {params.s_windows} \
         --limitBAMsortRAM {params.s_bam_limit} \
+        --limitOutSJcollapsed {params.s_sjcol} \
         --outFilterMatchNmin {params.s_match} \
         --outFilterMatchNminOverLread {params.s_readmatch} \
         --outFilterMismatchNmax {params.s_mismatch} \
@@ -818,7 +826,8 @@ rule star:
         --seedPerReadNmax {params.s_read} \
         --seedPerWindowNmax {params.s_wind} \
         --sjdbScore {params.s_sj} \
-        --winAnchorMultimapNmax {params.s_anchor}
+        --winAnchorMultimapNmax {params.s_anchor} \
+        --quantMode {params.s_quantmod}
 
         # sort file
         samtools sort -m 80G -T $tmp_dir $tmp_dir/{params.out_prefix}Aligned.out.bam -o $tmp_dir/{params.out_prefix}Aligned.sortedByCoord.out.bam
@@ -863,6 +872,60 @@ rule index_stats:
         samtools stats --threads {threads} {output.bam} > {output.samstat}
         """
 
+rule check_read_counts:
+    """
+    Checks the fraction of mapped reads in each sample against the min_reads_mapped threshold.
+    In a previous project the incorrect species was selected and nearly 80% of all reads in all
+    samples (N=6) were not mapped; rather than continuing with such low-quality samples, the
+    pipeline should stop.
+
+    http://www.htslib.org/doc/samtools-stats.html
+    """
+    input:
+        stats=expand(join(out_dir, 'qc', '01_qc_post','{sp}_samstats.txt'),sp=sp_list),
+    envmodules:
+        config['samtools']
+    params:
+        count_threshold=count_threshold,
+        qc_base=join(out_dir,'qc', '01_qc_post','qc_read_count_check')
+    output:
+        qc_counts_pass=join(out_dir,'qc', '01_qc_post','qc_read_count_check_pass.txt'),
+        qc_raw_counts=join(out_dir,'qc', '01_qc_post','qc_read_count_raw_values.txt')
+    shell:
+        """
+        # set fail count
+        fail=0
+
+        # create output file
+        if [[ -f {output.qc_raw_counts} ]]; then rm {output.qc_raw_counts}; fi
+        touch {output.qc_raw_counts}
+
+        for f in {input.stats}; do
+            # check samstats file to determine number of reads and reads mapped
+            raw_count=`cat $f | grep "raw total sequences" | awk -F"\t" '{{print $3}}'`
+            mapped_count=`cat $f | grep "reads mapped:" | awk -F"\t" '{{print $3}}'`
+
+            # use bc so the fraction is not truncated to 0 by integer division
+            found_percentage=$(echo "scale=4; $mapped_count / $raw_count" | bc)
+
+            # check the fraction against the set count_threshold; if it is lower than expected, fail
+            if [ 1 -eq "$(echo "${{found_percentage}} < {params.count_threshold}" | bc)" ]; then
+                flag="sample failed"
+                fail=$((fail + 1))
+            else
+                flag="sample passed"
+            fi
+
+            # put data into output
+            echo -e "$f\t$found_percentage\t$flag" >> {output.qc_raw_counts}
+        done
+
+        # write the pass or fail marker; a failed check leaves the pass file missing and stops the pipeline
+        if [ "$fail" -gt 0 ]; then
+            echo "Check sample log {output.qc_raw_counts} to review what sample(s) failed" > {params.qc_base}_fail.txt
+        else
+            touch {params.qc_base}_pass.txt
+        fi
+        """
+
+
 rule multiqc:
     """
     merges FastQC reports for pre/post trimmed fastq files into MultiQC report
     """
     input:
@@ -901,6 +964,7 @@ rule qc_troubleshoot:
     """
     generates a PDF of barcode plots and alignment plots for qc troubleshooting
     """
     input:
+        qc_counts_pass=rules.check_read_counts.output.qc_counts_pass,
         log_list=expand(join(out_dir,'log','STAR','{sp}.log'),sp=sp_list),
     params:
         rname =
"08_qc_troubleshoot", @@ -1842,13 +1906,13 @@ if (DE_method=="MANORM"): envmodules: config['R'] output: - report = join(out_dir,'05_demethod','02_analysis','{group_id}','{gid_1}_vs_{gid_2}_' + peak_id + 'readPeaks_manorm_report.html'), + reportOut = join(out_dir,'05_demethod','02_analysis','{group_id}','{gid_1}_vs_{gid_2}_' + peak_id + 'readPeaks_manorm_report.html'), reportRev = join(out_dir,'05_demethod','02_analysis','{group_id}','{gid_2}_vs_{gid_1}_' + peak_id + 'readPeaks_manorm_report.html') shell: """ Rscript -e 'library(rmarkdown); \ rmarkdown::render("{params.R}", \ - output_file = "{output.report}", \ + output_file = "{output.reportOut}", \ params= list(peak_in="{input.post_proc}", \ PeakIdnt="{params.p_id}",\ samplename="{params.gid_1}", \ From 85e2798e946ef6d1b4b23e4e6bfde8e73d18a866 Mon Sep 17 00:00:00 2001 From: slsevilla Date: Thu, 25 May 2023 15:21:02 -0400 Subject: [PATCH 2/3] update configs for tests --- .tests/cluster_config.yaml | 162 ++++++++++++++++++--------------- .tests/multiplex_hg38_full.tsv | 4 +- .tests/sample_hg38_full.tsv | 6 +- .tests/snakemake_config.yaml | 82 +++++++++++++---- 4 files changed, 158 insertions(+), 96 deletions(-) diff --git a/.tests/cluster_config.yaml b/.tests/cluster_config.yaml index ff41cc4..be15eea 100755 --- a/.tests/cluster_config.yaml +++ b/.tests/cluster_config.yaml @@ -2,125 +2,137 @@ __default__: gres: lscratch:96 mem: 40g - partition: norm - time: 00-02:00:00 + partition: ccr,norm + time: 00-08:00:00 threads: 32 output: .%j.{wildcards}.out error: .%j.{wildcards}.err qc_barcode: - threads: 3 - mem: 3g + threads: 8 + mem: 75g time: 00-04:00:00 demultiplex: - threads: 3 - mem: 3g - time: 04-00:00:00 + threads: 56 + mem: 32g + gres: lscratch:800 + time: 00-05:00:00 -remove_adaptors: - threads: 3 - time: 1-00:00:00 - mem: 3g - -qc_fastq_pre: - threads: 3 - mem: 3g - time: 00-03:00:00 +nondemux: + time: 00-01:00:00 -qc_fastq_post: - threads: 3 +qc_fastq: + threads: 4 mem: 3g time: 00-03:00:00 qc_screen_validator: - mem: 15g + mem: 32g time: 00-03:00:00 -split_files: - threads: 3 - mem: 3g - time: 00-03:00:00 - -novoalign: - mem: 50g - time: 10-00:00:00 - -cleanup_conversion: - threads: 5 - mem: 30g - time: 00-3:00:00 - -merge_unmapped_splits: - time: 01-00:00:00 - mem: 75g - -create_bam_mm_unique: - threads: 6 - gres: lscratch:256 - mem: 30g +star: time: 04-00:00:00 - -merge_splits_unique_mm: - mem: 512g - time: 02-06:00:00 - partition: largemem - -merge_mm_and_unique: - threads: 2 - gres: lscratch:256 - mem: 5g - time: 02-00:00:00 - -qc_alignment: - mem: 10g + gres: lscratch:800 + threads: 16 + mem: 120g + +index_stats: + threads: 8 + gres: lscratch:800 + mem: 200g + time: 01-00:00:00 qc_troubleshoot: - threads: 3 + threads: 4 mem: 3g dedup: - threads: 2 - mem: 64g + threads: 8 + mem: 200g gres: lscratch:256 - time: 01-00:00:00 + time: 02-00:00:00 create_beds_safs: - mem: 350g - gres: lscratch:256 + mem: 200g + gres: lscratch:512 + threads: 8 + +bgzip_beds: + mem: 100g threads: 4 - partition: largemem + +feature_counts: + threads: 8 + mem: 200g project_annotations: threads: 2 mem: 10g time: 00-01:00:00 -peak_annotations: - threads: 3 +peak_junctions: + threads: 10 + gres: lscratch:128 + mem: 36g + time: 04-00:00:00 + +peak_Transcripts: + threads: 4 + gres: lscratch:128 + mem: 30g + time: 04-00:00:00 + +peak_ExonIntron: + threads: 4 gres: lscratch:128 mem: 30g - time: 00-12:00:00 + time: 04-00:00:00 +peak_RMSK: + threads: 4 + gres: lscratch:128 + mem: 30g + time: 04-00:00:00 + annotation_report: - mem: 10g + threads: 4 + gres: 
lscratch:128
+    mem: 30g
+    time: 00-12:00:00
+
+MANORM_beds:
+    threads: 4
+    mem: 30g
 
 MANORM_analysis:
     threads: 4
     mem: 30g
+    time: 04-00:00:00
 
 MANORM_post_processing:
     threads: 2
-    mem: 2g
-    time: 00-01:00:00
+    mem: 30g
+    time: 00-12:00:00
 
 MANORM_RMD:
     threads: 2
-    mem: 3g
-    time: 00-01:00:00
+    mem: 30g
+    time: 00-02:00:00
 
-mapq_recalc:
-    mem: 1TB
-    gres: lscratch:256
-    partition: largemem
-    time: 00-06:00:00
+DIFFBIND_beds:
+    threads: 4
+    mem: 30g
+
+DIFFBIND_preprocess:
+    threads: 4
+    mem: 30g
+
+DIFFBIND_analysis:
+    threads: 4
+    mem: 30g
+
+
+DIFFBIND_report:
+    threads: 4
+    mem: 30g
diff --git a/.tests/multiplex_hg38_full.tsv b/.tests/multiplex_hg38_full.tsv
index 5ec8457..8efe5e0 100755
--- a/.tests/multiplex_hg38_full.tsv
+++ b/.tests/multiplex_hg38_full.tsv
@@ -1,2 +1,2 @@
-file_name	multiplex
-test_6.fastq.gz	test_6
+file_name,multiplex
+test_6.fastq.gz,test_6
diff --git a/.tests/sample_hg38_full.tsv b/.tests/sample_hg38_full.tsv
index 9ac4d5b..09d29d3 100755
--- a/.tests/sample_hg38_full.tsv
+++ b/.tests/sample_hg38_full.tsv
@@ -1,3 +1,3 @@
-multiplex	sample	group	barcode	adaptor
-test_6	Ro_Clip	CLIP	NNNNNCACTGTNNNN	AGATCGGAAGAGCGTCGTG
-test_6	Control_Clip	CNTRL	NNNNNATTGGCNNNN	AGATCGGAAGAGCGTCGTG
+multiplex,sample,group,barcode,adaptor
+test_6,Ro_Clip,CLIP,NNNNNCACTGTNNNN,AGATCGGAAGAGCGTCGTG
+test_6,Control_Clip,CNTRL,NNNNNATTGGCNNNN,AGATCGGAAGAGCGTCGTG
diff --git a/.tests/snakemake_config.yaml b/.tests/snakemake_config.yaml
index b2e0865..d436b4c 100755
--- a/.tests/snakemake_config.yaml
+++ b/.tests/snakemake_config.yaml
@@ -1,52 +1,102 @@
+#########################################################################################
 # Global configuration file for the pipeline
+#########################################################################################
+
+#########################################################################################
+#Folders and Paths
+#########################################################################################
 #path to snakemake file
 sourceDir: ""
-
 #path to output directory
 outputDir: "hg38_full/"
-
+#path to fastq files
+fastqDir: ".tests/"
 #path to manifest files
 sampleManifest: ".tests/sample_hg38_full.tsv"
 multiplexManifest: ".tests/multiplex_hg38_full.tsv"
 contrastManifest: ".test/contrasts_example.tsv"
-#path to fastq files
-fastqDir: ".tests/"
+
 ########################################################################################
 #user parameters
 #########################################################################################
-filterlength: 20 #minimum read length to include in analysis [any int >20]
 multiplexflag: "Y" #flag that samples are multiplexed ["Y","N"]
+umiSeparator: "rbc:" #required for nondemultiplexed samples to determine delimiter for deduplication [":", "_", "rbc:"]
 mismatch: 1 #number of bp mismatches allowed in demultiplexing [1,2,3]
+barcode_qc_flag: "PROCESS" #barcodes will undergo QC to ensure uniformity within samples; ["PROCESS", "IGNORE"]
+min_reads_mapped: 0.5 #minimum fraction of reads that must be mapped; e.g. 0.5 requires 50% of all reads to be mapped [0.5]
 reference: "hg38" #reference organism ["mm10", "hg38"]
-spliceaware: "N" #whether to run splice_aware part of the pipeline ['y', 'n']
+filterlength: 20 #minimum read length to include in analysis [any int >20]
+phredQuality: 20 #minimum quality score for 3' end trimming
 includerRNA: "N" #include refseq rRNA's in annotations ["Y", "N"]
-spliceBPlength: 75 #length of splice index to use [50, 75, 150]
 splicejunction: "N" #include splice junctions in peak calls: "manorm"
-condenseexon: "N" #whether to collapse exons
+AnnoAnchor: "max_total" #whether annotations for spliced peaks will be based on either 5' most region or region with max reads ["max","5prime"]
 mincount: 3 #minimum number of matches to count as a peak [1,2,3]
 ntmerge: 50 #minimum distance of nucleotides to merge peaks [10,20,30,40,50,60]
 peakid: "ALL" #report peaks for unique peaks only or unique and fractional mm ["unique","all"]
 DEmethod: "none" #choose DE method ["manorm","none"]
+MANormWidth: 50 #Width of window to calculate read density. [any integer >1; default 50]
+MNormDistance: 25 #Summit-to-summit distance cutoff for common peaks. [any integer >1; default MANormWidth/2]
 sampleoverlap: 1 #if DEmethod DIFFBIND, minimum number of samples a peak must be found in to be counted [>1]
 pval: 0.005 #if DEmethod, pval cutoff for significance
 fc: 1 #if DEmethod, fold change cut off for significance
+single_qc_threshold: 95 #maximum threshold for unmapped reads in any single sample
+project_qc_threshold: 50 #maximum threshold for unmapped reads across average of all project samples
+
+#########################################################################################
+# STAR parameters
+#########################################################################################
+alignEndsType: "Local" #type of read ends alignment ["Local", "EndToEnd", "Extend5pOfRead1", "Extend5pOfReads12"]
+alignIntronMax: 50000 #maximum intron length
+alignSJDBoverhangMin: 3 # minimum overhang value for annotated spliced junctions
+alignSJoverhangMin: 5 # minimum overhang value for non-canonical spliced junctions
+alignTranscriptsPerReadNmax: 10000 #max number of different alignments per read to consider [int>0]
+alignWindowsPerReadNmax: 10000 #max number of windows per read [int>0]
+limitOutSJcollapsed: 1000000 # max number of collapsed junctions [int>0]
+outFilterMatchNmin: 15 # alignment will be output only if the number of matched bases is higher than or equal to this value.
+outFilterMatchNminOverLread: 0.9 #alignment will be output only if the number of matched bases is >= to value; normalized to sum of mates' lengths for paired-end reads
+outFilterMismatchNmax: 999 #alignment will be output only if it has no more mismatches than this value.
+outFilterMismatchNoverReadLmax: 0.04 #alignment will be output only if its ratio of mismatches to *read* length is less than or equal to this value.
+outFilterMultimapNmax: 10000 #max number of multiple alignments allowed for a read: if exceeded, the read is considered unmapped
+outFilterMultimapScoreRange: 0 #the score range below the maximum score for multimapping alignments
+outFilterScoreMin: 0 #alignment will be output only if its score is higher than or equal to this value.
+outFilterType: "Normal" #type of filtering ["Normal", "BySJout"]
+outSAMattributes: "All" #a string of desired SAM attributes, in the order desired for the output SAM
+outSAMunmapped: "None" #output of unmapped reads in the SAM format ["None", "Within"]
+outSJfilterCountTotalMin: "3,1,1,1" #minimum total (multi-mapping+unique) read count per junction for: (1) non-canonical motifs, (2) GT/AG and CT/AC motif, (3) GC/AG and CT/GC motif, (4) AT/AC and GT/AT motif
+outSJfilterOverhangMin: "30,12,12,12" #minimum overhang length for splice junctions on both sides for: (1) non-canonical motifs, (2) GT/AG and CT/AC motif, (3) GC/AG and CT/GC motif, (4) AT/AC and GT/AT motif
+outSJfilterReads: "All" #which reads to consider for collapsed splice junctions output ["All", "Unique"]
+seedMultimapNmax: 10000 #only pieces that map fewer than this value are utilized in the stitching procedure [int>0]
+seedNoneLociPerWindow: 20 #max number of one seed loci per window [int>0]
+seedPerReadNmax: 10000 #max number of seeds per read
+seedPerWindowNmax: 500 #max number of seeds per window
+sjdbScore: 2 #extra alignment score for alignments that cross database junctions
+winAnchorMultimapNmax: 500 #max number of loci anchors are allowed to map to
+
+#########################################################################################
+# modules, container parameters
+#########################################################################################
 #modules, container parameters
 containerDir: "/data/CCBR_Pipeliner/iCLIP/container"
 fastq_val: "/data/CCBR_Pipeliner/db/PipeDB/bin/fastQValidator"
+
 bedtools: "bedtools/2.29.2"
 bowtie2: "bowtie/2-2.3.4"
 fastq_screen: "fastq_screen/0.14.0"
 fastqc: "fastqc/0.11.9"
-java: "java/12.0.1"
 manorm: "manorm/1.1.4"
 multiqc: "multiqc/1.9"
-novocraft: "novocraft/4.03.01"
 perl: "perl/5.24.3"
-python: "python/3.7"
-Qt: "Qt/5.13.2"
-singularity: "singularity"
+python: "python/3.8"
+R: "R/4.0"
 samtools: "samtools/1.11"
-umitools: "umitools/1.1.1"
+star: "STAR/2.7.8a"
 subread: "subread/2.0.1"
-R: "R/4.0"
\ No newline at end of file
+ultraplex: "ultraplex/1.2.5"
+umitools: "umitools/1.1.1"
+
+#########################################################################################
+# dev
+#########################################################################################
+#testing parameter
+testing_option: "N"

From bc65f117dfac8890b852ae12e837fa3ed8babeda Mon Sep 17 00:00:00 2001
From: slsevilla
Date: Thu, 25 May 2023 15:21:14 -0400
Subject: [PATCH 3/3] fix qcdir error

---
 workflow/scripts/02_barcode_qc.R | 1 +
 1 file changed, 1 insertion(+)

diff --git a/workflow/scripts/02_barcode_qc.R b/workflow/scripts/02_barcode_qc.R
index 506ce39..4eca6f7 100644
--- a/workflow/scripts/02_barcode_qc.R
+++ b/workflow/scripts/02_barcode_qc.R
@@ -29,6 +29,7 @@ barcode_input = args$barcode_input
 output_dir = args$output_dir
 mismatch = as.integer(args$mismatch)
 mpid = args$mpid
+qc_dir = args$qc_dir
 
 #test input
 testing="N"