
Commit 031966d

Replace version channels with topic channel

1 parent 13f5eb3 · commit 031966d

File tree

16 files changed (+2, −156 lines)

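Every deletion in this commit targets the boilerplate nf-core `ch_versions` pattern: each (sub)workflow created an empty channel, mixed in every module's versions.yml output, and re-emitted the result for its caller to mix again. The replacement named in the commit message is a Nextflow topic channel: process outputs are published to a named topic, and a single consumer anywhere in the pipeline can drain that topic, so no versions channel has to be threaded through call sites. The sketch below illustrates the mechanism only; the process name, the versions.yml content, and the collation step are illustrative and not taken from this commit (topic channels require a recent Nextflow release and, on older ones, the `nextflow.preview.topic = true` feature flag).

// Minimal sketch of the topic-channel pattern (illustrative names, not from this commit).

process EXAMPLE_TOOL {
    output:
    path 'result.txt'
    path 'versions.yml', topic: versions   // published to the pipeline-wide 'versions' topic

    script:
    """
    echo "example output" > result.txt
    echo '"EXAMPLE_TOOL": {example_tool: 1.0.0}' > versions.yml
    """
}

workflow {
    EXAMPLE_TOOL()

    // One consumer drains the topic for the whole run; no ch_versions channel is
    // created, mixed, or emitted along the way.
    Channel.topic('versions')
        .collectFile(name: 'software_versions.yml', sort: true)
        .view { "Collated versions written to: $it" }
}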

main.nf

Lines changed: 0 additions & 6 deletions
@@ -54,8 +54,6 @@ workflow NFCORE_RNASEQ {
 
     main:
 
-    ch_versions = Channel.empty()
-
     //
     // SUBWORKFLOW: Prepare reference genome files
     //
@@ -86,7 +84,6 @@ workflow NFCORE_RNASEQ {
         params.skip_alignment,
         params.skip_pseudo_alignment
     )
-    ch_versions = ch_versions.mix(PREPARE_GENOME.out.versions)
 
     // Check if contigs in genome fasta file > 512 Mbp
     if (!params.skip_alignment && !params.bam_csi_index) {
@@ -102,7 +99,6 @@ workflow NFCORE_RNASEQ {
     ch_samplesheet = Channel.value(file(params.input, checkIfExists: true))
     RNASEQ (
         ch_samplesheet,
-        ch_versions,
         PREPARE_GENOME.out.fasta,
         PREPARE_GENOME.out.gtf,
         PREPARE_GENOME.out.fai,
@@ -120,14 +116,12 @@ workflow NFCORE_RNASEQ {
         PREPARE_GENOME.out.splicesites,
         !params.remove_ribo_rna && params.remove_ribo_rna
     )
-    ch_versions = ch_versions.mix(RNASEQ.out.versions)
 
     emit:
     trim_status = RNASEQ.out.trim_status // channel: [id, boolean]
     map_status = RNASEQ.out.map_status // channel: [id, boolean]
     strand_status = RNASEQ.out.strand_status // channel: [id, boolean]
     multiqc_report = RNASEQ.out.multiqc_report // channel: /path/to/multiqc_report.html
-    versions = ch_versions // channel: [version1, version2, ...]
 }
 
 /*
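With the topic in place, NFCORE_RNASEQ no longer needs to accept, mix, or emit a versions channel. The two added lines are not rendered in this view; in the nf-core template the collated versions file is typically produced at a single point by draining the topic, roughly like the following (the `softwareVersionsToYAML` helper from the `utils_nfcore_pipeline` subworkflow and the output file name are assumptions based on the template, not shown in this diff):

// Hypothetical single collection point (names assumed from the nf-core template):
ch_collated_versions = softwareVersionsToYAML(Channel.topic('versions'))
    .collectFile(
        storeDir: "${params.outdir}/pipeline_info",
        name: 'software_versions.yml',
        sort: true,
        newLine: true
    )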

subworkflows/local/align_star/main.nf

Lines changed: 0 additions & 6 deletions
@@ -19,8 +19,6 @@ workflow ALIGN_STAR {
 
     main:
 
-    ch_versions = Channel.empty()
-
     //
     // Map reads with STAR
     //
@@ -42,7 +40,6 @@ workflow ALIGN_STAR {
         ch_bam_transcript = STAR_ALIGN_IGENOMES.out.bam_transcript
         ch_fastq = STAR_ALIGN_IGENOMES.out.fastq
         ch_tab = STAR_ALIGN_IGENOMES.out.tab
-        ch_versions = ch_versions.mix(STAR_ALIGN_IGENOMES.out.versions.first())
     } else {
         STAR_ALIGN ( reads, index, gtf, star_ignore_sjdbgtf, seq_platform, seq_center )
         ch_orig_bam = STAR_ALIGN.out.bam
@@ -53,14 +50,12 @@ workflow ALIGN_STAR {
         ch_bam_transcript = STAR_ALIGN.out.bam_transcript
         ch_fastq = STAR_ALIGN.out.fastq
         ch_tab = STAR_ALIGN.out.tab
-        ch_versions = ch_versions.mix(STAR_ALIGN.out.versions.first())
     }
 
     //
     // Sort, index BAM file and run samtools stats, flagstat and idxstats
     //
     BAM_SORT_STATS_SAMTOOLS ( ch_orig_bam, fasta )
-    ch_versions = ch_versions.mix(BAM_SORT_STATS_SAMTOOLS.out.versions)
 
     emit:
     orig_bam = ch_orig_bam // channel: [ val(meta), bam ]
@@ -79,5 +74,4 @@ workflow ALIGN_STAR {
     flagstat = BAM_SORT_STATS_SAMTOOLS.out.flagstat // channel: [ val(meta), [ flagstat ] ]
     idxstats = BAM_SORT_STATS_SAMTOOLS.out.idxstats // channel: [ val(meta), [ idxstats ] ]
 
-    versions = ch_versions // channel: [ versions.yml ]
 }
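Note that the removed lines in ALIGN_STAR called `.first()` on each module's versions output so that a multi-sample run did not accumulate one identical versions.yml per task. With a topic channel every task's file does land on the channel, so deduplication moves to the single collation step instead. One way to get the same effect (a sketch with assumed names, not from this commit) is to deduplicate on file content before collating:

// Dedupe at the collation step rather than with per-module .first() calls.
Channel.topic('versions')
    .unique { it.text }                                      // one copy per distinct versions.yml content
    .collectFile(name: 'software_versions.yml', sort: true)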

subworkflows/local/prepare_genome/main.nf

Lines changed: 0 additions & 33 deletions
@@ -65,8 +65,6 @@ workflow PREPARE_GENOME {
     skip_pseudo_alignment // boolean: Skip all of the pseudoalignment-based processes within the pipeline
 
     main:
-    // Versions collector
-    ch_versions = Channel.empty()
 
     //---------------------------
     // 1) Uncompress GTF or GFF -> GTF
@@ -75,20 +73,17 @@ workflow PREPARE_GENOME {
     if (gtf) {
         if (gtf.endsWith('.gz')) {
             ch_gtf = GUNZIP_GTF ([ [:], file(gtf, checkIfExists: true) ]).gunzip.map { it[1] }
-            ch_versions = ch_versions.mix(GUNZIP_GTF.out.versions)
         } else {
             ch_gtf = Channel.value(file(gtf, checkIfExists: true))
         }
     } else if (gff) {
         def ch_gff
         if (gff.endsWith('.gz')) {
             ch_gff = GUNZIP_GFF ([ [:], file(gff, checkIfExists: true) ]).gunzip
-            ch_versions = ch_versions.mix(GUNZIP_GFF.out.versions)
         } else {
             ch_gff = Channel.value(file(gff, checkIfExists: true)).map { [ [:], it ] }
         }
         ch_gtf = GFFREAD(ch_gff, []).gtf.map { it[1] }
-        ch_versions = ch_versions.mix(GFFREAD.out.versions)
     }
 
     //-------------------------------------
@@ -101,7 +96,6 @@ workflow PREPARE_GENOME {
     // Uncompress FASTA if needed
     if (fasta.endsWith('.gz')) {
         ch_fasta = GUNZIP_FASTA ([ [:], file(fasta, checkIfExists: true) ]).gunzip.map { it[1] }
-        ch_versions = ch_versions.mix(GUNZIP_FASTA.out.versions)
     } else {
         ch_fasta = Channel.value(file(fasta, checkIfExists: true))
     }
@@ -119,7 +113,6 @@ workflow PREPARE_GENOME {
     if (filter_gtf_needed) {
         GTF_FILTER(ch_fasta, ch_gtf)
         ch_gtf = GTF_FILTER.out.genome_gtf.first()
-        ch_versions = ch_versions.mix(GTF_FILTER.out.versions)
     }
 
     //---------------------------------------------------
@@ -129,7 +122,6 @@ workflow PREPARE_GENOME {
     if (fasta_provided && additional_fasta) {
         if (additional_fasta.endsWith('.gz')) {
             ch_add_fasta = GUNZIP_ADDITIONAL_FASTA([ [:], file(additional_fasta, checkIfExists: true) ]).gunzip.map { it[1] }
-            ch_versions = ch_versions.mix(GUNZIP_ADDITIONAL_FASTA.out.versions)
         } else {
             ch_add_fasta = Channel.value(file(additional_fasta, checkIfExists: true))
         }
@@ -141,7 +133,6 @@ workflow PREPARE_GENOME {
         )
         ch_fasta = CUSTOM_CATADDITIONALFASTA.out.fasta.map { it[1] }.first()
         ch_gtf = CUSTOM_CATADDITIONALFASTA.out.gtf.map { it[1] }.first()
-        ch_versions = ch_versions.mix(CUSTOM_CATADDITIONALFASTA.out.versions)
     }
 
     //------------------------------------------------------
@@ -151,13 +142,11 @@ workflow PREPARE_GENOME {
     if (gene_bed) {
         if (gene_bed.endsWith('.gz')) {
            ch_gene_bed = GUNZIP_GENE_BED ([ [:], file(gene_bed, checkIfExists: true) ]).gunzip.map { it[1] }
-            ch_versions = ch_versions.mix(GUNZIP_GENE_BED.out.versions)
         } else {
             ch_gene_bed = Channel.value(file(gene_bed, checkIfExists: true))
         }
     } else {
         ch_gene_bed = GTF2BED(ch_gtf).bed
-        ch_versions = ch_versions.mix(GTF2BED.out.versions)
     }
 
     //----------------------------------------------------------------------
@@ -170,19 +159,16 @@ workflow PREPARE_GENOME {
         // Use user-provided transcript FASTA
         if (transcript_fasta.endsWith('.gz')) {
             ch_transcript_fasta = GUNZIP_TRANSCRIPT_FASTA ([ [:], file(transcript_fasta, checkIfExists: true) ]).gunzip.map { it[1] }
-            ch_versions = ch_versions.mix(GUNZIP_TRANSCRIPT_FASTA.out.versions)
         } else {
             ch_transcript_fasta = Channel.value(file(transcript_fasta, checkIfExists: true))
         }
         if (gencode) {
             PREPROCESS_TRANSCRIPTS_FASTA_GENCODE(ch_transcript_fasta)
             ch_transcript_fasta = PREPROCESS_TRANSCRIPTS_FASTA_GENCODE.out.fasta
-            ch_versions = ch_versions.mix(PREPROCESS_TRANSCRIPTS_FASTA_GENCODE.out.versions)
         }
     } else if (fasta_provided) {
         // Build transcripts from genome if we have it
         ch_transcript_fasta = MAKE_TRANSCRIPTS_FASTA(ch_fasta, ch_gtf).transcript_fasta
-        ch_versions = ch_versions.mix(MAKE_TRANSCRIPTS_FASTA.out.versions)
     }
 
     //-------------------------------------------------------
@@ -194,7 +180,6 @@ workflow PREPARE_GENOME {
         CUSTOM_GETCHROMSIZES(ch_fasta.map { [ [:], it ] })
         ch_fai = CUSTOM_GETCHROMSIZES.out.fai.map { it[1] }
         ch_chrom_sizes = CUSTOM_GETCHROMSIZES.out.sizes.map { it[1] }
-        ch_versions = ch_versions.mix(CUSTOM_GETCHROMSIZES.out.versions)
     }
 
     //------------------------------------------------
@@ -215,7 +200,6 @@ workflow PREPARE_GENOME {
         // Use user-provided bbsplit index
         if (bbsplit_index.endsWith('.tar.gz')) {
             ch_bbsplit_index = UNTAR_BBSPLIT_INDEX ([ [:], file(bbsplit_index, checkIfExists: true) ]).untar.map { it[1] }
-            ch_versions = ch_versions.mix(UNTAR_BBSPLIT_INDEX.out.versions)
         } else {
             ch_bbsplit_index = Channel.value(file(bbsplit_index, checkIfExists: true))
         }
@@ -238,7 +222,6 @@ workflow PREPARE_GENOME {
                 ch_bbsplit_fasta_list,
                 true
             ).index
-            ch_versions = ch_versions.mix(BBMAP_BBSPLIT.out.versions)
         }
         // else: no FASTA and no user-provided index -> remains empty
     }
@@ -257,7 +240,6 @@ workflow PREPARE_GENOME {
     if (sortmerna_index) {
         if (sortmerna_index.endsWith('.tar.gz')) {
             ch_sortmerna_index = UNTAR_SORTMERNA_INDEX ([ [:], file(sortmerna_index, checkIfExists: true) ]).untar.map { it[1] }
-            ch_versions = ch_versions.mix(UNTAR_SORTMERNA_INDEX.out.versions)
         } else {
             ch_sortmerna_index = Channel.value([ [:], file(sortmerna_index, checkIfExists: true) ])
         }
@@ -269,7 +251,6 @@ workflow PREPARE_GENOME {
                 Channel.of([ [], [] ])
             )
             ch_sortmerna_index = SORTMERNA_INDEX.out.index.first()
-            ch_versions = ch_versions.mix(SORTMERNA_INDEX.out.versions)
         }
     }
 
@@ -281,7 +262,6 @@ workflow PREPARE_GENOME {
     if (star_index) {
         if (star_index.endsWith('.tar.gz')) {
             ch_star_index = UNTAR_STAR_INDEX ([ [:], file(star_index, checkIfExists: true) ]).untar.map { it[1] }
-            ch_versions = ch_versions.mix(UNTAR_STAR_INDEX.out.versions)
         } else {
             ch_star_index = Channel.value(file(star_index, checkIfExists: true))
         }
@@ -295,13 +275,11 @@ workflow PREPARE_GENOME {
         }
         if (is_aws_igenome) {
             ch_star_index = STAR_GENOMEGENERATE_IGENOMES(ch_fasta, ch_gtf).index
-            ch_versions = ch_versions.mix(STAR_GENOMEGENERATE_IGENOMES.out.versions)
         } else {
             ch_star_index = STAR_GENOMEGENERATE(
                 ch_fasta.map { [ [:], it ] },
                 ch_gtf.map { [ [:], it ] }
             ).index.map { it[1] }
-            ch_versions = ch_versions.mix(STAR_GENOMEGENERATE.out.versions)
         }
     }
 }
@@ -314,14 +292,12 @@ workflow PREPARE_GENOME {
     if (rsem_index) {
         if (rsem_index.endsWith('.tar.gz')) {
             ch_rsem_index = UNTAR_RSEM_INDEX ([ [:], file(rsem_index, checkIfExists: true) ]).untar.map { it[1] }
-            ch_versions = ch_versions.mix(UNTAR_RSEM_INDEX.out.versions)
         } else {
             ch_rsem_index = Channel.value(file(rsem_index, checkIfExists: true))
         }
     }
     else if (fasta_provided) {
         ch_rsem_index = RSEM_PREPAREREFERENCE_GENOME(ch_fasta, ch_gtf).index
-        ch_versions = ch_versions.mix(RSEM_PREPAREREFERENCE_GENOME.out.versions)
     }
 }
 
@@ -337,13 +313,11 @@ workflow PREPARE_GENOME {
     }
     else if (fasta_provided) {
         ch_splicesites = HISAT2_EXTRACTSPLICESITES(ch_gtf.map { [ [:], it ] }).txt.map { it[1] }
-        ch_versions = ch_versions.mix(HISAT2_EXTRACTSPLICESITES.out.versions)
     }
     // the index
     if (hisat2_index) {
         if (hisat2_index.endsWith('.tar.gz')) {
             ch_hisat2_index = UNTAR_HISAT2_INDEX ([ [:], file(hisat2_index, checkIfExists: true) ]).untar.map { it[1] }
-            ch_versions = ch_versions.mix(UNTAR_HISAT2_INDEX.out.versions)
         } else {
             ch_hisat2_index = Channel.value(file(hisat2_index, checkIfExists: true))
         }
@@ -354,7 +328,6 @@ workflow PREPARE_GENOME {
             ch_gtf.map { [ [:], it ] },
             ch_splicesites.map { [ [:], it ] }
         ).index.map { it[1] }
-        ch_versions = ch_versions.mix(HISAT2_BUILD.out.versions)
     }
 }
 
@@ -366,20 +339,17 @@ workflow PREPARE_GENOME {
     if (salmon_index) {
         if (salmon_index.endsWith('.tar.gz')) {
             ch_salmon_index = UNTAR_SALMON_INDEX ( [ [:], salmon_index ] ).untar.map { it[1] }
-            ch_versions = ch_versions.mix(UNTAR_SALMON_INDEX.out.versions)
         } else {
             ch_salmon_index = Channel.value(file(salmon_index))
         }
     } else if ('salmon' in prepare_tool_indices) {
         if (ch_transcript_fasta && fasta_provided) {
             // build from transcript FASTA + genome FASTA
             ch_salmon_index = SALMON_INDEX(ch_fasta, ch_transcript_fasta).index
-            ch_versions = ch_versions.mix(SALMON_INDEX.out.versions)
         }
         else if (ch_transcript_fasta) {
             // some Salmon module can run with just a transcript FASTA
             ch_salmon_index = SALMON_INDEX([], ch_transcript_fasta).index
-            ch_versions = ch_versions.mix(SALMON_INDEX.out.versions)
         }
     }
 
@@ -390,14 +360,12 @@ workflow PREPARE_GENOME {
     if (kallisto_index) {
         if (kallisto_index.endsWith('.tar.gz')) {
             ch_kallisto_index = UNTAR_KALLISTO_INDEX ( [ [:], kallisto_index ] ).untar
-            ch_versions = ch_versions.mix(UNTAR_KALLISTO_INDEX.out.versions)
         } else {
             ch_kallisto_index = Channel.value([[:], file(kallisto_index)])
         }
     } else {
         if ('kallisto' in prepare_tool_indices) {
             ch_kallisto_index = KALLISTO_INDEX ( ch_transcript_fasta.map { [ [:], it] } ).index
-            ch_versions = ch_versions.mix(KALLISTO_INDEX.out.versions)
         }
     }
 
@@ -420,5 +388,4 @@ workflow PREPARE_GENOME {
     hisat2_index = ch_hisat2_index // channel: path(hisat2/index/)
     salmon_index = ch_salmon_index // channel: path(salmon/index/)
     kallisto_index = ch_kallisto_index // channel: [ meta, path(kallisto/index/) ]
-    versions = ch_versions.ifEmpty(null) // channel: [ versions.yml ]
 }

subworkflows/local/quantify_rsem/main.nf

Lines changed: 0 additions & 6 deletions
@@ -14,19 +14,15 @@ workflow QUANTIFY_RSEM {
 
     main:
 
-    ch_versions = Channel.empty()
-
     //
     // Quantify reads with RSEM
     //
     RSEM_CALCULATEEXPRESSION ( reads, index )
-    ch_versions = ch_versions.mix(RSEM_CALCULATEEXPRESSION.out.versions.first())
 
     //
     // Sort, index BAM file and run samtools stats, flagstat and idxstats
     //
     BAM_SORT_STATS_SAMTOOLS ( RSEM_CALCULATEEXPRESSION.out.bam_star, fasta )
-    ch_versions = ch_versions.mix(BAM_SORT_STATS_SAMTOOLS.out.versions)
 
     //
     // Merge counts across samples
@@ -35,7 +31,6 @@ workflow QUANTIFY_RSEM {
         RSEM_CALCULATEEXPRESSION.out.counts_gene.collect{it[1]}, // [meta, counts]: Collect the second element (counts files) in the channel across all samples
         RSEM_CALCULATEEXPRESSION.out.counts_transcript.collect{it[1]}
     )
-    ch_versions = ch_versions.mix(RSEM_MERGE_COUNTS.out.versions)
 
     emit:
     counts_gene = RSEM_CALCULATEEXPRESSION.out.counts_gene // channel: [ val(meta), counts ]
@@ -58,5 +53,4 @@ workflow QUANTIFY_RSEM {
     merged_counts_transcript = RSEM_MERGE_COUNTS.out.counts_transcript // path: *.transcript_counts.tsv
     merged_tpm_transcript = RSEM_MERGE_COUNTS.out.tpm_transcript // path: *.transcript_tpm.tsv
 
-    versions = ch_versions // channel: [ versions.yml ]
 }

subworkflows/nf-core/bam_dedup_stats_samtools_umicollapse/main.nf

Lines changed: 0 additions & 6 deletions
Some generated files are not rendered by default.
