Skip to content

Commit

Permalink
Update test UTILS and test multi-profile job matrix
Browse files Browse the repository at this point in the history
  • Loading branch information
sateeshperi committed Jun 19, 2023
1 parent aa654d1 commit 1a4605c
Show file tree
Hide file tree
Showing 11 changed files with 55 additions and 238 deletions.
88 changes: 9 additions & 79 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -35,9 +35,10 @@ jobs:
NXF_VER:
- "latest-everything"
aligner:
- "paired_end"
- "single_end"
- "misc"
- "tests/pipeline/misc/custom_reftax.nf.test --profile test_reftaxcustom"
- "tests/pipeline/misc/doubleprimers.nf.test --profile test_doubleprimers"
# - "single_end"
# - "misc"
profile:
- "docker"

Expand Down Expand Up @@ -91,7 +92,7 @@ jobs:
# Run nf-test with the specified aligner and output test results in TAP format
- name: Run nf-test
run: |
nf-test test tests/pipeline/${{ matrix.aligner }}/ --profile ${{ matrix.profile }} --tap=test.tap
nf-test test "${{ matrix.aligner }},${{ matrix.profile }}" --junitxml=test.xml
# If the test fails, output the software_versions.yml using the 'batcat' utility
- name: Output log on failure
Expand All @@ -100,79 +101,8 @@ jobs:
sudo apt install bat > /dev/null
batcat --decorations=always --color=always ${{ github.workspace }}/.nf-test/tests/*/output/pipeline_info/software_versions.yml
test-master:
# Only run this job if the base branch is 'master'
if: ${{ (github.base_ref == 'master') }}
name: Run pipeline with test data on master
runs-on: ubuntu-latest
# Define the matrix strategy for the job
strategy:
matrix:
NXF_VER:
- "22.10.1"
- "latest-everything"
aligner:
- "paired_end"
- "single_end"
- "misc"
profile:
- "docker"

# Define the steps for the job
steps:
# Checkout the pipeline code from the repository
- name: Check out pipeline code
uses: actions/checkout@v3

- name: Hash Github Workspace
id: hash_workspace
run: |
echo "digest=$(echo ampliseq_2.6_${{ github.workspace }} | md5sum | cut -c 1-25)" >> $GITHUB_OUTPUT
- name: Cache test data
id: cache-testdata
uses: actions/cache@v3
with:
path: test-datasets/
key: ${{ steps.hash_workspace.outputs.digest }}

- name: Check out test data
if: steps.cache-testdata.outputs.cache-hit != 'true'
uses: actions/checkout@v3
with:
repository: nf-core/test-datasets
ref: ampliseq
path: test-datasets/

- name: Replace remote paths in samplesheets
run: |
for f in ${{ github.workspace }}/test-datasets/samplesheets/*.tsv; do
sed -i "s=https://github.com/nf-core/test-datasets/raw/ampliseq/testdata/=${{ github.workspace }}/test-datasets/=g" $f
echo "========== $f ============"
cat $f
echo "========================================"
done;
# Install the specified version of Nextflow
- name: Install Nextflow
uses: nf-core/setup-nextflow@v1
- name: Publish Test krakenuniq Report
uses: mikepenz/action-junit-report@v3
if: always() # always run even if the previous step fails
with:
version: "${{ matrix.NXF_VER }}"

# Install nf-test
- name: Install nf-test
run: |
wget -qO- https://code.askimed.com/install/nf-test | bash -s $NFTEST_VER
sudo mv nf-test /usr/local/bin/
# Run nf-test with the specified aligner and output test results in TAP format
- name: Run nf-test
run: |
nf-test test tests/pipeline/${{ matrix.aligner }}/ --profile ${{ matrix.profile }} --tap=test.tap
# If the test fails, output the software_versions.yml using the 'batcat' utility
- name: Output log on failure
if: failure()
run: |
sudo apt install bat > /dev/null
batcat --decorations=always --color=always ${{ github.workspace }}/.nf-test/tests/*/output/pipeline_info/software_versions.yml
report_paths: "*.xml"
29 changes: 28 additions & 1 deletion tests/pipeline/lib/UTILS.groovy
Original file line number Diff line number Diff line change
@@ -1,11 +1,38 @@
// Function to remove Nextflow version from software_versions.yml
// Helper functions for pipeline tests

class UTILS {

    // Strip the Nextflow version entry from software_versions.yml so pipeline
    // snapshots stay stable across Nextflow releases.
    // NOTE(review): relies on nf-test's `path()` DSL helper being in scope when
    // this lib class is compiled by nf-test — confirm it resolves there.
    // Returns the parsed YAML mapping (coerced through the declared String type
    // by Groovy when snapshotted).
    public static String removeNextflowVersion(outputDir) {
        def softwareVersions = path("$outputDir/pipeline_info/software_versions.yml").yaml
        if (softwareVersions.containsKey("Workflow")) {
            softwareVersions.Workflow.remove("Nextflow")
        }
        return softwareVersions
    }

    // Copy a file to "<inFilePath>.filtered", dropping lines:
    //   linesToSkip >= 0 : drop the first `linesToSkip` lines
    //   linesToSkip <  0 : drop the last `|linesToSkip|` lines
    // Returns the new File. The output is written in one shot (truncating any
    // stale ".filtered" file left by a previous run), fixing the silent
    // duplicate-content bug of the old append-based implementation.
    public static File filterLines(String inFilePath, int linesToSkip) {
        def lines = new File(inFilePath).readLines()
        def kept
        if (linesToSkip >= 0) {
            // skip from the top
            kept = lines.drop(linesToSkip)
        } else {
            // negative count: trim from the bottom
            kept = lines.take(lines.size() + linesToSkip)
        }
        File outputFile = new File(inFilePath + ".filtered")
        // single truncating write; each kept line keeps its trailing newline
        outputFile.text = kept.collect { it + '\n' }.join('')
        return outputFile
    }
}
19 changes: 2 additions & 17 deletions tests/pipeline/misc/custom_reftax.nf.test
Original file line number Diff line number Diff line change
Expand Up @@ -2,30 +2,15 @@ nextflow_pipeline {

name "Test Workflow main.nf"
script "main.nf"
tag "test_reftaxcustom"
tag "dada2"
tag "pipeline"

test("Custom DADA2 Reference Taxonomy Database") {

when {
params {
outdir = "$outputDir"
config_profile_name = 'Test custom DADA2 reference taxonomy database profile'
config_profile_description = 'Minimal test dataset to check --dada_ref_tax_custom'
// Limit resources so that this can run on GitHub Actions
max_cpus = 2
max_memory = '6.GB'
max_time = '6.h'
// Input data
FW_primer = "GTGYCAGCMGCCGCGGTAA"
RV_primer = "GGACTACNVGGGTWTCTAAT"
input = "https://raw.githubusercontent.com/nf-core/test-datasets/ampliseq/samplesheets/Samplesheet.tsv"
// Custom reference taxonomy
dada_ref_tax_custom = "https://zenodo.org/record/4310151/files/rdp_train_set_18.fa.gz"
dada_ref_tax_custom_sp = "https://zenodo.org/record/4310151/files/rdp_species_assignment_18.fa.gz"
dada_assign_taxlevels = "Kingdom,Phylum,Class,Order,Family,Genus"
// Skip downstream analysis with QIIME2
skip_qiime = true
outdir = "$outputDir"
}
}

Expand Down
18 changes: 2 additions & 16 deletions tests/pipeline/misc/doubleprimers.nf.test
Original file line number Diff line number Diff line change
Expand Up @@ -2,28 +2,14 @@ nextflow_pipeline {

name "Test Workflow main.nf"
script "main.nf"
tag "doubleprimers"
tag "test_doubleprimers"
tag "pipeline"

test("Double-Primers") {

when {
params {
outdir = "$outputDir"
config_profile_name = 'Test doubleprimers profile'
config_profile_description = 'Minimal test dataset to check pipeline function when removing double primers'
// Limit resources so that this can run on GitHub Actions
max_cpus = 2
max_memory = '6.GB'
max_time = '6.h'
// Input data
FW_primer = "NNNNCCTAHGGGRBGCAGCAG"
RV_primer = "GACTACHVGGGTATCTAATCC"
double_primer = true
dada_ref_taxonomy = false
input = "https://raw.githubusercontent.com/nf-core/test-datasets/ampliseq/samplesheets/Samplesheet_double_primer.tsv"
trunc_qmin = 30
skip_fastqc = true
outdir = "$outputDir"
}
}

Expand Down
15 changes: 2 additions & 13 deletions tests/pipeline/misc/fasta_input.nf.test
Original file line number Diff line number Diff line change
Expand Up @@ -2,25 +2,14 @@ nextflow_pipeline {

name "Test Workflow main.nf"
script "main.nf"
tag "fasta_input"
tag "test_fasta"
tag "pipeline"

test("Fasta Input") {

when {
params {
outdir = "$outputDir"
config_profile_name = 'Test fasta input profile'
config_profile_description = 'Minimal test dataset to check pipeline function with fasta input'
// Limit resources so that this can run on GitHub Actions
max_cpus = 2
max_memory = '6.GB'
max_time = '6.h'
// Input data
input = "https://raw.githubusercontent.com/nf-core/test-datasets/ampliseq/testdata/ASV_seqs.fasta"
dada_ref_taxonomy = "rdp=18"
dada_assign_taxlevels = "K,P,C,O,F,Genus"
skip_qiime = true
outdir = "$outputDir"
}
}

Expand Down
18 changes: 2 additions & 16 deletions tests/pipeline/misc/multi_seq.nf.test
Original file line number Diff line number Diff line change
Expand Up @@ -2,28 +2,14 @@ nextflow_pipeline {

name "Test Workflow main.nf"
script "main.nf"
tag "multiseq"
tag "test_multi"
tag "pipeline"

test("Multiple Sequencing Runs") {

when {
params {
outdir = "$outputDir"
config_profile_name = 'Test profile for multiple sequencing runs'
config_profile_description = 'Test dataset for multiple sequencing runs to check pipeline function'
// Limit resources so that this can run on GitHub Actions
max_cpus = 2
max_memory = '6.GB'
max_time = '6.h'
// Input data
skip_cutadapt = true
trunclenf = 200
trunclenr = 150
skip_dada_quality = true
dada_ref_taxonomy = "rdp=18"
skip_dada_addspecies = true
input = "https://raw.githubusercontent.com/nf-core/test-datasets/ampliseq/samplesheets/Samplesheet_multi.tsv"
outdir = "$outputDir"
}
}

Expand Down
30 changes: 2 additions & 28 deletions tests/pipeline/paired_end/paired_end.nf.test
Original file line number Diff line number Diff line change
Expand Up @@ -2,40 +2,14 @@ nextflow_pipeline {

name "Test Workflow main.nf"
script "main.nf"
tag "paired_end"
tag "test"
tag "pipeline"

test("Paired-End") {

when {
params {
outdir = "$outputDir"
config_profile_name = 'Test profile'
config_profile_description = 'Minimal test dataset to check pipeline function'
// Limit resources so that this can run on GitHub Actions
max_cpus = 2
max_memory = '6.GB'
max_time = '6.h'
// Input data
FW_primer = "GTGYCAGCMGCCGCGGTAA"
RV_primer = "GGACTACNVGGGTWTCTAAT"
input = "https://raw.githubusercontent.com/nf-core/test-datasets/ampliseq/samplesheets/Samplesheet.tsv"
metadata = "https://raw.githubusercontent.com/nf-core/test-datasets/ampliseq/samplesheets/Metadata.tsv"
dada_ref_taxonomy = "gtdb"
cut_dada_ref_taxonomy = true
qiime_ref_taxonomy = "greengenes85"
max_len_asv = 255
filter_ssu = "bac"
//this is to remove low abundance ASVs to reduce runtime of downstream processes
min_samples = 2
min_frequency = 10
//produce average barplots
metadata_category_barplot = "treatment1,badpairwise10"
//restrict ANCOM analysis to higher taxonomic levels
tax_agglom_max = 4
sbdiexport = true
qiime_adonis_formula = "treatment1,mix8"
diversity_rarefaction_depth = 500
outdir = "$outputDir"
}
}

Expand Down
18 changes: 2 additions & 16 deletions tests/pipeline/paired_end/pe_iontorrent.nf.test
Original file line number Diff line number Diff line change
Expand Up @@ -2,28 +2,14 @@ nextflow_pipeline {

name "Test Workflow main.nf"
script "main.nf"
tag "iontorrent"
tag "test_iontorrent"
tag "pipeline"

test("Paired-End Ion Torrent") {

when {
params {
outdir = "$outputDir"
config_profile_name = 'Test profile single-end ionTorrent reads'
config_profile_description = 'Minimal test dataset to check pipeline function with single-end ionTorrent sequences'
// Limit resources so that this can run on GitHub Actions
max_cpus = 2
max_memory = '6.GB'
max_time = '6.h'
// Input data
FW_primer = "GTGARTCATCGARTCTTTG"
RV_primer = "TCCTCSSCTTATTGATATGC"
dada_ref_taxonomy = "unite-fungi"
input = "https://raw.githubusercontent.com/nf-core/test-datasets/ampliseq/samplesheets/Samplesheet_it_SE_ITS.tsv"
iontorrent = true
max_ee = 5
skip_qiime = true
outdir = "$outputDir"
}
}

Expand Down
18 changes: 2 additions & 16 deletions tests/pipeline/paired_end/pe_novaseq.nf.test
Original file line number Diff line number Diff line change
Expand Up @@ -2,28 +2,14 @@ nextflow_pipeline {

name "Test Workflow main.nf"
script "main.nf"
tag "novaseq"
tag "test_novaseq"
tag "pipeline"

test("Paired-End Novaseq") {

when {
params {
outdir = "$outputDir"
config_profile_name = 'Test profile Novaseq data'
config_profile_description = 'Minimal test dataset to check pipeline function with Novaseq data'
// Limit resources so that this can run on GitHub Actions
max_cpus = 2
max_memory = '6.GB'
max_time = '6.h'
// Input data
input = "https://raw.githubusercontent.com/nf-core/test-datasets/ampliseq/samplesheets/Samplesheet_novaseq.tsv"
illumina_novaseq = true
// Skip steps
skip_cutadapt = true
skip_barrnap = true
skip_taxonomy = true
skip_qiime = true
outdir = "$outputDir"
}
}

Expand Down
Loading

0 comments on commit 1a4605c

Please sign in to comment.