Commit b0d5f5f

Merge branch 'master' into uw_hyak_pedslabs
maxulysse authored Jul 8, 2023
2 parents df40b83 + 395ff45 commit b0d5f5f
Showing 4 changed files with 187 additions and 16 deletions.
6 changes: 3 additions & 3 deletions conf/genouest.config
@@ -8,7 +8,7 @@ params {
singularity {
enabled = true
autoMounts = true
- runOptions = '-B /scratch:/scratch -B /local:/local -B /db:/db'
+ runOptions = '-B /scratch:/scratch -B /local:/local -B /db:/db -B /groups:/groups'
}

process {
@@ -18,7 +18,7 @@ process {
params {
igenomes_ignore = true
igenomesIgnore = true //deprecated
- max_memory = 750.GB
- max_cpus = 80
+ max_memory = 3000.GB
+ max_cpus = 160
max_time = 336.h
}
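
The raised caps matter because nf-core pipelines clamp every per-process resource request against `params.max_memory`, `params.max_cpus` and `params.max_time`, typically through a `check_max()` helper defined in the pipeline itself rather than in this config. A minimal sketch of the pipeline-side pattern that consumes these values (assuming the standard nf-core `base.config` layout, not code from this commit):

```groovy
// Sketch only: check_max() comes from the pipeline's nextflow.config and caps
// each request at the site's params.max_* values set in this profile.
process {
    withLabel: 'process_high' {
        cpus   = { check_max( 16 * task.attempt, 'cpus' ) }
        memory = { check_max( 128.GB * task.attempt, 'memory' ) }
        time   = { check_max( 16.h * task.attempt, 'time' ) }
    }
}
```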
30 changes: 22 additions & 8 deletions conf/vai.config
@@ -4,17 +4,31 @@ params {
config_profile_url = 'https://vanandelinstitute.sharepoint.com/sites/SC/SitePages/HPC3-High-Performance-Cluster-and-Cloud-Computing.aspx'
max_memory = 250.GB
max_cpus = 40
+ max_time = 640.h
}

- process {
- beforeScript = 'alias singularity=/varidata/research/software/singularity/singularity-ce-3.8.2/bin/singularity'
- executor = 'pbs'
- queue = { task.time <= 48.h ? 'shortq' : 'longq' }
- maxRetries = 2
- max_time = 336.h
- }

singularity {
enabled = true
autoMounts = true
}

// See if we can find evidence that we're on the new HPC
def new_cluster = false
try {
new_cluster = ['/bin/bash', '-c', 'echo $HOSTNAME'].execute().text.trim() ==~ /(compute|submit)[0-9]{3}/
} catch (java.io.IOException e) {
System.err.println("WARNING: Couldn't figure out which cluster we're on, defaulting to old (PBS) cluster")
}

if (new_cluster) {
System.out.println("Using VAI institutional configuration for new HPC cluster")
} else {
System.out.println("Using VAI institutional configuration for old HPC cluster")
}


process {
beforeScript = { new_cluster ? '' : 'module load VARI/singularity' }()
executor = { new_cluster ? 'slurm' : 'pbs' }()
queue = { new_cluster ? 'long' : { task.time <= 48.h ? 'shortq' : 'longq' } }()
}
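
The new-cluster detection hinges entirely on the hostname regex, so it is worth seeing what it does and does not match. A quick illustrative check in Groovy (the hostnames are invented, not taken from VAI documentation):

```groovy
// compute###/submit### style names are treated as the new SLURM cluster;
// anything else falls back to the old PBS settings.
assert 'compute042' ==~ /(compute|submit)[0-9]{3}/
assert 'submit001'  ==~ /(compute|submit)[0-9]{3}/
assert !('oldhpc-login-1' ==~ /(compute|submit)[0-9]{3}/)
```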
161 changes: 159 additions & 2 deletions conf/vsc_ugent.config
@@ -1,5 +1,26 @@
// Set up the Tier 1 parameter
params.validationSchemaIgnoreParams = params.validationSchemaIgnoreParams.toString() + ",tier1_project"
if (!params.tier1_project) {
params.tier1_project = null
}

// Get the hostname and check some values for tier1
def hostname = "doduo"
try {
hostname = ['/bin/bash', '-c', 'sinfo --local -N -h | head -n 1 | cut -d " " -f1'].execute().text.trim()
} catch (java.io.IOException e) {
System.err.println("WARNING: Could not run sinfo to determine current cluster, defaulting to doduo")
}

if(!params.tier1_project && hostname.contains("dodrio")){
System.err.println("Please specify your project with --tier1_project in your Nextflow command or with params.tier1_project in your config file.")
System.exit(1)
}

// Define the Scratch directory
def scratch_dir = System.getenv("VSC_SCRATCH_VO_USER") ?: System.getenv("VSC_SCRATCH")
def scratch_dir = System.getenv("VSC_SCRATCH_PROJECTS_BASE") ? "${System.getenv("VSC_SCRATCH_PROJECTS_BASE")}/${params.tier1_project}" : // Tier 1 scratch
System.getenv("VSC_SCRATCH_VO_USER") ?: // VO scratch
System.getenv("VSC_SCRATCH") // user scratch

// Specify the work directory
workDir = "$scratch_dir/work"
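
On the Tier-1 Dodrio system this config refuses to start without a project account, and the same `tier1_project` value feeds both the project scratch path above and the SLURM accounting flag in the cluster profiles further down. A hedged sketch of the user-side snippet that would satisfy it (the project name is a placeholder, not a real account):

```groovy
// Hypothetical user config, e.g. ~/.nextflow/config or a file passed with -c:
params {
    tier1_project = '2023_000'   // placeholder Tier-1 project/account name
}
```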
@@ -35,7 +56,7 @@ env {

// AWS maximum retries for errors (This way the pipeline doesn't fail if the download fails one time)
aws {
maxErrorRetry = 3
}

// Define profiles for each cluster
@@ -120,6 +141,142 @@
}
}

cpu_rome {
params {
config_profile_description = 'HPC_DODRIO_cpu_rome profile for use on the Dodrio/cpu_rome cluster of the VSC HPC.'
config_profile_contact = '[email protected]'
config_profile_url = 'https://www.ugent.be/hpc/en'
max_memory = 256.GB
max_cpus = 128
max_time = "3day"
}

process {
executor = 'slurm'
queue = 'dodrio/cpu_rome'
clusterOptions = "-A ${params.tier1_project}"
}
}

cpu_rome_512 {
params {
config_profile_description = 'HPC_DODRIO_cpu_rome_512 profile for use on the Dodrio/cpu_rome_512 cluster of the VSC HPC.'
config_profile_contact = '[email protected]'
config_profile_url = 'https://www.ugent.be/hpc/en'
max_memory = 512.GB
max_cpus = 128
max_time = "3day"
}

process {
executor = 'slurm'
queue = 'dodrio/cpu_rome_512'
clusterOptions = "-A ${params.tier1_project}"
}
}

cpu_milan {
params {
config_profile_description = 'HPC_DODRIO_cpu_milan profile for use on the Dodrio/cpu_milan cluster of the VSC HPC.'
config_profile_contact = '[email protected]'
config_profile_url = 'https://www.ugent.be/hpc/en'
max_memory = 256.GB
max_cpus = 128
max_time = "3day"
}

process {
executor = 'slurm'
queue = 'dodrio/cpu_milan'
clusterOptions = "-A ${params.tier1_project}"
}
}

gpu_rome_a100_40 {
params {
config_profile_description = 'HPC_DODRIO_gpu_rome_a100_40 profile for use on the Dodrio/gpu_rome_a100_40 cluster of the VSC HPC.'
config_profile_contact = '[email protected]'
config_profile_url = 'https://www.ugent.be/hpc/en'
max_memory = 256.GB
max_cpus = 48
max_time = "3day"
}

process {
executor = 'slurm'
queue = 'dodrio/gpu_rome_a100_40'
clusterOptions = "-A ${params.tier1_project}"
}
}

gpu_rome_a100_80 {
params {
config_profile_description = 'HPC_DODRIO_gpu_rome_a100_80 profile for use on the Dodrio/gpu_rome_a100_80 cluster of the VSC HPC.'
config_profile_contact = '[email protected]'
config_profile_url = 'https://www.ugent.be/hpc/en'
max_memory = 512.GB
max_cpus = 48
max_time = "3day"
}

process {
executor = 'slurm'
queue = 'dodrio/gpu_rome_a100_80'
clusterOptions = "-A ${params.tier1_project}"
}
}

debug_rome {
params {
config_profile_description = 'HPC_DODRIO_debug_rome profile for use on the Dodrio/debug_rome cluster of the VSC HPC.'
config_profile_contact = '[email protected]'
config_profile_url = 'https://www.ugent.be/hpc/en'
max_memory = 256.GB
max_cpus = 48
max_time = "3day"
}

process {
executor = 'slurm'
queue = 'dodrio/debug_rome'
clusterOptions = "-A ${params.tier1_project}"
}
}

cpu_rome_all {
params {
config_profile_description = 'HPC_DODRIO_cpu_rome_all profile for use on the Dodrio/cpu_rome_all cluster of the VSC HPC.'
config_profile_contact = '[email protected]'
config_profile_url = 'https://www.ugent.be/hpc/en'
max_memory = 250.GB
max_cpus = 128
max_time = "3day"
}

process {
executor = 'slurm'
queue = 'dodrio/cpu_rome_all'
clusterOptions = "-A ${params.tier1_project}"
}
}

gpu_rome_a100 {
params {
config_profile_description = 'HPC_DODRIO_gpu_rome_a100 profile for use on the Dodrio/gpu_rome_a100 cluster of the VSC HPC.'
config_profile_contact = '[email protected]'
config_profile_url = 'https://www.ugent.be/hpc/en'
max_memory = 384.GB
max_cpus = 48
max_time = "3day"
}

process {
executor = 'slurm'
queue = 'dodrio/gpu_rome_a100'
clusterOptions = "-A ${params.tier1_project}"
}
}

stub {
params {
config_profile_description = 'Stub profile for the VSC HPC. Please also specify the `-stub` argument when using this profile.'
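All of the Dodrio entries above share one shape: per-node caps in `params`, a SLURM queue of the form `dodrio/<partition>`, and `clusterOptions` charging the Tier-1 project. As an illustration only (not part of this commit, and `cpu_example` is an invented partition name), one more profile would look like this:

```groovy
profiles {
    cpu_example {
        params {
            max_memory = 256.GB     // per-node RAM cap for the invented partition
            max_cpus   = 128        // cores per node
            max_time   = "3day"
        }
        process {
            executor       = 'slurm'
            queue          = 'dodrio/cpu_example'
            clusterOptions = "-A ${params.tier1_project}"
        }
    }
}
```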
6 changes: 3 additions & 3 deletions docs/genouest.md
@@ -8,10 +8,10 @@ To use, run the pipeline with `-profile genouest`. This will download and launch

Nextflow is installed on the GenOuest cluster. Some documentation is available on the [GenOuest website](https://www.genouest.org/howto/#nextflow).

- You need to activate it like this:
+ You need to activate it like this (or any more recent version in the same directory):

```bash
- source /local/env/envnextflow-19.07.0.sh
+ source /local/env/envnextflow-22.10.4.sh
```

Nextflow manages each process as a separate job that is submitted to the cluster by using the sbatch command.
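
Submission via sbatch implies the genouest profile sets the SLURM executor for every process; a minimal sketch of what that presumably looks like on the config side (directive values are assumptions, not copied from conf/genouest.config):

```groovy
process {
    executor = 'slurm'   // each Nextflow task is submitted as an sbatch job
}
```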
@@ -22,7 +22,7 @@ Nextflow shouldn't run directly on the submission node but on a compute node. Run
srun --pty bash

# Load the dependencies if not done before
- source /local/env/envnextflow-19.07.0.sh
+ source /local/env/envnextflow-22.10.4.sh

# Run a downloaded/git-cloned nextflow workflow from
nextflow run \\
