nf-core/configs: Alliance Canada Configuration
Configuration for running Nextflow on the clusters of the Digital Research Alliance of Canada. Invoke it by specifying -profile alliance_canada.
You also need to supply the name of the group under which you are working, or whose resource allocation you want to use, by running export SLURM_ACCOUNT=<def-user> before you run any nf-core pipeline. If you run nf-core pipelines frequently and always use the same resource allocation, you may find it more convenient to add the SLURM_ACCOUNT environment variable to your ~/.bashrc file.
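For example, assuming your resource allocation is named def-someuser and <pipeline> stands for the nf-core pipeline you want to run (both are placeholders), a typical invocation might look like:
export SLURM_ACCOUNT=def-someuser
nextflow run nf-core/<pipeline> -profile alliance_canada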
For detailed information on running nf-core pipelines on Alliance clusters, please visit the documentation: https://docs.alliancecan.ca/wiki/Nextflow
If you run into issues, please contact Alliance Support: https://docs.alliancecan.ca/wiki/Technical_support
Config file
params {
config_profile_description = 'Alliance Canada HPC config'
config_profile_contact = 'Jerry Li (@jerryakii)'
config_profile_url = 'https://docs.alliancecan.ca/wiki/Nextflow'
max_time = 168.h
max_cpus = 40
max_memory = 186.GB
}
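// Clean up intermediate files in the work directory after a successful run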
cleanup = true
singularity {
enabled = true
autoMounts = true
}
apptainer {
autoMounts = true
}
// Group name for the resource allocation must be supplied via the SLURM_ACCOUNT environment variable
process {
executor = 'slurm'
clusterOptions = "--account=${System.getenv('SLURM_ACCOUNT')}"
maxRetries = 1
errorStrategy = { task.exitStatus in [125,139] ? 'retry' : 'finish' }
cpus = 1
time = '3h'
resourceLimits = [
memory: 186.GB,
cpus: 40,
time: 168.h
]
}
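// Throttle status polling and job submission, and cap the number of jobs queued at once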
executor {
pollInterval = '60 sec'
submitRateLimit = '60/1min'
queueSize = 100
}
// Cluster name is taken from the HOSTNAME environment variable
// If it is not set, default to beluga as it has the lowest limits
def hostname = System.getenv('HOSTNAME')
if (!hostname) {
System.err.println("WARNING: Could not determine current cluster, defaulting to beluga")
hostname = "beluga"
}
// Cluster Beluga
if (hostname.startsWith("beluga")) {
params.config_profile_description = 'Alliance Canada (Beluga) cluster profile provided by nf-core/configs.'
params.max_memory = 186.GB
params.max_cpus = 40
process {
resourceLimits = [
memory: 186.GB,
cpus: 40,
time: 168.h
]
}
}
// Cluster Cedar
if (hostname.startsWith("cedar")) {
params.config_profile_description = 'Alliance Canada (Cedar) cluster profile provided by nf-core/configs.'
params.max_memory = 186.GB
params.max_cpus = 48
params.max_time = 672.h
process {
resourceLimits = [
memory: 186.GB,
cpus: 48,
time: 672.h
]
}
}
// Cluster Graham
if (hostname.startsWith("gra")) {
params.config_profile_description = 'Alliance Canada (Graham) cluster profile provided by nf-core/configs.'
params.max_memory = 186.GB
params.max_cpus = 40
process {
resourceLimits = [
memory: 186.GB,
cpus: 40,
time: 168.h
]
}
}
// Cluster Narval
if (hostname.startsWith("narval")) {
params.config_profile_description = 'Alliance Canada (Narval) cluster profile provided by nf-core/configs.'
params.max_memory = 249.GB
params.max_cpus = 64
process {
resourceLimits = [
memory: 249.GB,
cpus: 64,
time: 168.h
]
}
}
// Cluster Rorqual
if (hostname.startsWith("rorqual")) {
params.config_profile_description = 'Alliance Canada (Rorqual) cluster profile provided by nf-core/configs.'
params.max_memory = 512.GB
params.max_cpus = 64
process {
resourceLimits = [
memory: 512.GB,
cpus: 64,
time: 168.h
]
}
}