Merge pull request #442 from broadinstitute/ct-beast_gpu_new_defaults
set default GPU to K80->P4 in BEAST task; parameterize beagle_order
dpark01 authored Dec 21, 2022
2 parents 9605025 + afaf3f8 commit 07348d4
Showing 1 changed file with 51 additions and 11 deletions.
pipes/WDL/tasks/tasks_interhost.wdl (62 changes: 51 additions & 11 deletions)
@@ -96,6 +96,9 @@ task multi_align_mafft {
 task beast {
   input {
     File    beauti_xml
 
+    Boolean beagle_double_precision=true
+    String? beagle_order
+
     String? accelerator_type
     Int?    accelerator_count
@@ -106,23 +109,60 @@ task beast {
     String docker = "quay.io/broadinstitute/beast-beagle-cuda:1.10.5pre"
   }
 
-  Int disk_size = 300
-  Int boot_disk = 50
+  meta {
+    description: "Execute GPU-accelerated BEAST. For tips on performance, see https://beast.community/performance#gpu"
+  }
+  parameter_meta {
+    beagle_double_precision: {
+      description: "If beagle_double_precision=true, use double-precision calculation (perhaps set to false to gain execution speed if MCMC chain convergence is possible using single-precision calculation)."
+    }
+    beagle_order: {
+      description: "The order of CPU(0) and GPU(1+) resources used to process partitioned data."
+    }
+
+    accelerator_type: {
+      description: "[GCP] The model of GPU to use. For availability and pricing on GCP, see https://cloud.google.com/compute/gpus-pricing#gpus"
+    }
+    accelerator_count: {
+      description: "[GCP] The number of GPUs of the specified type to use."
+    }
+    gpu_type: {
+      description: "[Terra] The model of GPU to use. For availability and pricing on GCP, see https://support.terra.bio/hc/en-us/articles/4403006001947-Getting-started-with-GPUs-in-a-Cloud-Environment"
+    }
+    gpu_count: {
+      description: "[Terra] The number of GPUs of the specified type to use."
+    }
+  }
+
+  Int disk_size = 300
+  Int boot_disk = 50
   Int disk_size_az = disk_size + boot_disk
 
-  # TO DO: parameterize gpuType and gpuCount
+  # platform-agnostic number of GPUs we're actually using
+  Int gpu_count_used = select_first([accelerator_count, gpu_count, 1])
 
   command {
     set -e
     beast -beagle_info
     nvidia-smi
+
+    # if beagle_order is not specified by the user,
+    # create an appropriate string based on the gpu count
+
+    default_beagle_order="$(seq -s, ~{gpu_count_used})"
+    beagle_order=~{default="$default_beagle_order" beagle_order}
+    echo "beagle_order: $beagle_order"
+
     bash -c "sleep 60; nvidia-smi" &
     beast \
       -beagle_multipartition off \
-      -beagle_GPU -beagle_cuda -beagle_SSE \
-      -beagle_double -beagle_scaling always \
-      -beagle_order 1,2,3,4 \
-      ${beauti_xml}
+      -beagle_GPU \
+      -beagle_cuda \
+      -beagle_SSE \
+      ~{true="-beagle_double" false="-beagle_single" beagle_double_precision} \
+      -beagle_scaling always \
+      ~{'-beagle_order ' + beagle_order} \
+      ~{beauti_xml}
   }
 
   output {
@@ -143,10 +183,10 @@ task beast {
     gpu: true # dxWDL
     dx_timeout: "40H" # dxWDL
     dx_instance_type: "mem1_ssd1_gpu2_x8" # dxWDL
-    acceleratorType: select_first([accelerator_type, "nvidia-tesla-k80"]) # GCP PAPIv2
-    acceleratorCount: select_first([accelerator_count, 4]) # GCP PAPIv2
-    gpuType: select_first([gpu_type, "nvidia-tesla-k80"]) # Terra
-    gpuCount: select_first([gpu_count, 4]) # Terra
+    acceleratorType: select_first([accelerator_type, "nvidia-tesla-p4"]) # GCP PAPIv2
+    acceleratorCount: select_first([accelerator_count, 1]) # GCP PAPIv2
+    gpuType: select_first([gpu_type, "nvidia-tesla-p4"]) # Terra
+    gpuCount: select_first([gpu_count, 1]) # Terra
     nvidiaDriverVersion: "410.79"
   }
 }
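
A note on the new -beagle_order defaulting: the command block above builds the default ordering string from the GPU count with seq. Below is a minimal shell sketch of that same logic outside WDL; the variables gpu_count_used and user_beagle_order are illustrative stand-ins for the WDL interpolations ~{gpu_count_used} and ~{beagle_order}, not actual task inputs.

#!/usr/bin/env bash
# Illustrative sketch of the beagle_order defaulting in the task's command block.
# gpu_count_used and user_beagle_order stand in for the WDL values; they are
# not real task inputs here.
set -e

gpu_count_used=1        # mirrors the new default of a single nvidia-tesla-p4
user_beagle_order=""    # empty means the caller did not set beagle_order

# "seq -s, N" prints "1,2,...,N". In BEAGLE resource numbering, 0 is the CPU
# and 1+ are GPUs, so this orders all available GPUs.
default_beagle_order="$(seq -s, "$gpu_count_used")"
beagle_order="${user_beagle_order:-$default_beagle_order}"

echo "beagle_order: $beagle_order"   # "1" with one GPU; "1,2,3,4" if gpu_count_used=4

With the new defaults (one nvidia-tesla-p4 and no explicit beagle_order), the task therefore passes "-beagle_order 1" to beast; callers running multiple GPUs can either raise accelerator_count/gpu_count or supply beagle_order directly.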