diff --git a/pipes/WDL/tasks/tasks_interhost.wdl b/pipes/WDL/tasks/tasks_interhost.wdl
index bf4fb1b2c..38f64ad66 100644
--- a/pipes/WDL/tasks/tasks_interhost.wdl
+++ b/pipes/WDL/tasks/tasks_interhost.wdl
@@ -96,6 +96,9 @@ task multi_align_mafft {
 task beast {
   input {
     File beauti_xml
+
+    Boolean beagle_double_precision=true
+    String? beagle_order
 
     String? accelerator_type
     Int? accelerator_count
@@ -106,21 +109,59 @@ task beast {
     String docker = "quay.io/broadinstitute/beast-beagle-cuda:1.10.5pre"
   }
 
-  Int disk_size = 300
-  Int boot_disk = 50
+  meta {
+    description: "Execute GPU-accelerated BEAST. For tips on performance, see https://beast.community/performance#gpu"
+  }
+  parameter_meta {
+    beagle_double_precision: {
+      description: "If beagle_double_precision=true, use double-precision calculation (perhaps set to false to gain execution speed if MCMC chain convergence is possible using single-precision calculation)."
+    }
+    beagle_order: {
+      description: "The order of CPU(0) and GPU(1+) resources used to process partitioned data."
+    }
+
+    accelerator_type: {
+      description: "[GCP] The model of GPU to use. For availability and pricing on GCP, see https://cloud.google.com/compute/gpus-pricing#gpus"
+    }
+    accelerator_count: {
+      description: "[GCP] The number of GPUs of the specified type to use."
+    }
+    gpu_type: {
+      description: "[Terra] The model of GPU to use. For availability and pricing on GCP, see https://support.terra.bio/hc/en-us/articles/4403006001947-Getting-started-with-GPUs-in-a-Cloud-Environment"
+    }
+    gpu_count: {
+      description: "[Terra] The number of GPUs of the specified type to use."
+    }
+  }
+
+  Int disk_size = 300
+  Int boot_disk = 50
   Int disk_size_az = disk_size + boot_disk
-  # TO DO: parameterize gpuType and gpuCount
+  # platform-agnostic number of GPUs we're actually using
+  Int gpu_count_used = select_first([accelerator_count, gpu_count, 1])
 
   command {
     set -e
     beast -beagle_info
     nvidia-smi
+
+    # if beagle_order is not specified by the user,
+    # create an appropriate string based on the gpu count
+    # (default "1..N" uses every GPU; 0 would be the CPU)
+
+    default_beagle_order="$(seq -s, ~{gpu_count_used})"
+    beagle_order=~{default="$default_beagle_order" beagle_order}
+    echo "beagle_order: $beagle_order"
+    bash -c "sleep 60; nvidia-smi" &
     beast \
       -beagle_multipartition off \
-      -beagle_GPU -beagle_cuda -beagle_SSE \
-      -beagle_double -beagle_scaling always \
-      -beagle_order 1,2,3,4 \
-      ${beauti_xml}
+      -beagle_GPU \
+      -beagle_cuda \
+      -beagle_SSE \
+      ~{true="-beagle_double" false="-beagle_single" beagle_double_precision} \
+      -beagle_scaling always \
+      -beagle_order $beagle_order \
+      ~{beauti_xml}
   }
 
   output {
@@ -143,10 +184,10 @@ task beast {
     gpu: true # dxWDL
     dx_timeout: "40H" # dxWDL
     dx_instance_type: "mem1_ssd1_gpu2_x8" # dxWDL
-    acceleratorType: select_first([accelerator_type, "nvidia-tesla-k80"]) # GCP PAPIv2
-    acceleratorCount: select_first([accelerator_count, 4]) # GCP PAPIv2
-    gpuType: select_first([gpu_type, "nvidia-tesla-k80"]) # Terra
-    gpuCount: select_first([gpu_count, 4]) # Terra
+    acceleratorType: select_first([accelerator_type, "nvidia-tesla-p4"]) # GCP PAPIv2
+    acceleratorCount: select_first([accelerator_count, 1]) # GCP PAPIv2
+    gpuType: select_first([gpu_type, "nvidia-tesla-p4"]) # Terra
+    gpuCount: select_first([gpu_count, 1]) # Terra
     nvidiaDriverVersion: "410.79"
   }
 }