diff --git a/.github/workflows/c-cpp.yml b/.github/workflows/c-cpp.yml
index 88df687bd6..8eede28705 100644
--- a/.github/workflows/c-cpp.yml
+++ b/.github/workflows/c-cpp.yml
@@ -38,7 +38,7 @@ jobs:
   CPU_MAC:
     runs-on: macos-latest
     env:
-      FC: gfortran-11
+      FC: gfortran-14 # see #971
     strategy:
       matrix:
         folder: [ epochX/cudacpp/ee_mumu.mad/SubProcesses/P1_epem_mupmum, epochX/cudacpp/gg_ttgg.mad/SubProcesses/P1_gg_ttxgg ]
diff --git a/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/cudacpp.mk b/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/cudacpp.mk
index a052631aa9..78512a5eeb 100644
--- a/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/cudacpp.mk
+++ b/epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/cudacpp.mk
@@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %%/bin/nvcc,%%,$(shell which nvcc 2>/dev/null))
 # Set HIP_HOME from the path to hipcc, if it exists
 override HIP_HOME = $(patsubst %%/bin/hipcc,%%,$(shell which hipcc 2>/dev/null))
 
-# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists
-# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?)
-ifneq ($(CUDA_HOME),)
-  USE_NVTX ?=-DUSE_NVTX
-  CUDA_INC = -I$(CUDA_HOME)/include/
+# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965)
+ifeq ($(CUDA_HOME),)
+  # CUDA_HOME is empty (nvcc not found)
+  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/),)
+  # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist?
+  override CUDA_INC=
 else
+  CUDA_INC = -I$(CUDA_HOME)/include/
+endif
+###$(info CUDA_INC=$(CUDA_INC))
+
+# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965)
+ifeq ($(CUDA_INC),)
+  # $(CUDA_HOME)/include/ does not exist
   override USE_NVTX=
-  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),)
+  # $(CUDA_HOME)/include/ exists but NVTX headers do not exist?
+  override USE_NVTX=
+else
+  # $(CUDA_HOME)/include/nvtx3/nvToolsExt.h exists: use NVTX
+  # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed)
+  override USE_NVTX=-DUSE_NVTX
 endif
+###$(info USE_NVTX=$(USE_NVTX))
 
 # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024)
 # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP.
@@ -424,13 +440,18 @@ endif
 # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...)
 ifeq ($(HASCURAND),)
   ifeq ($(GPUCC),) # CPU-only build
-    ifneq ($(CUDA_HOME),)
+    ifeq ($(CUDA_INC),)
+      # $(CUDA_HOME)/include/ does not exist (see #965)
+      override HASCURAND = hasNoCurand
+    else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),)
+      # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965)
+      override HASCURAND = hasNoCurand
+    else
       # By default, assume that curand is installed if a CUDA installation exists
       override HASCURAND = hasCurand
-    else
-      override HASCURAND = hasNoCurand
     endif
   else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build
+    # By default, assume that curand is installed if a CUDA build is requested
     override HASCURAND = hasCurand
   else # non-Nvidia GPU build
     override HASCURAND = hasNoCurand
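
The CUDA_INC/USE_NVTX cascade introduced in the template above can be exercised on its own. The following standalone sketch (a hypothetical detect.mk, not part of this patch) reproduces the same wildcard-based checks and prints the result at parse time; it assumes only GNU Make and a 'which' command on the PATH, exactly as cudacpp.mk itself does:

# detect.mk - standalone sketch of the wildcard-based detection above (hypothetical, not part of this patch)
# Set CUDA_HOME from the path to nvcc, if it exists (same recipe as cudacpp.mk)
override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null))
# CUDA_INC is set only if nvcc was found AND its include directory exists
ifeq ($(CUDA_HOME),)
  override CUDA_INC=
else ifeq ($(wildcard $(CUDA_HOME)/include/),)
  override CUDA_INC=
else
  CUDA_INC = -I$(CUDA_HOME)/include/
endif
# NVTX is enabled only if, in addition, the NVTX v3 headers exist
ifeq ($(CUDA_INC),)
  override USE_NVTX=
else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),)
  override USE_NVTX=
else
  override USE_NVTX=-DUSE_NVTX
endif
$(info CUDA_INC=$(CUDA_INC))
$(info USE_NVTX=$(USE_NVTX))
all: ;

Running 'make -f detect.mk' on a host where nvcc is on the PATH but the toolkit has no include/nvtx3/ directory should print a non-empty CUDA_INC and an empty USE_NVTX, i.e. the kind of partial CUDA installation that #965 reports.
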
diff --git a/epochX/cudacpp/ee_mumu.mad/SubProcesses/cudacpp.mk b/epochX/cudacpp/ee_mumu.mad/SubProcesses/cudacpp.mk
index 359f16c029..9cff5e1a60 100644
--- a/epochX/cudacpp/ee_mumu.mad/SubProcesses/cudacpp.mk
+++ b/epochX/cudacpp/ee_mumu.mad/SubProcesses/cudacpp.mk
@@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null))
 # Set HIP_HOME from the path to hipcc, if it exists
 override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null))
 
-# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists
-# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?)
-ifneq ($(CUDA_HOME),)
-  USE_NVTX ?=-DUSE_NVTX
-  CUDA_INC = -I$(CUDA_HOME)/include/
+# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965)
+ifeq ($(CUDA_HOME),)
+  # CUDA_HOME is empty (nvcc not found)
+  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/),)
+  # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist?
+  override CUDA_INC=
 else
+  CUDA_INC = -I$(CUDA_HOME)/include/
+endif
+###$(info CUDA_INC=$(CUDA_INC))
+
+# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965)
+ifeq ($(CUDA_INC),)
+  # $(CUDA_HOME)/include/ does not exist
   override USE_NVTX=
-  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),)
+  # $(CUDA_HOME)/include/ exists but NVTX headers do not exist?
+  override USE_NVTX=
+else
+  # $(CUDA_HOME)/include/nvtx3/nvToolsExt.h exists: use NVTX
+  # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed)
+  override USE_NVTX=-DUSE_NVTX
 endif
+###$(info USE_NVTX=$(USE_NVTX))
 
 # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024)
 # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP.
@@ -424,13 +440,18 @@ endif
 # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...)
 ifeq ($(HASCURAND),)
   ifeq ($(GPUCC),) # CPU-only build
-    ifneq ($(CUDA_HOME),)
+    ifeq ($(CUDA_INC),)
+      # $(CUDA_HOME)/include/ does not exist (see #965)
+      override HASCURAND = hasNoCurand
+    else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),)
+      # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965)
+      override HASCURAND = hasNoCurand
+    else
       # By default, assume that curand is installed if a CUDA installation exists
       override HASCURAND = hasCurand
-    else
-      override HASCURAND = hasNoCurand
     endif
   else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build
+    # By default, assume that curand is installed if a CUDA build is requested
     override HASCURAND = hasCurand
   else # non-Nvidia GPU build
     override HASCURAND = hasNoCurand
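
The same pattern drives the new CPU-only HASCURAND fallback. A minimal sketch under the same assumptions (a hypothetical curand.mk, again not part of this patch), which folds the empty-CUDA_HOME case into the first wildcard test:

# curand.mk - standalone sketch of the CPU-only HASCURAND fallback (hypothetical, not part of this patch)
override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null))
# NB: if CUDA_HOME is empty this wildcard tests '/include/', which normally does not exist
ifeq ($(wildcard $(CUDA_HOME)/include/),)
  override CUDA_INC=
else
  CUDA_INC = -I$(CUDA_HOME)/include/
endif
ifeq ($(CUDA_INC),)
  # no usable CUDA include directory: CURAND headers cannot be there
  override HASCURAND = hasNoCurand
else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),)
  # the include directory exists but curand.h does not
  override HASCURAND = hasNoCurand
else
  # curand.h was found: assume CURAND is usable
  override HASCURAND = hasCurand
endif
$(info HASCURAND=$(HASCURAND))
all: ;
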
diff --git a/epochX/cudacpp/ee_mumu.sa/SubProcesses/cudacpp.mk b/epochX/cudacpp/ee_mumu.sa/SubProcesses/cudacpp.mk
index 359f16c029..9cff5e1a60 100644
--- a/epochX/cudacpp/ee_mumu.sa/SubProcesses/cudacpp.mk
+++ b/epochX/cudacpp/ee_mumu.sa/SubProcesses/cudacpp.mk
@@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null))
 # Set HIP_HOME from the path to hipcc, if it exists
 override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null))
 
-# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists
-# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?)
-ifneq ($(CUDA_HOME),)
-  USE_NVTX ?=-DUSE_NVTX
-  CUDA_INC = -I$(CUDA_HOME)/include/
+# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965)
+ifeq ($(CUDA_HOME),)
+  # CUDA_HOME is empty (nvcc not found)
+  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/),)
+  # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist?
+  override CUDA_INC=
 else
+  CUDA_INC = -I$(CUDA_HOME)/include/
+endif
+###$(info CUDA_INC=$(CUDA_INC))
+
+# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965)
+ifeq ($(CUDA_INC),)
+  # $(CUDA_HOME)/include/ does not exist
   override USE_NVTX=
-  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),)
+  # $(CUDA_HOME)/include/ exists but NVTX headers do not exist?
+  override USE_NVTX=
+else
+  # $(CUDA_HOME)/include/nvtx3/nvToolsExt.h exists: use NVTX
+  # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed)
+  override USE_NVTX=-DUSE_NVTX
 endif
+###$(info USE_NVTX=$(USE_NVTX))
 
 # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024)
 # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP.
@@ -424,13 +440,18 @@ endif
 # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...)
 ifeq ($(HASCURAND),)
   ifeq ($(GPUCC),) # CPU-only build
-    ifneq ($(CUDA_HOME),)
+    ifeq ($(CUDA_INC),)
+      # $(CUDA_HOME)/include/ does not exist (see #965)
+      override HASCURAND = hasNoCurand
+    else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),)
+      # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965)
+      override HASCURAND = hasNoCurand
+    else
       # By default, assume that curand is installed if a CUDA installation exists
       override HASCURAND = hasCurand
-    else
-      override HASCURAND = hasNoCurand
     endif
   else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build
+    # By default, assume that curand is installed if a CUDA build is requested
     override HASCURAND = hasCurand
   else # non-Nvidia GPU build
     override HASCURAND = hasNoCurand
diff --git a/epochX/cudacpp/gg_tt.mad/SubProcesses/cudacpp.mk b/epochX/cudacpp/gg_tt.mad/SubProcesses/cudacpp.mk
index 359f16c029..9cff5e1a60 100644
--- a/epochX/cudacpp/gg_tt.mad/SubProcesses/cudacpp.mk
+++ b/epochX/cudacpp/gg_tt.mad/SubProcesses/cudacpp.mk
@@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null))
 # Set HIP_HOME from the path to hipcc, if it exists
 override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null))
 
-# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists
-# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?)
-ifneq ($(CUDA_HOME),)
-  USE_NVTX ?=-DUSE_NVTX
-  CUDA_INC = -I$(CUDA_HOME)/include/
+# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965)
+ifeq ($(CUDA_HOME),)
+  # CUDA_HOME is empty (nvcc not found)
+  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/),)
+  # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist?
+  override CUDA_INC=
 else
+  CUDA_INC = -I$(CUDA_HOME)/include/
+endif
+###$(info CUDA_INC=$(CUDA_INC))
+
+# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965)
+ifeq ($(CUDA_INC),)
+  # $(CUDA_HOME)/include/ does not exist
   override USE_NVTX=
-  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),)
+  # $(CUDA_HOME)/include/ exists but NVTX headers do not exist?
+  override USE_NVTX=
+else
+  # $(CUDA_HOME)/include/nvtx3/nvToolsExt.h exists: use NVTX
+  # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed)
+  override USE_NVTX=-DUSE_NVTX
 endif
+###$(info USE_NVTX=$(USE_NVTX))
 
 # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024)
 # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP.
@@ -424,13 +440,18 @@ endif
 # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...)
 ifeq ($(HASCURAND),)
   ifeq ($(GPUCC),) # CPU-only build
-    ifneq ($(CUDA_HOME),)
+    ifeq ($(CUDA_INC),)
+      # $(CUDA_HOME)/include/ does not exist (see #965)
+      override HASCURAND = hasNoCurand
+    else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),)
+      # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965)
+      override HASCURAND = hasNoCurand
+    else
       # By default, assume that curand is installed if a CUDA installation exists
       override HASCURAND = hasCurand
-    else
-      override HASCURAND = hasNoCurand
     endif
   else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build
+    # By default, assume that curand is installed if a CUDA build is requested
     override HASCURAND = hasCurand
   else # non-Nvidia GPU build
     override HASCURAND = hasNoCurand
diff --git a/epochX/cudacpp/gg_tt.sa/SubProcesses/cudacpp.mk b/epochX/cudacpp/gg_tt.sa/SubProcesses/cudacpp.mk
index 359f16c029..9cff5e1a60 100644
--- a/epochX/cudacpp/gg_tt.sa/SubProcesses/cudacpp.mk
+++ b/epochX/cudacpp/gg_tt.sa/SubProcesses/cudacpp.mk
@@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null))
 # Set HIP_HOME from the path to hipcc, if it exists
 override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null))
 
-# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists
-# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?)
-ifneq ($(CUDA_HOME),)
-  USE_NVTX ?=-DUSE_NVTX
-  CUDA_INC = -I$(CUDA_HOME)/include/
+# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965)
+ifeq ($(CUDA_HOME),)
+  # CUDA_HOME is empty (nvcc not found)
+  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/),)
+  # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist?
+  override CUDA_INC=
 else
+  CUDA_INC = -I$(CUDA_HOME)/include/
+endif
+###$(info CUDA_INC=$(CUDA_INC))
+
+# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965)
+ifeq ($(CUDA_INC),)
+  # $(CUDA_HOME)/include/ does not exist
   override USE_NVTX=
-  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),)
+  # $(CUDA_HOME)/include/ exists but NVTX headers do not exist?
+  override USE_NVTX=
+else
+  # $(CUDA_HOME)/include/nvtx3/nvToolsExt.h exists: use NVTX
+  # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed)
+  override USE_NVTX=-DUSE_NVTX
 endif
+###$(info USE_NVTX=$(USE_NVTX))
 
 # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024)
 # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP.
@@ -424,13 +440,18 @@ endif
 # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...)
 ifeq ($(HASCURAND),)
   ifeq ($(GPUCC),) # CPU-only build
-    ifneq ($(CUDA_HOME),)
+    ifeq ($(CUDA_INC),)
+      # $(CUDA_HOME)/include/ does not exist (see #965)
+      override HASCURAND = hasNoCurand
+    else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),)
+      # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965)
+      override HASCURAND = hasNoCurand
+    else
       # By default, assume that curand is installed if a CUDA installation exists
       override HASCURAND = hasCurand
-    else
-      override HASCURAND = hasNoCurand
     endif
   else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build
+    # By default, assume that curand is installed if a CUDA build is requested
     override HASCURAND = hasCurand
   else # non-Nvidia GPU build
     override HASCURAND = hasNoCurand
diff --git a/epochX/cudacpp/gg_tt01g.mad/SubProcesses/cudacpp.mk b/epochX/cudacpp/gg_tt01g.mad/SubProcesses/cudacpp.mk
index 359f16c029..9cff5e1a60 100644
--- a/epochX/cudacpp/gg_tt01g.mad/SubProcesses/cudacpp.mk
+++ b/epochX/cudacpp/gg_tt01g.mad/SubProcesses/cudacpp.mk
@@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null))
 # Set HIP_HOME from the path to hipcc, if it exists
 override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null))
 
-# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists
-# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?)
-ifneq ($(CUDA_HOME),)
-  USE_NVTX ?=-DUSE_NVTX
-  CUDA_INC = -I$(CUDA_HOME)/include/
+# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965)
+ifeq ($(CUDA_HOME),)
+  # CUDA_HOME is empty (nvcc not found)
+  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/),)
+  # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist?
+  override CUDA_INC=
 else
+  CUDA_INC = -I$(CUDA_HOME)/include/
+endif
+###$(info CUDA_INC=$(CUDA_INC))
+
+# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965)
+ifeq ($(CUDA_INC),)
+  # $(CUDA_HOME)/include/ does not exist
   override USE_NVTX=
-  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),)
+  # $(CUDA_HOME)/include/ exists but NVTX headers do not exist?
+  override USE_NVTX=
+else
+  # $(CUDA_HOME)/include/nvtx3/nvToolsExt.h exists: use NVTX
+  # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed)
+  override USE_NVTX=-DUSE_NVTX
 endif
+###$(info USE_NVTX=$(USE_NVTX))
 
 # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024)
 # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP.
@@ -424,13 +440,18 @@ endif
 # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...)
 ifeq ($(HASCURAND),)
   ifeq ($(GPUCC),) # CPU-only build
-    ifneq ($(CUDA_HOME),)
+    ifeq ($(CUDA_INC),)
+      # $(CUDA_HOME)/include/ does not exist (see #965)
+      override HASCURAND = hasNoCurand
+    else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),)
+      # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965)
+      override HASCURAND = hasNoCurand
+    else
       # By default, assume that curand is installed if a CUDA installation exists
       override HASCURAND = hasCurand
-    else
-      override HASCURAND = hasNoCurand
     endif
   else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build
+    # By default, assume that curand is installed if a CUDA build is requested
     override HASCURAND = hasCurand
   else # non-Nvidia GPU build
     override HASCURAND = hasNoCurand
diff --git a/epochX/cudacpp/gg_ttg.mad/SubProcesses/cudacpp.mk b/epochX/cudacpp/gg_ttg.mad/SubProcesses/cudacpp.mk
index 359f16c029..9cff5e1a60 100644
--- a/epochX/cudacpp/gg_ttg.mad/SubProcesses/cudacpp.mk
+++ b/epochX/cudacpp/gg_ttg.mad/SubProcesses/cudacpp.mk
@@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null))
 # Set HIP_HOME from the path to hipcc, if it exists
 override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null))
 
-# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists
-# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?)
-ifneq ($(CUDA_HOME),)
-  USE_NVTX ?=-DUSE_NVTX
-  CUDA_INC = -I$(CUDA_HOME)/include/
+# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965)
+ifeq ($(CUDA_HOME),)
+  # CUDA_HOME is empty (nvcc not found)
+  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/),)
+  # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist?
+  override CUDA_INC=
 else
+  CUDA_INC = -I$(CUDA_HOME)/include/
+endif
+###$(info CUDA_INC=$(CUDA_INC))
+
+# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965)
+ifeq ($(CUDA_INC),)
+  # $(CUDA_HOME)/include/ does not exist
   override USE_NVTX=
-  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),)
+  # $(CUDA_HOME)/include/ exists but NVTX headers do not exist?
+  override USE_NVTX=
+else
+  # $(CUDA_HOME)/include/nvtx3/nvToolsExt.h exists: use NVTX
+  # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed)
+  override USE_NVTX=-DUSE_NVTX
 endif
+###$(info USE_NVTX=$(USE_NVTX))
 
 # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024)
 # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP.
@@ -424,13 +440,18 @@ endif
 # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...)
 ifeq ($(HASCURAND),)
   ifeq ($(GPUCC),) # CPU-only build
-    ifneq ($(CUDA_HOME),)
+    ifeq ($(CUDA_INC),)
+      # $(CUDA_HOME)/include/ does not exist (see #965)
+      override HASCURAND = hasNoCurand
+    else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),)
+      # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965)
+      override HASCURAND = hasNoCurand
+    else
       # By default, assume that curand is installed if a CUDA installation exists
       override HASCURAND = hasCurand
-    else
-      override HASCURAND = hasNoCurand
     endif
   else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build
+    # By default, assume that curand is installed if a CUDA build is requested
     override HASCURAND = hasCurand
   else # non-Nvidia GPU build
     override HASCURAND = hasNoCurand
diff --git a/epochX/cudacpp/gg_ttg.sa/SubProcesses/cudacpp.mk b/epochX/cudacpp/gg_ttg.sa/SubProcesses/cudacpp.mk
index 359f16c029..9cff5e1a60 100644
--- a/epochX/cudacpp/gg_ttg.sa/SubProcesses/cudacpp.mk
+++ b/epochX/cudacpp/gg_ttg.sa/SubProcesses/cudacpp.mk
@@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null))
 # Set HIP_HOME from the path to hipcc, if it exists
 override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null))
 
-# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists
-# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?)
-ifneq ($(CUDA_HOME),)
-  USE_NVTX ?=-DUSE_NVTX
-  CUDA_INC = -I$(CUDA_HOME)/include/
+# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965)
+ifeq ($(CUDA_HOME),)
+  # CUDA_HOME is empty (nvcc not found)
+  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/),)
+  # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist?
+  override CUDA_INC=
 else
+  CUDA_INC = -I$(CUDA_HOME)/include/
+endif
+###$(info CUDA_INC=$(CUDA_INC))
+
+# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965)
+ifeq ($(CUDA_INC),)
+  # $(CUDA_HOME)/include/ does not exist
   override USE_NVTX=
-  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),)
+  # $(CUDA_HOME)/include/ exists but NVTX headers do not exist?
+  override USE_NVTX=
+else
+  # $(CUDA_HOME)/include/nvtx3/nvToolsExt.h exists: use NVTX
+  # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed)
+  override USE_NVTX=-DUSE_NVTX
 endif
+###$(info USE_NVTX=$(USE_NVTX))
 
 # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024)
 # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP.
@@ -424,13 +440,18 @@ endif
 # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...)
 ifeq ($(HASCURAND),)
   ifeq ($(GPUCC),) # CPU-only build
-    ifneq ($(CUDA_HOME),)
+    ifeq ($(CUDA_INC),)
+      # $(CUDA_HOME)/include/ does not exist (see #965)
+      override HASCURAND = hasNoCurand
+    else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),)
+      # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965)
+      override HASCURAND = hasNoCurand
+    else
       # By default, assume that curand is installed if a CUDA installation exists
       override HASCURAND = hasCurand
-    else
-      override HASCURAND = hasNoCurand
     endif
   else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build
+    # By default, assume that curand is installed if a CUDA build is requested
     override HASCURAND = hasCurand
   else # non-Nvidia GPU build
     override HASCURAND = hasNoCurand
diff --git a/epochX/cudacpp/gg_ttgg.mad/SubProcesses/cudacpp.mk b/epochX/cudacpp/gg_ttgg.mad/SubProcesses/cudacpp.mk
index 359f16c029..9cff5e1a60 100644
--- a/epochX/cudacpp/gg_ttgg.mad/SubProcesses/cudacpp.mk
+++ b/epochX/cudacpp/gg_ttgg.mad/SubProcesses/cudacpp.mk
@@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null))
 # Set HIP_HOME from the path to hipcc, if it exists
 override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null))
 
-# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists
-# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?)
-ifneq ($(CUDA_HOME),)
-  USE_NVTX ?=-DUSE_NVTX
-  CUDA_INC = -I$(CUDA_HOME)/include/
+# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965)
+ifeq ($(CUDA_HOME),)
+  # CUDA_HOME is empty (nvcc not found)
+  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/),)
+  # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist?
+  override CUDA_INC=
 else
+  CUDA_INC = -I$(CUDA_HOME)/include/
+endif
+###$(info CUDA_INC=$(CUDA_INC))
+
+# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965)
+ifeq ($(CUDA_INC),)
+  # $(CUDA_HOME)/include/ does not exist
   override USE_NVTX=
-  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),)
+  # $(CUDA_HOME)/include/ exists but NVTX headers do not exist?
+  override USE_NVTX=
+else
+  # $(CUDA_HOME)/include/nvtx3/nvToolsExt.h exists: use NVTX
+  # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed)
+  override USE_NVTX=-DUSE_NVTX
 endif
+###$(info USE_NVTX=$(USE_NVTX))
 
 # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024)
 # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP.
@@ -424,13 +440,18 @@ endif
 # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...)
 ifeq ($(HASCURAND),)
   ifeq ($(GPUCC),) # CPU-only build
-    ifneq ($(CUDA_HOME),)
+    ifeq ($(CUDA_INC),)
+      # $(CUDA_HOME)/include/ does not exist (see #965)
+      override HASCURAND = hasNoCurand
+    else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),)
+      # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965)
+      override HASCURAND = hasNoCurand
+    else
       # By default, assume that curand is installed if a CUDA installation exists
       override HASCURAND = hasCurand
-    else
-      override HASCURAND = hasNoCurand
     endif
   else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build
+    # By default, assume that curand is installed if a CUDA build is requested
     override HASCURAND = hasCurand
   else # non-Nvidia GPU build
     override HASCURAND = hasNoCurand
diff --git a/epochX/cudacpp/gg_ttgg.sa/SubProcesses/cudacpp.mk b/epochX/cudacpp/gg_ttgg.sa/SubProcesses/cudacpp.mk
index 359f16c029..9cff5e1a60 100644
--- a/epochX/cudacpp/gg_ttgg.sa/SubProcesses/cudacpp.mk
+++ b/epochX/cudacpp/gg_ttgg.sa/SubProcesses/cudacpp.mk
@@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null))
 # Set HIP_HOME from the path to hipcc, if it exists
 override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null))
 
-# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists
-# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?)
-ifneq ($(CUDA_HOME),)
-  USE_NVTX ?=-DUSE_NVTX
-  CUDA_INC = -I$(CUDA_HOME)/include/
+# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965)
+ifeq ($(CUDA_HOME),)
+  # CUDA_HOME is empty (nvcc not found)
+  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/),)
+  # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist?
+  override CUDA_INC=
 else
+  CUDA_INC = -I$(CUDA_HOME)/include/
+endif
+###$(info CUDA_INC=$(CUDA_INC))
+
+# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965)
+ifeq ($(CUDA_INC),)
+  # $(CUDA_HOME)/include/ does not exist
   override USE_NVTX=
-  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),)
+  # $(CUDA_HOME)/include/ exists but NVTX headers do not exist?
+  override USE_NVTX=
+else
+  # $(CUDA_HOME)/include/nvtx3/nvToolsExt.h exists: use NVTX
+  # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed)
+  override USE_NVTX=-DUSE_NVTX
 endif
+###$(info USE_NVTX=$(USE_NVTX))
 
 # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024)
 # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP.
@@ -424,13 +440,18 @@ endif
 # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...)
 ifeq ($(HASCURAND),)
   ifeq ($(GPUCC),) # CPU-only build
-    ifneq ($(CUDA_HOME),)
+    ifeq ($(CUDA_INC),)
+      # $(CUDA_HOME)/include/ does not exist (see #965)
+      override HASCURAND = hasNoCurand
+    else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),)
+      # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965)
+      override HASCURAND = hasNoCurand
+    else
       # By default, assume that curand is installed if a CUDA installation exists
       override HASCURAND = hasCurand
-    else
-      override HASCURAND = hasNoCurand
     endif
   else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build
+    # By default, assume that curand is installed if a CUDA build is requested
     override HASCURAND = hasCurand
   else # non-Nvidia GPU build
     override HASCURAND = hasNoCurand
diff --git a/epochX/cudacpp/gg_ttggg.mad/SubProcesses/cudacpp.mk b/epochX/cudacpp/gg_ttggg.mad/SubProcesses/cudacpp.mk
index 359f16c029..9cff5e1a60 100644
--- a/epochX/cudacpp/gg_ttggg.mad/SubProcesses/cudacpp.mk
+++ b/epochX/cudacpp/gg_ttggg.mad/SubProcesses/cudacpp.mk
@@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null))
 # Set HIP_HOME from the path to hipcc, if it exists
 override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null))
 
-# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists
-# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?)
-ifneq ($(CUDA_HOME),)
-  USE_NVTX ?=-DUSE_NVTX
-  CUDA_INC = -I$(CUDA_HOME)/include/
+# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965)
+ifeq ($(CUDA_HOME),)
+  # CUDA_HOME is empty (nvcc not found)
+  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/),)
+  # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist?
+  override CUDA_INC=
 else
+  CUDA_INC = -I$(CUDA_HOME)/include/
+endif
+###$(info CUDA_INC=$(CUDA_INC))
+
+# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965)
+ifeq ($(CUDA_INC),)
+  # $(CUDA_HOME)/include/ does not exist
   override USE_NVTX=
-  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),)
+  # $(CUDA_HOME)/include/ exists but NVTX headers do not exist?
+  override USE_NVTX=
+else
+  # $(CUDA_HOME)/include/nvtx3/nvToolsExt.h exists: use NVTX
+  # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed)
+  override USE_NVTX=-DUSE_NVTX
 endif
+###$(info USE_NVTX=$(USE_NVTX))
 
 # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024)
 # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP.
@@ -424,13 +440,18 @@ endif
 # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...)
 ifeq ($(HASCURAND),)
   ifeq ($(GPUCC),) # CPU-only build
-    ifneq ($(CUDA_HOME),)
+    ifeq ($(CUDA_INC),)
+      # $(CUDA_HOME)/include/ does not exist (see #965)
+      override HASCURAND = hasNoCurand
+    else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),)
+      # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965)
+      override HASCURAND = hasNoCurand
+    else
       # By default, assume that curand is installed if a CUDA installation exists
       override HASCURAND = hasCurand
-    else
-      override HASCURAND = hasNoCurand
     endif
   else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build
+    # By default, assume that curand is installed if a CUDA build is requested
     override HASCURAND = hasCurand
   else # non-Nvidia GPU build
     override HASCURAND = hasNoCurand
diff --git a/epochX/cudacpp/gg_ttggg.sa/SubProcesses/cudacpp.mk b/epochX/cudacpp/gg_ttggg.sa/SubProcesses/cudacpp.mk
index 359f16c029..9cff5e1a60 100644
--- a/epochX/cudacpp/gg_ttggg.sa/SubProcesses/cudacpp.mk
+++ b/epochX/cudacpp/gg_ttggg.sa/SubProcesses/cudacpp.mk
@@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null))
 # Set HIP_HOME from the path to hipcc, if it exists
 override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null))
 
-# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists
-# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?)
-ifneq ($(CUDA_HOME),)
-  USE_NVTX ?=-DUSE_NVTX
-  CUDA_INC = -I$(CUDA_HOME)/include/
+# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965)
+ifeq ($(CUDA_HOME),)
+  # CUDA_HOME is empty (nvcc not found)
+  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/),)
+  # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist?
+  override CUDA_INC=
 else
+  CUDA_INC = -I$(CUDA_HOME)/include/
+endif
+###$(info CUDA_INC=$(CUDA_INC))
+
+# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965)
+ifeq ($(CUDA_INC),)
+  # $(CUDA_HOME)/include/ does not exist
   override USE_NVTX=
-  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),)
+  # $(CUDA_HOME)/include/ exists but NVTX headers do not exist?
+  override USE_NVTX=
+else
+  # $(CUDA_HOME)/include/nvtx3/nvToolsExt.h exists: use NVTX
+  # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed)
+  override USE_NVTX=-DUSE_NVTX
 endif
+###$(info USE_NVTX=$(USE_NVTX))
 
 # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024)
 # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP.
@@ -424,13 +440,18 @@ endif
 # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...)
 ifeq ($(HASCURAND),)
   ifeq ($(GPUCC),) # CPU-only build
-    ifneq ($(CUDA_HOME),)
+    ifeq ($(CUDA_INC),)
+      # $(CUDA_HOME)/include/ does not exist (see #965)
+      override HASCURAND = hasNoCurand
+    else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),)
+      # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965)
+      override HASCURAND = hasNoCurand
+    else
       # By default, assume that curand is installed if a CUDA installation exists
       override HASCURAND = hasCurand
-    else
-      override HASCURAND = hasNoCurand
     endif
   else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build
+    # By default, assume that curand is installed if a CUDA build is requested
     override HASCURAND = hasCurand
   else # non-Nvidia GPU build
     override HASCURAND = hasNoCurand
diff --git a/epochX/cudacpp/gq_ttq.mad/SubProcesses/cudacpp.mk b/epochX/cudacpp/gq_ttq.mad/SubProcesses/cudacpp.mk
index 359f16c029..9cff5e1a60 100644
--- a/epochX/cudacpp/gq_ttq.mad/SubProcesses/cudacpp.mk
+++ b/epochX/cudacpp/gq_ttq.mad/SubProcesses/cudacpp.mk
@@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null))
 # Set HIP_HOME from the path to hipcc, if it exists
 override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null))
 
-# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists
-# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?)
-ifneq ($(CUDA_HOME),)
-  USE_NVTX ?=-DUSE_NVTX
-  CUDA_INC = -I$(CUDA_HOME)/include/
+# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965)
+ifeq ($(CUDA_HOME),)
+  # CUDA_HOME is empty (nvcc not found)
+  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/),)
+  # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist?
+  override CUDA_INC=
 else
+  CUDA_INC = -I$(CUDA_HOME)/include/
+endif
+###$(info CUDA_INC=$(CUDA_INC))
+
+# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965)
+ifeq ($(CUDA_INC),)
+  # $(CUDA_HOME)/include/ does not exist
   override USE_NVTX=
-  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),)
+  # $(CUDA_HOME)/include/ exists but NVTX headers do not exist?
+  override USE_NVTX=
+else
+  # $(CUDA_HOME)/include/nvtx3/nvToolsExt.h exists: use NVTX
+  # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed)
+  override USE_NVTX=-DUSE_NVTX
 endif
+###$(info USE_NVTX=$(USE_NVTX))
 
 # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024)
 # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP.
@@ -424,13 +440,18 @@ endif
 # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...)
 ifeq ($(HASCURAND),)
   ifeq ($(GPUCC),) # CPU-only build
-    ifneq ($(CUDA_HOME),)
+    ifeq ($(CUDA_INC),)
+      # $(CUDA_HOME)/include/ does not exist (see #965)
+      override HASCURAND = hasNoCurand
+    else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),)
+      # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965)
+      override HASCURAND = hasNoCurand
+    else
       # By default, assume that curand is installed if a CUDA installation exists
       override HASCURAND = hasCurand
-    else
-      override HASCURAND = hasNoCurand
     endif
   else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build
+    # By default, assume that curand is installed if a CUDA build is requested
     override HASCURAND = hasCurand
   else # non-Nvidia GPU build
     override HASCURAND = hasNoCurand
diff --git a/epochX/cudacpp/gq_ttq.sa/SubProcesses/cudacpp.mk b/epochX/cudacpp/gq_ttq.sa/SubProcesses/cudacpp.mk
index 359f16c029..9cff5e1a60 100644
--- a/epochX/cudacpp/gq_ttq.sa/SubProcesses/cudacpp.mk
+++ b/epochX/cudacpp/gq_ttq.sa/SubProcesses/cudacpp.mk
@@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null))
 # Set HIP_HOME from the path to hipcc, if it exists
 override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null))
 
-# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists
-# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?)
-ifneq ($(CUDA_HOME),)
-  USE_NVTX ?=-DUSE_NVTX
-  CUDA_INC = -I$(CUDA_HOME)/include/
+# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965)
+ifeq ($(CUDA_HOME),)
+  # CUDA_HOME is empty (nvcc not found)
+  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/),)
+  # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist?
+  override CUDA_INC=
 else
+  CUDA_INC = -I$(CUDA_HOME)/include/
+endif
+###$(info CUDA_INC=$(CUDA_INC))
+
+# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965)
+ifeq ($(CUDA_INC),)
+  # $(CUDA_HOME)/include/ does not exist
   override USE_NVTX=
-  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),)
+  # $(CUDA_HOME)/include/ exists but NVTX headers do not exist?
+  override USE_NVTX=
+else
+  # $(CUDA_HOME)/include/nvtx3/nvToolsExt.h exists: use NVTX
+  # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed)
+  override USE_NVTX=-DUSE_NVTX
 endif
+###$(info USE_NVTX=$(USE_NVTX))
 
 # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024)
 # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP.
@@ -424,13 +440,18 @@ endif
 # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...)
 ifeq ($(HASCURAND),)
   ifeq ($(GPUCC),) # CPU-only build
-    ifneq ($(CUDA_HOME),)
+    ifeq ($(CUDA_INC),)
+      # $(CUDA_HOME)/include/ does not exist (see #965)
+      override HASCURAND = hasNoCurand
+    else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),)
+      # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965)
+      override HASCURAND = hasNoCurand
+    else
       # By default, assume that curand is installed if a CUDA installation exists
       override HASCURAND = hasCurand
-    else
-      override HASCURAND = hasNoCurand
     endif
   else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build
+    # By default, assume that curand is installed if a CUDA build is requested
     override HASCURAND = hasCurand
   else # non-Nvidia GPU build
     override HASCURAND = hasNoCurand
diff --git a/epochX/cudacpp/heft_gg_bb.mad/SubProcesses/cudacpp.mk b/epochX/cudacpp/heft_gg_bb.mad/SubProcesses/cudacpp.mk
index 359f16c029..9cff5e1a60 100644
--- a/epochX/cudacpp/heft_gg_bb.mad/SubProcesses/cudacpp.mk
+++ b/epochX/cudacpp/heft_gg_bb.mad/SubProcesses/cudacpp.mk
@@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null))
 # Set HIP_HOME from the path to hipcc, if it exists
 override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null))
 
-# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists
-# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?)
-ifneq ($(CUDA_HOME),)
-  USE_NVTX ?=-DUSE_NVTX
-  CUDA_INC = -I$(CUDA_HOME)/include/
+# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965)
+ifeq ($(CUDA_HOME),)
+  # CUDA_HOME is empty (nvcc not found)
+  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/),)
+  # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist?
+  override CUDA_INC=
 else
+  CUDA_INC = -I$(CUDA_HOME)/include/
+endif
+###$(info CUDA_INC=$(CUDA_INC))
+
+# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965)
+ifeq ($(CUDA_INC),)
+  # $(CUDA_HOME)/include/ does not exist
   override USE_NVTX=
-  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),)
+  # $(CUDA_HOME)/include/ exists but NVTX headers do not exist?
+  override USE_NVTX=
+else
+  # $(CUDA_HOME)/include/nvtx3/nvToolsExt.h exists: use NVTX
+  # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed)
+  override USE_NVTX=-DUSE_NVTX
 endif
+###$(info USE_NVTX=$(USE_NVTX))
 
 # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024)
 # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP.
@@ -424,13 +440,18 @@ endif
 # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...)
 ifeq ($(HASCURAND),)
   ifeq ($(GPUCC),) # CPU-only build
-    ifneq ($(CUDA_HOME),)
+    ifeq ($(CUDA_INC),)
+      # $(CUDA_HOME)/include/ does not exist (see #965)
+      override HASCURAND = hasNoCurand
+    else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),)
+      # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965)
+      override HASCURAND = hasNoCurand
+    else
       # By default, assume that curand is installed if a CUDA installation exists
       override HASCURAND = hasCurand
-    else
-      override HASCURAND = hasNoCurand
     endif
   else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build
+    # By default, assume that curand is installed if a CUDA build is requested
     override HASCURAND = hasCurand
   else # non-Nvidia GPU build
     override HASCURAND = hasNoCurand
diff --git a/epochX/cudacpp/heft_gg_bb.sa/SubProcesses/cudacpp.mk b/epochX/cudacpp/heft_gg_bb.sa/SubProcesses/cudacpp.mk
index 359f16c029..9cff5e1a60 100644
--- a/epochX/cudacpp/heft_gg_bb.sa/SubProcesses/cudacpp.mk
+++ b/epochX/cudacpp/heft_gg_bb.sa/SubProcesses/cudacpp.mk
@@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null))
 # Set HIP_HOME from the path to hipcc, if it exists
 override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null))
 
-# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists
-# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?)
-ifneq ($(CUDA_HOME),)
-  USE_NVTX ?=-DUSE_NVTX
-  CUDA_INC = -I$(CUDA_HOME)/include/
+# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965)
+ifeq ($(CUDA_HOME),)
+  # CUDA_HOME is empty (nvcc not found)
+  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/),)
+  # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist?
+  override CUDA_INC=
 else
+  CUDA_INC = -I$(CUDA_HOME)/include/
+endif
+###$(info CUDA_INC=$(CUDA_INC))
+
+# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965)
+ifeq ($(CUDA_INC),)
+  # $(CUDA_HOME)/include/ does not exist
   override USE_NVTX=
-  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),)
+  # $(CUDA_HOME)/include/ exists but NVTX headers do not exist?
+  override USE_NVTX=
+else
+  # $(CUDA_HOME)/include/nvtx3/nvToolsExt.h exists: use NVTX
+  # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed)
+  override USE_NVTX=-DUSE_NVTX
 endif
+###$(info USE_NVTX=$(USE_NVTX))
 
 # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024)
 # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP.
@@ -424,13 +440,18 @@ endif
 # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...)
 ifeq ($(HASCURAND),)
   ifeq ($(GPUCC),) # CPU-only build
-    ifneq ($(CUDA_HOME),)
+    ifeq ($(CUDA_INC),)
+      # $(CUDA_HOME)/include/ does not exist (see #965)
+      override HASCURAND = hasNoCurand
+    else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),)
+      # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965)
+      override HASCURAND = hasNoCurand
+    else
       # By default, assume that curand is installed if a CUDA installation exists
       override HASCURAND = hasCurand
-    else
-      override HASCURAND = hasNoCurand
     endif
   else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build
+    # By default, assume that curand is installed if a CUDA build is requested
     override HASCURAND = hasCurand
   else # non-Nvidia GPU build
     override HASCURAND = hasNoCurand
diff --git a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/cudacpp.mk b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/cudacpp.mk
index 359f16c029..9cff5e1a60 100644
--- a/epochX/cudacpp/pp_tt012j.mad/SubProcesses/cudacpp.mk
+++ b/epochX/cudacpp/pp_tt012j.mad/SubProcesses/cudacpp.mk
@@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null))
 # Set HIP_HOME from the path to hipcc, if it exists
 override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null))
 
-# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists
-# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?)
-ifneq ($(CUDA_HOME),)
-  USE_NVTX ?=-DUSE_NVTX
-  CUDA_INC = -I$(CUDA_HOME)/include/
+# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965)
+ifeq ($(CUDA_HOME),)
+  # CUDA_HOME is empty (nvcc not found)
+  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/),)
+  # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist?
+  override CUDA_INC=
 else
+  CUDA_INC = -I$(CUDA_HOME)/include/
+endif
+###$(info CUDA_INC=$(CUDA_INC))
+
+# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965)
+ifeq ($(CUDA_INC),)
+  # $(CUDA_HOME)/include/ does not exist
   override USE_NVTX=
-  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),)
+  # $(CUDA_HOME)/include/ exists but NVTX headers do not exist?
+  override USE_NVTX=
+else
+  # $(CUDA_HOME)/include/nvtx3/nvToolsExt.h exists: use NVTX
+  # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed)
+  override USE_NVTX=-DUSE_NVTX
 endif
+###$(info USE_NVTX=$(USE_NVTX))
 
 # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024)
 # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP.
@@ -424,13 +440,18 @@ endif
 # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...)
 ifeq ($(HASCURAND),)
   ifeq ($(GPUCC),) # CPU-only build
-    ifneq ($(CUDA_HOME),)
+    ifeq ($(CUDA_INC),)
+      # $(CUDA_HOME)/include/ does not exist (see #965)
+      override HASCURAND = hasNoCurand
+    else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),)
+      # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965)
+      override HASCURAND = hasNoCurand
+    else
       # By default, assume that curand is installed if a CUDA installation exists
       override HASCURAND = hasCurand
-    else
-      override HASCURAND = hasNoCurand
     endif
   else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build
+    # By default, assume that curand is installed if a CUDA build is requested
     override HASCURAND = hasCurand
   else # non-Nvidia GPU build
     override HASCURAND = hasNoCurand
diff --git a/epochX/cudacpp/smeft_gg_tttt.mad/SubProcesses/cudacpp.mk b/epochX/cudacpp/smeft_gg_tttt.mad/SubProcesses/cudacpp.mk
index 359f16c029..9cff5e1a60 100644
--- a/epochX/cudacpp/smeft_gg_tttt.mad/SubProcesses/cudacpp.mk
+++ b/epochX/cudacpp/smeft_gg_tttt.mad/SubProcesses/cudacpp.mk
@@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null))
 # Set HIP_HOME from the path to hipcc, if it exists
 override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null))
 
-# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists
-# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?)
-ifneq ($(CUDA_HOME),)
-  USE_NVTX ?=-DUSE_NVTX
-  CUDA_INC = -I$(CUDA_HOME)/include/
+# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965)
+ifeq ($(CUDA_HOME),)
+  # CUDA_HOME is empty (nvcc not found)
+  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/),)
+  # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist?
+  override CUDA_INC=
 else
+  CUDA_INC = -I$(CUDA_HOME)/include/
+endif
+###$(info CUDA_INC=$(CUDA_INC))
+
+# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965)
+ifeq ($(CUDA_INC),)
+  # $(CUDA_HOME)/include/ does not exist
   override USE_NVTX=
-  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),)
+  # $(CUDA_HOME)/include/ exists but NVTX headers do not exist?
+  override USE_NVTX=
+else
+  # $(CUDA_HOME)/include/nvtx3/nvToolsExt.h exists: use NVTX
+  # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed)
+  override USE_NVTX=-DUSE_NVTX
 endif
+###$(info USE_NVTX=$(USE_NVTX))
 
 # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024)
 # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP.
@@ -424,13 +440,18 @@ endif
 # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...)
 ifeq ($(HASCURAND),)
   ifeq ($(GPUCC),) # CPU-only build
-    ifneq ($(CUDA_HOME),)
+    ifeq ($(CUDA_INC),)
+      # $(CUDA_HOME)/include/ does not exist (see #965)
+      override HASCURAND = hasNoCurand
+    else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),)
+      # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965)
+      override HASCURAND = hasNoCurand
+    else
       # By default, assume that curand is installed if a CUDA installation exists
       override HASCURAND = hasCurand
-    else
-      override HASCURAND = hasNoCurand
     endif
   else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build
+    # By default, assume that curand is installed if a CUDA build is requested
     override HASCURAND = hasCurand
   else # non-Nvidia GPU build
     override HASCURAND = hasNoCurand
diff --git a/epochX/cudacpp/smeft_gg_tttt.sa/SubProcesses/cudacpp.mk b/epochX/cudacpp/smeft_gg_tttt.sa/SubProcesses/cudacpp.mk
index 359f16c029..9cff5e1a60 100644
--- a/epochX/cudacpp/smeft_gg_tttt.sa/SubProcesses/cudacpp.mk
+++ b/epochX/cudacpp/smeft_gg_tttt.sa/SubProcesses/cudacpp.mk
@@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null))
 # Set HIP_HOME from the path to hipcc, if it exists
 override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null))
 
-# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists
-# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?)
-ifneq ($(CUDA_HOME),)
-  USE_NVTX ?=-DUSE_NVTX
-  CUDA_INC = -I$(CUDA_HOME)/include/
+# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965)
+ifeq ($(CUDA_HOME),)
+  # CUDA_HOME is empty (nvcc not found)
+  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/),)
+  # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist?
+  override CUDA_INC=
 else
+  CUDA_INC = -I$(CUDA_HOME)/include/
+endif
+###$(info CUDA_INC=$(CUDA_INC))
+
+# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965)
+ifeq ($(CUDA_INC),)
+  # $(CUDA_HOME)/include/ does not exist
   override USE_NVTX=
-  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),)
+  # $(CUDA_HOME)/include/ exists but NVTX headers do not exist?
+  override USE_NVTX=
+else
+  # $(CUDA_HOME)/include/nvtx3/nvToolsExt.h exists: use NVTX
+  # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed)
+  override USE_NVTX=-DUSE_NVTX
 endif
+###$(info USE_NVTX=$(USE_NVTX))
 
 # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024)
 # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP.
@@ -424,13 +440,18 @@ endif
 # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...)
 ifeq ($(HASCURAND),)
   ifeq ($(GPUCC),) # CPU-only build
-    ifneq ($(CUDA_HOME),)
+    ifeq ($(CUDA_INC),)
+      # $(CUDA_HOME)/include/ does not exist (see #965)
+      override HASCURAND = hasNoCurand
+    else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),)
+      # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965)
+      override HASCURAND = hasNoCurand
+    else
       # By default, assume that curand is installed if a CUDA installation exists
       override HASCURAND = hasCurand
-    else
-      override HASCURAND = hasNoCurand
     endif
   else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build
+    # By default, assume that curand is installed if a CUDA build is requested
     override HASCURAND = hasCurand
   else # non-Nvidia GPU build
     override HASCURAND = hasNoCurand
diff --git a/epochX/cudacpp/susy_gg_t1t1.mad/SubProcesses/cudacpp.mk b/epochX/cudacpp/susy_gg_t1t1.mad/SubProcesses/cudacpp.mk
index 359f16c029..9cff5e1a60 100644
--- a/epochX/cudacpp/susy_gg_t1t1.mad/SubProcesses/cudacpp.mk
+++ b/epochX/cudacpp/susy_gg_t1t1.mad/SubProcesses/cudacpp.mk
@@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null))
 # Set HIP_HOME from the path to hipcc, if it exists
 override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null))
 
-# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists
-# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?)
-ifneq ($(CUDA_HOME),)
-  USE_NVTX ?=-DUSE_NVTX
-  CUDA_INC = -I$(CUDA_HOME)/include/
+# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965)
+ifeq ($(CUDA_HOME),)
+  # CUDA_HOME is empty (nvcc not found)
+  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/),)
+  # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist?
+  override CUDA_INC=
 else
+  CUDA_INC = -I$(CUDA_HOME)/include/
+endif
+###$(info CUDA_INC=$(CUDA_INC))
+
+# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965)
+ifeq ($(CUDA_INC),)
+  # $(CUDA_HOME)/include/ does not exist
   override USE_NVTX=
-  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),)
+  # $(CUDA_HOME)/include/ exists but NVTX headers do not exist?
+  override USE_NVTX=
+else
+  # $(CUDA_HOME)/include/nvtx3/nvToolsExt.h exists: use NVTX
+  # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed)
+  override USE_NVTX=-DUSE_NVTX
 endif
+###$(info USE_NVTX=$(USE_NVTX))
 
 # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024)
 # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP.
@@ -424,13 +440,18 @@ endif
 # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...)
 ifeq ($(HASCURAND),)
   ifeq ($(GPUCC),) # CPU-only build
-    ifneq ($(CUDA_HOME),)
+    ifeq ($(CUDA_INC),)
+      # $(CUDA_HOME)/include/ does not exist (see #965)
+      override HASCURAND = hasNoCurand
+    else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),)
+      # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965)
+      override HASCURAND = hasNoCurand
+    else
       # By default, assume that curand is installed if a CUDA installation exists
       override HASCURAND = hasCurand
-    else
-      override HASCURAND = hasNoCurand
     endif
   else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build
+    # By default, assume that curand is installed if a CUDA build is requested
     override HASCURAND = hasCurand
   else # non-Nvidia GPU build
     override HASCURAND = hasNoCurand
(see #965) + override HASCURAND = hasNoCurand + else # By default, assume that curand is installed if a CUDA installation exists override HASCURAND = hasCurand - else - override HASCURAND = hasNoCurand endif else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build + # By default, assume that curand is installed if a CUDA build is requested override HASCURAND = hasCurand else # non-Nvidia GPU build override HASCURAND = hasNoCurand diff --git a/epochX/cudacpp/susy_gg_t1t1.sa/SubProcesses/cudacpp.mk b/epochX/cudacpp/susy_gg_t1t1.sa/SubProcesses/cudacpp.mk index 359f16c029..9cff5e1a60 100644 --- a/epochX/cudacpp/susy_gg_t1t1.sa/SubProcesses/cudacpp.mk +++ b/epochX/cudacpp/susy_gg_t1t1.sa/SubProcesses/cudacpp.mk @@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null)) # Set HIP_HOME from the path to hipcc, if it exists override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null)) -# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists -# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?) -ifneq ($(CUDA_HOME),) - USE_NVTX ?=-DUSE_NVTX - CUDA_INC = -I$(CUDA_HOME)/include/ +# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965) +ifeq ($(CUDA_HOME),) + # CUDA_HOME is empty (nvcc not found) + override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/),) + # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist? + override CUDA_INC= else + CUDA_INC = -I$(CUDA_HOME)/include/ +endif +###$(info CUDA_INC=$(CUDA_INC)) + +# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965) +ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist override USE_NVTX= - override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),) + # $(CUDA_HOME)/include/ exists but NVTX headers do not exist? + override USE_NVTX= +else + # $(CUDA_HOME)/include/nvtx.h exists: use NVTX + # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed) + override USE_NVTX=-DUSE_NVTX endif +###$(info USE_NVTX=$(USE_NVTX)) # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024) # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP. @@ -424,13 +440,18 @@ endif # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...) ifeq ($(HASCURAND),) ifeq ($(GPUCC),) # CPU-only build - ifneq ($(CUDA_HOME),) + ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist (see #965) + override HASCURAND = hasNoCurand + else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),) + # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? 
(see #965) + override HASCURAND = hasNoCurand + else # By default, assume that curand is installed if a CUDA installation exists override HASCURAND = hasCurand - else - override HASCURAND = hasNoCurand endif else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build + # By default, assume that curand is installed if a CUDA build is requested override HASCURAND = hasCurand else # non-Nvidia GPU build override HASCURAND = hasNoCurand diff --git a/epochX/cudacpp/susy_gg_tt.mad/SubProcesses/cudacpp.mk b/epochX/cudacpp/susy_gg_tt.mad/SubProcesses/cudacpp.mk index 359f16c029..9cff5e1a60 100644 --- a/epochX/cudacpp/susy_gg_tt.mad/SubProcesses/cudacpp.mk +++ b/epochX/cudacpp/susy_gg_tt.mad/SubProcesses/cudacpp.mk @@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null)) # Set HIP_HOME from the path to hipcc, if it exists override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null)) -# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists -# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?) -ifneq ($(CUDA_HOME),) - USE_NVTX ?=-DUSE_NVTX - CUDA_INC = -I$(CUDA_HOME)/include/ +# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965) +ifeq ($(CUDA_HOME),) + # CUDA_HOME is empty (nvcc not found) + override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/),) + # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist? + override CUDA_INC= else + CUDA_INC = -I$(CUDA_HOME)/include/ +endif +###$(info CUDA_INC=$(CUDA_INC)) + +# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965) +ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist override USE_NVTX= - override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),) + # $(CUDA_HOME)/include/ exists but NVTX headers do not exist? + override USE_NVTX= +else + # $(CUDA_HOME)/include/nvtx.h exists: use NVTX + # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed) + override USE_NVTX=-DUSE_NVTX endif +###$(info USE_NVTX=$(USE_NVTX)) # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024) # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP. @@ -424,13 +440,18 @@ endif # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...) ifeq ($(HASCURAND),) ifeq ($(GPUCC),) # CPU-only build - ifneq ($(CUDA_HOME),) + ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist (see #965) + override HASCURAND = hasNoCurand + else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),) + # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? 
(see #965) + override HASCURAND = hasNoCurand + else # By default, assume that curand is installed if a CUDA installation exists override HASCURAND = hasCurand - else - override HASCURAND = hasNoCurand endif else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build + # By default, assume that curand is installed if a CUDA build is requested override HASCURAND = hasCurand else # non-Nvidia GPU build override HASCURAND = hasNoCurand diff --git a/epochX/cudacpp/susy_gg_tt.sa/SubProcesses/cudacpp.mk b/epochX/cudacpp/susy_gg_tt.sa/SubProcesses/cudacpp.mk index 359f16c029..9cff5e1a60 100644 --- a/epochX/cudacpp/susy_gg_tt.sa/SubProcesses/cudacpp.mk +++ b/epochX/cudacpp/susy_gg_tt.sa/SubProcesses/cudacpp.mk @@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null)) # Set HIP_HOME from the path to hipcc, if it exists override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null)) -# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists -# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?) -ifneq ($(CUDA_HOME),) - USE_NVTX ?=-DUSE_NVTX - CUDA_INC = -I$(CUDA_HOME)/include/ +# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965) +ifeq ($(CUDA_HOME),) + # CUDA_HOME is empty (nvcc not found) + override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/),) + # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist? + override CUDA_INC= else + CUDA_INC = -I$(CUDA_HOME)/include/ +endif +###$(info CUDA_INC=$(CUDA_INC)) + +# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965) +ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist override USE_NVTX= - override CUDA_INC= +else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),) + # $(CUDA_HOME)/include/ exists but NVTX headers do not exist? + override USE_NVTX= +else + # $(CUDA_HOME)/include/nvtx.h exists: use NVTX + # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed) + override USE_NVTX=-DUSE_NVTX endif +###$(info USE_NVTX=$(USE_NVTX)) # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024) # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP. @@ -424,13 +440,18 @@ endif # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...) ifeq ($(HASCURAND),) ifeq ($(GPUCC),) # CPU-only build - ifneq ($(CUDA_HOME),) + ifeq ($(CUDA_INC),) + # $(CUDA_HOME)/include/ does not exist (see #965) + override HASCURAND = hasNoCurand + else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),) + # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965) + override HASCURAND = hasNoCurand + else # By default, assume that curand is installed if a CUDA installation exists override HASCURAND = hasCurand - else - override HASCURAND = hasNoCurand endif else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build + # By default, assume that curand is installed if a CUDA build is requested override HASCURAND = hasCurand else # non-Nvidia GPU build override HASCURAND = hasNoCurand
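
The cudacpp.mk changes above all instantiate one pattern: derive CUDA_HOME from the nvcc found on the PATH, then gate each optional feature (CUDA_INC, NVTX, CURAND) on the existence of its specific directory or header via $(wildcard ...), instead of trusting CUDA_HOME alone. Below is a minimal standalone sketch of that pattern in GNU make; the file name detect.mk and the info target are hypothetical illustrations, not part of the patch.

# detect.mk - standalone sketch of the header-existence checks above (illustrative only).
# Usage: make -f detect.mk info

# Derive CUDA_HOME from the path to nvcc, if nvcc is on the PATH (empty otherwise)
override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null))

# Step 1: set CUDA_INC only if $(CUDA_HOME)/include/ actually exists
ifeq ($(CUDA_HOME),)
  override CUDA_INC=
else ifeq ($(wildcard $(CUDA_HOME)/include/),)
  override CUDA_INC=
else
  CUDA_INC = -I$(CUDA_HOME)/include/
endif

# Step 2: gate each feature on its own header too (curand.h shown here)
ifeq ($(CUDA_INC),)
  override HASCURAND = hasNoCurand
else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),)
  override HASCURAND = hasNoCurand
else
  override HASCURAND = hasCurand
endif

info: ; @echo CUDA_HOME=$(CUDA_HOME); echo CUDA_INC=$(CUDA_INC); echo HASCURAND=$(HASCURAND)

On a machine without nvcc all three variables come out empty or hasNoCurand; on a complete CUDA installation CUDA_INC and hasCurand are set; and on a broken installation (nvcc present but headers missing) the features are disabled individually rather than failing at compile time.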