Skip to content

Commit

Permalink
Merge branch 'grid' (including also upstream/master merge with hel ma…
Browse files Browse the repository at this point in the history
…dgraph5#960, mac madgraph5#974, nvcc madgraph5#966) into cmsdy

Fix conflict in tlau/fromgridpacks/parseGridpackLogs.sh
(use the current cmsdy version: git checkout b125b65 tlau/fromgridpacks/parseGridpackLogs.sh)
  • Loading branch information
valassi committed Aug 22, 2024
2 parents ea87504 + 289a973 commit 5223f79
Show file tree
Hide file tree
Showing 37 changed files with 1,436 additions and 282 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/c-cpp.yml
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ jobs:
CPU_MAC:
runs-on: macos-latest
env:
FC: gfortran-11
FC: gfortran-14 # see #971
strategy:
matrix:
folder: [ epochX/cudacpp/ee_mumu.mad/SubProcesses/P1_epem_mupmum, epochX/cudacpp/gg_ttgg.mad/SubProcesses/P1_gg_ttxgg ]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ def compile(self, *args, **opts):
misc.sprint('FPTYPE checked')
cudacpp_supported_backends = [ 'fortran', 'cuda', 'hip', 'cpp', 'cppnone', 'cppsse4', 'cppavx2', 'cpp512y', 'cpp512z', 'cppauto' ]
if args and args[0][0] == 'madevent' and hasattr(self, 'run_card'):
if self.run_card['cudacpp_bldall'] == 'True': # pre-build all backends #945
if self.run_card['cudacpp_bldall'] == True: # pre-build all backends #945
logger.info("Pre-building madevent in madevent_interface.py with ALL matrix elements")
args[0][0] = 'bldall'
misc.compile(nb_core=self.options['nb_core'], *args, **opts)
Expand Down Expand Up @@ -104,11 +104,10 @@ def default_setup(self):
fct_mod=(self.reset_makeopts,(),{}), # AV: I assume this forces a 'make cleanavx' if FPTYPE changes?
allowed=['m','d','f']
)
self.add_param('cudacpp_bldall', 'False',
self.add_param('cudacpp_bldall', False,
include=False, # AV: no need to add this parameter to run_card.inc
hidden=False, # AV: add cudacpp_bldall to runcard template and keep 'hidden='False'
fct_mod=(self.reset_makeopts,(),{}), # AV: I assume this will raise an exception if cudacpp_bldall changes?
allowed=['False','True']
)
self['vector_size'] = 16 # already setup in default class (just change value)
self['aloha_flag'] = '--fast-math'
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %%/bin/nvcc,%%,$(shell which nvcc 2>/dev/null))
# Set HIP_HOME from the path to hipcc, if it exists
override HIP_HOME = $(patsubst %%/bin/hipcc,%%,$(shell which hipcc 2>/dev/null))

# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists
# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?)
ifneq ($(CUDA_HOME),)
USE_NVTX ?=-DUSE_NVTX
CUDA_INC = -I$(CUDA_HOME)/include/
# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965)
ifeq ($(CUDA_HOME),)
# CUDA_HOME is empty (nvcc not found)
override CUDA_INC=
else ifeq ($(wildcard $(CUDA_HOME)/include/),)
# CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist?
override CUDA_INC=
else
CUDA_INC = -I$(CUDA_HOME)/include/
endif
###$(info CUDA_INC=$(CUDA_INC))

# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965)
ifeq ($(CUDA_INC),)
# $(CUDA_HOME)/include/ does not exist
override USE_NVTX=
override CUDA_INC=
else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),)
# $(CUDA_HOME)/include/ exists but NVTX headers do not exist?
override USE_NVTX=
else
# $(CUDA_HOME)/include/nvtx.h exists: use NVTX
# (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed)
override USE_NVTX=-DUSE_NVTX
endif
###$(info USE_NVTX=$(USE_NVTX))

# NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024)
# - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP.
Expand Down Expand Up @@ -424,13 +440,18 @@ endif
# (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...)
ifeq ($(HASCURAND),)
ifeq ($(GPUCC),) # CPU-only build
ifneq ($(CUDA_HOME),)
ifeq ($(CUDA_INC),)
# $(CUDA_HOME)/include/ does not exist (see #965)
override HASCURAND = hasNoCurand
else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),)
# $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965)
override HASCURAND = hasNoCurand
else
# By default, assume that curand is installed if a CUDA installation exists
override HASCURAND = hasCurand
else
override HASCURAND = hasNoCurand
endif
else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build
# By default, assume that curand is installed if a CUDA build is requested
override HASCURAND = hasCurand
else # non-Nvidia GPU build
override HASCURAND = hasNoCurand
Expand Down
39 changes: 30 additions & 9 deletions epochX/cudacpp/ee_mumu.mad/SubProcesses/cudacpp.mk
Original file line number Diff line number Diff line change
Expand Up @@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null))
# Set HIP_HOME from the path to hipcc, if it exists
override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null))

# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists
# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?)
ifneq ($(CUDA_HOME),)
USE_NVTX ?=-DUSE_NVTX
CUDA_INC = -I$(CUDA_HOME)/include/
# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965)
ifeq ($(CUDA_HOME),)
# CUDA_HOME is empty (nvcc not found)
override CUDA_INC=
else ifeq ($(wildcard $(CUDA_HOME)/include/),)
# CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist?
override CUDA_INC=
else
CUDA_INC = -I$(CUDA_HOME)/include/
endif
###$(info CUDA_INC=$(CUDA_INC))

# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965)
ifeq ($(CUDA_INC),)
# $(CUDA_HOME)/include/ does not exist
override USE_NVTX=
override CUDA_INC=
else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),)
# $(CUDA_HOME)/include/ exists but NVTX headers do not exist?
override USE_NVTX=
else
# $(CUDA_HOME)/include/nvtx.h exists: use NVTX
# (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed)
override USE_NVTX=-DUSE_NVTX
endif
###$(info USE_NVTX=$(USE_NVTX))

# NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024)
# - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP.
Expand Down Expand Up @@ -424,13 +440,18 @@ endif
# (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...)
ifeq ($(HASCURAND),)
ifeq ($(GPUCC),) # CPU-only build
ifneq ($(CUDA_HOME),)
ifeq ($(CUDA_INC),)
# $(CUDA_HOME)/include/ does not exist (see #965)
override HASCURAND = hasNoCurand
else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),)
# $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965)
override HASCURAND = hasNoCurand
else
# By default, assume that curand is installed if a CUDA installation exists
override HASCURAND = hasCurand
else
override HASCURAND = hasNoCurand
endif
else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build
# By default, assume that curand is installed if a CUDA build is requested
override HASCURAND = hasCurand
else # non-Nvidia GPU build
override HASCURAND = hasNoCurand
Expand Down
39 changes: 30 additions & 9 deletions epochX/cudacpp/ee_mumu.sa/SubProcesses/cudacpp.mk
Original file line number Diff line number Diff line change
Expand Up @@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null))
# Set HIP_HOME from the path to hipcc, if it exists
override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null))

# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists
# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?)
ifneq ($(CUDA_HOME),)
USE_NVTX ?=-DUSE_NVTX
CUDA_INC = -I$(CUDA_HOME)/include/
# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965)
ifeq ($(CUDA_HOME),)
# CUDA_HOME is empty (nvcc not found)
override CUDA_INC=
else ifeq ($(wildcard $(CUDA_HOME)/include/),)
# CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist?
override CUDA_INC=
else
CUDA_INC = -I$(CUDA_HOME)/include/
endif
###$(info CUDA_INC=$(CUDA_INC))

# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965)
ifeq ($(CUDA_INC),)
# $(CUDA_HOME)/include/ does not exist
override USE_NVTX=
override CUDA_INC=
else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),)
# $(CUDA_HOME)/include/ exists but NVTX headers do not exist?
override USE_NVTX=
else
# $(CUDA_HOME)/include/nvtx.h exists: use NVTX
# (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed)
override USE_NVTX=-DUSE_NVTX
endif
###$(info USE_NVTX=$(USE_NVTX))

# NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024)
# - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP.
Expand Down Expand Up @@ -424,13 +440,18 @@ endif
# (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...)
ifeq ($(HASCURAND),)
ifeq ($(GPUCC),) # CPU-only build
ifneq ($(CUDA_HOME),)
ifeq ($(CUDA_INC),)
# $(CUDA_HOME)/include/ does not exist (see #965)
override HASCURAND = hasNoCurand
else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),)
# $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965)
override HASCURAND = hasNoCurand
else
# By default, assume that curand is installed if a CUDA installation exists
override HASCURAND = hasCurand
else
override HASCURAND = hasNoCurand
endif
else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build
# By default, assume that curand is installed if a CUDA build is requested
override HASCURAND = hasCurand
else # non-Nvidia GPU build
override HASCURAND = hasNoCurand
Expand Down
39 changes: 30 additions & 9 deletions epochX/cudacpp/gg_tt.mad/SubProcesses/cudacpp.mk
Original file line number Diff line number Diff line change
Expand Up @@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null))
# Set HIP_HOME from the path to hipcc, if it exists
override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null))

# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists
# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?)
ifneq ($(CUDA_HOME),)
USE_NVTX ?=-DUSE_NVTX
CUDA_INC = -I$(CUDA_HOME)/include/
# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965)
ifeq ($(CUDA_HOME),)
# CUDA_HOME is empty (nvcc not found)
override CUDA_INC=
else ifeq ($(wildcard $(CUDA_HOME)/include/),)
# CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist?
override CUDA_INC=
else
CUDA_INC = -I$(CUDA_HOME)/include/
endif
###$(info CUDA_INC=$(CUDA_INC))

# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965)
ifeq ($(CUDA_INC),)
# $(CUDA_HOME)/include/ does not exist
override USE_NVTX=
override CUDA_INC=
else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),)
# $(CUDA_HOME)/include/ exists but NVTX headers do not exist?
override USE_NVTX=
else
# $(CUDA_HOME)/include/nvtx.h exists: use NVTX
# (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed)
override USE_NVTX=-DUSE_NVTX
endif
###$(info USE_NVTX=$(USE_NVTX))

# NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024)
# - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP.
Expand Down Expand Up @@ -424,13 +440,18 @@ endif
# (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...)
ifeq ($(HASCURAND),)
ifeq ($(GPUCC),) # CPU-only build
ifneq ($(CUDA_HOME),)
ifeq ($(CUDA_INC),)
# $(CUDA_HOME)/include/ does not exist (see #965)
override HASCURAND = hasNoCurand
else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),)
# $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965)
override HASCURAND = hasNoCurand
else
# By default, assume that curand is installed if a CUDA installation exists
override HASCURAND = hasCurand
else
override HASCURAND = hasNoCurand
endif
else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build
# By default, assume that curand is installed if a CUDA build is requested
override HASCURAND = hasCurand
else # non-Nvidia GPU build
override HASCURAND = hasNoCurand
Expand Down
5 changes: 2 additions & 3 deletions epochX/cudacpp/gg_tt.mad/bin/internal/launch_plugin.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ def compile(self, *args, **opts):
misc.sprint('FPTYPE checked')
cudacpp_supported_backends = [ 'fortran', 'cuda', 'hip', 'cpp', 'cppnone', 'cppsse4', 'cppavx2', 'cpp512y', 'cpp512z', 'cppauto' ]
if args and args[0][0] == 'madevent' and hasattr(self, 'run_card'):
if self.run_card['cudacpp_bldall'] == 'True': # pre-build all backends #945
if self.run_card['cudacpp_bldall'] == True: # pre-build all backends #945
logger.info("Pre-building madevent in madevent_interface.py with ALL matrix elements")
args[0][0] = 'bldall'
misc.compile(nb_core=self.options['nb_core'], *args, **opts)
Expand Down Expand Up @@ -104,11 +104,10 @@ def default_setup(self):
fct_mod=(self.reset_makeopts,(),{}), # AV: I assume this forces a 'make cleanavx' if FPTYPE changes?
allowed=['m','d','f']
)
self.add_param('cudacpp_bldall', 'False',
self.add_param('cudacpp_bldall', False,
include=False, # AV: no need to add this parameter to run_card.inc
hidden=False, # AV: add cudacpp_bldall to runcard template and keep 'hidden='False'
fct_mod=(self.reset_makeopts,(),{}), # AV: I assume this will raise an exception if cudacpp_bldall changes?
allowed=['False','True']
)
self['vector_size'] = 16 # already setup in default class (just change value)
self['aloha_flag'] = '--fast-math'
Expand Down
39 changes: 30 additions & 9 deletions epochX/cudacpp/gg_tt.sa/SubProcesses/cudacpp.mk
Original file line number Diff line number Diff line change
Expand Up @@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %/bin/nvcc,%,$(shell which nvcc 2>/dev/null))
# Set HIP_HOME from the path to hipcc, if it exists
override HIP_HOME = $(patsubst %/bin/hipcc,%,$(shell which hipcc 2>/dev/null))

# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists
# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?)
ifneq ($(CUDA_HOME),)
USE_NVTX ?=-DUSE_NVTX
CUDA_INC = -I$(CUDA_HOME)/include/
# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965)
ifeq ($(CUDA_HOME),)
# CUDA_HOME is empty (nvcc not found)
override CUDA_INC=
else ifeq ($(wildcard $(CUDA_HOME)/include/),)
# CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist?
override CUDA_INC=
else
CUDA_INC = -I$(CUDA_HOME)/include/
endif
###$(info CUDA_INC=$(CUDA_INC))

# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965)
ifeq ($(CUDA_INC),)
# $(CUDA_HOME)/include/ does not exist
override USE_NVTX=
override CUDA_INC=
else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),)
# $(CUDA_HOME)/include/ exists but NVTX headers do not exist?
override USE_NVTX=
else
# $(CUDA_HOME)/include/nvtx.h exists: use NVTX
# (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed)
override USE_NVTX=-DUSE_NVTX
endif
###$(info USE_NVTX=$(USE_NVTX))

# NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024)
# - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP.
Expand Down Expand Up @@ -424,13 +440,18 @@ endif
# (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...)
ifeq ($(HASCURAND),)
ifeq ($(GPUCC),) # CPU-only build
ifneq ($(CUDA_HOME),)
ifeq ($(CUDA_INC),)
# $(CUDA_HOME)/include/ does not exist (see #965)
override HASCURAND = hasNoCurand
else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),)
# $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965)
override HASCURAND = hasNoCurand
else
# By default, assume that curand is installed if a CUDA installation exists
override HASCURAND = hasCurand
else
override HASCURAND = hasNoCurand
endif
else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build
# By default, assume that curand is installed if a CUDA build is requested
override HASCURAND = hasCurand
else # non-Nvidia GPU build
override HASCURAND = hasNoCurand
Expand Down
Loading

0 comments on commit 5223f79

Please sign in to comment.