Commit 2cc82f5

Merge remote-tracking branch 'upstream/master' (with hel madgraph5#960, mac madgraph5#974, nvcc madgraph5#966) into june24
Fix conflicts:
- epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.P1
- epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/counters.cc
- epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/fbridge.cc
- epochX/cudacpp/gg_tt.mad/SubProcesses/P1_gg_ttx/auto_dsig1.f
- epochX/cudacpp/gg_tt.mad/SubProcesses/counters.cc
- epochX/cudacpp/gg_tt.mad/SubProcesses/fbridge.cc

NB: here I essentially fixed gg_tt.mad, not CODEGEN, which will need to be adjusted a posteriori with a backport. In particular:
- Note1: patch.P1 is now taken from june24, but will need to be recomputed:
  git checkout HEAD CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/MG5aMC_patches/PROD/patch.P1
- Note2: I need to manually port some upstream/master changes in auto_dsig1.f to smatrix_multi.f, which did not yet exist
2 parents e552190 + 3f69b26 commit 2cc82f5

File tree: 17 files changed, +159 -103 lines changed

epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/Bridge.h

Lines changed: 1 addition & 1 deletion
@@ -109,9 +109,9 @@ namespace mg5amcCpu
    * @param rndcol the pointer to the input random numbers for color selection
    * @param channelIds the Feynman diagram to enhance in multi-channel mode if 1 to n
    * @param mes the pointer to the output matrix elements
-   * @param goodHelOnly quit after computing good helicities?
    * @param selhel the pointer to the output selected helicities
    * @param selcol the pointer to the output selected colors
+   * @param goodHelOnly quit after computing good helicities?
    */
   void gpu_sequence( const FORTRANFPTYPE* momenta,
                      const FORTRANFPTYPE* gs,

epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/counters.cc

Lines changed: 20 additions & 25 deletions
@@ -21,26 +21,24 @@ extern "C"
 {
   // Now: fortran=-1, cudacpp=0
   // Eventually: fortran=-1, cuda=0, cpp/none=1, cpp/sse4=2, etc...
-  constexpr unsigned int nimplC = 2;
+  constexpr unsigned int nimplC = 3;
   constexpr unsigned int iimplF2C( int iimplF ) { return iimplF + 1; }
   const char* iimplC2TXT( int iimplC )
   {
     const int iimplF = iimplC - 1;
     switch( iimplF )
     {
-      case -1: return "Fortran"; break;
-      case +0: return "CudaCpp"; break;
+      case -1: return "Fortran MEs"; break;
+      case +0: return "CudaCpp MEs"; break;
+      case +1: return "CudaCpp HEL"; break;
       default: assert( false ); break;
     }
   }

   static mgOnGpu::Timer<TIMERTYPE> program_timer;
   static float program_totaltime = 0;
-  static mgOnGpu::Timer<TIMERTYPE> smatrix1_timer;
-  static float smatrix1_totaltime = 0;
   static mgOnGpu::Timer<TIMERTYPE> smatrix1multi_timer[nimplC];
   static float smatrix1multi_totaltime[nimplC] = { 0 };
-  static int smatrix1_counter = 0;
   static int smatrix1multi_counter[nimplC] = { 0 };

   void counters_initialise_()
@@ -49,19 +47,6 @@ extern "C"
     return;
   }

-  void counters_smatrix1_start_()
-  {
-    smatrix1_counter++;
-    smatrix1_timer.Start();
-    return;
-  }
-
-  void counters_smatrix1_stop_()
-  {
-    smatrix1_totaltime += smatrix1_timer.GetDuration();
-    return;
-  }
-
   void counters_smatrix1multi_start_( const int* iimplF, const int* pnevt )
   {
     const unsigned int iimplC = iimplF2C( *iimplF );
@@ -86,13 +71,23 @@ extern "C"
     printf( " [COUNTERS] PROGRAM TOTAL : %9.4fs\n", program_totaltime );
     printf( " [COUNTERS] Fortran Overhead ( 0 ) : %9.4fs\n", overhead_totaltime );
     for( unsigned int iimplC = 0; iimplC < nimplC; iimplC++ )
+    {
       if( smatrix1multi_counter[iimplC] > 0 )
-        printf( " [COUNTERS] %7s MEs ( %1d ) : %9.4fs for %8d events => throughput is %8.2E events/s\n",
-                iimplC2TXT( iimplC ),
-                iimplC + 1,
-                smatrix1multi_totaltime[iimplC],
-                smatrix1multi_counter[iimplC],
-                smatrix1multi_counter[iimplC] / ( smatrix1multi_totaltime[iimplC] ) );
+      {
+        if( iimplC < nimplC - 1 ) // MEs
+          printf( " [COUNTERS] %11s ( %1d ) : %9.4fs for %8d events => throughput is %8.2E events/s\n",
+                  iimplC2TXT( iimplC ),
+                  iimplC + 1,
+                  smatrix1multi_totaltime[iimplC],
+                  smatrix1multi_counter[iimplC],
+                  smatrix1multi_counter[iimplC] / smatrix1multi_totaltime[iimplC] );
+        else
+          printf( " [COUNTERS] %11s ( %1d ) : %9.4fs\n",
+                  iimplC2TXT( iimplC ),
+                  iimplC + 1,
+                  smatrix1multi_totaltime[iimplC] );
+      }
+    }
     return;
   }
 }
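
For reference, a minimal self-contained C++ sketch (not part of this commit) of the new counter bookkeeping above: the Fortran codes -1/0/1 are mapped by iimplF2C to the three timer slots, and the last slot ("CudaCpp HEL") is reported without a throughput figure. The timings and event counts below are dummy values, for illustration only.

  #include <cassert>
  #include <cstdio>

  constexpr unsigned int nimplC = 3;
  constexpr unsigned int iimplF2C( int iimplF ) { return iimplF + 1; }
  const char* iimplC2TXT( int iimplC )
  {
    const int iimplF = iimplC - 1;
    switch( iimplF )
    {
      case -1: return "Fortran MEs";
      case +0: return "CudaCpp MEs";
      case +1: return "CudaCpp HEL";
      default: assert( false );
    }
    return ""; // unreachable (silences compiler warnings)
  }

  int main()
  {
    assert( iimplF2C( -1 ) == 0 && iimplF2C( 0 ) == 1 && iimplF2C( 1 ) == 2 ); // Fortran codes -> counter slots
    const float totaltime[nimplC] = { 1.5f, 0.3f, 0.1f }; // dummy timings in seconds (real values come from mgOnGpu::Timer)
    const int counter[nimplC] = { 8192, 8192, 8192 };     // dummy event counts
    for( unsigned int iimplC = 0; iimplC < nimplC; iimplC++ )
    {
      if( counter[iimplC] > 0 )
      {
        if( iimplC < nimplC - 1 ) // ME slots: report time and throughput
          printf( " [COUNTERS] %11s ( %1d ) : %9.4fs for %8d events => throughput is %8.2E events/s\n",
                  iimplC2TXT( iimplC ), iimplC + 1, totaltime[iimplC], counter[iimplC],
                  counter[iimplC] / totaltime[iimplC] );
        else // HEL slot: report time only
          printf( " [COUNTERS] %11s ( %1d ) : %9.4fs\n",
                  iimplC2TXT( iimplC ), iimplC + 1, totaltime[iimplC] );
      }
    }
    return 0;
  }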

epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/cudacpp.mk

Lines changed: 30 additions & 9 deletions
@@ -116,15 +116,31 @@ override CUDA_HOME = $(patsubst %%/bin/nvcc,%%,$(shell which nvcc 2>/dev/null))
 # Set HIP_HOME from the path to hipcc, if it exists
 override HIP_HOME = $(patsubst %%/bin/hipcc,%%,$(shell which hipcc 2>/dev/null))

-# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists
-# (FIXME? Is there any equivalent of NVTX FOR HIP? What should be configured if both CUDA and HIP are installed?)
-ifneq ($(CUDA_HOME),)
-  USE_NVTX ?=-DUSE_NVTX
-  CUDA_INC = -I$(CUDA_HOME)/include/
+# Configure CUDA_INC (for CURAND and NVTX) and NVTX if a CUDA installation exists (see #965)
+ifeq ($(CUDA_HOME),)
+  # CUDA_HOME is empty (nvcc not found)
+  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/),)
+  # CUDA_HOME is defined (nvcc was found) but $(CUDA_HOME)/include/ does not exist?
+  override CUDA_INC=
 else
+  CUDA_INC = -I$(CUDA_HOME)/include/
+endif
+###$(info CUDA_INC=$(CUDA_INC))
+
+# Configure NVTX if a CUDA include directory exists and NVTX headers exist (see #965)
+ifeq ($(CUDA_INC),)
+  # $(CUDA_HOME)/include/ does not exist
   override USE_NVTX=
-  override CUDA_INC=
+else ifeq ($(wildcard $(CUDA_HOME)/include/nvtx3/nvToolsExt.h),)
+  # $(CUDA_HOME)/include/ exists but NVTX headers do not exist?
+  override USE_NVTX=
+else
+  # $(CUDA_HOME)/include/nvtx.h exists: use NVTX
+  # (NB: the option to disable NVTX if 'USE_NVTX=' is defined has been removed)
+  override USE_NVTX=-DUSE_NVTX
 endif
+###$(info USE_NVTX=$(USE_NVTX))

 # NB: NEW LOGIC FOR ENABLING AND DISABLING CUDA OR HIP BUILDS (AV Feb-Mar 2024)
 # - In the old implementation, by default the C++ targets for one specific AVX were always built together with either CUDA or HIP.
@@ -425,13 +441,18 @@ endif
 # (NB: allow HASCURAND=hasCurand even if $(GPUCC) does not point to nvcc: assume CUDA_HOME was defined correctly...)
 ifeq ($(HASCURAND),)
   ifeq ($(GPUCC),) # CPU-only build
-    ifneq ($(CUDA_HOME),)
+    ifeq ($(CUDA_INC),)
+      # $(CUDA_HOME)/include/ does not exist (see #965)
+      override HASCURAND = hasNoCurand
+    else ifeq ($(wildcard $(CUDA_HOME)/include/curand.h),)
+      # $(CUDA_HOME)/include/ exists but CURAND headers do not exist? (see #965)
+      override HASCURAND = hasNoCurand
+    else
       # By default, assume that curand is installed if a CUDA installation exists
       override HASCURAND = hasCurand
-    else
-      override HASCURAND = hasNoCurand
     endif
   else ifeq ($(findstring nvcc,$(GPUCC)),nvcc) # Nvidia GPU build
+    # By default, assume that curand is installed if a CUDA build is requested
     override HASCURAND = hasCurand
   else # non-Nvidia GPU build
     override HASCURAND = hasNoCurand

epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/fbridge.cc

Lines changed: 11 additions & 5 deletions
@@ -83,6 +83,7 @@ extern "C"
    * @param mes the pointer to the output matrix elements
    * @param selhel the pointer to the output selected helicities
    * @param selcol the pointer to the output selected colors
+   * @param goodHelOnly quit after computing good helicities?
    */
   void fbridgesequence_( CppObjectInFortran** ppbridge,
                          const FORTRANFPTYPE* momenta,
@@ -92,18 +93,20 @@ extern "C"
                          const unsigned int* channelIds,
                          FORTRANFPTYPE* mes,
                          int* selhel,
-                         int* selcol )
+                         int* selcol,
+                         const bool* pgoodHelOnly )
   {
     Bridge<FORTRANFPTYPE>* pbridge = dynamic_cast<Bridge<FORTRANFPTYPE>*>( *ppbridge );
+    //printf("fbridgesequence_ goodHelOnly=%d\n", ( *pgoodHelOnly ? 1 : 0 ) );
     if( pbridge == 0 ) throw std::runtime_error( "fbridgesequence_: invalid Bridge address" );
 #ifdef MGONGPUCPP_GPUIMPL
     // Use the device/GPU implementation in the CUDA library
     // (there is also a host implementation in this library)
-    pbridge->gpu_sequence( momenta, gs, rndhel, rndcol, channelIds, mes, selhel, selcol );
+    pbridge->gpu_sequence( momenta, gs, rndhel, rndcol, channelIds, mes, selhel, selcol, *pgoodHelOnly );
 #else
     // Use the host/CPU implementation in the C++ library
     // (there is no device implementation in this library)
-    pbridge->cpu_sequence( momenta, gs, rndhel, rndcol, channelIds, mes, selhel, selcol );
+    pbridge->cpu_sequence( momenta, gs, rndhel, rndcol, channelIds, mes, selhel, selcol, *pgoodHelOnly );
 #endif
   }

@@ -119,6 +122,7 @@ extern "C"
    * @param mes the pointer to the output matrix elements
    * @param selhel the pointer to the output selected helicities
    * @param selcol the pointer to the output selected colors
+   * @param goodHelOnly quit after computing good helicities?
    */
   void fbridgesequence_nomultichannel_( CppObjectInFortran** ppbridge,
                                         const FORTRANFPTYPE* momenta,
@@ -127,9 +131,11 @@ extern "C"
                                         const FORTRANFPTYPE* rndcol,
                                         FORTRANFPTYPE* mes,
                                         int* selhel,
-                                        int* selcol )
+                                        int* selcol,
+                                        const bool* pgoodHelOnly )
   {
-    fbridgesequence_( ppbridge, momenta, gs, rndhel, rndcol, nullptr, mes, selhel, selcol );
+    //printf("fbridgesequence_nomultichannel_ goodHelOnly=%d\n", ( *pgoodHelOnly ? 1 : 0 ) );
+    fbridgesequence_( ppbridge, momenta, gs, rndhel, rndcol, nullptr, mes, selhel, selcol, pgoodHelOnly );
   }

   /**
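
Side note (a sketch, not part of this commit): the new HELONLY argument arrives in C++ as const bool* pgoodHelOnly because the Fortran caller passes the LOGICAL flag by reference, which is why the wrappers above dereference it before forwarding the value to the Bridge. A minimal illustration of that calling convention, using a hypothetical mock_sequence_ standing in for fbridgesequence_:

  #include <cstdio>

  // Hypothetical wrapper, for illustration only (stands in for fbridgesequence_ above)
  extern "C" void mock_sequence_( const bool* pgoodHelOnly )
  {
    const bool goodHelOnly = *pgoodHelOnly; // dereference the by-reference Fortran-style argument
    printf( "mock_sequence_ goodHelOnly=%d\n", ( goodHelOnly ? 1 : 0 ) );
  }

  int main()
  {
    const bool helOnly = true; // a Fortran caller would pass .TRUE. or .FALSE. by reference
    mock_sequence_( &helOnly );
    return 0;
  }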

epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/fbridge.inc

Lines changed: 6 additions & 2 deletions
@@ -40,10 +40,11 @@ C - CHANID: the input array of channels (Feynman diagrams) to enhance
 C - MES: the output matrix element Fortran array
 C - SELHEL: the output selected helicity Fortran array
 C - SELCOL: the output selected color Fortran array
+C - HELONLY: input flag, quit after computing good helicities?
 C
       INTERFACE
          SUBROUTINE FBRIDGESEQUENCE(PBRIDGE, MOMENTA, GS,
-     &     RNDHEL, RNDCOL, CHANID, MES, SELHEL, SELCOL)
+     &     RNDHEL, RNDCOL, CHANID, MES, SELHEL, SELCOL, HELONLY)
          INTEGER*8 PBRIDGE
          DOUBLE PRECISION MOMENTA(*)
          DOUBLE PRECISION GS(*)
@@ -53,6 +54,7 @@ C
          DOUBLE PRECISION MES(*)
          INTEGER*4 SELHEL(*)
          INTEGER*4 SELCOL(*)
+         LOGICAL HELONLY
          END SUBROUTINE FBRIDGESEQUENCE
       END INTERFACE

@@ -66,10 +68,11 @@ C - RNDCOL: the input random number Fortran array for color selection
 C - MES: the output matrix element Fortran array
 C - SELHEL: the output selected helicity Fortran array
 C - SELCOL: the output selected color Fortran array
+C - HELONLY: input flag, quit after computing good helicities?
 C
       INTERFACE
          SUBROUTINE FBRIDGESEQUENCE_NOMULTICHANNEL(PBRIDGE, MOMENTA, GS,
-     &     RNDHEL, RNDCOL, MES, SELHEL, SELCOL)
+     &     RNDHEL, RNDCOL, MES, SELHEL, SELCOL, HELONLY)
          INTEGER*8 PBRIDGE
          DOUBLE PRECISION MOMENTA(*)
          DOUBLE PRECISION GS(*)
@@ -78,6 +81,7 @@ C
          DOUBLE PRECISION MES(*)
          INTEGER*4 SELHEL(*)
          INTEGER*4 SELCOL(*)
+         LOGICAL HELONLY
          END SUBROUTINE FBRIDGESEQUENCE_NOMULTICHANNEL
       END INTERFACE

epochX/cudacpp/CODEGEN/PLUGIN/CUDACPP_SA_OUTPUT/madgraph/iolibs/template_files/gpu/fcheck_sa.f

Lines changed: 1 addition & 1 deletion
@@ -63,7 +63,7 @@ PROGRAM FCHECK_SA
         GS(IEVT) = 1.2177157847767195 ! fixed G for aS=0.118 (hardcoded for now in check_sa.cc, fcheck_sa.f, runTest.cc)
       END DO
       CALL FBRIDGESEQUENCE_NOMULTICHANNEL(BRIDGE, MOMENTA, GS, ! TEMPORARY? disable multi-channel in fcheck.exe and fgcheck.exe #466
-     &  RNDHEL, RNDCOL, MES, SELHEL, SELCOL)
+     &  RNDHEL, RNDCOL, MES, SELHEL, SELCOL, .FALSE.) ! do not quit after computing helicities
       DO IEVT = 1, NEVT
 c      DO IEXTERNAL = 1, NEXTERNAL
 c        WRITE(6,*) 'MOMENTA', IEVT, IEXTERNAL,

epochX/cudacpp/CODEGEN/generateAndCompare.sh

Lines changed: 1 addition & 0 deletions
@@ -337,6 +337,7 @@ function codeGenAndDiff()
         | awk -vdate="D:20240301000000+01'00'" '{print gensub("(^/ModDate\\().*(\\)>>endobj$)","\\1"date"\\2","g")}' \
         | awk -vdate="D:20240301000000+01'00'" '{print gensub("(^/CreationDate\\().*(\\)$)","\\1"date"\\2","g")}' \
         | awk -vid="0123456789abcdef0123456789abcdef" '{print gensub("(^/ID \\[<).*><.*(>\\]$)","\\1"id"><"id"\\2","g")}' \
+        | awk -vid="0123456789abcdef0123456789abcdef" '{print gensub("(^/ID \\[\\().*\\)\\(.*(\\)\\]$)","\\1"id")("id"\\2","g")}' \
         | awk -vdate="2024-03-01T00:00:00+01:00" '{print gensub("(<xmp:ModifyDate>).*(</xmp:ModifyDate>)","\\1"date"\\2","g")}' \
         | awk -vdate="2024-03-01T00:00:00+01:00" '{print gensub("(<xmp:CreateDate>).*(</xmp:CreateDate>)","\\1"date"\\2","g")}' \
         | awk -vuuid="'uuid=01234567-89ab-cdef-0123-456789abcdef'" '{print gensub("(xapMM:DocumentID=).*(/>$)","\\1"uuid"\\2","g")}' \

epochX/cudacpp/gg_tt.mad/SubProcesses/Bridge.h

Lines changed: 1 addition & 1 deletion
@@ -109,9 +109,9 @@ namespace mg5amcCpu
    * @param rndcol the pointer to the input random numbers for color selection
    * @param channelIds the Feynman diagram to enhance in multi-channel mode if 1 to n
    * @param mes the pointer to the output matrix elements
-   * @param goodHelOnly quit after computing good helicities?
    * @param selhel the pointer to the output selected helicities
    * @param selcol the pointer to the output selected colors
+   * @param goodHelOnly quit after computing good helicities?
    */
   void gpu_sequence( const FORTRANFPTYPE* momenta,
                      const FORTRANFPTYPE* gs,

epochX/cudacpp/gg_tt.mad/SubProcesses/P1_gg_ttx/auto_dsig1.f

Lines changed: 10 additions & 8 deletions
@@ -555,7 +555,7 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNELS,

       IF( FBRIDGE_MODE .LE. 0 ) THEN ! (FortranOnly=0 or BothQuiet=-1 or BothDebug=-2)
 #endif
-        CALL COUNTERS_SMATRIX1MULTI_START( -1, VECSIZE_USED ) ! fortran=-1
+        call counters_smatrix1multi_start( -1, VECSIZE_USED ) ! fortranMEs=-1
         DO IVEC=1, VECSIZE_USED
           CALL SMATRIX1(P_MULTI(0,1,IVEC),
      &      hel_rand(IVEC),
@@ -571,7 +571,7 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNELS,
 C *START* Included from CUDACPP template smatrix_multi.f
 C (into function smatrix$i_multi in auto_dsig$i.f)
 C ======================================================
-        CALL COUNTERS_SMATRIX1MULTI_STOP( -1 ) ! fortran=-1
+        call counters_smatrix1multi_stop( -1 ) ! fortranMEs=-1
 #ifdef MG5AMC_MEEXPORTER_CUDACPP
       ENDIF

@@ -581,9 +581,10 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNELS,
           STOP
         ENDIF
         IF ( FIRST ) THEN ! exclude first pass (helicity filtering) from timers (#461)
+          call counters_smatrix1multi_start( 1, VECSIZE_USED ) ! cudacppHEL=1
           CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled for helicity filtering
      &      P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2,
-     &      SELECTED_HEL2, SELECTED_COL2 )
+     &      SELECTED_HEL2, SELECTED_COL2, .TRUE.) ! quit after computing helicities
           FIRST = .FALSE.
 C         ! This is a workaround for
 C         https://github.com/oliviermattelaer/mg5amc_test/issues/22
@@ -599,22 +600,23 @@ SUBROUTINE SMATRIX1_MULTI(P_MULTI, HEL_RAND, COL_RAND, CHANNELS,
           ENDIF
           WRITE (6,*) 'NGOODHEL =', NGOODHEL
           WRITE (6,*) 'NCOMB =', NCOMB
+          call counters_smatrix1multi_stop( 1 ) ! cudacppHEL=1
         ENDIF
-        CALL COUNTERS_SMATRIX1MULTI_START( 0, VECSIZE_USED ) ! cudacpp=0
+        call counters_smatrix1multi_start( 0, VECSIZE_USED ) ! cudacppMEs=0
         IF ( .NOT. MULTI_CHANNEL ) THEN
           CALL FBRIDGESEQUENCE_NOMULTICHANNEL( FBRIDGE_PBRIDGE, ! multi channel disabled
      &      P_MULTI, ALL_G, HEL_RAND, COL_RAND, OUT2,
-     &      SELECTED_HEL2, SELECTED_COL2 )
+     &      SELECTED_HEL2, SELECTED_COL2, .FALSE.) ! do not quit after computing helicities
         ELSE
           IF( SDE_STRAT.NE.1 ) THEN
             WRITE(6,*) 'ERROR ! The cudacpp bridge requires SDE=1' ! multi channel single-diagram enhancement strategy
             STOP
           ENDIF
-          CALL FBRIDGESEQUENCE(FBRIDGE_PBRIDGE, P_MULTI, ALL_G,
+          CALL FBRIDGESEQUENCE(FBRIDGE_PBRIDGE, P_MULTI, ALL_G, ! multi channel enabled
      &      HEL_RAND, COL_RAND, CHANNELS, OUT2,
-     &      SELECTED_HEL2, SELECTED_COL2 ) ! 1-N: multi channel enabled
+     &      SELECTED_HEL2, SELECTED_COL2, .FALSE.) ! do not quit after computing helicities
         ENDIF
-        CALL COUNTERS_SMATRIX1MULTI_STOP( 0 ) ! cudacpp=0
+        call counters_smatrix1multi_stop( 0 ) ! cudacppMEs=0
       ENDIF

       IF( FBRIDGE_MODE .LT. 0 ) THEN ! (BothQuiet=-1 or BothDebug=-2)

epochX/cudacpp/gg_tt.mad/SubProcesses/P1_gg_ttx/fcheck_sa.f

Lines changed: 1 addition & 1 deletion
@@ -63,7 +63,7 @@ PROGRAM FCHECK_SA
         GS(IEVT) = 1.2177157847767195 ! fixed G for aS=0.118 (hardcoded for now in check_sa.cc, fcheck_sa.f, runTest.cc)
       END DO
       CALL FBRIDGESEQUENCE_NOMULTICHANNEL(BRIDGE, MOMENTA, GS, ! TEMPORARY? disable multi-channel in fcheck.exe and fgcheck.exe #466
-     &  RNDHEL, RNDCOL, MES, SELHEL, SELCOL)
+     &  RNDHEL, RNDCOL, MES, SELHEL, SELCOL, .FALSE.) ! do not quit after computing helicities
       DO IEVT = 1, NEVT
 c      DO IEXTERNAL = 1, NEXTERNAL
 c        WRITE(6,*) 'MOMENTA', IEVT, IEXTERNAL,
