# Invocation command line: # /home/scali/apps/mpi2007/bin/runspec --iterations=3 --reportable -c SMC-5.6.1_Intel-9.1.cfg --action=run --define hosts=/var/spool/PBS/aux/49630.cwxhost --define nmbr_nodes=16 --ranks=64 all # output_root was not used for this run ############################################################################ #include: common.inc # ----- Begin inclusion of 'common.inc' ############################################################################ makeflags = -j 4 env_vars = yes use_submit_for_speed = yes teeout = yes teerunout = yes MPI_HOME = /opt/scali submit = $[MPI_HOME]/bin/mpirun -mstdin none -q -machinefile %{hosts} -np $ranks -npn 4 $command sw_base_ptrsize = 64-bit sw_other = None sw_peak_ptrsize = Not Applicable test_sponsor = Scali, Inc tester = Scali, Inc sw_preprocessors = None sw_mpi_other = IB Gold VAPI node_LS1_hw_adapter_InfiniBand_interconnect = InfiniBand node_LS1IO_sw_state = multi-user node_LS1IO_sw_sharedfile = GPFS node_LS1IO_sw_other = None node_LS1IO_sw_os = SLES9 SP3 node_LS1IO_sw_localfile = Not applicable node_LS1IO_purpose = file server node_LS1IO_order = 2 node_LS1IO_label = Linux Networx LS1 I/O Nodes node_LS1IO_hw_vendor = Linux Networx, Inc. 
node_LS1IO_hw_tcache = None node_LS1IO_hw_scache = 4 MB I+D on chip per chip node_LS1IO_hw_pcache = 32 KB I + 32 KB D on chip per core node_LS1IO_hw_other = None node_LS1IO_hw_ocache = None node_LS1IO_hw_nthreadspercore = 1 node_LS1IO_hw_ncpuorder = 1-2 chips node_LS1IO_hw_ncoresperchip = 2 node_LS1IO_hw_ncores = 4 node_LS1IO_hw_nchips = 2 node_LS1IO_hw_model = LS1 node_LS1IO_hw_memory = 4 GB (4 x 1GB DIMMs 667 MHz) node_LS1IO_hw_disk = 18 TB SAN interconnected by FC4 node_LS1IO_hw_cpu_name = Intel Xeon 5150 node_LS1IO_hw_cpu_mhz = 2660 node_LS1IO_hw_cpu_char = 1333 MHz FSB node_LS1IO_hw_adapter_InfiniBand_slot_type = PCIe x8 node_LS1IO_hw_adapter_InfiniBand_ports_used = 1 node_LS1IO_hw_adapter_InfiniBand_interconnect = InfiniBand node_LS1IO_hw_adapter_InfiniBand_firmware = 5.2.0 node_LS1IO_hw_adapter_InfiniBand_driver = IBGD 1.8.2 node_LS1IO_hw_adapter_InfiniBand_data_rate = InfiniBand 4x DDR node_LS1IO_hw_adapter_InfiniBand_count = 1 node_LS1IO_count = 8 license_num = 021 sw_auto_parallel = No # ---- End inclusion of '/home/scali/apps/mpi2007/config/common.inc' ext = Intel-9.1 #include: Intel-9.1.inc # ----- Begin inclusion of 'Intel-9.1.inc' ############################################################################ BOPTS= -O3 -no-prec-div -ftz -fno-alias -xT CC = $(MPI_HOME)/bin/mpicc -ccl icc FC = $(MPI_HOME)/bin/mpif77 -ccl ifort CXX = $(MPI_HOME)/bin/mpicc -ccl icpc COPTIMIZE = ${BOPTS} FOPTIMIZE = ${BOPTS} CXXOPTIMIZE= ${BOPTS} ##################################################################### # Portability ##################################################################### default=default=default=default: 121.pop2: CPORTABILITY= -DSPEC_MPI_CASE_FLAG 127.wrf2: CPORTABILITY = -DSPEC_MPI_LINUX -DSPEC_MPI_CASE_FLAG srcalt = fixcalling 129.tera_tf: srcalt = fixbuffer sw_c_compiler = Intel C 9.1.045 sw_cxx_compiler = Intel C++ 9.1.045 sw_f_compiler = Intel Fortran 9.1.040 # ---- End inclusion of '/home/scali/apps/mpi2007/config/Intel-9.1.inc' test_date = 
Feb-2008 sw_avail = Feb-2008 sw_mpi_library = Scali MPI Connect 5.6.1-58818 #include: lnxi-ls1-config.inc # ----- Begin inclusion of 'lnxi-ls1-config.inc' ############################################################################ system_name000= LS-1, system_name001 = Scali MPI Connect 5.6.1, system_name002 = Intel 9.1 compilers system_vendor = Linux Networx system_class = Homogeneous hw_avail = Sep-2007 ############################################################ # C O M P U T E N O D E S # ############################################################ node_LS1_label = Linux Networx LS-1 node_LS1_order = 1 node_LS1_purpose = compute node_LS1_count = 16 node_LS1_hw_adapter_InfiniBand_count = 1 node_LS1_hw_adapter_InfiniBand_data_rate = InfiniBand 4x DDR node_LS1_hw_adapter_InfiniBand_driver = IBGD 1.8.2 node_LS1_hw_adapter_InfiniBand_firmware = 5.1.4 node_LS1_hw_adapter_InfiniBand_model000= Mellanox MHGA28-XTC node_LS1_hw_adapter_InfiniBand_model001= PCI-Express DDR InfiniBand HCA node_LS1_hw_adapter_InfiniBand_ports_used = 1 node_LS1_hw_adapter_InfiniBand_slot_type = PCIe x8 node_LS1_hw_cpu_char = 1333 MHz FSB node_LS1_hw_cpu_mhz = 3000 node_LS1_hw_cpu_name = Intel Xeon 5160 node_LS1_hw_disk = 250GB SAS hard drive node_LS1_hw_memory = 8 GB (8 x 1GB DIMMs 667 MHz) node_LS1_hw_model = LS-1 node_LS1_hw_nchips = 2 node_LS1_hw_ncores = 4 node_LS1_hw_ncoresperchip = 2 node_LS1_hw_ncpuorder = 1-2 chips node_LS1_hw_nthreadspercore = 1 node_LS1_hw_ocache = None node_LS1_hw_other = None node_LS1_hw_pcache = 32 KB I + 32 KB D on chip per core node_LS1_hw_scache = 4 MB I+D on chip per chip node_LS1_hw_tcache = None node_LS1_hw_vendor = Linux Networx, Inc. 
node_LS1_sw_os = SLES9 SP3 node_LS1_sw_state = multi-user node_LS1_sw_sharedfile = GPFS node_LS1_sw_localfile = Not applicable node_LS1_sw_other = None ############################################################ # F I L E S E R V E R # ############################################################ ############################################################ # I N T E R C O N N E C T # ############################################################ interconnect_InfiniBand_label = InfiniBand interconnect_InfiniBand_order = 0 interconnect_InfiniBand_hw_topo = Single switch (star) interconnect_InfiniBand_hw_switch_count_model = 9120 interconnect_InfiniBand_hw_switch_count_firmware = 4.1.1.1.11 interconnect_InfiniBand_hw_switch_count_data_rate = InfiniBand 4x SDR and InfiniBand 4x DDR interconnect_InfiniBand_hw_vendor = QLogic interconnect_InfiniBand_hw_model = QLogic Silverstorm 9120 Fabric Director interconnect_InfiniBand_purpose = MPI and filesystem traffic interconnect_InfiniBand_hw_switch_count_ports = 144 interconnect_InfiniBand_hw_switch_count_count = 1 # ---- End inclusion of '/home/scali/apps/mpi2007/config/lnxi-ls1-config.inc' # The following section was added automatically, and contains settings that # did not appear in the original configuration file, but were added to the # raw file after the run. default: flagsurl000 = http://www.spec.org/mpi2007/flags/MPI2007_flags.20080611.xml flagsurl001 = http://www.spec.org/mpi2007/flags/MPI2007_flags.0.20080611.xml node_LS1IO_hw_adapter_InfiniBand_model000 = Mellanox MHGA28-XTC node_LS1IO_hw_adapter_InfiniBand_model001 = PCI-X DDR InfiniBand HCA nc000 = SPEC has determined that this result was not in compliance with the SPEC nc001 = MPI2007 run and reporting rules. Specifically, the result did not meet the nc002 = requirement for baseline optimization flags to not use assertion flags (the nc003 = flag -fno-alias is a violation of this rule). 
The result was found to be nc004 = performance neutral compared to runs without -fno-alias. Replacement nc005 = results could not be produced because of system access limitations. notes_000 = The following approved srcalts are used notes_005 = tera_tf - fixbuffer notes_010 = wrf2 - fixcalling