# Invocation command line:
# /home/zih/jurenz/SPECMPI-ICE8200EX/1.0_intel_mpt1.19/bin/runspec --config=ZIH-ICE-8200EX-intel-sgimpt.cfg --ranks=32 --reportable medium
# output_root was not used for this run
############################################################################
teeout = yes
output_format = all
no_input_handler = null
tune = base
basepeak = 1
makeflags = -j 8
flagsurl000 = http://www.spec.org/mpi2007/flags/EM64T_Intel101_flags.20080611.xml

# Test description
license_num = 037
prepared_by = Matthias Jurenz
hw_avail = Mar-2008
system_name000 = SGI Altix ICE 8200EX
system_name001 = (Xeon Processor X5472 3GHz)
system_vendor = SGI
system_class = Homo
sw_avail = Apr-2008
tester = Matthias Jurenz
test_date = Jun-2008
test_sponsor = ZIH

# Benchmark description
sw_auto_parallel = No
sw_base_ptrsize = 64-bit
sw_peak_ptrsize = 64-bit
notes_000 = srcalt's:
notes_005 =   104.milc: calloc
notes_010 =   113.GemsFDTD: maxprocandstop
notes_015 =   127.wrf2: fixcalling
notes_020 =   129.tera_tf: fixbuffer
notes_025 =   130.socorro: second_underscore
notes_030 =
notes_035 = Environment:
notes_040 =   export MPI_REQUEST_MAX=65536
notes_045 =     Determines the maximum number of nonblocking sends and
notes_050 =     receives that can simultaneously exist for any single MPI
notes_055 =     process. MPI generates an error message if this limit
notes_060 =     (or the default, if not set) is exceeded. Default: 16384
notes_065 =   export MPI_TYPE_MAX=32768
notes_070 =     Determines the maximum number of data types that can
notes_075 =     simultaneously exist for any single MPI process.
notes_080 =     MPI generates an error message if this limit (or the default,
notes_085 =     if not set) is exceeded. Default: 8192
notes_090 =   export MPI_BUFS_PER_HOST=512
notes_095 =     Determines the number of shared message buffers (16 KB each)
notes_100 =     that MPI is to allocate for each host. These buffers are used
notes_105 =     to send and receive long inter-host messages.
notes_110 =     Default: 32 pages (1 page = 16KB)
notes_115 =   export MPI_NUM_QUICKS=16
notes_120 =     Controls the number of other ranks that a rank can receive from
notes_125 =     over InfiniBand using a short message fast path. This is 8 by
notes_130 =     default and can be any value between 0 and 32.
notes_135 =
notes_140 =   ulimit -s unlimited
notes_145 =     Removes limits on the maximum size of the automatically-
notes_150 =     extended stack region of the current process and each
notes_155 =     process it creates.
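
# The notes above describe the run-time environment in prose. As a point of
# reference only, a minimal job-script sketch that would reproduce that
# environment before launching the benchmarks is shown below; the variable
# values are taken from the notes, while the script itself is illustrative
# and not part of the recorded configuration:
#
#   #!/bin/sh
#   export MPI_REQUEST_MAX=65536   # max simultaneous nonblocking requests per MPI process (default 16384)
#   export MPI_TYPE_MAX=32768      # max simultaneously existing datatypes per MPI process (default 8192)
#   export MPI_BUFS_PER_HOST=512   # 16 KB shared buffers per host for long inter-host messages (default 32)
#   export MPI_NUM_QUICKS=16       # ranks reachable over the InfiniBand short-message fast path (default 8)
#   ulimit -s unlimited            # remove the limit on the automatically-extended stack region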
sw_preprocessors = None
sw_c_compiler000 = Intel C
sw_c_compiler001 = Applications Version 10.1 (Build 20080112)
sw_cxx_compiler000 = Intel C++
sw_cxx_compiler001 = Applications Version 10.1 (Build 20080112)
sw_f_compiler000 = Intel Fortran
sw_f_compiler001 = Applications Version 10.1 (Build 20080112)
sw_mpi_library = SGI Message Passing Toolkit (MPT) Version 1.19
sw_mpi_other = None
sw_other = None

# Head node description
node_head_label = SGI Altix ICE 8200EX Head Node
node_head_order = 1
node_head_count = 1
node_head_hw_vendor = SGI
node_head_hw_model = Altix ICE 8200EX (Xeon Processor X5365 3GHz)
node_head_purpose = head
node_head_hw_cpu_name = Intel Xeon X5365
node_head_hw_ncores = 8
node_head_hw_ncpuorder = 1-2 chips
node_head_hw_ncoresperchip = 4
node_head_hw_nchips = 2
node_head_hw_nthreadspercore = 1
node_head_hw_cpu_char = Quad Core, 3.0GHz, 1333MHz system bus
node_head_hw_cpu_mhz = 3000
node_head_hw_pcache = 32 KB I + 32 KB D on chip per core
node_head_hw_scache = 8 MB I+D on chip per chip, 4 MB shared / 2 cores
node_head_hw_tcache = None
node_head_hw_ocache = None
node_head_hw_memory = 16 GB (8*2GB PC2-6400 CL5-5-5 FB-DIMMs)
node_head_sw_os = SLES10 SP1
node_head_sw_state = Multi-user, run level 3
node_head_hw_disk = Seagate Cheetah 15K.5 147GB SAS (ST3146855SS)
node_head_sw_localfile = XFS
node_head_sw_sharedfile = NFS (RDMA InfiniBand, NAS Nexis2000)
node_head_sw_other = SGI ProPack 5 SP4
node_head_hw_other = None

# Head node - Adapter description
node_head_hw_adapter_IB_interconnect = InfiniBand
node_head_hw_adapter_IB_model000 = Mellanox MT25208 InfiniHost III Ex (rev 20)
node_head_hw_adapter_IB_model001 = (PCIe x8 Gen1 2.5 GT/s)
node_head_hw_adapter_IB_slot_type = PCIe x8 Gen1
node_head_hw_adapter_IB_data_rate = InfiniBand 4x DDR
node_head_hw_adapter_IB_count = 1
node_head_hw_adapter_IB_ports_used = 1
node_head_hw_adapter_IB_driver = OFED 1.3.0 (mlx4_ib.ko 0.01)
node_head_hw_adapter_IB_firmware = 5.2.0

# Compute node description
node_compute_label = SGI Altix ICE 8200EX Compute Node
node_compute_order = 2
node_compute_count = 4
node_compute_hw_vendor = SGI
node_compute_hw_model = Altix ICE 8200EX (Xeon Processor X5472 3GHz)
node_compute_purpose = compute
node_compute_hw_cpu_name = Intel Xeon X5472
node_compute_hw_ncores = 8
node_compute_hw_ncpuorder = 1-2 chips
node_compute_hw_ncoresperchip = 4
node_compute_hw_nchips = 2
node_compute_hw_nthreadspercore = 1
node_compute_hw_cpu_char = Quad Core, 3.0GHz, 1600MHz system bus
node_compute_hw_cpu_mhz = 3000
node_compute_hw_pcache = 32 KB I + 32 KB D on chip per core
node_compute_hw_scache = 12 MB I+D on chip per chip, 6 MB shared / 2 cores
node_compute_hw_tcache = None
node_compute_hw_ocache = None
node_compute_hw_memory = 16 GB (8*2GB PC2-6400 CL5-5-5 FB-DIMMs)
node_compute_sw_os = SLES10 SP1
node_compute_sw_state = Multi-user, run level 3
node_compute_hw_disk = NFS
node_compute_sw_localfile = NFS (v3)
node_compute_sw_sharedfile = NFS (RDMA InfiniBand, NAS Nexis2000)
node_compute_sw_other = SGI ProPack 5 SP4
node_compute_hw_other = None

# Compute node - Adapter description
node_compute_hw_adapter_IB_interconnect = InfiniBand
node_compute_hw_adapter_IB_model000 = Mellanox MT26418 ConnectX IB DDR
node_compute_hw_adapter_IB_model001 = (PCIe x8 Gen2 5 GT/s)
node_compute_hw_adapter_IB_slot_type = PCIe x8 Gen2
node_compute_hw_adapter_IB_data_rate = InfiniBand 4x DDR
node_compute_hw_adapter_IB_count = 1
node_compute_hw_adapter_IB_ports_used = 2
node_compute_hw_adapter_IB_driver = OFED 1.3.0 (mlx4_ib.ko 0.01)
node_compute_hw_adapter_IB_firmware = 2.3.0

# Fileserver description
node_fileserver_label = SGI InfiniteStorage NEXIS 2000 SAS
node_fileserver_order = 3
node_fileserver_count = 1
node_fileserver_hw_vendor = SGI
node_fileserver_hw_model = InfiniteStorage NEXIS 2000 SAS
node_fileserver_purpose = fileserver
node_fileserver_hw_cpu_name = Intel Xeon 5140
node_fileserver_hw_ncores = 4
node_fileserver_hw_ncpuorder = 1-2 chips
node_fileserver_hw_ncoresperchip = 2
node_fileserver_hw_nchips = 2
node_fileserver_hw_nthreadspercore = 1
node_fileserver_hw_cpu_char = Dual Core, 2.33GHz, 1333MHz system bus
node_fileserver_hw_cpu_mhz = 2333
node_fileserver_hw_pcache = 32 KB I + 32 KB D on chip per core
node_fileserver_hw_scache = 4 MB I+D on chip per chip, 4 MB shared / 2 cores
node_fileserver_hw_tcache = None
node_fileserver_hw_ocache = None
node_fileserver_hw_memory = 32 GB
node_fileserver_sw_os = SLES10 SP1
node_fileserver_sw_state = Multi-user, run level 3
node_fileserver_hw_disk000 = 72 disks, 280GB/disk, 10TB total, 6 Shelves
node_fileserver_hw_disk001 = (striped RAID 5)
node_fileserver_sw_localfile = XFS
node_fileserver_sw_sharedfile = None
node_fileserver_sw_other000 = SGI ProPack 5 SP2
node_fileserver_sw_other001 = SGI InfiniteStorage Appliance Manager 4
node_fileserver_sw_other002 = SGI XVM 4.2.2.1
node_fileserver_sw_other003 = XFS 64-bit journaled file system
node_fileserver_hw_other = None

# Fileserver - Adapter description
node_fileserver_hw_adapter_IB_interconnect = InfiniBand
node_fileserver_hw_adapter_IB_model000 = Mellanox MT25204 InfiniHost III Lx (rev 20)
node_fileserver_hw_adapter_IB_model001 = (PCIe x8 Gen1 2.5 GT/s)
node_fileserver_hw_adapter_IB_slot_type = PCIe x8 Gen1
node_fileserver_hw_adapter_IB_data_rate = InfiniBand 4x DDR
node_fileserver_hw_adapter_IB_count = 1
node_fileserver_hw_adapter_IB_ports_used = 1
node_fileserver_hw_adapter_IB_driver = OFED 1.2.6 (ib_mthca.ko 0.08)
node_fileserver_hw_adapter_IB_firmware = 1.2.0

# Interconnect description (MPI-Fabric)
interconnect_MPI_label = InfiniBand (MPI)
interconnect_MPI_order = 1
interconnect_MPI_hw_vendor = Mellanox Technologies
interconnect_MPI_hw_model = MT26418 ConnectX
interconnect_MPI_hw_switch_1_count = 8
interconnect_MPI_hw_switch_1_ports = 24
interconnect_MPI_hw_switch_1_data_rate = InfiniBand 4x DDR
interconnect_MPI_hw_switch_1_model = Mellanox MT47396 InfiniScale III
interconnect_MPI_hw_topo = Hypercube with express links
interconnect_MPI_hw_switch_1_firmware = 1.3.0
interconnect_MPI_purpose = MPI traffic

# Interconnect description (IO-Fabric)
interconnect_IO_label = InfiniBand (I/O)
interconnect_IO_order = 2
interconnect_IO_hw_vendor = Mellanox Technologies
interconnect_IO_hw_model = MT26418 ConnectX
interconnect_IO_hw_switch_1_count = 8
interconnect_IO_hw_switch_1_ports = 24
interconnect_IO_hw_switch_1_data_rate = InfiniBand 4x DDR
interconnect_IO_hw_switch_1_model = Mellanox MT47396 InfiniScale III
interconnect_IO_hw_switch_1_firmware = 1.3.0
interconnect_IO_hw_topo = Hypercube with express links
interconnect_IO_purpose = I/O traffic

# Compiler invocation and flags
CC = icc
CXX = icpc
FC = ifort
F77 = ifort
CPP = icc
CPPFLAGS = -E
EXTRA_LDFLAGS =
env_vars = yes

default=base=default=default:
COPT = -O3 -xT -ipo -no-prec-div
CXXOPT = -O3 -xT -ipo -ansi-alias -no-prec-div
FOPT = -O3 -xT -ipo -no-prec-div

default=peak=default=default:
COPT =
CXXOPT =
FOPT =

default=default=default=default:
ENV_SPEC_HPG_PARALLEL = MPI
CPORTABILITY =
FPORTABILITY =
COPTIMIZE = ${COPT}
CXXOPTIMIZE = ${CXXOPT}
FOPTIMIZE = ${FOPT}
CPPFLAGS =
FPPFLAGS =
EXTRA_CFLAGS = -I/opt/sgi-mpt/1.19/include
EXTRA_CXXFLAGS = -I/opt/sgi-mpt/1.19/include
EXTRA_FFLAGS = -I/opt/sgi-mpt/1.19/include
EXTRA_LDFLAGS = -L/opt/sgi-mpt/1.19/lib64
EXTRA_LIBS = -lmpi
ENV_MPI_ENABLED = ENABLED
MPIRUN = \$SPEC/spec_mpirun.sh
submit = $MPIRUN $command

# Benchmark-specific source alternatives and portability flags
104.milc:
srcalt = calloc

113.GemsFDTD:
srcalt = maxprocandstop

121.pop2:
CPORTABILITY = -DSPEC_MPI_CASE_FLAG

127.wrf2:
srcalt = fixcalling
CPORTABILITY = -DSPEC_MPI_LINUX -DSPEC_MPI_CASE_FLAG

129.tera_tf:
srcalt = fixbuffer

130.socorro:
srcalt = second_underscore
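
# For illustration only: the submit rule above launches every benchmark
# through $SPEC/spec_mpirun.sh, but that script is not reproduced in this
# report. A hypothetical wrapper playing the same role could look like the
# sketch below; invoking SGI MPT's mpirun with -np and forwarding the
# benchmark command line via "$@" is an assumption, not the tested script:
#
#   #!/bin/sh
#   # The benchmark command ($command in the submit rule) arrives as the
#   # script's arguments; 32 ranks matches the --ranks=32 runspec invocation.
#   exec mpirun -np 32 "$@"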