# Invocation command line:
# /N/u/steige/Quarry/idp/mpi2007-1.1/bin/runspec --reportable -c reptblOMP -F http://mypage.iu.edu/~steige/Indiana_EM64T_Intel101_flags.xml --ranks=128 -define hosts=/N/u/steige/Quarry/idp/mpi2007-1.1/omp128
# output_root was not used for this run
############################################################################
action = validate
teeout = yes
output_format = all
tune = base
size = mref
iterations = 3
runlist = medium
makeflags = -j 8
license_num = 45
company_name = Indiana University
machine_name = iDP
hw_avail =
sw_avail =
sw_other = None
prepared_by = Scott Teige
CC = mpicc
CXX = mpicxx
FC = mpif90
F77 = mpif77
CPP = mpicc
CPPFLAGS = -E
env_vars = yes
MPI_INC =
MPI_LIB =
MPI_BIN =
MPI_RUN = /N/soft/linux-rhel4-x86_64/openmpi/1.3.1-idp/bin/mpirun
MPI_FLAGS = -np $ranks -machinefile %{hosts} --mca btl_openib_flags 1
###########################################################################
#
# MPI
#
###########################################################################
default=base=none=default:
ENV_SPEC_HPG_PARALLEL = MPI
CPORTABILITY =
FPORTABILITY =
COPTIMIZE = -O3 -xT -ipo -no-prec-div
CXXOPTIMIZE = -O3 -xT -ipo -no-prec-div
FOPTIMIZE = -O3 -xT -ipo -no-prec-div
CPPFLAGS = -I.
FPPFLAGS = -I.
FCFLAGS = -I.
EXTRA_CFLAGS =
EXTRA_CXXFLAGS =
EXTRA_FFLAGS =
EXTRA_LDFLAGS =
use_submit_for_speed = yes
ENV_MPI_ENABLED = ENABLED
MPIRUN = $MPI_RUN $MPI_FLAGS
use_submit_for_speed = 1
submit = $MPIRUN $command
121.pop2:
CPORTABILITY = -DSPEC_MPI_CASE_FLAG
126.lammps=default=default=default:
CXXPORTABILITY = -DMPICH_IGNORE_CXX_SEEK
127.wrf2:
CPORTABILITY = -DSPEC_MPI_LINUX -DSPEC_MPI_CASE_FLAG
# Notes
test_sponsor = Indiana University
tester = Scott Teige
interconnect_ib_hw_topo = Single switch
test_date = Mar-2009
hw_avail = Sep-2008
sw_avail = Jan-2009
prepared_by = Indiana University
system_vendor = IBM
system_name = iDP (Intel Xeon L5420, 2.50 GHz)
node_compute_sw_other = lustre 1.6.7 kernel patches
node_compute_hw_adapter_fs_firmware = 2.4-0
node_compute_hw_adapter_fs_driver = OS default (e1000, v7.3.20-k2-NAPI)
interconnect_fs_order = 0
#
# Computation node info
#
node_compute_label = iDP node
node_compute_order = 1
node_compute_count = 16
node_compute_purpose = compute
node_compute_hw_vendor = IBM
node_compute_hw_model = System x iDataPlex dx340
node_compute_hw_cpu_name = Intel Xeon L5420
node_compute_hw_ncpuorder = 1-2 chips
node_compute_hw_nchips = 2
node_compute_hw_ncores = 8
node_compute_hw_ncoresperchip = 4
node_compute_hw_nthreadspercore = 1
node_compute_hw_cpu_char = 1333 MHz FSB
node_compute_hw_cpu_mhz = 2500
node_compute_hw_pcache = 32 KB I + 32 KB D on chip per core
node_compute_hw_scache = 12 MB I+D on chip per chip, 6 MB shared / 2 cores
node_compute_hw_tcache = None
node_compute_hw_ocache = None
node_compute_hw_memory = 32 GB (FBDIMM 8x4-GB 667 MHz)
node_compute_hw_other = None
node_compute_hw_disk = Western Digital 160 GB SATA WD160YS-23SHBO
node_compute_hw_adapter_fs_interconnect = Ethernet
node_compute_hw_adapter_fs_model000 = Intel Corporation 80003ES2LAN Gigabit Ethernet
node_compute_hw_adapter_fs_model001 = Controller (Copper) (rev 01)
node_compute_hw_adapter_fs_count = 2
node_compute_hw_adapter_fs_ports_used = 1
node_compute_hw_adapter_fs_data_rate = Gigabit Ethernet
node_compute_hw_adapter_ib_model000 = Mellanox Technologies MT26418 [ConnectX IB DDR,
node_compute_hw_adapter_ib_model001 = PCIe 2.0 5GT/s] (rev a0)
node_compute_hw_adapter_ib_count = 1
node_compute_hw_adapter_ib_slot_type = PCIe x8 Gen2
node_compute_hw_adapter_ib_data_rate = InfiniBand 4x DDR
node_compute_hw_adapter_ib_ports_used = 1
node_compute_hw_adapter_ib_interconnect = InfiniBand
node_compute_hw_adapter_ib_driver = OFED 1.3.1
node_compute_hw_adapter_ib_firmware = 2.5.0
node_compute_sw_os000 = RedHat EL v4.7
node_compute_sw_os001 = 2.6.9-67.0.22.EL_lustre.1.6.7custom
node_compute_sw_localfile = Linux/ext3
node_compute_sw_state = Multi-User
node_compute_sw_sharedfile = IBM N5500 NAS via NFSv3
interconnect_ib_label = IB Switch
interconnect_ib_order = 1
interconnect_ib_purpose = MPI traffic
interconnect_ib_hw_vendor = Cisco
interconnect_ib_hw_model = Cisco SFS 7024D
interconnect_ib_hw_switch_9080_model = Cisco SFS 7024D
interconnect_ib_hw_switch_9080_count = 1
interconnect_ib_hw_switch_9080_ports = 288
interconnect_ib_hw_switch_9080_data_rate = InfiniBand 4x DDR
interconnect_ib_hw_switch_9080_firmware = 4.1.1.1.11
interconnect_fs_label = Gigabit Ethernet
interconnect_fs_purpose = Cluster File System
interconnect_fs_hw_vendor = ProCurve Networking
interconnect_fs_hw_model = HP ProCurve Switch 5406zl Intelligent Edge J8697A
interconnect_fs_hw_switch_fs_model = HP ProCurve Switch 5406zl Intelligent Edge J8697A
interconnect_fs_hw_switch_fs_count = 1
interconnect_fs_hw_switch_fs_ports = 144
interconnect_fs_hw_switch_fs_data_rate = 1Gbps Ethernet
interconnect_fs_hw_switch_fs_firmware = --
interconnect_fs_hw_topo = Single switch
system_class = Homogeneous
max_ranks = 512
max_peak_ranks = 512
sw_c_compiler = Intel C++ Compiler 10.1 for Linux (10.1.013)
sw_cxx_compiler = Intel C++ Compiler 10.1 for Linux (10.1.013)
sw_f_compiler = Intel Fortran Compiler 10.1 for Linux (10.1.013)
sw_base_ptrsize = 64-bit
sw_peak_ptrsize = 64-bit
sw_mpi_library = OpenMPI 1.3.1
sw_mpi_other = None
sw_preprocessors = No
# The following section was added automatically, and contains settings that
# did not appear in the original configuration file, but were added to the
# raw file after the run.
default:
flagsurl000 = http://www.spec.org/mpi2007/flags/EM64T_Intel101_flags.20090520.00.xml