# Invocation command line: # /shared/PMPI4SC2009/1.1/bin/runspec --config=PMPI-5.6.4-59151_DELLCluster_Intel-10.1.cfg --action=run --size mref --iterations=3 --reportable --define hosts= --define nmbr_nodes=8 --ranks=64 all # output_root was not used for this run ############################################################################ #include: common-PMPI-DELLCluster.inc # ----- Begin inclusion of 'common-PMPI-DELLCluster.inc' ############################################################################ makeflags = -j 4 env_vars = yes use_submit_for_speed = yes teeout = yes teerunout = yes MPI_HOME = /opt/scali #submit = mpimon -inherit_limits $command -- db04b[09-16] 8 submit = $[MPI_HOME]/bin/mpirun -inherit_limits -mstdin none -q -machinefile /shared/hostfile.txt -np $ranks $command #submit = $[MPI_HOME]/bin/mpirun -mstdin none -q -machinefile /home/hakon/SPEC/kit70/nodefile.txt -np 8 -npn 1 $command sw_base_ptrsize = 64-bit license_num = 021 # ---- End inclusion of '/shared/PMPI4SC2009/1.1/config/common-PMPI-DELLCluster.inc' ext = Intel-10.1 makeflags = -j 64 #include: Intel-10.1-PMPI.inc # ----- Begin inclusion of 'Intel-10.1-PMPI.inc' ############################################################################ #BOPTS= -O3 -ipo -no-prec-div -aaxT BOPTS= -O3 -ipo -no-prec-div -axS CC = mpicc -ccl icc FC = mpif77 -ccl ifort CXX = mpicc -ccl icpc COPTIMIZE = ${BOPTS} FOPTIMIZE = ${BOPTS} CXXOPTIMIZE = ${BOPTS} ##################################################################### # Portability ##################################################################### default=default=default=default: 121.pop2: CPORTABILITY = -DSPEC_MPI_CASE_FLAG 127.wrf2: CPORTABILITY = -DSPEC_MPI_LINUX -DSPEC_MPI_CASE_FLAG 129.tera_tf: sw_c_compiler = Intel C Compiler 10.1 for Linux (10.1.018) sw_cxx_compiler = Intel C++ Compiler 10.1 for Linux (10.1.018) sw_f_compiler = Intel Fortran Compiler 10.1 for Linux (10.1.018) # ---- End inclusion of 
'/shared/PMPI4SC2009/1.1/config/Intel-10.1-PMPI.inc' test_date = Oct-2008 sw_avail = Jul-2008 sw_mpi_library = Scali MPI Connect 5.6.4-59151 #include: DELLCluster-config.inc # ----- Begin inclusion of 'DELLCluster-config.inc' ############################################################################ system_name000= PowerEdge M605, system_name001 = Gigabit Ethernet, system_name002 = Platform MPI 5.6.4, system_name003 = Intel 10.1 compilers system_vendor = Dell Inc. sw_parallel_other = None node_PowerEdgeM605_hw_notes =BIOS Version 3.0.3, Date: 08/01/2008 node_PowerEdgeM605_hw_adapter_GBEthernet_slot_type = PCIe x8 node_PowerEdgeM605_hw_adapter_GBEthernet_ports_used = 1 node_PowerEdgeM605_hw_adapter_GBEthernet_interconnect = Gigabit Ethernet node_PowerEdgeM605_hw_adapter_GBEthernet_firmware = 4.4.1 node_PowerEdgeM605_hw_adapter_GBEthernet_driver = OS default (bnx2, v1.5.11) node_PowerEdgeM605_hw_adapter_GBEthernet_data_rate = 1 Gbps node_PowerEdgeM605_hw_adapter_GBEthernet_count = 1 interconnect_GBEthernet_purpose = MPI and file system traffic interconnect_GBEthernet_order = 1 interconnect_GBEthernet_label = Gigabit Ethernet interconnect_GBEthernet_hw_vendor = Dell Inc. 
interconnect_GBEthernet_hw_topo = Single Switch interconnect_GBEthernet_hw_switch_Dell5324_ports = 24 interconnect_GBEthernet_hw_switch_Dell5324_model = PowerConnect 5324 interconnect_GBEthernet_hw_switch_Dell5324_firmware = sw: v2.0.0.39, boot: v1.0.2.02, hw: 00.00.02 interconnect_GBEthernet_hw_switch_Dell5324_data_rate = 1 Gbps Ethernet interconnect_GBEthernet_hw_switch_Dell5324_count = 1 interconnect_GBEthernet_hw_model = 24 port Gigabit Ethernet switch system_class = Homogeneous hw_avail = Jan-2008 ############################################################ # C O M P U T E N O D E S # ############################################################ node_PowerEdgeM605_label = PowerEdge M605 node_PowerEdgeM605_order =0 node_PowerEdgeM605_purpose = compute, head, fileserver node_PowerEdgeM605_count = 8 node_PowerEdgeM605_hw_cpu_char = Quad-Core AMD Opteron Processor 2350 (Barcelona) node_PowerEdgeM605_hw_cpu_mhz = 2000 node_PowerEdgeM605_hw_cpu_name = AMD Opteron CPU 2350 node_PowerEdgeM605_hw_disk000= 10K RPM Serial-Attach SCSI 3Gbps 2.5-in HotPlug node_PowerEdgeM605_hw_disk001 = Hard Drive node_PowerEdgeM605_hw_memory = 16 GB (4 x 4 GB DIMMs, 667 MHz) node_PowerEdgeM605_hw_model = M605 node_PowerEdgeM605_hw_nchips = 2 node_PowerEdgeM605_hw_ncores = 8 node_PowerEdgeM605_hw_ncoresperchip = 4 node_PowerEdgeM605_hw_ncpuorder = 1-2 chips node_PowerEdgeM605_hw_nthreadspercore = 1 node_PowerEdgeM605_hw_ocache = None node_PowerEdgeM605_hw_other = None node_PowerEdgeM605_hw_pcache = 64 KB I + 64 KB D on chip per core node_PowerEdgeM605_hw_scache = 512 KB I+D on chip per core node_PowerEdgeM605_hw_tcache = 2 MB I+D on chip per chip node_PowerEdgeM605_hw_vendor = Dell Inc. 
node_PowerEdgeM605_sw_state = multi-user node_PowerEdgeM605_sw_sharedfile = NFSv3 node_PowerEdgeM605_sw_localfile = ext3 node_PowerEdgeM605_sw_other = None # ---- End inclusion of '/shared/PMPI4SC2009/1.1/config/DELLCluster-config.inc' # The following section was added automatically, and contains settings that # did not appear in the original configuration file, but were added to the # raw file after the run. default: sw_peak_ptrsize = Not Applicable sw_preprocessors = None sw_other = None test_sponsor = Platform Computing Inc. tester = Platform Computing Inc. flagsurl000 = http://www.spec.org/mpi2007/flags/MPI2007_flags.20081204.xml flagsurl001 = http://www.spec.org/mpi2007/flags/EM64T_Intel101_flags.20081204.xml node_PowerEdgeM605_hw_adapter_GBEthernet_model000 = Broadcom Corporation NetXtreme II BCM5708S node_PowerEdgeM605_hw_adapter_GBEthernet_model001 = Gigabit Ethernet (rev 12) sw_mpi_other000 = Platform Computing Inc has acquired Scali MPI sw_mpi_other001 = Connect, hence Platform MPI and Scali MPI Connect sw_mpi_other002 = are used synonymously. notes_adapter_000 =Broadcom NetXtreme II BCM5708S Coalesce Settings: notes_adapter_005 = notes_adapter_010 = The following coalesce settings were applied on all nodes: notes_adapter_015 = ethtool -C eth0 rx-usecs 7 rx-frames 2 rx-usecs-irq 14 rx-frames-irq 4 tx-usecs 20 tx-frames 5 tx-usecs-irq 40 tx-frames-irq 10 notes_adapter_020 = notes_adapter_025 = notes_nodes_000 =NFS server usage: notes_nodes_005 = notes_nodes_010 = The first node was used as NFS server. The other 7 nodes mounted the working directory from the first node. 
notes_nodes_015 = notes_nodes_020 = notes_GBEthernet_000 =Dell PowerConnect 5324 configuration details: notes_GBEthernet_005 = notes_GBEthernet_010 = Ports 9-16 were used and configured the following way: notes_GBEthernet_015 = Flow Link Back Mdix notes_GBEthernet_020 = Port Type Duplex Speed Neg ctrl State Pressure Mode notes_GBEthernet_025 = -------- ------------ ------ ----- -------- ---- ----------- -------- ------- notes_GBEthernet_030 = g9 1G-Copper Full 1000 Enabled On Up Disabled Off notes_GBEthernet_035 = notes_GBEthernet_040 = node_PowerEdgeM605_sw_os001 = RHEL 5.1 (x86_64) node_PowerEdgeM605_sw_os002 = Kernel 2.6.18-53.el5