# Invocation command line:
# /home/qlogic/huiyu/kit-58/bin/runspec --define hosts=/home/qlogic/huiyu/kit-58/hosts-256 --ranks=256 --extension=infinipath --reportable --action validate -I -c qlogic-core-pathscale-infinipath -i mref -n 3 -T base -o asc,cfg,csv,txt medium
# output_root was not used for this run
############################################################################
#####################################################################
# This is a sample config file. It was tested with:
#
# Compiler name/version:    QLogic PathScale compilers version 3.0
# Communications software:  QLogic InfiniPath software stack
#                           versions 2.0 and 2.1
# Operating system version: SLES 10 (x86-64)
# Node Hardware:            AMD Opteron and Intel Xeon x64 servers
# Interconnect adapters:    QLogic InfiniPath InfiniBand
#                           (PCI Express and HTX) adapters
#
# If your platform uses different versions, different
# hardware or operates in a different mode (for
# example, 32- vs. 64-bit mode), there is the possibility
# that this configuration file may not work as-is.
#
# Note that issues with compilation should be directed
# to the compiler vendor. Information about SPEC technical
# support can be found in the techsupport document in the
# Docs directory of your benchmark installation.
#
# Also note that this is a sample configuration. It
# is expected to work for the environment in which
# it was tested; it is not guaranteed that this is
# the config file that will provide the best performance.
#
# Note that you might find a more recent config file for
# your platform with the posted results at
# www.spec.org/mpi2007
#####################################################################

# A runspec line something like the following will supply
# the information needed in the submit command below:
#   runspec --config qlogic-linux-x86_64-pathscale-infinipath \
#           --define hosts=$SPEC/hostfile --ranks=24
#
# A flags file such as the following can be downloaded from
# http://www.spec.org/mpi2007/flags/ and put in your top-level
# $SPEC directory and then uncomment this line:
flagsurl000=http://www.spec.org/mpi2007/flags/MPI2007_flags.20070717.xml
######################################################################

makeflags            = -j 4
env_vars             = yes
use_submit_for_speed = yes
ext                  = infinipath
teeout               = yes
teerunout            = yes
mean_anyway          = yes

MPI_HOME = /usr
CC       = $(MPI_HOME)/bin/mpicc -cc=pathcc -march=core
FC       = $(MPI_HOME)/bin/mpif90 -f90=pathf90 -march=core
CXX      = $(MPI_HOME)/bin/mpicxx -CC=pathCC -march=core
ACML_DIR = /net/files/tools/acml/x86_64/acml3.5.0/pathscale64/lib

submit = /usr/local/Cluster-Apps/infinipath/2.0/if-mpi/bin/mpirun -m %{hosts} -np $ranks -disable-mpi-progress-check $command

EXTRA_LDFLAGS = -IPA:max_jobs=4

#####################################################################
# Portability
#####################################################################

default:
EXTRA_CPORTABILITY = -DSPEC_MPI_LP64

115.fds4:
CPORTABILITY = -DSPEC_MPI_LC_TRAILING_DOUBLE_UNDERSCORE

121.pop2:
CPORTABILITY = -DSPEC_MPI_DOUBLE_UNDERSCORE

127.wrf2:
CPORTABILITY = -DF2CSTYLE -DSPEC_MPI_DOUBLE_UNDERSCORE \
               -DSPEC_MPI_LINUX

130.socorro:
FPORTABILITY = -fno-second-underscore

#################################################################
# Optimization flags and Notes
#################################################################

default=base:
COPTIMIZE = -Ofast
FOPTIMIZE = -O3 -OPT:Ofast -OPT:malloc_alg=1 -LANG:copyinout=off

license_num   = 0018
prepared_by   = QLogic
system_vendor = Dell, QLogic, ClusterVision
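#####################################################################
# Illustration added by the editor (not part of the original
# submission): with the submit rule above, runspec wraps each
# benchmark command in an InfiniPath mpirun call. For this run
# (--ranks=256, --define hosts=/home/qlogic/huiyu/kit-58/hosts-256
# per the invocation line at the top), the expanded command would
# look roughly like this; <benchmark binary and arguments> stands
# for the $command runspec substitutes:
#
#   /usr/local/Cluster-Apps/infinipath/2.0/if-mpi/bin/mpirun \
#       -m /home/qlogic/huiyu/kit-58/hosts-256 -np 256 \
#       -disable-mpi-progress-check <benchmark binary and arguments>
#
# The file given to -m is typically a plain machine file listing
# node hostnames, one per line; the names below are hypothetical:
#
#   node001
#   node002
#   node003
#   ...
#####################################################################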
node_fileserver_sw_state = Multi-User
node_fileserver_sw_sharedfile = NFS
node_fileserver_sw_other = None
node_fileserver_sw_localfile = Linux/ext3
node_fileserver_purpose = file server
node_fileserver_order = 2
node_fileserver_label = Dell PowerVault MD1000
node_fileserver_hw_vendor = Dell
node_fileserver_hw_tcache = None
node_fileserver_hw_scache = 4 MB I+D on chip per chip
node_fileserver_hw_pcache = 32 KB I + 32 KB D on chip per core
node_fileserver_hw_other = None
node_fileserver_hw_ocache = None
node_fileserver_hw_nthreadspercore = 1
node_fileserver_hw_ncpuorder = 1-2 chips
node_fileserver_hw_ncoresperchip = 2
node_fileserver_hw_ncores = 4
node_fileserver_hw_nchips = 2
node_fileserver_hw_model = Dell PowerEdge 1950
node_fileserver_hw_memory = 4 GB (4 x 1 GB PC2-5300F)
node_fileserver_hw_cpu_name = Intel Xeon 5160
node_fileserver_hw_cpu_mhz = 3000
node_fileserver_hw_cpu_char = 1333 MHz system bus
node_fileserver_hw_adapter_10g_slot_type = PCIe x8 MSI-X
node_fileserver_hw_adapter_10g_ports_used = 1
node_fileserver_hw_adapter_10g_model = Chelsio T310 10GBASE-SR RNIC (rev 3)
node_fileserver_hw_adapter_10g_interconnect = Ethernet
node_fileserver_hw_adapter_10g_firmware = T 3.3.0
node_fileserver_hw_adapter_10g_driver = cxgb3 1.0.078
node_fileserver_hw_adapter_10g_data_rate = 10 Gbps Ethernet
node_fileserver_hw_adapter_10g_count = 1
node_fileserver_hw_adapter = 10 Gigabit Ethernet
node_fileserver_count = 1
node_compute_sw_state = Multi-User
node_compute_sw_sharedfile = NFS
node_compute_sw_other = Torque 2.1.2
node_compute_sw_localfile = Linux/ext3
node_compute_purpose = compute, head
node_compute_order = 1
node_compute_label = Dell PowerEdge 1950
node_compute_hw_vendor = Dell
node_compute_hw_tcache = None
node_compute_hw_scache = 4 MB I+D on chip per chip
node_compute_hw_pcache = 32 KB I + 32 KB D on chip per core
node_compute_hw_other = None
node_compute_hw_ocache = None
node_compute_hw_nthreadspercore = 1
node_compute_hw_ncpuorder = 1-2 chips
node_compute_hw_ncoresperchip = 2
node_compute_hw_ncores = 4
node_compute_hw_nchips = 2
node_compute_hw_model = Dell PowerEdge 1950
node_compute_hw_memory = 8 GB (8 x 1 GB PC2-5300F)
node_compute_hw_disk = SAS, 73 GB, 15000 RPM
node_compute_hw_cpu_name = Intel Xeon 5160
node_compute_hw_cpu_mhz = 3000
node_compute_hw_cpu_char = 1333 MHz system bus
node_compute_hw_avail = Jun-2006
node_compute_hw_adapter_ib_slot_type = PCIe x8
node_compute_hw_adapter_ib_ports_used = 1
node_compute_hw_adapter_ib_model = QLogic InfiniPath QLE7140
node_compute_hw_adapter_ib_interconnect = InfiniBand
node_compute_hw_adapter_ib_firmware = None
node_compute_hw_adapter_ib_driver = InfiniPath 2.0
node_compute_hw_adapter_ib_data_rate = InfiniBand 4x SDR
node_compute_hw_adapter_ib_count = 1
node_compute_count = 64
interconnect_ib_purpose = MPI traffic
interconnect_ib_order = 1
interconnect_ib_label = QLogic InfiniBand HCAs and switches
interconnect_ib_hw_vendor = QLogic
interconnect_ib_hw_switch_9080_ports = 96
interconnect_ib_hw_switch_9080_firmware = 3.4.0.1.3
interconnect_ib_hw_switch_9080_data_rate = InfiniBand 4x SDR and InfiniBand 4x DDR
interconnect_ib_hw_switch_9080_count = 1
interconnect_ib_hw_model = InfiniPath adapters and SilverStorm switches
interconnect_fs_purpose = file system traffic
interconnect_fs_order = 2
interconnect_fs_label = Ethernet Network for File Server Access
interconnect_fs_hw_vendor = Chelsio, Nortel
interconnect_fs_hw_switch_8610_ports = 24
interconnect_fs_hw_switch_8610_firmware = Optivity Switch Manager version 4.1
interconnect_fs_hw_switch_8610_data_rate = 10 Gbps Ethernet
interconnect_fs_hw_switch_8610_count = 1
interconnect_fs_hw_switch_5530_ports = 26
interconnect_fs_hw_switch_5530_model = Nortel Ethernet Routing Switch 5530-24TFD
interconnect_fs_hw_switch_5530_firmware = 4.2.0.12
interconnect_fs_hw_switch_5530_count = 2
interconnect_fs_hw_switch_551048_ports = 48
interconnect_fs_hw_switch_551048_model = Nortel Ethernet Routing Switch 5510-48T
interconnect_fs_hw_switch_551048_firmware = 1.0.0.16
interconnect_fs_hw_switch_551048_data_rate = 1 Gbps Ethernet
interconnect_fs_hw_switch_551048_count = 3
interconnect_fs_hw_switch_551024_ports = 24
interconnect_fs_hw_switch_551024_model = Nortel Ethernet Routing Switch 5510-24T
interconnect_fs_hw_switch_551024_firmware = 1.0.0.16
interconnect_fs_hw_switch_551024_data_rate = 1 Gbps Ethernet
interconnect_fs_hw_switch_551024_count = 1
sw_auto_parallel = No
sw_avail = Feb-2007
CXXOPTIMIZE = -O3 -OPT:Ofast -CG:local_fwd_sched=on
sw_other = None
sw_base_ptrsize = 64-bit
sw_peak_ptrsize = 64-bit
test_date = May-2007
test_sponsor = QLogic Corporation
tester = QLogic Performance Engineering
sw_preprocessors = No
sw_mpi_other = None
sw_mpi_library = QLogic InfiniPath MPI 2.0
sw_f_compiler = QLogic PathScale Fortran Compiler 3.0
sw_cxx_compiler = QLogic PathScale C++ Compiler 3.0
sw_c_compiler = QLogic PathScale C Compiler 3.0

104.milc=peak:
basepeak=true

107.leslie3d=peak:
FOPTIMIZE = -O3 -OPT:Ofast -CG:cflow=0 -INLINE:aggressive=on

113.GemsFDTD=peak:
basepeak=true

115.fds4=peak:
basepeak=true

121.pop2=peak:
basepeak=true

122.tachyon=peak:
COPTIMIZE = -Ofast -INLINE:aggressive=on -OPT:unroll_size=256

126.lammps=peak:
CXXOPTIMIZE = -Ofast -CG:local_fwd_sched=on

127.wrf2=peak:
basepeak=true

129.tera_tf=peak:
FOPTIMIZE = -O3 -OPT:Ofast -OPT:malloc_alg=1 -OPT:unroll_size=256

130.socorro=peak=default=default:
COPTIMIZE  = -Ofast -OPT:malloc_alg=1
FOPTIMIZE  = -O3 -OPT:Ofast -OPT:malloc_alg=1 -LANG:copyinout=off
RM_SOURCES = specblas.F90 specbessel.c
EXTRA_LIBS = -L$(ACML_DIR) -lacml

132.zeusmp2=peak:
basepeak=true

137.lu=peak:
basepeak=true

# The following section was added automatically, and contains settings that
# did not appear in the original configuration file, but were added to the
# raw file after the run.
default:
hw_avail = Jul-2006
interconnect_fs_hw_model000 = Chelsio T310 adapters and Nortel 5530 5510 8610
interconnect_fs_hw_model001 = switches
interconnect_fs_hw_switch_5530_data_rate000 = 1 Gbps Ethernet (24 ports) and 10 Gbps Ethernet
interconnect_fs_hw_switch_5530_data_rate001 = (2 ports)
interconnect_fs_hw_switch_8610_model000 = Nortel Passport 8610 switch 4.1.0.0
interconnect_fs_hw_topo000 = Three CUs are connected with six Ethernet Routing
interconnect_fs_hw_topo005 = switches 5530-24TFD, 5510-24T and 5510-48T as a
interconnect_fs_hw_topo010 = ring. Each of two 5530-24TFD switches is connected
interconnect_fs_hw_topo015 = to the Nortel Passport 8610 switch through two
interconnect_fs_hw_topo020 = 10Gbit ports. See Slide 10 of
interconnect_fs_hw_topo025 = http://www.spec.org/mpi2007/results/supportingdocs/NortelEthernetSwitchDiagram.pdf
interconnect_fs_hw_topo030 = for a network diagram.
interconnect_ib_hw_switch_9080_model000 = QLogic SilverStorm 9080 Fabric Director
interconnect_ib_hw_switch_9080_model001 = (InfiniBand switch)
interconnect_ib_hw_topo000 = Full Bisectional Bandwidth, Fat-Tree, Max 3
interconnect_ib_hw_topo001 = switch-chip hops.
node_compute_sw_os000 = ClusterVisionOS 2.1
node_compute_sw_os001 = Based on Scientific Linux SL release 4.3
node_compute_sw_os002 = (Beryllium)
node_fileserver_hw_disk000 = 13.5 TB: 3 x 15 x 300 GB, SAS, 10000 RPM
node_fileserver_hw_disk001 = 3 Dell PowerVault MD1000 Disk Arrays, each one
node_fileserver_hw_disk002 = has 15 disks.
node_fileserver_sw_os000 = ClusterVisionOS 2.1
node_fileserver_sw_os001 = Based on Scientific Linux SL release 4.3
node_fileserver_sw_os002 = (Beryllium)
system_class = Homogeneous
system_name000 = U. of Cambridge HPC Cluster Darwin,
system_name001 = QLogic InfiniBand Interconnect
node_fileserver_notes_000 = A separate node handling login and resource management
node_fileserver_notes_005 = is not listed as it is not performance related.
interconnect_ib_notes_000 = The 64 nodes used are from one CU (Computational Unit, 65 nodes)
interconnect_ib_notes_005 = of the 9 CUs in the Darwin cluster. Jobs within one CU
interconnect_ib_notes_010 = use one SilverStorm 9080 switch.
interconnect_ib_notes_015 = The data rate between InfiniPath HCAs and SilverStorm switches
interconnect_ib_notes_020 = is SDR. However, DDR is used for inter-switch links.
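#####################################################################
# Editor's illustration (not part of the original submission): the
# rank count follows from the compute-node description above:
#
#   node_compute_count (64) x node_compute_hw_nchips (2)
#                           x node_compute_hw_ncoresperchip (2) = 256 cores
#
# which matches --ranks=256 in the invocation line, i.e. one MPI rank
# per core, assuming ranks were placed one per core (the config
# itself does not state the placement policy).
#####################################################################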