From c2a75badee6634d7dbac8365cb8d6c65ff34e4b6 Mon Sep 17 00:00:00 2001
From: Hans Yang
Date: Mon, 2 Jul 2018 11:45:25 +0800
Subject: [PATCH] rk3308: remove unused files

Change-Id: I46617a8d7db95d37e8c2a0a424064a96f72ae99a
Signed-off-by: Hans Yang
---
 rk3308/BoardConfig_voice_moudle_64bit.mk | 41 -
 rk3308/unixbench-master_32/.cproject | 245 -
 rk3308/unixbench-master_32/.project | 82 -
 rk3308/unixbench-master_32/.stamp_built | 0
 rk3308/unixbench-master_32/.stamp_configured | 0
 rk3308/unixbench-master_32/.stamp_rsynced | 0
 .../.stamp_target_installed | 0
 rk3308/unixbench-master_32/Makefile | 304 -
 rk3308/unixbench-master_32/README | 418 -
 rk3308/unixbench-master_32/RkLunch.sh | 2 -
 rk3308/unixbench-master_32/Run | 2000 ----
 rk3308/unixbench-master_32/USAGE | 400 -
 rk3308/unixbench-master_32/WRITING_TESTS | 133 -
 rk3308/unixbench-master_32/pgms/arithoh | Bin 10284 -> 0 bytes
 rk3308/unixbench-master_32/pgms/context1 | Bin 10528 -> 0 bytes
 rk3308/unixbench-master_32/pgms/dhry2 | Bin 11012 -> 0 bytes
 rk3308/unixbench-master_32/pgms/dhry2reg | Bin 11004 -> 0 bytes
 rk3308/unixbench-master_32/pgms/double | Bin 10300 -> 0 bytes
 rk3308/unixbench-master_32/pgms/execl | Bin 15904 -> 0 bytes
 rk3308/unixbench-master_32/pgms/float | Bin 10300 -> 0 bytes
 rk3308/unixbench-master_32/pgms/fstime | Bin 15404 -> 0 bytes
 rk3308/unixbench-master_32/pgms/gfx-x11 | 476 -
 rk3308/unixbench-master_32/pgms/hanoi | Bin 10340 -> 0 bytes
 rk3308/unixbench-master_32/pgms/index.base | 46 -
 rk3308/unixbench-master_32/pgms/int | Bin 10284 -> 0 bytes
 rk3308/unixbench-master_32/pgms/long | Bin 10284 -> 0 bytes
 rk3308/unixbench-master_32/pgms/looper | Bin 10492 -> 0 bytes
 rk3308/unixbench-master_32/pgms/multi.sh | 23 -
 rk3308/unixbench-master_32/pgms/pipe | Bin 10388 -> 0 bytes
 rk3308/unixbench-master_32/pgms/register | Bin 10284 -> 0 bytes
 rk3308/unixbench-master_32/pgms/short | Bin 10284 -> 0 bytes
 rk3308/unixbench-master_32/pgms/spawn | Bin 10348 -> 0 bytes
 rk3308/unixbench-master_32/pgms/syscall | Bin 10624 -> 0 bytes
 rk3308/unixbench-master_32/pgms/tst.sh | 20 -
 .../unixbench-master_32/pgms/unixbench.logo | 14 -
 .../unixbench-master_32/pgms/whetstone-double | Bin 19300 -> 0 bytes
 rk3308/unixbench-master_32/src/arith.c | 110 -
 rk3308/unixbench-master_32/src/big.c | 592 -
 rk3308/unixbench-master_32/src/context1.c | 126 -
 rk3308/unixbench-master_32/src/dhry.h | 435 -
 rk3308/unixbench-master_32/src/dhry_1.c | 429 -
 rk3308/unixbench-master_32/src/dhry_2.c | 209 -
 rk3308/unixbench-master_32/src/dummy.c | 319 -
 rk3308/unixbench-master_32/src/execl.c | 97 -
 rk3308/unixbench-master_32/src/fstime.c | 466 -
 rk3308/unixbench-master_32/src/hanoi.c | 77 -
 rk3308/unixbench-master_32/src/looper.c | 105 -
 rk3308/unixbench-master_32/src/pipe.c | 68 -
 rk3308/unixbench-master_32/src/spawn.c | 80 -
 rk3308/unixbench-master_32/src/syscall.c | 109 -
 rk3308/unixbench-master_32/src/time-polling.c | 573 -
 rk3308/unixbench-master_32/src/timeit.c | 41 -
 rk3308/unixbench-master_32/src/ubgears.c | 650 -
 rk3308/unixbench-master_32/src/whets.c | 1289 --
 rk3308/unixbench-master_32/testdir/cctest.c | 156 -
 rk3308/unixbench-master_32/testdir/dc.dat | 8 -
 rk3308/unixbench-master_32/testdir/large.txt | 10000 ----------------
 rk3308/unixbench-master_32/testdir/sort.src | 362 -
 58 files changed, 20505 deletions(-)
 delete mode 100755 rk3308/BoardConfig_voice_moudle_64bit.mk
 delete mode 100755 rk3308/unixbench-master_32/.cproject
 delete mode 100755 rk3308/unixbench-master_32/.project
 delete mode 100755 rk3308/unixbench-master_32/.stamp_built
 delete mode 100755
rk3308/unixbench-master_32/.stamp_configured delete mode 100755 rk3308/unixbench-master_32/.stamp_rsynced delete mode 100755 rk3308/unixbench-master_32/.stamp_target_installed delete mode 100755 rk3308/unixbench-master_32/Makefile delete mode 100755 rk3308/unixbench-master_32/README delete mode 100755 rk3308/unixbench-master_32/RkLunch.sh delete mode 100755 rk3308/unixbench-master_32/Run delete mode 100755 rk3308/unixbench-master_32/USAGE delete mode 100755 rk3308/unixbench-master_32/WRITING_TESTS delete mode 100755 rk3308/unixbench-master_32/pgms/arithoh delete mode 100755 rk3308/unixbench-master_32/pgms/context1 delete mode 100755 rk3308/unixbench-master_32/pgms/dhry2 delete mode 100755 rk3308/unixbench-master_32/pgms/dhry2reg delete mode 100755 rk3308/unixbench-master_32/pgms/double delete mode 100755 rk3308/unixbench-master_32/pgms/execl delete mode 100755 rk3308/unixbench-master_32/pgms/float delete mode 100755 rk3308/unixbench-master_32/pgms/fstime delete mode 100755 rk3308/unixbench-master_32/pgms/gfx-x11 delete mode 100755 rk3308/unixbench-master_32/pgms/hanoi delete mode 100755 rk3308/unixbench-master_32/pgms/index.base delete mode 100755 rk3308/unixbench-master_32/pgms/int delete mode 100755 rk3308/unixbench-master_32/pgms/long delete mode 100755 rk3308/unixbench-master_32/pgms/looper delete mode 100755 rk3308/unixbench-master_32/pgms/multi.sh delete mode 100755 rk3308/unixbench-master_32/pgms/pipe delete mode 100755 rk3308/unixbench-master_32/pgms/register delete mode 100755 rk3308/unixbench-master_32/pgms/short delete mode 100755 rk3308/unixbench-master_32/pgms/spawn delete mode 100755 rk3308/unixbench-master_32/pgms/syscall delete mode 100755 rk3308/unixbench-master_32/pgms/tst.sh delete mode 100755 rk3308/unixbench-master_32/pgms/unixbench.logo delete mode 100755 rk3308/unixbench-master_32/pgms/whetstone-double delete mode 100755 rk3308/unixbench-master_32/src/arith.c delete mode 100755 rk3308/unixbench-master_32/src/big.c delete mode 100755 rk3308/unixbench-master_32/src/context1.c delete mode 100755 rk3308/unixbench-master_32/src/dhry.h delete mode 100755 rk3308/unixbench-master_32/src/dhry_1.c delete mode 100755 rk3308/unixbench-master_32/src/dhry_2.c delete mode 100755 rk3308/unixbench-master_32/src/dummy.c delete mode 100755 rk3308/unixbench-master_32/src/execl.c delete mode 100755 rk3308/unixbench-master_32/src/fstime.c delete mode 100755 rk3308/unixbench-master_32/src/hanoi.c delete mode 100755 rk3308/unixbench-master_32/src/looper.c delete mode 100755 rk3308/unixbench-master_32/src/pipe.c delete mode 100755 rk3308/unixbench-master_32/src/spawn.c delete mode 100755 rk3308/unixbench-master_32/src/syscall.c delete mode 100755 rk3308/unixbench-master_32/src/time-polling.c delete mode 100755 rk3308/unixbench-master_32/src/timeit.c delete mode 100755 rk3308/unixbench-master_32/src/ubgears.c delete mode 100755 rk3308/unixbench-master_32/src/whets.c delete mode 100755 rk3308/unixbench-master_32/testdir/cctest.c delete mode 100755 rk3308/unixbench-master_32/testdir/dc.dat delete mode 100755 rk3308/unixbench-master_32/testdir/large.txt delete mode 100755 rk3308/unixbench-master_32/testdir/sort.src diff --git a/rk3308/BoardConfig_voice_moudle_64bit.mk b/rk3308/BoardConfig_voice_moudle_64bit.mk deleted file mode 100755 index d0a0d0d..0000000 --- a/rk3308/BoardConfig_voice_moudle_64bit.mk +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash - -#========================= -# Compile Config -#========================= -# Target arch -ARCH=arm64 -# Uboot defconfig -UBOOT_DEFCONFIG=evb-rk3308 
-# Kernel defconfig -KERNEL_DEFCONFIG=rk3308_linux_defconfig -# Kernel dts -KERNEL_DTS=rk3308-voice-module-board-v10 -# Buildroot config -CFG_BUILDROOT=rockchip_rk3308_release -# Recovery config -CFG_RECOVERY=rockchip_rk3308_recovery -# Pcba config -CFG_PCBA=rockchip_rk3308_pcba -# Build jobs -JOBS=12 - -#========================= -# Platform Target -#========================= -TARGET_PRODUCT=rk3308 - -# Set rootfs type, see buildroot. -# ext4 squashfs -ROOTFS_TYPE=squashfs - -# Set data partition type. -# ext2 squashfs -OEM_PARTITION_TYPE=ext2 - -# Set flash type. -# support -FLASH_TYPE=nand - -#OEM config: /oem/dueros/aispeech/iflytekSDK/CaeDemo_VAD -OEM_PATH=oem diff --git a/rk3308/unixbench-master_32/.cproject b/rk3308/unixbench-master_32/.cproject deleted file mode 100755 index 90ad350..0000000 --- a/rk3308/unixbench-master_32/.cproject +++ /dev/null @@ -1,245 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/rk3308/unixbench-master_32/.project b/rk3308/unixbench-master_32/.project deleted file mode 100755 index 7945d2b..0000000 --- a/rk3308/unixbench-master_32/.project +++ /dev/null @@ -1,82 +0,0 @@ - - - UnixBench - - - - - - org.eclipse.cdt.managedbuilder.core.genmakebuilder - clean,full,incremental, - - - ?name? - - - - org.eclipse.cdt.make.core.append_environment - true - - - org.eclipse.cdt.make.core.autoBuildTarget - all - - - org.eclipse.cdt.make.core.buildArguments - - - - org.eclipse.cdt.make.core.buildCommand - make - - - org.eclipse.cdt.make.core.buildLocation - ${workspace_loc:/UnixBench/Debug} - - - org.eclipse.cdt.make.core.cleanBuildTarget - clean - - - org.eclipse.cdt.make.core.contents - org.eclipse.cdt.make.core.activeConfigSettings - - - org.eclipse.cdt.make.core.enableAutoBuild - false - - - org.eclipse.cdt.make.core.enableCleanBuild - true - - - org.eclipse.cdt.make.core.enableFullBuild - true - - - org.eclipse.cdt.make.core.fullBuildTarget - all - - - org.eclipse.cdt.make.core.stopOnError - true - - - org.eclipse.cdt.make.core.useDefaultBuildCmd - true - - - - - org.eclipse.cdt.managedbuilder.core.ScannerConfigBuilder - - - - - - org.eclipse.cdt.core.cnature - org.eclipse.cdt.core.ccnature - org.eclipse.cdt.managedbuilder.core.managedBuildNature - org.eclipse.cdt.managedbuilder.core.ScannerConfigNature - - diff --git a/rk3308/unixbench-master_32/.stamp_built b/rk3308/unixbench-master_32/.stamp_built deleted file mode 100755 index e69de29..0000000 diff --git a/rk3308/unixbench-master_32/.stamp_configured b/rk3308/unixbench-master_32/.stamp_configured deleted file mode 100755 index e69de29..0000000 diff --git a/rk3308/unixbench-master_32/.stamp_rsynced b/rk3308/unixbench-master_32/.stamp_rsynced deleted file mode 100755 index e69de29..0000000 diff --git a/rk3308/unixbench-master_32/.stamp_target_installed b/rk3308/unixbench-master_32/.stamp_target_installed deleted file mode 100755 index e69de29..0000000 diff --git a/rk3308/unixbench-master_32/Makefile b/rk3308/unixbench-master_32/Makefile deleted file mode 100755 index 4b1d5f8..0000000 --- a/rk3308/unixbench-master_32/Makefile +++ /dev/null @@ -1,304 +0,0 @@ 
-############################################################################## -# UnixBench v5.1.3 -# Based on The BYTE UNIX Benchmarks - Release 3 -# Module: Makefile SID: 3.9 5/15/91 19:30:15 -# -############################################################################## -# Bug reports, patches, comments, suggestions should be sent to: -# David C Niemi -# -# Original Contacts at Byte Magazine: -# Ben Smith or Tom Yager at BYTE Magazine -# bensmith@bytepb.byte.com tyager@bytepb.byte.com -# -############################################################################## -# Modification Log: 7/28/89 cleaned out workload files -# 4/17/90 added routines for installing from shar mess -# 7/23/90 added compile for dhrystone version 2.1 -# (this is not part of Run file. still use old) -# removed HZ from everything but dhry. -# HZ is read from the environment, if not -# there, you must define it in this file -# 10/30/90 moved new dhrystone into standard set -# new pgms (dhry included) run for a specified -# time rather than specified number of loops -# 4/5/91 cleaned out files not needed for -# release 3 -- added release 3 files -ben -# 10/22/97 added compiler options for strict ANSI C -# checking for gcc and DEC's cc on -# Digital Unix 4.x (kahn@zk3.dec.com) -# 09/26/07 changes for UnixBench 5.0 -# 09/30/07 adding ubgears, GRAPHIC_TESTS switch -# 10/14/07 adding large.txt -# 01/13/11 added support for parallel compilation -# 01/07/16 [refer to version control commit messages and -# cease using two-digit years in date formats] -############################################################################## - -############################################################################## -# CONFIGURATION -############################################################################## - -SHELL = /bin/sh - -# GRAPHIC TESTS: Uncomment the definition of "GRAPHIC_TESTS" to enable -# the building of the graphics benchmarks. This will require the -# X11 libraries on your system. (e.g. libX11-devel mesa-libGL-devel) -# -# Comment the line out to disable these tests. -# GRAPHIC_TESTS = defined - -# Set "GL_LIBS" to the libraries needed to link a GL program. -GL_LIBS = -lGL -lXext -lX11 - - -# COMPILER CONFIGURATION: Set "CC" to the name of the compiler to use -# to build the binary benchmarks. You should also set "$cCompiler" in the -# Run script to the name of the compiler you want to test. -CC=gcc - -# OPTIMISATION SETTINGS: -# Use gcc option if defined UB_GCC_OPTIONS via "Environment variable" or "Command-line arguments". -ifdef UB_GCC_OPTIONS - OPTON = $(UB_GCC_OPTIONS) - -else - ## Very generic - #OPTON = -O - - ## For Linux 486/Pentium, GCC 2.7.x and 2.8.x - #OPTON = -O2 -fomit-frame-pointer -fforce-addr -fforce-mem -ffast-math \ - # -m486 -malign-loops=2 -malign-jumps=2 -malign-functions=2 - - ## For Linux, GCC previous to 2.7.0 - #OPTON = -O2 -fomit-frame-pointer -fforce-addr -fforce-mem -ffast-math -m486 - - #OPTON = -O2 -fomit-frame-pointer -fforce-addr -fforce-mem -ffast-math \ - # -m386 -malign-loops=1 -malign-jumps=1 -malign-functions=1 - - ## For Solaris 2, or general-purpose GCC 2.7.x - #OPTON = -O2 -fomit-frame-pointer -fforce-addr -ffast-math -Wall - - ## For Digital Unix v4.x, with DEC cc v5.x - #OPTON = -O4 - #CFLAGS = -DTIME -std1 -verbose -w0 - - ## gcc optimization flags - ## (-ffast-math) disables strict IEEE or ISO rules/specifications for math funcs - OPTON = -O3 -ffast-math - - ## OS detection. Comment out if gmake syntax not supported by other 'make'. 
- OSNAME:=$(shell uname -s) - ARCH := $(shell uname -p) - ifeq ($(OSNAME),Linux) - # Not all CPU architectures support "-march" or "-march=native". - # - Supported : x86, x86_64, ARM, AARCH64, etc.. - # - Not Supported: RISC-V, IBM Power, etc... - ifneq ($(ARCH),$(filter $(ARCH),ppc64 ppc64le)) - OPTON += -march=native -mtune=native - else - OPTON += -mcpu=native -mtune=native - endif - endif - - ifeq ($(OSNAME),Darwin) - # (adjust flags or comment out this section for older versions of XCode or OS X) - # (-mmacosx-versin-min= requires at least that version of SDK be installed) - ifneq ($(ARCH),$(filter $(ARCH),ppc64 ppc64le)) - OPTON += -march=native -mmacosx-version-min=10.10 - else - OPTON += -mcpu=native - endif - #http://stackoverflow.com/questions/9840207/how-to-use-avx-pclmulqdq-on-mac-os-x-lion/19342603#19342603 - CFLAGS += -Wa,-q - endif - -endif - - -## generic gcc CFLAGS. -DTIME must be included. -CFLAGS += -Wall -pedantic $(OPTON) -I $(SRCDIR) -DTIME - - -############################################################################## -# END CONFIGURATION -############################################################################## - - -# local directories -PROGDIR = ./pgms -SRCDIR = ./src -TESTDIR = ./testdir -RESULTDIR = ./results -TMPDIR = ./tmp -# other directories -INCLDIR = /usr/include -LIBDIR = /lib -SCRIPTS = unixbench.logo multi.sh tst.sh index.base -SOURCES = arith.c big.c context1.c \ - dummy.c execl.c \ - fstime.c hanoi.c \ - pipe.c spawn.c \ - syscall.c looper.c timeit.c time-polling.c \ - dhry_1.c dhry_2.c dhry.h whets.c ubgears.c -TESTS = sort.src cctest.c dc.dat large.txt - -ifneq (,$(GRAPHIC_TESTS)) -GRAPHIC_BINS = $(PROGDIR)/ubgears -else -GRAPHIC_BINS = -endif - -# Program binaries. -BINS = $(PROGDIR)/arithoh $(PROGDIR)/register $(PROGDIR)/short \ - $(PROGDIR)/int $(PROGDIR)/long $(PROGDIR)/float $(PROGDIR)/double \ - $(PROGDIR)/hanoi $(PROGDIR)/syscall $(PROGDIR)/context1 \ - $(PROGDIR)/pipe $(PROGDIR)/spawn $(PROGDIR)/execl \ - $(PROGDIR)/dhry2 $(PROGDIR)/dhry2reg $(PROGDIR)/looper \ - $(PROGDIR)/fstime $(PROGDIR)/whetstone-double $(GRAPHIC_BINS) -## These compile only on some platforms... -# $(PROGDIR)/poll $(PROGDIR)/poll2 $(PROGDIR)/select - -# Required non-binary files. -REQD = $(BINS) $(PROGDIR)/unixbench.logo \ - $(PROGDIR)/multi.sh $(PROGDIR)/tst.sh $(PROGDIR)/index.base \ - $(PROGDIR)/gfx-x11 \ - $(TESTDIR)/sort.src $(TESTDIR)/cctest.c $(TESTDIR)/dc.dat \ - $(TESTDIR)/large.txt - -# ######################### the big ALL ############################ -all: -## Ick!!! What is this about??? How about let's not chmod everything bogusly. -# @chmod 744 * $(SRCDIR)/* $(PROGDIR)/* $(TESTDIR)/* $(DOCDIR)/* - $(MAKE) distr - $(MAKE) programs - -# ####################### a check for Run ###################### -check: $(REQD) - $(MAKE) all -# ############################################################## -# distribute the files out to subdirectories if they are in this one -distr: - @echo "Checking distribution of files" -# scripts - @if test ! -d $(PROGDIR) \ - ; then \ - mkdir $(PROGDIR) \ - ; mv $(SCRIPTS) $(PROGDIR) \ - ; else \ - echo "$(PROGDIR) exists" \ - ; fi -# C sources - @if test ! -d $(SRCDIR) \ - ; then \ - mkdir $(SRCDIR) \ - ; mv $(SOURCES) $(SRCDIR) \ - ; else \ - echo "$(SRCDIR) exists" \ - ; fi -# test data - @if test ! -d $(TESTDIR) \ - ; then \ - mkdir $(TESTDIR) \ - ; mv $(TESTS) $(TESTDIR) \ - ; else \ - echo "$(TESTDIR) exists" \ - ; fi -# temporary work directory - @if test ! 
-d $(TMPDIR) \ - ; then \ - mkdir $(TMPDIR) \ - ; else \ - echo "$(TMPDIR) exists" \ - ; fi -# directory for results - @if test ! -d $(RESULTDIR) \ - ; then \ - mkdir $(RESULTDIR) \ - ; else \ - echo "$(RESULTDIR) exists" \ - ; fi - -.PHONY: all check distr programs run clean spotless - -programs: $(BINS) - -# (use $< to link only the first dependency, instead of $^, -# since the programs matching this pattern have only -# one input file, and others are #include "xxx.c" -# within the first. (not condoning, just documenting)) -# (dependencies could be generated by modern compilers, -# but let's not assume modern compilers are present) -$(PROGDIR)/%: - $(CC) -o $@ $(CFLAGS) $< $(LDFLAGS) - -# Individual programs -# Sometimes the same source file is compiled in different ways. -# This limits the 'make' patterns that can usefully be applied. - -$(PROGDIR)/arithoh: $(SRCDIR)/arith.c $(SRCDIR)/timeit.c -$(PROGDIR)/arithoh: CFLAGS += -Darithoh -$(PROGDIR)/register: $(SRCDIR)/arith.c $(SRCDIR)/timeit.c -$(PROGDIR)/register: CFLAGS += -Ddatum='register int' -$(PROGDIR)/short: $(SRCDIR)/arith.c $(SRCDIR)/timeit.c -$(PROGDIR)/short: CFLAGS += -Ddatum=short -$(PROGDIR)/int: $(SRCDIR)/arith.c $(SRCDIR)/timeit.c -$(PROGDIR)/int: CFLAGS += -Ddatum=int -$(PROGDIR)/long: $(SRCDIR)/arith.c $(SRCDIR)/timeit.c -$(PROGDIR)/long: CFLAGS += -Ddatum=long -$(PROGDIR)/float: $(SRCDIR)/arith.c $(SRCDIR)/timeit.c -$(PROGDIR)/float: CFLAGS += -Ddatum=float -$(PROGDIR)/double: $(SRCDIR)/arith.c $(SRCDIR)/timeit.c -$(PROGDIR)/double: CFLAGS += -Ddatum=double - -$(PROGDIR)/poll: $(SRCDIR)/time-polling.c -$(PROGDIR)/poll: CFLAGS += -DUNIXBENCH -DHAS_POLL -$(PROGDIR)/poll2: $(SRCDIR)/time-polling.c -$(PROGDIR)/poll2: CFLAGS += -DUNIXBENCH -DHAS_POLL2 -$(PROGDIR)/select: $(SRCDIR)/time-polling.c -$(PROGDIR)/select: CFLAGS += -DUNIXBENCH -DHAS_SELECT - -$(PROGDIR)/whetstone-double: $(SRCDIR)/whets.c -$(PROGDIR)/whetstone-double: CFLAGS += -DDP -DGTODay -DUNIXBENCH -$(PROGDIR)/whetstone-double: LDFLAGS += -lm - -$(PROGDIR)/pipe: $(SRCDIR)/pipe.c $(SRCDIR)/timeit.c - -$(PROGDIR)/execl: $(SRCDIR)/execl.c $(SRCDIR)/big.c - -$(PROGDIR)/spawn: $(SRCDIR)/spawn.c $(SRCDIR)/timeit.c - -$(PROGDIR)/hanoi: $(SRCDIR)/hanoi.c $(SRCDIR)/timeit.c - -$(PROGDIR)/fstime: $(SRCDIR)/fstime.c - -$(PROGDIR)/syscall: $(SRCDIR)/syscall.c $(SRCDIR)/timeit.c - -$(PROGDIR)/context1: $(SRCDIR)/context1.c $(SRCDIR)/timeit.c - -$(PROGDIR)/looper: $(SRCDIR)/looper.c $(SRCDIR)/timeit.c - -$(PROGDIR)/ubgears: $(SRCDIR)/ubgears.c -$(PROGDIR)/ubgears: LDFLAGS += -lm $(GL_LIBS) - -$(PROGDIR)/dhry2: CFLAGS += -DHZ=${HZ} -$(PROGDIR)/dhry2: $(SRCDIR)/dhry_1.c $(SRCDIR)/dhry_2.c \ - $(SRCDIR)/dhry.h $(SRCDIR)/timeit.c - $(CC) -o $@ ${CFLAGS} $(SRCDIR)/dhry_1.c $(SRCDIR)/dhry_2.c - -$(PROGDIR)/dhry2reg: CFLAGS += -DHZ=${HZ} -DREG=register -$(PROGDIR)/dhry2reg: $(SRCDIR)/dhry_1.c $(SRCDIR)/dhry_2.c \ - $(SRCDIR)/dhry.h $(SRCDIR)/timeit.c - $(CC) -o $@ ${CFLAGS} $(SRCDIR)/dhry_1.c $(SRCDIR)/dhry_2.c - -# Run the benchmarks and create the reports -run: - sh ./Run - -clean: - $(RM) $(BINS) core *~ */*~ - -spotless: clean - $(RM) $(RESULTDIR)/* $(TMPDIR)/* - -## END ## diff --git a/rk3308/unixbench-master_32/README b/rk3308/unixbench-master_32/README deleted file mode 100755 index 67cad92..0000000 --- a/rk3308/unixbench-master_32/README +++ /dev/null @@ -1,418 +0,0 @@ -Version 5.1.3 -- 2011-01-13 - -================================================================ -To use Unixbench: - -1. UnixBench from version 5.1 on has both system and graphics tests. 
- If you want to use the graphic tests, edit the Makefile and make sure - that the line "GRAPHIC_TESTS = defined" is not commented out; then check - that the "GL_LIBS" definition is OK for your system. Also make sure - that the "x11perf" command is on your search path. - - If you don't want the graphics tests, then comment out the - "GRAPHIC_TESTS = defined" line. Note: comment it out, don't - set it to anything. - -2. Do "make". - -3. Do "Run" to run the system test; "Run graphics" to run the graphics - tests; "Run gindex" to run both. - -You will need perl, as Run is written in perl. - -For more information on using the tests, read "USAGE". - -For information on adding tests into the benchmark, see "WRITING_TESTS". - - -===================== RELEASE NOTES ===================================== - -======================== Jan 13 ========================== - -v5.1.3 - -Fixed issue that would cause a race condition if you attempted to compile in -parallel with more than 3 parallel jobs. - - -Kelly Lucas, Jan 13, 2011 -kdlucas at gmail period com - - -======================== Dec 07 ========================== - -v5.1.2 - -One big fix: if unixbench is installed in a directory whose pathname contains -a space, it should now run (previously it failed). - -To avoid possible clashes, the environment variables unixbench uses are now -prefixed with "UB_". These are all optional, and for most people will be -completely unnecessary, but if you want you can set these: - - UB_BINDIR Directory where the test programs live. - UB_TMPDIR Temp directory, for temp files. - UB_RESULTDIR Directory to put results in. - UB_TESTDIR Directory where the tests are executed. - -And a couple of tiny fixes: -* In pgms/tst.sh, changed "sort -n +1" to "sort -n -k 1" -* In Makefile, made it clearer that GRAPHIC_TESTS should be commented - out (not set to 0) to disable graphics -Thanks to nordi for pointing these out. - - -Ian Smith, December 26, 2007 -johantheghost at yahoo period com - - -======================== Oct 07 ========================== - -v5.1.1 - -It turns out that the setting of LANG is crucial to the results. This -explains why people in different regions were seeing odd results, and also -why runlevel 1 produced odd results -- runlevel 1 doesn't set LANG, and -hence reverts to ASCII, whereas most people use a UTF-8 encoding, which is -much slower in some tests (eg. shell tests). - -So now we manually set LANG to "en_US.utf8", which is configured with the -variable "$language". Don't change this if you want to share your results. -We also report the language settings in use. - -See "The Language Setting" in USAGE for more info. Thanks to nordi for -pointing out the LANG issue. - -I also added the "grep" and "sysexec" tests. These are non-index tests, -and "grep" uses the system's grep, so it's not much use for comparing -different systems. But some folks on the OpenSuSE list have been finding -these useful. They aren't in any of the main test groups; do "Run grep -sysexec" to run them. - -Index Changes -------------- - -The setting of LANG will affect consistency with systems where this is -not the default value. However, it should produce more consistent results -in future. - - -Ian Smith, October 15, 2007 -johantheghost at yahoo period com - - -======================== Oct 07 ========================== - -v5.1 - -The major new feature in this version is the addition of graphical -benchmarks. 
Since these may not compile on all systems, you can enable/ -disable them with the GRAPHIC_TESTS variable in the Makefile. - -As before, each test is run for 3 or 10 iterations. However, we now discard -the worst 1/3 of the scores before averaging the remainder. The logic is -that a glitch in the system (background process waking up, for example) may -make one or two runs go slow, so let's discard those. Hopefully this will -produce more consistent and repeatable results. Check the log file -for a test run to see the discarded scores. - -Made the tests compile and run on x86-64/Linux (fixed an execl bug passing -int instead of pointer). - -Also fixed some general bugs. - -Thanks to Stefan Esser for help and testing / bug reporting. - -Index Changes -------------- - -The tests are now divided into categories, and each category generates -its own index. This keeps the graphics test results separate from -the system tests. - -The "graphics" test and corresponding index are new. - -The "discard the worst scores" strategy should produce slightly higher -test scores, but at least they should (hopefully!) be more consistent. -The scores should not be higher than the best scores you would have got -with 5.0, so this should not be a huge consistency issue. - -Ian Smith, October 11, 2007 -johantheghost at yahoo period com - - -======================== Sep 07 ========================== - -v5.0 - -All the work I've done on this release is Linux-based, because that's -the only Unix I have access to. I've tried to make it more OS-agnostic -if anything; for example, it no longer has to figure out the format reported -by /usr/bin/time. However, it's possible that portability has been damaged. -If anyone wants to fix this, please feel free to mail me patches. - -In particular, the analysis of the system's CPUs is done via /proc/cpuinfo. -For systems which don't have this, please make appropriate changes in -getCpuInfo() and getSystemInfo(). - -The big change has been to make the tests multi-CPU aware. See the -"Multiple CPUs" section in "USAGE" for details. Other changes: - -* Completely rewrote Run in Perl; drastically simplified the way data is - processed. The confusing system of interlocking shell and awk scripts is - now just one script. Various intermediate files used to store and process - results are now replaced by Perl data structures internal to the script. - -* Removed from the index runs file system read and write tests which were - ignored for the index and wasted about 10 minutes per run (see fstime.c). - The read and write tests can now be selected individually. Made fstime.c - take parameters, so we no longer need to build 3 versions of it. - -* Made the output file names unique; they are built from - hostname-date-sequence. - -* Worked on result reporting, error handling, and logging. See TESTS. - We now generate both text and HTML reports. - -* Removed some obsolete files. - -Index Changes -------------- - -The index is still based on David Niemi's SPARCstation 20-61 (rated at 10.0), -and the intention in the changes I've made has been to keep the tests -unchanged, in order to maintain consistency with old result sets. - -However, the following changes have been made to the index: - -* The Pipe-based Context Switching test (context1) was being dropped - from the index report in v4.1.0 due to a bug; I've put it back in. 
- -* I've added shell1 to the index, to get a measure of how the shell tests - scale with multiple CPUs (shell8 already exercises all the CPUs, even - in single-copy mode). I made up the baseline score for this by - extrapolation. - -Both of these test can be dropped, if you wish, by editing the "TEST -SPECIFICATIONS" section of Run. - -Ian Smith, September 20, 2007 -johantheghost at yahoo period com - -======================== Aug 97 ========================== - -v4.1.0 - -Double precision Whetstone put in place instead of the old "double" benchmark. - -Removal of some obsolete files. - -"system" suite adds shell8. - -perlbench and poll added as "exhibition" (non-index) benchmarks. - -Incorporates several suggestions by Andre Derrick Balsa - -Code cleanups to reduce compiler warnings by David C Niemi -and Andy Kahn ; Digital Unix options by Andy Kahn. - -======================== Jun 97 ========================== - -v4.0.1 - -Minor change to fstime.c to fix overflow problems on fast machines. Counting -is now done in units of 256 (smallest BUFSIZE) and unsigned longs are used, -giving another 23 dB or so of headroom ;^) Results should be virtually -identical aside from very small rounding errors. - -======================== Dec 95 ========================== - -v4.0 - -Byte no longer seems to have anything to do with this benchmark, and I was -unable to reach any of the original authors; so I have taken it upon myself -to clean it up. - -This is version 4. Major assumptions made in these benchmarks have changed -since they were written, but they are nonetheless popular (particularly for -measuring hardware for Linux). Some changes made: - -- The biggest change is to put a lot more operating system-oriented - tests into the index. I experimented for a while with a decibel-like - logarithmic scale, but finally settled on using a geometric mean for - the final index (the individual scores are a normalized, and their - logs are averaged; the resulting value is exponentiated). - - "George", certain SPARCstation 20-61 with 128 MB RAM, a SPARC Storage - Array, and Solaris 2.3 is my new baseline; it is rated at 10.0 in each - of the index scores for a final score of 10.0. - - Overall I find the geometric averaging is a big improvement for - avoiding the skew that was once possible (e.g. a Pentium-75 which got - 40 on the buggy version of fstime, such that fstime accounted for over - half of its total score and hence wildly skewed its average). - - I also expect that the new numbers look different enough from the old - ones that no one is too likely to casually mistake them for each other. - - I am finding new SPARCs running Solaris 2.4 getting about 15-20, and - my 486 DX2-66 Compaq running Linux 1.3.45 got a 9.1. It got - understandably poor scores on CPU and FPU benchmarks (a horrible - 1.8 on "double" and 1.3 on "fsdisk"); but made up for it by averaging - over 20 on the OS-oriented benchmarks. The Pentium-75 running - Linux gets about 20 (and it *still* runs Windows 3.1 slowly. Oh well). - -- It is difficult to get a modern compiler to even consider making - dhry2 without registers, short of turning off *all* optimizations. - This is also not a terribly meaningful test, even if it were possible, - as noone compiles without registers nowadays. Replaced this benchmark - with dhry2reg in the index, and dropped it out of usage in general as - it is so hard to make a legitimate one. 
- -- fstime: this had some bugs when compiled on modern systems which return - the number of bytes read/written for read(2)/write(2) calls. The code - assumed that a negative return code was given for EOF, but most modern - systems return 0 (certainly on SunOS 4, Solaris2, and Linux, which is - what counts for me). The old code yielded wildly inflated read scores, - would eat up tens of MB of disk space on fast systems, and yielded - roughly 50% lower than normal copy scores than it should have. - - Also, it counted partial blocks *fully*; made it count the proportional - part of the block which was actually finished. - - Made bigger and smaller variants of fstime which are designed to beat - up the disk I/O and the buffer cache, respectively. Adjusted the - sleeps so that they are short for short benchmarks. - -- Instead of 1,2,4, and 8-shell benchmarks, went to 1, 8, and 16 to - give a broader range of information (and to run 1 fewer test). - The only real problem with this is that not many iterations get - done with 16 at a time on slow systems, so there are some significant - rounding errors; 8 therefore still used for the benchmark. There is - also the problem that the last (uncompleted) loop is counted as a full - loop, so it is impossible to score below 1.0 lpm (which gave my laptop - a break). Probably redesigning Shell to do each loop a bit more - quickly (but with less intensity) would be a good idea. - - This benchmark appears to be very heavily influenced by the speed - of the loader, by which shell is being used as /bin/sh, and by how - well-compiled some of the common shell utilities like grep, sed, and - sort are. With a consistent tool set it is also a good indicator of - the bandwidth between main memory and the CPU (e.g. Pentia score about - twice as high as 486es due to their 64-bit bus). Small, sometimes - broken shells like "ash-linux" do particularly well here, while big, - robust shells like bash do not. - -- "dc" is a somewhat iffy benchmark, because there are two versions of - it floating around, one being small, very fast, and buggy, and one - being more correct but slow. It was never in the index anyway. - -- Execl is a somewhat troubling benchmark in that it yields much higher - scores if compiled statically. I frown on this practice because it - distorts the scores away from reflecting how programs are really used - (i.e. dynamically linked). - -- Arithoh is really more an indicator of the compiler quality than of - the computer itself. For example, GCC 2.7.x with -O2 and a few extra - options optimizes much of it away, resulting in about a 1200% boost - to the score. Clearly not a good one for the index. - -I am still a bit unhappy with the variance in some of the benchmarks, most -notably the fstime suite; and with how long it takes to run. But I think -it gets significantly more reliable results than the older version in less -time. - -If anyone has ideas on how to make these benchmarks faster, lower-variance, -or more meaningful; or has nice, new, portable benchmarks to add, don't -hesitate to e-mail me. - -David C Niemi 7 Dec 1995 - -======================== May 91 ========================== -This is version 3. This set of programs should be able to determine if -your system is BSD or SysV. (It uses the output format of time (1) -to see. If you have any problems, contact me (by email, -preferably): ben@bytepb.byte.com - ---- - -The document doc/bench.doc describes the basic flow of the -benchmark system. 
The document doc/bench3.doc describes the major -changes in design of this version. As a user of the benchmarks, -you should understand some of the methods that have been -implemented to generate loop counts: - -Tests that are compiled C code: - The function wake_me(second, func) is included (from the file -timeit.c). This function uses signal and alarm to set a countdown -for the time request by the benchmark administration script -(Run). As soon as the clock is started, the test is run with a -counter keeping track of the number of loops that the test makes. -When alarm sends its signal, the loop counter value is sent to stderr -and the program terminates. Since the time resolution, signal -trapping and other factors don't insure that the test is for the -precise time that was requested, the test program is also run -from the time (1) command. The real time value returned from time -(1) is what is used in calculating the number of loops per second -(or minute, depending on the test). As is obvious, there is some -overhead time that is not taken into account, therefore the -number of loops per second is not absolute. The overhead of the -test starting and stopping and the signal and alarm calls is -common to the overhead of real applications. If a program loads -quickly, the number of loops per second increases; a phenomenon -that favors systems that can load programs quickly. (Setting the -sticky bit of the test programs is not considered fair play.) - -Test that use existing UNIX programs or shell scripts: - The concept is the same as that of compiled tests, except the -alarm and signal are contained in separate compiled program, -looper (source is looper.c). Looper uses an execvp to invoke the -test with its arguments. Here, the overhead includes the -invocation and execution of looper. - --- - -The index numbers are generated from a baseline file that is in -pgms/index.base. You can put tests that you wish in this file. -All you need to do is take the results/log file from your -baseline machine, edit out the comment and blank lines, and sort -the result (vi/ex command: 1,$!sort). The sort in necessary -because the process of generating the index report uses join (1). -You can regenerate the reports by running "make report." - --- - -========================= Jan 90 ============================= -Tom Yager has joined the effort here at BYTE; he is responsible -for many refinements in the UNIX benchmarks. - -The memory access tests have been deleted from the benchmarks. -The file access tests have been reversed so that the test is run -for a fixed time. The amount of data transfered (written, read, -and copied) is the variable. !WARNING! This test can eat up a -large hunk of disk space. - -The initial line of all shell scripts has been changed from the -SCO and XENIX form (:) to the more standard form "#! /bin/sh". -But different systems handle shell switching differently. Check -the documentation on your system and find out how you are -supposed to do it. Or, simpler yet, just run the benchmarks from -the Bourne shell. (You may need to set SHELL=/bin/sh as well.) - -The options to Run have not been checked in a while. They may no -longer function. Next time, I'll get back on them. There needs to -be another option added (next time) that halts testing between -each test. !WARNING! Some systems have caches that are not getting flushed -before the next test or iteration is run. This can cause -erroneous values. 
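A rough sketch of the alarm-driven loop counting that the "May 91" notes above describe for compiled tests (this is not the deleted src/timeit.c itself; the handler name, the 10-second duration, and the bare loop-count output are assumptions made only for illustration):

    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    static volatile unsigned long iterations;   /* loops completed so far */

    /* SIGALRM handler: write the loop count to stderr and terminate,
     * as the notes above describe. */
    static void report(int sig)
    {
        (void)sig;
        fprintf(stderr, "%lu\n", (unsigned long)iterations);
        exit(0);
    }

    /* wake_me(seconds, func): arm an alarm that invokes func after the
     * run time requested by the Run script (the role timeit.c plays). */
    static void wake_me(int seconds, void (*func)(int))
    {
        signal(SIGALRM, func);
        alarm(seconds);
    }

    int main(void)
    {
        wake_me(10, report);        /* run the workload for ~10 seconds */
        for (;;) {
            /* one unit of benchmark work would go here */
            iterations++;
        }
    }

As the notes state, the Run script then divides the reported loop count by the real time reported by time(1) to obtain loops per second (or per minute).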
- -========================= Sept 89 ============================= -The database (db) programs now have a tuneable message queue space. -queue space. The default set in the Run script is 1024 bytes. -Other major changes are in the format of the times. We now show -Arithmetic and Geometric mean and standard deviation for User -Time, System Time, and Real Time. Generally, in reporting, we -plan on using the Real Time values with the benchs run with one -active user (the bench user). Comments and arguments are requested. - -contact: BIX bensmith or rick_g diff --git a/rk3308/unixbench-master_32/RkLunch.sh b/rk3308/unixbench-master_32/RkLunch.sh deleted file mode 100755 index 581fd04..0000000 --- a/rk3308/unixbench-master_32/RkLunch.sh +++ /dev/null @@ -1,2 +0,0 @@ -wpa_supplicant -B -i wlan0 -c /data/cfg/wpa_supplicant.conf -#./wakeWordAgent -e gpio & diff --git a/rk3308/unixbench-master_32/Run b/rk3308/unixbench-master_32/Run deleted file mode 100755 index 0909ded..0000000 --- a/rk3308/unixbench-master_32/Run +++ /dev/null @@ -1,2000 +0,0 @@ -#!/usr/bin/perl -w - -use strict; - -use POSIX qw(strftime); -use Time::HiRes; -use IO::Handle; -use File::Path; -use FindBin; - - -############################################################################ -# UnixBench - Release 5.1.3, based on: -# The BYTE UNIX Benchmarks - Release 3 -# Module: Run SID: 3.11 5/15/91 19:30:14 -# Original Byte benchmarks written by: -# Ben Smith, Tom Yager at BYTE Magazine -# ben@bytepb.byte.com tyager@bytepb.byte.com -# BIX: bensmith tyager -# -####################################################################### -# General Purpose Benchmark -# based on the work by Ken McDonell, Computer Science, Monash University -# -# You will need ... -# perl Time::HiRes IO::Handlecat cc chmod comm cp date dc df echo -# kill ls make mkdir rm sed test time touch tty umask who -############################################################################### -# Modification Log: -# $Header: run,v 5.2 88/01/12 06:23:43 kenj Exp $ -# Ken McDonell, Computer Science, Monash University -# August 1, 1983 -# 3/89 - Ben Smith - BYTE: globalized many variables, modernized syntax -# 5/89 - commented and modernized. Removed workload items till they -# have been modernized. Added database server test. -# 11/14/89 - Made modifications to reflect new version of fstime -# and elimination of mem tests. -# 10/22/90 - Many tests have been flipped so that they run for -# a specified length of time and loops are counted. -# 4/3/91 - Cleaned up and debugged several test parameters - Ben -# 4/9/91 - Added structure for creating index and determing flavor of UNIX -# 4/26/91 - Made changes and corrections suggested by Tin Le of Sony -# 5/15/91 - Removed db from distribution -# 4/4/92 Jon Tombs fixed for GNU time to look like -# BSD (don't know the format of sysV!) -# 12/95 - Massive changes for portability, speed, and more meaningful index -# DCN David C Niemi -# 1997.06.20 DCN Fixed overflow condition in fstime.c on fast machines -# 1997.08.24 DCN Modified "system", replaced double with -# whetstone-double in "index" -# 1997.09.10 DCN Added perlbench as an Exhibition benchmark -# 1997.09.23 DCN Added rgooch's select as an Exhibition benchmark -# 1999.07.28 DCN "select" not compiled or run by default, because it -# does not compile on many platforms. PerlBench also -# not run by default. -# 2007.09.26 IS Huge rewrite -- see release notes in README. -# 2007.10.12 IS Added graphics tests, categories feature. -# 2007.10.14 IS Set and report LANG. 
Added "grep" and "sysexec". -# 2007.12.22 IS Tiny fixes; see README. -# 2011.01.13 KDL Fix for parallel compilation. - - -############################################################################ -# CONFIGURATION -############################################################################ - -# Version number of the script. -my $version = "5.1.3"; - -# The setting of LANG makes a huge difference to some of the scores, -# particularly depending on whether UTF-8 is used. So we always set -# it to the same value, which is configured here. -# -# If you want your results to be meaningful when compared to other peoples' -# results, you should not change this. Change it if you want to measure the -# effect of different languages. -my $language = "en_US.utf8"; - -# The number of iterations per test. -my $longIterCount = 10; -my $shortIterCount = 3; - -# C compiler to use in compilation tests. -my $cCompiler = 'gcc'; - -# Establish full paths to directories. These need to be full pathnames -# (or do they, any more?). They can be set in env. -# variable names are the first parameter to getDir() below. - -# Directory where the test programs live. -my $BINDIR = getDir('UB_BINDIR', $FindBin::Bin . "/pgms"); - -# Temp directory, for temp files. -my $TMPDIR = getDir('UB_TMPDIR', $FindBin::Bin . "/tmp"); - -# Directory to put results in. -my $RESULTDIR = getDir('UB_RESULTDIR', $FindBin::Bin . "/results"); - -# Directory where the tests are executed. -my $TESTDIR = getDir('UB_TESTDIR', $FindBin::Bin . "/testdir"); - - -############################################################################ -# TEST SPECIFICATIONS -############################################################################ - -# Configure the categories to which tests can belong. -my $testCats = { - 'system' => { 'name' => "System Benchmarks", 'maxCopies' => 0 }, - '2d' => { 'name' => "2D Graphics Benchmarks", 'maxCopies' => 1 }, - '3d' => { 'name' => "3D Graphics Benchmarks", 'maxCopies' => 1 }, - 'misc' => { 'name' => "Non-Index Benchmarks", 'maxCopies' => 0 }, -}; - - -my $arithmetic = [ - "arithoh", "short", "int", "long", "float", "double", "whetstone-double" -]; - -my $fs = [ - "fstime-w", "fstime-r", "fstime", - "fsbuffer-w", "fsbuffer-r", "fsbuffer", - "fsdisk-w", "fsdisk-r", "fsdisk" -]; - -my $oldsystem = [ - "execl", "fstime", "fsbuffer", "fsdisk", "pipe", "context1", "spawn", - "syscall" -]; - -my $system = [ - @$oldsystem, "shell1", "shell8", "shell16" -]; - -my $index = [ - "dhry2reg", "whetstone-double", @$oldsystem, "shell1", "shell8" -]; - -my $graphics = [ - "2d-rects", "2d-ellipse", "2d-aashapes", "2d-text", "2d-blit", - "2d-window", "ubgears" -]; - - -# List of all supported test names. -my $testList = { - # Individual tests. 
- "dhry2reg" => undef, - "whetstone-double" => undef, - "syscall" => undef, - "pipe" => undef, - "context1" => undef, - "spawn" => undef, - "execl" => undef, - "fstime-w" => undef, - "fstime-r" => undef, - "fstime" => undef, - "fsbuffer-w" => undef, - "fsbuffer-r" => undef, - "fsbuffer" => undef, - "fsdisk-w" => undef, - "fsdisk-r" => undef, - "fsdisk" => undef, - "shell1" => undef, - "shell8" => undef, - "shell16" => undef, - "short" => undef, - "int" => undef, - "long" => undef, - "float" => undef, - "double" => undef, - "arithoh" => undef, - "dc" => undef, - "hanoi" => undef, - "grep" => undef, - "sysexec" => undef, - - "2d-rects" => undef, - "2d-lines" => undef, - "2d-circle" => undef, - "2d-ellipse" => undef, - "2d-shapes" => undef, - "2d-aashapes" => undef, - "2d-polys" => undef, - "2d-text" => undef, - "2d-blit" => undef, - "2d-window" => undef, - - "ubgears" => undef, - - # Named combos and shorthands. - "arithmetic" => $arithmetic, - "dhry" => [ "dhry2reg" ], - "dhrystone" => [ "dhry2reg" ], - "whets" => [ "whetstone-double" ], - "whetstone" => [ "whetstone-double" ], - "load" => [ "shell" ], - "misc" => [ "C", "dc", "hanoi" ], - "speed" => [ @$arithmetic, @$system ], - "oldsystem" => $oldsystem, - "system" => $system, - "fs" => $fs, - "shell" => [ "shell1", "shell8", "shell16" ], - "graphics" => $graphics, - - # The tests which constitute the official index. - "index" => $index, - - # The tests which constitute the official index plus the graphics - # index. - "gindex" => [ @$index, @$graphics ], -}; - - -# Default parameters for benchmarks. Note that if "prog" is used, -# it must contain just the program name, as it will be quoted (this -# is necessary if BINDIR contains spaces). Put any options in "options". -my $baseParams = { - "prog" => undef, - "options" => "", - "repeat" => 'short', - "stdout" => 1, # Non-0 to keep stdout. - "stdin" => "", - "logmsg" => "", -}; - - -# Individual parameters for all benchmarks. 
-my $testParams = { - - ########################## - ## System Benchmarks ## - ########################## - - "dhry2reg" => { - "logmsg" => "Dhrystone 2 using register variables", - "cat" => 'system', - "options" => "10", - "repeat" => 'long', - }, - "whetstone-double" => { - "logmsg" => "Double-Precision Whetstone", - "cat" => 'system', - "repeat" => 'long', - }, - "syscall" => { - "logmsg" => "System Call Overhead", - "cat" => 'system', - "repeat" => 'long', - "options" => "10", - }, - "context1" => { - "logmsg" => "Pipe-based Context Switching", - "cat" => 'system', - "repeat" => 'long', - "options" => "10", - }, - "pipe" => { - "logmsg" => "Pipe Throughput", - "cat" => 'system', - "repeat" => 'long', - "options" => "10", - }, - "spawn" => { - "logmsg" => "Process Creation", - "cat" => 'system', - "options" => "30", - }, - "execl" => { - "logmsg" => "Execl Throughput", - "cat" => 'system', - "options" => "30", - }, - "fstime-w" => { - "logmsg" => "File Write 1024 bufsize 2000 maxblocks", - "cat" => 'system', - "prog" => "${BINDIR}/fstime", - "options" => "-w -t 30 -d \"${TMPDIR}\" -b 1024 -m 2000", - }, - "fstime-r" => { - "logmsg" => "File Read 1024 bufsize 2000 maxblocks", - "cat" => 'system', - "prog" => "${BINDIR}/fstime", - "options" => "-r -t 30 -d \"${TMPDIR}\" -b 1024 -m 2000", - }, - "fstime" => { - "logmsg" => "File Copy 1024 bufsize 2000 maxblocks", - "cat" => 'system', - "prog" => "${BINDIR}/fstime", - "options" => "-c -t 30 -d \"${TMPDIR}\" -b 1024 -m 2000", - }, - "fsbuffer-w" => { - "logmsg" => "File Write 256 bufsize 500 maxblocks", - "cat" => 'system', - "prog" => "${BINDIR}/fstime", - "options" => "-w -t 30 -d \"${TMPDIR}\" -b 256 -m 500", - }, - "fsbuffer-r" => { - "logmsg" => "File Read 256 bufsize 500 maxblocks", - "cat" => 'system', - "prog" => "${BINDIR}/fstime", - "options" => "-r -t 30 -d \"${TMPDIR}\" -b 256 -m 500", - }, - "fsbuffer" => { - "logmsg" => "File Copy 256 bufsize 500 maxblocks", - "cat" => 'system', - "prog" => "${BINDIR}/fstime", - "options" => "-c -t 30 -d \"${TMPDIR}\" -b 256 -m 500", - }, - "fsdisk-w" => { - "logmsg" => "File Write 4096 bufsize 8000 maxblocks", - "cat" => 'system', - "prog" => "${BINDIR}/fstime", - "options" => "-w -t 30 -d \"${TMPDIR}\" -b 4096 -m 8000", - }, - "fsdisk-r" => { - "logmsg" => "File Read 4096 bufsize 8000 maxblocks", - "cat" => 'system', - "prog" => "${BINDIR}/fstime", - "options" => "-r -t 30 -d \"${TMPDIR}\" -b 4096 -m 8000", - }, - "fsdisk" => { - "logmsg" => "File Copy 4096 bufsize 8000 maxblocks", - "cat" => 'system', - "prog" => "${BINDIR}/fstime", - "options" => "-c -t 30 -d \"${TMPDIR}\" -b 4096 -m 8000", - }, - "shell1" => { - "logmsg" => "Shell Scripts (1 concurrent)", - "cat" => 'system', - "prog" => "${BINDIR}/looper", - "options" => "60 \"${BINDIR}/multi.sh\" 1", - }, - "shell8" => { - "logmsg" => "Shell Scripts (8 concurrent)", - "cat" => 'system', - "prog" => "${BINDIR}/looper", - "options" => "60 \"${BINDIR}/multi.sh\" 8", - }, - "shell16" => { - "logmsg" => "Shell Scripts (16 concurrent)", - "cat" => 'system', - "prog" => "${BINDIR}/looper", - "options" => "60 \"${BINDIR}/multi.sh\" 16", - }, - - ########################## - ## Graphics Benchmarks ## - ########################## - - "2d-rects" => { - "logmsg" => "2D graphics: rectangles", - "cat" => '2d', - "prog" => "${BINDIR}/gfx-x11", - "options" => "rects 3 2", - }, - - "2d-lines" => { - "logmsg" => "2D graphics: lines", - "cat" => '2d', - "prog" => "${BINDIR}/gfx-x11", - "options" => "lines 3 2", - }, - - "2d-circle" => { - "logmsg" => "2D 
graphics: circles", - "cat" => '2d', - "prog" => "${BINDIR}/gfx-x11", - "options" => "circle 3 2", - }, - - "2d-ellipse" => { - "logmsg" => "2D graphics: ellipses", - "cat" => '2d', - "prog" => "${BINDIR}/gfx-x11", - "options" => "ellipse 3 2", - }, - - "2d-shapes" => { - "logmsg" => "2D graphics: polygons", - "cat" => '2d', - "prog" => "${BINDIR}/gfx-x11", - "options" => "shapes 3 2", - }, - - "2d-aashapes" => { - "logmsg" => "2D graphics: aa polygons", - "cat" => '2d', - "prog" => "${BINDIR}/gfx-x11", - "options" => "aashapes 3 2", - }, - - "2d-polys" => { - "logmsg" => "2D graphics: complex polygons", - "cat" => '2d', - "prog" => "${BINDIR}/gfx-x11", - "options" => "polys 3 2", - }, - - "2d-text" => { - "logmsg" => "2D graphics: text", - "cat" => '2d', - "prog" => "${BINDIR}/gfx-x11", - "options" => "text 3 2", - }, - - "2d-blit" => { - "logmsg" => "2D graphics: images and blits", - "cat" => '2d', - "prog" => "${BINDIR}/gfx-x11", - "options" => "blit 3 2", - }, - - "2d-window" => { - "logmsg" => "2D graphics: windows", - "cat" => '2d', - "prog" => "${BINDIR}/gfx-x11", - "options" => "window 3 2", - }, - - "ubgears" => { - "logmsg" => "3D graphics: gears", - "cat" => '3d', - "options" => "-time 20 -v", - }, - - - ########################## - ## Non-Index Benchmarks ## - ########################## - - "C" => { - "logmsg" => "C Compiler Throughput ($cCompiler)", - "cat" => 'misc', - "prog" => "${BINDIR}/looper", - "options" => "60 $cCompiler cctest.c", - }, - "arithoh" => { - "logmsg" => "Arithoh", - "cat" => 'misc', - "options" => "10", - }, - "short" => { - "logmsg" => "Arithmetic Test (short)", - "cat" => 'misc', - "options" => "10", - }, - "int" => { - "logmsg" => "Arithmetic Test (int)", - "cat" => 'misc', - "options" => "10", - }, - "long" => { - "logmsg" => "Arithmetic Test (long)", - "cat" => 'misc', - "options" => "10", - }, - "float" => { - "logmsg" => "Arithmetic Test (float)", - "cat" => 'misc', - "options" => "10", - }, - "double" => { - "logmsg" => "Arithmetic Test (double)", - "cat" => 'misc', - "options" => "10", - }, - "dc" => { - "logmsg" => "Dc: sqrt(2) to 99 decimal places", - "cat" => 'misc', - "prog" => "${BINDIR}/looper", - "options" => "30 dc", - "stdin" => "dc.dat", - }, - "hanoi" => { - "logmsg" => "Recursion Test -- Tower of Hanoi", - "cat" => 'misc', - "options" => "20", - }, - "grep" => { - "logmsg" => "Grep a large file (system's grep)", - "cat" => 'misc', - "prog" => "${BINDIR}/looper", - "options" => "30 grep -c gimp large.txt", - }, - "sysexec" => { - "logmsg" => "Exec System Call Overhead", - "cat" => 'misc', - "repeat" => 'long', - "prog" => "${BINDIR}/syscall", - "options" => "10 exec", - }, -}; - - -# CPU flags of interest. -my $x86CpuFlags = { - 'pae' => "Physical Address Ext", - 'sep' => "SYSENTER/SYSEXIT", - 'syscall' => "SYSCALL/SYSRET", - 'mmx' => "MMX", - 'mmxext' => "AMD MMX", - 'cxmmx' => "Cyrix MMX", - 'xmm' => "Streaming SIMD", - 'xmm2' => "Streaming SIMD-2", - 'xmm3' => "Streaming SIMD-3", - 'ht' => "Hyper-Threading", - 'ia64' => "IA-64 processor", - 'lm' => "x86-64", - 'vmx' => "Intel virtualization", - 'svm' => "AMD virtualization", -}; - - -############################################################################ -# UTILITIES -############################################################################ - -# Exec the given command, and catch its standard output. -# We return an array containing the PID and the filehandle on the -# process' standard output. It's up to the caller to wait for the command -# to terminate. 
-sub command { - my ( $cmd ) = @_; - - my $pid = open(my $childFd, "-|"); - if (!defined($pid)) { - die("Run: fork() failed (undef)\n"); - } elsif ($pid == 0) { - exec($cmd); - die("Run: exec() failed (returned)\n"); - } - - return ( $pid, $childFd ); -} - - -# Get data from running a system command. Used for things like getting -# the host OS from `uname -o` etc. -# -# Ignores initial blank lines from the command and returns the first -# non-blank line, with white space trimmed off. Returns a blank string -# if there is no output; undef if the command fails. -sub getCmdOutput { - my ( $cmd ) = @_; - - my ( $pid, $fd ) = command($cmd . " 2>/dev/null"); - my $result = ""; - while (<$fd>) { - chomp; - next if /^[ \t]*$/; - - $result = $_; - $result =~ s/^[ \t]+//; - $result =~ s/[ \t]+$//; - last; - } - - # Close the command and wait for it to die. - waitpid($pid, 0); - my $status = $?; - - return $status == 0 ? $result : undef; -} - - -# Get a directory pathname from an environment variable, or the given -# default. Canonicalise and return the value. -sub getDir { - my ( $var, $def ) = @_; - - # If Environment variables(e.g. UB_RESULTDIR) is unset, use default value. - my $val = $ENV{$var} || $def; - - # Only "execl.c" test needs the Environment variable(UB_BINDIR). - $ENV{$var} = $val; - - return $val; -} - -# Create direcotry(0755) if not exists. -sub createDirrectoriesIfNotExists { - foreach my $path (@_) { - my $isDirectoryNotExists = ! -d $path; - if ( $isDirectoryNotExists ) { - mkpath($path, {chmod => 0755}); - } - } -} - -# Show use directories. -sub printUsingDirectories { - printf "------------------------------------------------------------------------------\n"; - printf " Use directories for:\n"; - printf " * File I/O tests (named fs***) = ${TMPDIR}\n"; - printf " * Results = ${RESULTDIR}\n"; - printf "------------------------------------------------------------------------------\n"; - printf "\n"; -} - - -# Get the name of the file we're going to log to. The name uses the hostname -# and date, plus a sequence number to make it unique. -sub logFile { - my ( $sysInfo ) = @_; - - # If supplied output file name via Environment variable(UB_OUTPUT_FILE_NAME), then use it. - # * If exists same file, it will be overwrite completly. - my $output_file_name_supplied_by_environment = $ENV{"UB_OUTPUT_FILE_NAME"}; - if ( defined($output_file_name_supplied_by_environment) && $output_file_name_supplied_by_environment ne "" ) { - return ${RESULTDIR} . "/" . $output_file_name_supplied_by_environment; - } - - - # Use the date in the base file name. - my $ymd = strftime "%Y-%m-%d", localtime; - - my $count = 1; - while (1) { - my $log = sprintf "%s/%s-%s-%02d", - ${RESULTDIR}, $sysInfo->{'name'}, $ymd, $count; - return $log if (! -e $log); - ++$count; - } -} - - -# Print a message to the named log file. We use this method rather than -# keeping the FD open because we use shell redirection to send command -# output to the same file. -sub printLog { - my ( $logFile, @args ) = @_; - - open(my $fd, ">>", $logFile) || abortRun("can't append to $logFile"); - printf $fd @args; - close($fd); -} - - -# Display a number of something, auto-selecting the plural form -# if appropriate. We are given the number, the singular, and the -# plural; if the plural is omitted, it defaults to singular + "s". -sub number { - my ( $n, $what, $plural ) = @_; - - $plural = $what . "s" if !defined($plural); - - if (!defined($n)) { - return sprintf "unknown %s", $plural; - } else { - return sprintf "%d %s", $n, $n == 1 ? 
$what : $plural; - } -} - - -# Merge two sets of test parameters -- defaults and actual parameters. -# Return the merged parameter hash. -sub mergeParams { - my ( $def, $vals ) = @_; - - my $params = { }; - foreach my $k (keys(%$def)) { - $params->{$k} = $def->{$k}; - } - foreach my $k (keys(%$vals)) { - $params->{$k} = $vals->{$k}; - } - - $params; -} - - -############################################################################ -# SYSTEM ANALYSIS -############################################################################ - -# Extract interesting flags from the given processor flags string and -# convert them to descriptive names. -sub processCpuFlags { - my ( $flagStr ) = @_; - - my @names; - foreach my $f (sort split(/\s+/, $flagStr)) { - my $name = $x86CpuFlags->{$f}; - push(@names, $name) if $name; - } - - join(", ", @names); -} - - -# Get information on the CPUs in the system. Returns a reference to an -# array of N entries, one per CPU, where each entry is a hash containing -# these fields: -# describing the model etc. Returns undef if the information can't be got. -# -# future: on systems without /proc/cpuinfo, might check for Perl modules: -# Sys::Info::Device::CPU or Sys::CpuAffinity -sub getCpuInfo { - if (!("$^O" eq "darwin")) { - open(my $fd, "<", "/proc/cpuinfo") || return undef; - - my $cpus = [ ]; - my $cpu = 0; - while (<$fd>) { - chomp; - my ( $field, $val ) = split(/[ \t]*:[ \t]*/); - next if (!$field || !$val); - if ($field eq "processor") { - $cpu = $val; - } elsif ($field eq "model name") { - my $model = $val; - $model =~ s/ +/ /g; - $cpus->[$cpu]{'model'} = $model; - } elsif ($field eq "bogomips") { - $cpus->[$cpu]{'bogo'} = $val; - } elsif ($field eq "flags") { - $cpus->[$cpu]{'flags'} = processCpuFlags($val); - } - } - - close($fd); - - $cpus; - - } else { - - my $model = getCmdOutput("sysctl -n machdep.cpu.brand_string"); - my $flags = getCmdOutput("sysctl -n machdep.cpu.features | tr [A-Z] [a-z]"); - my $ncpu = getCmdOutput("sysctl -n hw.ncpu"); - - my $cpus = [ ]; - my $cpu = 0; - - for ($cpu = 0; $cpu < $ncpu; $cpu++) { - $cpus->[$cpu]{'model'} = $model; - $cpus->[$cpu]{'bogo'} = 0; - $cpus->[$cpu]{'flags'} = processCpuFlags($flags); - } - $cpus; - } -} - - -# Get number of available (active) CPUs (not including disabled CPUs) -# or, if not num of available CPUs, the total number of CPUs on the system -# Returns undef if the information can't be obtained. -# -# There is no shortage of platform-specific methods to obtain this info. -# This routine -is not- exhaustive, but adds some additional portability. -# Most modern unix systems implement sysconf(_SC_NPROCESSORS_ONLN). 
-sub getNumActiveCpus { - my $numCpus; - - #(POSIX::_SC_NPROCESSORS_ONLN value not typically provided by POSIX.pm) - #$numCpus = POSIX::sysconf(POSIX::_SC_NPROCESSORS_ONLN); - #if (defined($numCpus)) { chomp $numCpus; return $numCpus if $numCpus; } - - $numCpus = `getconf _NPROCESSORS_ONLN 2>/dev/null`; - if (defined($numCpus)) { chomp $numCpus; return $numCpus if $numCpus; } - - $numCpus = `getconf NPROCESSORS_ONLN 2>/dev/null`; - if (defined($numCpus)) { chomp $numCpus; return $numCpus if $numCpus; } - - $numCpus = `nproc 2>/dev/null`; - if (defined($numCpus)) { chomp $numCpus; return $numCpus if $numCpus; } - - $numCpus = `python -c 'import os; print os.sysconf(os.sysconf_names["SC_NPROCESSORS_ONLN"]);' 2>/dev/null`; - if (defined($numCpus)) { chomp $numCpus; return $numCpus if $numCpus; } - - # Windows - return $ENV{"NUMBER_OF_PROCESSORS"} if $ENV{"NUMBER_OF_PROCESSORS"}; - - return undef; -} - - -# Get information on the host system. Returns a reference to a hash -# with the following fields: -# name Host name -# os Host OS name -# osRel Host OS release -# osVer Host OS version -# mach Host machine name (eg. "SparcStation 20", but on -# PC/Linux usually "i686" etc.) -# platform Hardware platform; on Linux, the base CPU type? -# system System name (eg. hostname and Linux distro, like -# "hostname: openSUSE 10.2 (i586)"). -# cpus Value returned by getCpuInfo(), undef if not avail. -# numCpus Number of CPUs if known, else undef. -# load System load message as per "uptime". -# numUsers Number of users and/or open shell sessions. -sub getSystemInfo { - my $info = { }; - - # Get host system data. - $info->{'name'} = getCmdOutput("hostname"); - $info->{'os'} = getCmdOutput("uname -o") || getCmdOutput("uname -s"); - $info->{'osRel'} = getCmdOutput("uname -r"); - $info->{'osVer'} = getCmdOutput("uname -v"); - $info->{'mach'} = $^O ne "aix" - ? getCmdOutput("uname -m") - : getCmdOutput("uname -p"); - $info->{'platform'} = getCmdOutput("uname -i") || "unknown"; - - # Get the system name (SUSE, Red Hat, etc.) if possible. - $info->{'system'} = $info->{'os'}; - if ( -r "/etc/SuSE-release" ) { - $info->{'system'} = getCmdOutput("cat /etc/SuSE-release"); - } elsif ( -r "/etc/release" ) { - $info->{'system'} = getCmdOutput("cat /etc/release"); - } - - # Get the language info. - my $lang = getCmdOutput("printenv LANG"); - my $map = $^O ne "aix" - ? getCmdOutput("locale -k LC_CTYPE | grep charmap") || "" - : getCmdOutput("locale charmap") || ""; - $map =~ s/.*=//; - my $coll = $^O ne "aix" - ? getCmdOutput("locale -k LC_COLLATE | grep collate-codeset") || "" - : getCmdOutput("locale | grep LC_COLLATE") || ""; - $coll =~ s/.*=//; - $info->{'language'} = sprintf "%s (charmap=%s, collate=%s)", - $lang, $map, $coll; - - # Get details on the CPUs, if possible. - my $cpus = getCpuInfo(); - if (defined($cpus)) { - $info->{'cpus'} = $cpus; - $info->{'numCpus'} = scalar(@$cpus); - } - - # Get available number of CPUs (not disabled CPUs), if possible. - my $numCpus = getNumActiveCpus(); - if (defined($numCpus)) { - $info->{'numCpus'} = $numCpus; # overwrite value from getCpuinfo() - } - - # Get graphics hardware info. - $info->{'graphics'} = getCmdOutput("3dinfo | cut -f1 -d\'(\'"); - - # Get system run state, load and usage info. 
- $info->{'runlevel'} = getCmdOutput("who -r | awk '{print \$3}'"); - $info->{'load'} = getCmdOutput("uptime"); - $info->{'numUsers'} = getCmdOutput("who | wc -l"); - - $info; -} - - -############################################################################ -# ERROR HANDLING -############################################################################ - -# Abort the benchmarking run with an error message. -sub abortRun { - my ( $err ) = @_; - - printf STDERR "\n**********************************************\n"; - printf STDERR "Run: %s; aborting\n", $err; - exit(1); -} - - -############################################################################ -# TEST SETUP -############################################################################ - -# Do checks that everything's ready for testing. -sub preChecks { - # Set the language. - $ENV{'LANG'} = $language; - - # Check that the required files are in the proper places. - my $make = $ENV{MAKE} || "make"; - system("$make check"); - if ($? != 0) { - system("$make all"); - if ($? != 0) { - abortRun("\"$make all\" failed"); - } - } - - # Create a script to kill this run. - system("echo \"kill -9 $$\" > \"${TMPDIR}/kill_run\""); - chmod(0755, $TMPDIR . "/kill_run"); -} - - -# Parse the command arguments. -sub parseArgs { - my @words = @_; - - # The accumulator for the bench units to be run. - my $tests = [ ]; - my $params = { 'tests' => $tests }; - - # Generate the requested list of bench programs. - my $opt; - my $word; - while ($word = shift(@words)) { - if ($word !~ m/^-/) { # A test name. - if ($word eq "all") { - foreach my $t (keys(%$testList)) { - push(@$tests, $t) if (!defined($testList->{$t})); - } - } elsif (exists($testList->{$word})) { - my $val = $testList->{$word} || [ $word ]; - push(@$tests, @$val); - } else { - die("Run: unknown test \"$word\"\n"); - } - } elsif ($word eq "-q") { - $params->{'verbose'} = 0; - } elsif ($word eq "-v") { - $params->{'verbose'} = 2; - } elsif ($word eq "-i") { - $params->{'iterations'} = shift(@words); - } elsif ($word eq "-c") { - if (!defined($params->{'copies'})) { - $params->{'copies'} = [ ]; - } - push(@{$params->{'copies'}}, shift(@words)); - } else { - die("Run: unknown option $word\n"); - } - } - - $params; -} - - -############################################################################ -# RESULTS INPUT / OUTPUT -############################################################################ - -# Read a set of benchmarking results from the given file. -# Returns results in the form returned by runTests(), but without the -# individual pass results. -sub readResultsFromFile { - my ( $file ) = @_; - - # Attempt to get the baseline data file; if we can't, just return undef. - open(my $fd, "<", $file) || return undef; - - my $results = { }; - while (<$fd>) { - chomp; - - # Dump comments, ignore blank lines. - s/#.*//; - next if /^\s*$/; - - my ( $name, $time, $slab, $sum, $score, $iters ) = split(/\|/); - my $bresult = { }; - $bresult->{'score'} = $score; - $bresult->{'scorelabel'} = $slab; - $bresult->{'time'} = $time; - $bresult->{'iterations'} = $iters; - - $results->{$name} = $bresult; - } - - close($fd); - - $results; -} - - -############################################################################ -# RESULTS PROCESSING -############################################################################ - -# Process a set of results from a single test by averaging the individal -# pass results into a single final value. -# First, though, dump the worst 1/3 of the scores. 
The logic is that a -# glitch in the system (background process waking up, for example) may -# make one or two runs go slow, so let's discard those. -# -# $bresult is a hashed array representing the results of a single test; -# $bresult->{'passes'} is an array of the output from the individual -# passes. -sub combinePassResults { - my ( $bench, $tdata, $bresult, $logFile ) = @_; - - $bresult->{'cat'} = $tdata->{'cat'}; - - # Computed results. - my $iterations = 0; - my $totalTime = 0; - my $sum = 0; - my $product = 0; - my $label; - - my $pres = $bresult->{'passes'}; - - # We're going to throw away the worst 1/3 of the pass results. - # Figure out how many to keep. - my $npasses = scalar(@$pres); - my $ndump = int($npasses / 3); - - foreach my $presult (sort { $a->{'COUNT0'} <=> $b->{'COUNT0'} } @$pres) { - my $count = $presult->{'COUNT0'}; - my $timebase = $presult->{'COUNT1'}; - $label = $presult->{'COUNT2'}; - my $time = $presult->{'TIME'} || $presult->{'elapsed'}; - - # Skip this result if it's one of the worst ones. - if ($ndump > 0) { - printLog($logFile, "*Dump score: %12.1f\n", $count); - --$ndump; - next; - } - - # Count this result. - ++$iterations; - printLog($logFile, "Count score: %12.1f\n", $count); - - # If $timebase is 0 the figure is a rate; else compute - # counts per $timebase. $time is always seconds. - if ($timebase > 0 && $time > 0) { - $sum += $count / ($time / $timebase); - $product += log($count) - log($time / $timebase) if ($count > 0); - } else { - $sum += $count; - $product += log($count) if ($count > 0); - } - $totalTime += $time; - } - - # Save the results for the benchmark. - if ($iterations > 0) { - $bresult->{'score'} = exp($product / $iterations); - $bresult->{'scorelabel'} = $label; - $bresult->{'time'} = $totalTime / $iterations; - $bresult->{'iterations'} = $iterations; - } else { - $bresult->{'error'} = "No measured results"; - } -} - - -# Index the given full benchmark results against the baseline results. -# $results is a hashed array of test names to test results. -# -# Adds the following fields to each benchmark result: -# iscore The baseline score for this test -# index The index of this test against the baseline -# Adds the following fields to $results: -# indexed The number of tests for which index values were -# generated -# fullindex Non-0 if all the index tests were indexed -# index The computed overall index for the run -# Note that the index values are computed as -# result / baseline * 10 -# so an index of 523 indicates that a test ran 52.3 times faster than -# the baseline. -sub indexResults { - my ( $results ) = @_; - - # Read in the baseline result data. If we can't get it, just return - # without making indexed results. - my $index = readResultsFromFile($BINDIR . "/index.base"); - if (!defined($index)) { - return; - } - - # Count the number of results we have (indexed or not) in - # each category. - my $numCat = { }; - foreach my $bench (@{$results->{'list'}}) { - my $bresult = $results->{$bench}; - ++$numCat->{$bresult->{'cat'}}; - } - $results->{'numCat'} = $numCat; - - my $numIndex = { }; - my $indexed = { }; - my $sum = { }; - foreach my $bench (sort(keys(%$index))) { - # Get the test data for this benchmark. - my $tdata = $testParams->{$bench}; - if (!defined($tdata)) { - abortRun("unknown benchmark \"$bench\" in $BINDIR/index.base"); - } - - # Get the test category. Count the total tests in this cat. - my $cat = $tdata->{'cat'}; - ++$numIndex->{$cat}; - - # If we don't have a result for this test, skip. 
- next if (!defined($results->{$bench})); - - # Get the index and actual results. Calcluate the score. - my $iresult = $index->{$bench}; - my $bresult = $results->{$bench}; - my $ratio = $bresult->{'score'} / $iresult->{'score'}; - - # Save the indexed score. - $bresult->{'iscore'} = $iresult->{'score'}; - $bresult->{'index'} = $ratio * 10; - - # Sun the scores, and count this test for this category. - $sum->{$cat} += log($ratio) if ($ratio > 0.000001); - ++$indexed->{$cat}; - } - - # Calculate the index scores per category. - $results->{'indexed'} = $indexed; - $results->{'numIndex'} = $numIndex; - foreach my $c (keys(%$indexed)) { - if ($indexed->{$c} > 0) { - $results->{'index'}{$c} = exp($sum->{$c} / $indexed->{$c}) * 10; - } - } -} - - -############################################################################ -# TEST EXECUTION -############################################################################ - -# Exec the given command in a sub-process. -# -# In the child process, we run the command and store its standard output. -# We also time its execution, and catch its exit status. We then write -# the command's output, plus lines containing the execution time and status, -# to a pipe. -# -# In the parent process, we immediately return an array containing the -# child PID and the filehandle to the pipe. This allows the caller to -# kick off multiple commands in parallel, then gather their output. -sub commandBuffered { - my ( $cmd ) = @_; - - # Create a pipe for parent-child communication. - my $childReader; - my $parentWriter; - pipe($childReader, $parentWriter) || abortRun("pipe() failed"); - $parentWriter->autoflush(1); - - # Fork off the child process. - my $pid = fork(); - if (!defined($pid)) { - abortRun("fork() failed (undef)"); - } elsif ($pid == 0) { - # Close the other end of the pipe. - close $childReader; - - # Start the clock and spawn the command. - my $benchStart = Time::HiRes::time(); - my ( $cmdPid, $cmdFd ) = command($cmd); - - # Read and buffer all the command's output. - my $output = [ ]; - while (<$cmdFd>) { - push(@$output, $_); - } - - # Stop the clock and save the time. - my $elTime = Time::HiRes::time() - $benchStart; - push(@$output, sprintf "elapsed|%f\n", $elTime); - - # Wait for the child to die so we can get its status. - # close($cmdFd); Doesn't work??? - waitpid($cmdPid, 0); - my $status = $?; - push(@$output, sprintf "status|%d\n", $status); - - # Now that we've got the time, play back all the output to the pipe. - # The parent can read this at its leisure. - foreach my $line (@$output) { - print $parentWriter $line; - } - - # Terminate this child. - close $parentWriter; - exit(0); - } - - # Close the other end of the pipe. - close $parentWriter; - - return ( $pid, $childReader ); -} - - -# Read the results of a benchmark execution from a child process, given -# its process ID and its filehandle. Create a results hash structure -# containing the fields returned by the child, plus: -# pid The child's process ID -# status The child's exit status -# ERROR Any stderr output from the child that isn't result data -# Note that ay result fields with ultiple values are split; so eg. -# COUNT|x|y|x -# becomes -# COUNT0 = x -# COUNT1 = y -# COUNT2 = z -sub readResults { - my ( $pid, $fd ) = @_; - - my $presult = { 'pid' => $pid }; - - # Read all the result lines from the child. - while (<$fd>) { - chomp; - - my ( $field, @params ) = split(/\|/); - if (scalar(@params) == 0) { # Error message. 
- $presult->{'ERROR'} .= "\n" if ($presult->{'ERROR'}); - $presult->{'ERROR'} .= $field; - } elsif (scalar(@params) == 1) { # Simple data. - $presult->{$field} = $params[0]; - } else { # Compound data. - # Store the values in separate fields, named "FIELD$i". - for (my $x = 0; $x < scalar(@params); ++$x) { - $presult->{$field . $x} = $params[$x]; - } - } - } - - # If the command had an error, make an appropriate message if we - # don't have one. - if ($presult->{'status'} != 0 && !defined($presult->{'ERROR'})) { - $presult->{'ERROR'} = "command returned status " . $presult->{'status'}; - } - - # Wait for the child to die. - close($fd); - waitpid($pid, 0); - - $presult; -} - - -# Execute a benchmark command. We set off a given number of copies in -# parallel to exercise multiple CPUs. -# -# We return an array of results hashes, one per copy; each one is as -# returned by readResults(). -sub executeBenchmark { - my ( $command, $copies ) = @_; - - # Array of contexts for all the copies we're running. - my $ctxt = [ ]; - - # Kick off all the commands at once. - for (my $i = 0; $i < $copies; ++$i) { - my ( $cmdPid, $cmdFd ) = commandBuffered($command); - $ctxt->[$i] = { - 'pid' => $cmdPid, - 'fd' => $cmdFd, - }; - } - - # Now, we can simply read back the command results in order. Because - # the child processes read and buffer the results and time the commands, - # there's no need to use select() to read the results as they appear. - my $pres = [ ]; - for (my $i = 0; $i < $copies; ++$i) { - my $presult = readResults($ctxt->[$i]{'pid'}, $ctxt->[$i]{'fd'}); - push(@$pres, $presult); - } - - $pres; -} - - -# Run one iteration of a benchmark, as specified by the given -# benchmark parameters. We run multiple parallel copies as -# specified by $copies. -sub runOnePass { - my ( $params, $verbose, $logFile, $copies ) = @_; - - # Get the command to run. - my $command = $params->{'command'}; - if ($verbose > 1) { - printf "\n"; - printf "COMMAND: \"%s\"\n", $command; - printf "COPIES: \"%d\"\n", $copies; - } - - # Remember where we are, and move to the test directory. - my $pwd = `pwd`; - chdir($TESTDIR); - - # Execute N copies of the benchmark in parallel. - my $copyResults = executeBenchmark($command, $copies); - printLog($logFile, "\n"); - - # Move back home. - chdir($pwd); - - # Sum up the scores of the copies. - my $count = 0; - my $time = 0; - my $elap = 0; - foreach my $res (@$copyResults) { - # Log the result data for each copy. - foreach my $k (sort(keys(%$res))) { - printLog($logFile, "# %s: %s\n", $k, $res->{$k}); - } - printLog($logFile, "\n"); - - # If it failed, bomb out. - if (defined($res->{'ERROR'})) { - my $name = $params->{'logmsg'}; - abortRun("\"$name\": " . $res->{'ERROR'}); - } - - # Count up the score. - $count += $res->{'COUNT0'}; - $time += $res->{'TIME'} || $res->{'elapsed'}; - $elap += $res->{'elapsed'}; - } - - # Make up a combined result. - my $passResult = $copyResults->[0]; - $passResult->{'COUNT0'} = $count; - $passResult->{'TIME'} = $time / $copies; - $passResult->{'elapsed'} = $elap / $copies; - - $passResult; -} - - -sub runBenchmark { - my ( $bench, $tparams, $verbose, $logFile, $copies ) = @_; - - # Make up the actual benchmark parameters. - my $params = mergeParams($baseParams, $tparams); - - # Make up the command string based on the parameters. - my $prog = $params->{'prog'} || $BINDIR . "/" . $bench; - my $command = sprintf "\"%s\" %s", $prog, $params->{'options'}; - $command .= " < \"" . $params->{'stdin'} . 
"\"" if ($params->{'stdin'}); - $command .= " 2>&1"; - $command .= $params->{'stdout'} ? (" >> \"" . $logFile . "\"") : " > /dev/null"; - $params->{'command'} = $command; - - # Set up the benchmark results structure. - my $bresult = { 'name' => $bench, 'msg' => $params->{'logmsg'} }; - - if ($verbose > 0) { - printf "\n%d x %s ", $copies, $params->{'logmsg'}; - } - - printLog($logFile, - "\n########################################################\n"); - printLog($logFile, "%s -- %s\n", - $params->{'logmsg'}, number($copies, "copy", "copies")); - printLog($logFile, "==> %s\n\n", $command); - - # Run the test iterations, as given by the "repeat" parameter. - my $repeats = $shortIterCount; - $repeats = $longIterCount if $params->{'repeat'} eq 'long'; - $repeats = 1 if $params->{'repeat'} eq 'single'; - my $pres = [ ]; - for (my $i = 1; $i <= $repeats; ++$i) { - printLog($logFile, "#### Pass %d\n\n", $i); - - # make an attempt to flush buffers - system("sync; sleep 1; sync; sleep 2"); - # display heartbeat - if ($verbose > 0) { - printf " %d", $i; - } - - # Execute one pass of the benchmark. - my $presult = runOnePass($params, $verbose, $logFile, $copies); - push(@$pres, $presult); - } - $bresult->{'passes'} = $pres; - - # Calculate the averaged results for this benchmark. - combinePassResults($bench, $tparams, $bresult, $logFile); - - # Log the results. - if ($copies == 1) { - printLog($logFile, "\n>>>> Results of 1 copy\n"); - } else { - printLog($logFile, "\n>>>> Sum of %d copies\n", $copies); - } - foreach my $k ( 'score', 'time', 'iterations' ) { - printLog($logFile, ">>>> %s: %s\n", $k, $bresult->{$k}); - } - printLog($logFile, "\n"); - - # Some specific cleanup routines. - if ($bench eq "C") { - unlink(${TESTDIR} . "/cctest.o"); - unlink(${TESTDIR} . "/a.out"); - } - - if ($verbose > 0) { - printf "\n"; - } - - $bresult; -} - - -# Run the named benchmarks. -sub runTests { - my ( $tests, $verbose, $logFile, $copies ) = @_; - - # Run all the requested tests and gather the results. - my $results = { 'start' => time(), 'copies' => $copies }; - foreach my $bench (@$tests) { - # Get the parameters for this benchmark. - my $params = $testParams->{$bench}; - if (!defined($params)) { - abortRun("unknown benchmark \"$bench\""); - } - - # If the benchmark doesn't want to run with this many copies, skip it. - my $cat = $params->{'cat'}; - my $maxCopies = $testCats->{$cat}{'maxCopies'}; - next if ($maxCopies > 0 && $copies > $maxCopies); - - # Run the benchmark. - my $bresult = runBenchmark($bench, $params, $verbose, $logFile, $copies); - $results->{$bench} = $bresult; - } - $results->{'end'} = time(); - - # Generate a sorted list of benchmarks for which we have results. - my @benches = grep { - ref($results->{$_}) eq "HASH" && defined($results->{$_}{'msg'}) - } keys(%$results); - @benches = sort { - $results->{$a}{'msg'} cmp $results->{$b}{'msg'} - } @benches; - $results->{'list'} = \@benches; - - # Generate index scores for the results relative to the baseline data. - indexResults($results); - - $results; -} - - -############################################################################ -# TEXT REPORTS -############################################################################ - -# Display a banner indicating the configuration of the system under test -# to the given file desc. -sub displaySystem { - my ( $info, $fd ) = @_; - - # Display basic system info. 
- printf $fd " System: %s: %s\n", $info->{'name'}, $info->{'system'}; - printf $fd " OS: %s -- %s -- %s\n", - $info->{'os'}, $info->{'osRel'}, $info->{'osVer'}; - printf $fd " Machine: %s (%s)\n", $info->{'mach'}, $info->{'platform'}; - printf $fd " Language: %s\n", $info->{'language'}; - - # Get and display details on the CPUs, if possible. - my $cpus = $info->{'cpus'}; - if (!defined($cpus)) { - printf $fd " CPU: no details available\n"; - } else { - for (my $i = 0; $i <= $#$cpus; ++$i) { - printf $fd " CPU %d: %s (%.1f bogomips)\n", - $i, $cpus->[$i]{'model'}, $cpus->[$i]{'bogo'}; - printf $fd " %s\n", $cpus->[$i]{'flags'}; - } - } - - if ($info->{'graphics'}) { - printf $fd " Graphics: %s\n", $info->{'graphics'}; - } - - # Display system load and usage info. - printf $fd " %s; runlevel %s\n\n", $info->{'load'}, $info->{'runlevel'}; -} - - -# Display the test scores from the given set of test results. -sub logResults { - my ( $results, $outFd ) = @_; - - # Display the individual test scores. - foreach my $bench (@{$results->{'list'}}) { - my $bresult = $results->{$bench}; - - printf $outFd "%-40s %12.1f %-5s (%.1f s, %d samples)\n", - $bresult->{'msg'}, - $bresult->{'score'}, - $bresult->{'scorelabel'}, - $bresult->{'time'}, - $bresult->{'iterations'}; - } - - printf $outFd "\n"; -} - - -# Display index scores, if any, for the given run results. -sub logIndexCat { - my ( $results, $cat, $outFd ) = @_; - - my $total = $results->{'numIndex'}{$cat}; - my $indexed = $results->{'indexed'}{$cat}; - my $iscore = $results->{'index'}{$cat}; - my $full = $total == $indexed; - - # If there are no indexed scores, just say so. - if (!defined($indexed) || $indexed == 0) { - printf $outFd "No index results available for %s\n\n", - $testCats->{$cat}{'name'}; - return; - } - - # Display the header, depending on whether we have a full set of index - # scores, or a partial set. - my $head = $testCats->{$cat}{'name'} . - ($full ? " Index Values" : " Partial Index"); - printf $outFd "%-40s %12s %12s %8s\n", - $head, "BASELINE", "RESULT", "INDEX"; - - # Display the individual test scores. - foreach my $bench (@{$results->{'list'}}) { - my $bresult = $results->{$bench}; - next if $bresult->{'cat'} ne $cat; - - if (defined($bresult->{'iscore'}) && defined($bresult->{'index'})) { - printf $outFd "%-40s %12.1f %12.1f %8.1f\n", - $bresult->{'msg'}, $bresult->{'iscore'}, - $bresult->{'score'}, $bresult->{'index'}; - } else { - printf $outFd "%-40s %12s %12.1f %8s\n", - $bresult->{'msg'}, "---", - $bresult->{'score'}, "---"; - } - } - - # Display the overall score. - my $title = $testCats->{$cat}{'name'} . " Index Score"; - if (!$full) { - $title .= " (Partial Only)"; - } - printf $outFd "%-40s %12s %12s %8s\n", "", "", "", "========"; - printf $outFd "%-66s %8.1f\n", $title, $iscore; - - printf $outFd "\n"; -} - - -# Display index scores, if any, for the given run results. -sub logIndex { - my ( $results, $outFd ) = @_; - - my $count = $results->{'indexed'}; - foreach my $cat (keys(%$count)) { - logIndexCat($results, $cat, $outFd); - } -} - - -# Dump the given run results into the given report file. -sub summarizeRun { - my ( $systemInfo, $results, $verbose, $reportFd ) = @_; - - # Display information about this test run. 
- printf $reportFd "------------------------------------------------------------------------\n"; - printf $reportFd "Benchmark Run: %s %s - %s\n", - strftime("%a %b %d %Y", localtime($results->{'start'})), - strftime("%H:%M:%S", localtime($results->{'start'})), - strftime("%H:%M:%S", localtime($results->{'end'})); - printf $reportFd "%s in system; running %s of tests\n", - number($systemInfo->{'numCpus'}, "CPU"), - number($results->{'copies'}, "parallel copy", "parallel copies"); - printf $reportFd "\n"; - - # Display the run scores. - logResults($results, $reportFd); - - # Display the indexed scores, if any. - logIndex($results, $reportFd); -} - - -# Write CSV Headers. -# e.g.: "Concurrency,Dhrystone 2 using register variables,Double-Precision Whetstone" -# -sub summarizeRunCsvHeader { - my ( $results, $reportFd ) = @_; - - # First col is for Concurrency value. - printf $reportFd "Concurrency"; - - # Write CSV Headers of test. - foreach my $bench (@{$results->{'list'}}) { - my $bresult = $results->{$bench}; - printf $reportFd ",%s", $bresult->{'msg'}; - } - printf $reportFd "\n"; -} - -# Write CSV data rows per concurrency as "./Run -c 1 -c 2". -# e.g.: 1,33526940.9,3623.9 -# 2,30386997.8,3678.8 -# 4,31439797.3,3781.4 -# 8,32872262.9,3826.2 -sub summarizeRunCsvRows { - my ( $results, $reportFd) = @_; - - # Write concurrency value. - printf $reportFd "%d", $results->{'copies'}; - - # Write test results. - my $isFirstColumn = 1; - foreach my $bench (@{$results->{'list'}}) { - my $bresult = $results->{$bench}; - - printf $reportFd ",%.1f", $bresult->{'score'}; - $isFirstColumn = 0; - } - - printf $reportFd "\n"; -} - - - -############################################################################ -# HTML REPORTS -############################################################################ - -# Dump the given run results into the given report file. -sub runHeaderHtml { - my ( $systemInfo, $reportFd ) = @_; - - # Display information about this test run. - my $title = sprintf "Benchmark of %s / %s on %s", - $systemInfo->{'name'}, $systemInfo->{'system'}, - strftime("%a %b %d %Y", localtime()); - - print $reportFd < - - - - - $title - - - -EOF - - # Display information about this test run. - printf $reportFd "

%s

\n", $title; - printf $reportFd "

BYTE UNIX Benchmarks (Version %s)

\n\n", - $version; -} - - -# Display a banner indicating the configuration of the system under test -# to the given file desc. -sub displaySystemHtml { - my ( $info, $fd ) = @_; - - printf $fd "

Test System Information

\n"; - printf $fd "

\n"; - - # Display basic system info. - printf $fd "\n"; - printf $fd " \n"; - printf $fd " \n", - $info->{'name'}, $info->{'system'}; - printf $fd "\n"; - printf $fd " \n"; - printf $fd " \n", - $info->{'os'}, $info->{'osRel'}, $info->{'osVer'}; - printf $fd "\n"; - printf $fd " \n"; - printf $fd " \n", - $info->{'mach'}, $info->{'platform'}; - printf $fd "\n"; - printf $fd " \n"; - printf $fd " \n", $info->{'language'}; - printf $fd "\n"; - - # Get and display details on the CPUs, if possible. - my $cpus = $info->{'cpus'}; - if (!defined($cpus)) { - printf $fd "\n"; - printf $fd " \n"; - printf $fd " \n"; - printf $fd "\n"; - } else { - for (my $i = 0; $i <= $#$cpus; ++$i) { - printf $fd "\n"; - if ($i == 0) { - printf $fd " \n", $#$cpus + 1; - } - printf $fd " \n", $i; - printf $fd " \n", $cpus->[$i]{'flags'}; - printf $fd "\n"; - } - } - - # Display graphics hardware info. - if ($info->{'graphics'}) { - printf $fd "\n"; - printf $fd " \n"; - printf $fd " \n", $info->{'graphics'}; - printf $fd "\n"; - } - - # Display system runlevel, load and usage info. - printf $fd "\n"; - printf $fd " \n"; - printf $fd " \n", - $info->{'load'}, $info->{'runlevel'}; - printf $fd "\n"; - - printf $fd "
System:%s: %s
OS:%s -- %s -- %s
Machine:%s: %s
Language:%s
CPUs:no details available
CPUs:%d:%s (%.1f bogomips)
\n", - $cpus->[$i]{'model'}, $cpus->[$i]{'bogo'}; - printf $fd " %s
Graphics:%s
Uptime:%s; runlevel %s

\n\n"; -} - - -# Display the test scores from the given set of test results -# for a given category of tests. -sub logCatResultsHtml { - my ( $results, $cat, $fd ) = @_; - - my $numIndex = $results->{'numIndex'}{$cat}; - my $indexed = $results->{'indexed'}{$cat}; - my $iscore = $results->{'index'}{$cat}; - my $full = defined($indexed) && $indexed == $numIndex; - - # If there are no results in this category, just ignore it. - if (!defined($results->{'numCat'}{$cat}) || - $results->{'numCat'}{$cat} == 0) { - return; - } - - # Say the category. If there are no indexed scores, just say so. - my $warn = ""; - if (!defined($indexed) || $indexed == 0) { - $warn = " — no index results available"; - } elsif (!$full) { - $warn = " — not all index tests were run;" . - " only a partial index score is available"; - } - printf $fd "

%s%s

\n", $testCats->{$cat}{'name'}, $warn; - - printf $fd "

\n"; - - printf $fd "\n"; - printf $fd " \n"; - printf $fd " \n"; - printf $fd " \n"; - printf $fd " \n"; - printf $fd " \n"; - printf $fd " \n"; - printf $fd " \n"; - printf $fd "\n"; - - # Display the individual test scores. - foreach my $bench (@{$results->{'list'}}) { - my $bresult = $results->{$bench}; - next if $bresult->{'cat'} ne $cat; - - printf $fd "\n"; - printf $fd " \n", $bresult->{'msg'}; - printf $fd " \n", - $bresult->{'score'}; - printf $fd " \n", - $bresult->{'scorelabel'}; - printf $fd " \n", - $bresult->{'time'}; - printf $fd " \n", - $bresult->{'iterations'}; - - if (defined($bresult->{'index'})) { - printf $fd " \n", - $bresult->{'iscore'}; - printf $fd " \n", - $bresult->{'index'}; - } - printf $fd "\n"; - } - - # Display the overall score. - if (defined($indexed) && $indexed > 0) { - my $title = $testCats->{$cat}{'name'} . " Index Score"; - if (!$full) { - $title .= " (Partial Only)"; - } - printf $fd "\n"; - printf $fd " \n", $title; - printf $fd " \n", $iscore; - printf $fd "\n"; - } - - printf $fd "
TestScoreUnitTimeIters.BaselineIndex
%s%.1f%s%.1f s%d%.1f%.1f
%s:%.1f

\n\n"; -} - - -# Display index scores, if any, for the given run results. -sub logResultsHtml { - my ( $results, $fd ) = @_; - - foreach my $cat (keys(%$testCats)) { - logCatResultsHtml($results, $cat, $fd); - } -} - - -# Dump the given run results into the given report file. -sub summarizeRunHtml { - my ( $systemInfo, $results, $verbose, $reportFd ) = @_; - - # Display information about this test run. - my $time = $results->{'end'} - $results->{'start'}; - printf $reportFd "


\n"; - printf $reportFd "

Benchmark Run: %s; %s

\n", - number($systemInfo->{'numCpus'}, "CPU"), - number($results->{'copies'}, "parallel process", "parallel processes"); - printf $reportFd "

Time: %s - %s; %dm %02ds

\n", - strftime("%H:%M:%S", localtime($results->{'start'})), - strftime("%H:%M:%S", localtime($results->{'end'})), - int($time / 60), $time % 60; - printf $reportFd "\n"; - - # Display the run scores. - logResultsHtml($results, $reportFd); -} - - -sub runFooterHtml { - my ( $reportFd ) = @_; - - print $reportFd <

-
No Warranties: This information is provided free of charge and "as -is" without any warranty, condition, or representation of any kind, -either express or implied, including but not limited to, any warranty -respecting non-infringement, and the implied warranties of conditions -of merchantability and fitness for a particular purpose. All logos or -trademarks on this site are the property of their respective owner. In -no event shall the author be liable for any -direct, indirect, special, incidental, consequential or other damages -howsoever caused whether arising in contract, tort, or otherwise, -arising out of or in connection with the use or performance of the -information contained on this web site.
- - -EOF -} - - -############################################################################ -# MAIN -############################################################################ - -sub main { - my @args = @_; - - my $params = parseArgs(@args); - my $verbose = $params->{'verbose'} || 1; - if ($params->{'iterations'}) { - $longIterCount = $params->{'iterations'}; - $shortIterCount = int(($params->{'iterations'} + 1) / 3); - $shortIterCount = 1 if ($shortIterCount < 1); - } - - # If no benchmark units have be specified, do "index". - my $tests = $params->{'tests'}; - if ($#$tests < 0) { - $tests = $index; - } - - # Create directories. - my @creatingDirectories = ( ${TMPDIR}, ${RESULTDIR} ); - createDirrectoriesIfNotExists(@creatingDirectories); - - preChecks(); - my $systemInfo = getSystemInfo(); - - # If the number of copies to run was not set, set it to 1 - # and the number of CPUs in the system (if > 1). - my $copies = $params->{'copies'}; - if (!$copies || scalar(@$copies) == 0) { - push(@$copies, 1); - if (defined($systemInfo->{'numCpus'}) && $systemInfo->{'numCpus'} > 1) { - push(@$copies, $systemInfo->{'numCpus'}); - } - } - - # Display the program banner. - system("cat \"${BINDIR}/unixbench.logo\""); - - # Show output output directories, if not in quiet mode. - if ($verbose > 0) { - printUsingDirectories(); - } - - if ($verbose > 1) { - printf "\n", join(", ", @$tests); - printf "Tests to run: %s\n", join(", ", @$tests); - } - - # Generate unique file names for the report and log file. - my $reportFile = logFile($systemInfo); - my $reportHtml = $reportFile . ".html"; - my $reportCsv = $reportFile . ".csv"; - my $logFile = $reportFile . ".log"; - - # If defined "UB_OUTPUT_CSV" on Environment, output csv file. - my $ubOutputCsv = $ENV{"UB_OUTPUT_CSV"}; - my $isOutputFormatCsv = defined($ubOutputCsv) && $ubOutputCsv eq "true"; - # If write CSV, header needs only once. - my $is_csv_header_written = 0; - - # Open the log file for writing. - open(my $reportFd, ">", $reportFile) || - die("Run: can't write to $reportFile\n"); - open(my $reportFd2, ">", $reportHtml) || - die("Run: can't write to $reportHtml\n"); - my $reportFd_Csv; - if ($isOutputFormatCsv) { - open($reportFd_Csv, ">", $reportCsv) || - die("Run: can't write to $reportCsv\n"); - } - - printf $reportFd " BYTE UNIX Benchmarks (Version %s)\n\n", $version; - runHeaderHtml($systemInfo, $reportFd2); - - # Dump information about the system under test. - displaySystem($systemInfo, $reportFd); - displaySystemHtml($systemInfo, $reportFd2); - - # Run the tests! Do a test run once for each desired number of copies; - # for example, on a 2-CPU system, we may do a single-processing run - # followed by a dual-processing run. - foreach my $c (@$copies) { - if ($verbose > 1) { - printf "Run with %s\n", number($c, "copy", "copies"); - } - my $results = runTests($tests, $verbose, $logFile, $c); - - summarizeRun($systemInfo, $results, $verbose, $reportFd); - summarizeRunHtml($systemInfo, $results, $verbose, $reportFd2); - - if ($isOutputFormatCsv) { - if ( $is_csv_header_written == 0 ) { - summarizeRunCsvHeader($results, $reportFd_Csv); - $is_csv_header_written = 1; - } - summarizeRunCsvRows($results, $reportFd_Csv); - } - } - - runFooterHtml($reportFd2); - - # Finish the report. - close($reportFd); - close($reportFd2); - if ($isOutputFormatCsv) { - close($reportFd_Csv); - } - - # Display the report, if not in quiet mode. 
- if ($verbose > 0) { - printf "\n"; - printf "========================================================================\n"; - system("cat \"$reportFile\""); - } - - 0; -} - - -exit(main(@ARGV)); - diff --git a/rk3308/unixbench-master_32/USAGE b/rk3308/unixbench-master_32/USAGE deleted file mode 100755 index 29e5785..0000000 --- a/rk3308/unixbench-master_32/USAGE +++ /dev/null @@ -1,400 +0,0 @@ -Running the Tests -================= - -All the tests are executed using the "Run" script in the top-level directory. - -The simplest way to generate results is with the commmand: - ./Run - -This will run a standard "index" test (see "The BYTE Index" below), and -save the report in the "results" directory, with a filename like - hostname-2007-09-23-01 -An HTML version is also saved. - -If you want to generate both the basic system index and the graphics index, -then do: - ./Run gindex - -If your system has more than one CPU, the tests will be run twice -- once -with a single copy of each test running at once, and once with N copies, -where N is the number of CPUs. Some categories of tests, however (currently -the graphics tests) will only run with a single copy. - -Since the tests are based on constant time (variable work), a "system" -run usually takes about 29 minutes; the "graphics" part about 18 minutes. -A "gindex" run on a dual-core machine will do 2 "system" passes (single- -and dual-processing) and one "graphics" run, for a total around one and -a quarter hours. - -============================================================================ - -Detailed Usage -============== - -The Run script takes a number of options which you can use to customise a -test, and you can specify the names of the tests to run. The full usage -is: - - Run [ -q | -v ] [-i ] [-c [-c ...]] [test ...] - -The option flags are: - - -q Run in quiet mode. - -v Run in verbose mode. - -i Run iterations for each test -- slower tests - use / 3, but at least 1. Defaults to 10 (3 for - slow tests). - -c Run copies of each test in parallel. - -The -c option can be given multiple times; for example: - - ./Run -c 1 -c 4 - -will run a single-streamed pass, then a 4-streamed pass. Note that some -tests (currently the graphics tests) will only run in a single-streamed pass. - -The remaining non-flag arguments are taken to be the names of tests to run. -The default is to run "index". See "Tests" below. - -When running the tests, I do *not* recommend switching to single-user mode -("init 1"). This seems to change the results in ways I don't understand, -and it's not realistic (unless your system will actually be running in this -mode, of course). However, if using a windowing system, you may want to -switch to a minimal window setup (for example, log in to a "twm" session), -so that randomly-churning background processes don't randomise the results -too much. This is particularly true for the graphics tests. - - -Output can be specified by setting the following environment variables: - - * "UB_RESULTDIR" : Absolute path of output directory of result files. - * "UB_TMPDIR" : Absolute path of temporary files for IO tests. - * "UB_OUTPUT_FILE_NAME" : Output file name. If exists it will be overwritten. - * "UB_OUTPUT_CSV" : If set "true", output results(score only) to .csv. -============================================================================ - -Tests -===== - -The available tests are organised into categories; when generating index -scores (see "The BYTE Index" below) the results for each category are -produced separately. 
The categories are: - - system The original Unix system tests (not all are actually - in the index) - 2d 2D graphics tests (not all are actually in the index) - 3d 3D graphics tests - misc Various non-indexed tests - -The following individual tests are available: - - system: - dhry2reg Dhrystone 2 using register variables - whetstone-double Double-Precision Whetstone - syscall System Call Overhead - pipe Pipe Throughput - context1 Pipe-based Context Switching - spawn Process Creation - execl Execl Throughput - fstime-w File Write 1024 bufsize 2000 maxblocks - fstime-r File Read 1024 bufsize 2000 maxblocks - fstime File Copy 1024 bufsize 2000 maxblocks - fsbuffer-w File Write 256 bufsize 500 maxblocks - fsbuffer-r File Read 256 bufsize 500 maxblocks - fsbuffer File Copy 256 bufsize 500 maxblocks - fsdisk-w File Write 4096 bufsize 8000 maxblocks - fsdisk-r File Read 4096 bufsize 8000 maxblocks - fsdisk File Copy 4096 bufsize 8000 maxblocks - shell1 Shell Scripts (1 concurrent) (runs "looper 60 multi.sh 1") - shell8 Shell Scripts (8 concurrent) (runs "looper 60 multi.sh 8") - shell16 Shell Scripts (8 concurrent) (runs "looper 60 multi.sh 16") - - 2d: - 2d-rects 2D graphics: rectangles - 2d-lines 2D graphics: lines - 2d-circle 2D graphics: circles - 2d-ellipse 2D graphics: ellipses - 2d-shapes 2D graphics: polygons - 2d-aashapes 2D graphics: aa polygons - 2d-polys 2D graphics: complex polygons - 2d-text 2D graphics: text - 2d-blit 2D graphics: images and blits - 2d-window 2D graphics: windows - - 3d: - ubgears 3D graphics: gears - - misc: - C C Compiler Throughput ("looper 60 $cCompiler cctest.c") - arithoh Arithoh (huh?) - short Arithmetic Test (short) (this is arith.c configured for - "short" variables; ditto for the ones below) - int Arithmetic Test (int) - long Arithmetic Test (long) - float Arithmetic Test (float) - double Arithmetic Test (double) - dc Dc: sqrt(2) to 99 decimal places (runs - "looper 30 dc < dc.dat", using your system's copy of "dc") - hanoi Recursion Test -- Tower of Hanoi - grep Grep for a string in a large file, using your system's - copy of "grep" - sysexec Exercise fork() and exec(). - -The following pseudo-test names are aliases for combinations of other -tests: - - arithmetic Runs arithoh, short, int, long, float, double, - and whetstone-double - dhry Alias for dhry2reg - dhrystone Alias for dhry2reg - whets Alias for whetstone-double - whetstone Alias for whetstone-double - load Runs shell1, shell8, and shell16 - misc Runs C, dc, and hanoi - speed Runs the arithmetic and system groups - oldsystem Runs execl, fstime, fsbuffer, fsdisk, pipe, context1, - spawn, and syscall - system Runs oldsystem plus shell1, shell8, and shell16 - fs Runs fstime-w, fstime-r, fstime, fsbuffer-w, - fsbuffer-r, fsbuffer, fsdisk-w, fsdisk-r, and fsdisk - shell Runs shell1, shell8, and shell16 - - index Runs the tests which constitute the official index: - the oldsystem group, plus dhry2reg, whetstone-double, - shell1, and shell8 - See "The BYTE Index" below for more information. 
- graphics Runs the tests which constitute the graphics index: - 2d-rects, 2d-ellipse, 2d-aashapes, 2d-text, 2d-blit, - 2d-window, and ubgears - gindex Runs the index and graphics groups, to generate both - sets of index results - - all Runs all tests - - -============================================================================ - -The BYTE Index -============== - -The purpose of this test is to provide a basic indicator of the performance -of a Unix-like system; hence, multiple tests are used to test various -aspects of the system's performance. These test results are then compared -to the scores from a baseline system to produce an index value, which is -generally easier to handle than the raw sores. The entire set of index -values is then combined to make an overall index for the system. - -Since 1995, the baseline system has been "George", a SPARCstation 20-61 -with 128 MB RAM, a SPARC Storage Array, and Solaris 2.3, whose ratings -were set at 10.0. (So a system which scores 520 is 52 times faster than -this machine.) Since the numbers are really only useful in a relative -sense, there's no particular reason to update the base system, so for the -sake of consistency it's probably best to leave it alone. George's scores -are in the file "pgms/index.base"; this file is used to calculate the -index scores for any particular run. - -Over the years, various changes have been made to the set of tests in the -index. Although there is a desire for a consistent baseline, various tests -have been determined to be misleading, and have been removed; and a few -alternatives have been added. These changes are detailed in the README, -and should be born in mind when looking at old scores. - -A number of tests are included in the benchmark suite which are not part of -the index, for various reasons; these tests can of course be run manually. -See "Tests" above. - - -============================================================================ - -Graphics Tests -============== - -As of version 5.1, UnixBench now contains some graphics benchmarks. These -are intended to give a rough idea of the general graphics performance of -a system. - -The graphics tests are in categories "2d" and "3d", so the index scores -for these tests are separate from the basic system index. This seems -like a sensible division, since the graphics performance of a system -depends largely on the graphics adaptor. - -The tests currently consist of some 2D "x11perf" tests and "ubgears". - -* The 2D tests are a selection of the x11perf tests, using the host - system's x11perf command (which must be installed and in the search - path). Only a few of the x11perf tests are used, in the interests - of completing a test run in a reasonable time; if you want to do - detailed diagnosis of an X server or graphics chip, then use x11perf - directly. - -* The 3D test is "ubgears", a modified version of the familiar "glxgears". - This version runs for 5 seconds to "warm up", then performs a timed - run and displays the average frames-per-second. - -On multi-CPU systems, the graphics tests will only run in single-processing -mode. This is because the meaning of running two copies of a test at once -is dubious; and the test windows tend to overlay each other, meaning that -the window behind isn't actually doing any work. 
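As a point of reference, the single-processing restriction is enforced in the
Run script through a per-category copy limit: each category in $testCats can
carry a "maxCopies" value, and runTests() skips a benchmark when more copies
are requested than its category allows. A condensed sketch of that check,
from runTests() in the Run script (the $testCats entries themselves are
defined near the top of Run and are not shown here):

    # Skip a test if its category caps the number of parallel copies
    # (presumably 1 for the 2d/3d categories, per the text above).
    my $cat = $params->{'cat'};
    my $maxCopies = $testCats->{$cat}{'maxCopies'};
    next if ($maxCopies > 0 && $copies > $maxCopies);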
- - -============================================================================ - -Multiple CPUs -============= - -If your system has multiple CPUs, the default behaviour is to run the selected -tests twice -- once with one copy of each test program running at a time, -and once with N copies, where N is the number of CPUs. (You can override -this with the "-c" option; see "Detailed Usage" above.) This is designed to -allow you to assess: - - - the performance of your system when running a single task - - the performance of your system when running multiple tasks - - the gain from your system's implementation of parallel processing - -The results, however, need to be handled with care. Here are the results -of two runs on a dual-processor system, one in single-processing mode, one -dual-processing: - - Test Single Dual Gain - -------------------- ------ ------ ---- - Dhrystone 2 562.5 1110.3 97% - Double Whetstone 320.0 640.4 100% - Execl Throughput 450.4 880.3 95% - File Copy 1024 759.4 595.9 -22% - File Copy 256 535.8 438.8 -18% - File Copy 4096 1261.8 1043.4 -17% - Pipe Throughput 481.0 979.3 104% - Pipe-based Switching 326.8 1229.0 276% - Process Creation 917.2 1714.1 87% - Shell Scripts (1) 1064.9 1566.3 47% - Shell Scripts (8) 1567.7 1709.9 9% - System Call Overhead 944.2 1445.5 53% - -------------------- ------ ------ ---- - Index Score: 678.2 1026.2 51% - -As expected, the heavily CPU-dependent tasks -- dhrystone, whetstone, -execl, pipe throughput, process creation -- show close to 100% gain when -running 2 copies in parallel. - -The Pipe-based Context Switching test measures context switching overhead -by sending messages back and forth between 2 processes. I don't know why -it shows such a huge gain with 2 copies (ie. 4 processes total) running, -but it seems to be consistent on my system. I think this may be an issue -with the SMP implementation. - -The System Call Overhead shows a lesser gain, presumably because it uses a -lot of CPU time in single-threaded kernel code. The shell scripts test with -8 concurrent processes shows no gain -- because the test itself runs 8 -scripts in parallel, it's already using both CPUs, even when the benchmark -is run in single-stream mode. The same test with one process per copy -shows a real gain. - -The filesystem throughput tests show a loss, instead of a gain, when -multi-processing. That there's no gain is to be expected, since the tests -are presumably constrained by the throughput of the I/O subsystem and the -disk drive itself; the drop in performance is presumably down to the -increased contention for resources, and perhaps greater disk head movement. - -So what tests should you use, how many copies should you run, and how should -you interpret the results? Well, that's up to you, since it depends on -what it is you're trying to measure. - -Implementation --------------- - -The multi-processing mode is implemented at the level of test iterations. -During each iteration of a test, N slave processes are started using fork(). -Each of these slaves executes the test program using fork() and exec(), -reads and stores the entire output, times the run, and prints all the -results to a pipe. The Run script reads the pipes for each of the slaves -in turn to get the results and times. The scores are added, and the times -averaged. - -The result is that each test program has N copies running at once. They -should all finish at around the same time, since they run for constant time. 
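In code terms, the combining step looks roughly like the following condensed
extract from runOnePass() in the Run script (COUNT0 is the first field of a
test's COUNT result line; 'elapsed' is the wall-clock time measured by the
slave process):

    # Sum the copies' scores and average their times.
    my ($count, $time, $elap) = (0, 0, 0);
    foreach my $res (@$copyResults) {
        $count += $res->{'COUNT0'};
        $time  += $res->{'TIME'} || $res->{'elapsed'};
        $elap  += $res->{'elapsed'};
    }
    my $passResult = $copyResults->[0];
    $passResult->{'COUNT0'}  = $count;           # scores added
    $passResult->{'TIME'}    = $time / $copies;  # times averaged
    $passResult->{'elapsed'} = $elap / $copies;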
- -If a test program itself starts off K multiple processes (as with the shell8 -test), then the effect will be that there are N * K processes running at -once. This is probably not very useful for testing multi-CPU performance. - - -============================================================================ - -The Language Setting -==================== - -The $LANG environment variable determines how programs abnd library -routines interpret text. This can have a big impact on the test results. - -If $LANG is set to POSIX, or is left unset, text is treated as ASCII; if -it is set to en_US.UTF-8, foir example, then text is treated as being -encoded in UTF-8, which is more complex and therefore slower. Setting -it to other languages can have varying results. - -To ensure consistency between test runs, the Run script now (as of version -5.1.1) sets $LANG to "en_US.utf8". - -This setting which is configured with the variable "$language". You -should not change this if you want to share your results to allow -comparisons between systems; however, you may want to change it to see -how different language settings affect performance. - -Each test report now includes the language settings in use. The reported -language is what is set in $LANG, and is not necessarily supported by the -system; but we also report the character mapping and collation order which -are actually in use (as reported by "locale"). - - -============================================================================ - -Interpreting the Results -======================== - -Interpreting the results of these tests is tricky, and totally depends on -what you're trying to measure. - -For example, are you trying to measure how fast your CPU is? Or how good -your compiler is? Because these tests are all recompiled using your host -system's compiler, the performance of the compiler will inevitably impact -the performance of the tests. Is this a problem? If you're choosing a -system, you probably care about its overall speed, which may well depend -on how good its compiler is; so including that in the test results may be -the right answer. But you may want to ensure that the right compiler is -used to build the tests. - -On the other hand, with the vast majority of Unix systems being x86 / PC -compatibles, running Linux and the GNU C compiler, the results will tend -to be more dependent on the hardware; but the versions of the compiler and -OS can make a big difference. (I measured a 50% gain between SUSE 10.1 -and OpenSUSE 10.2 on the same machine.) So you may want to make sure that -all your test systems are running the same version of the OS; or at least -publish the OS and compuiler versions with your results. Then again, it may -be compiler performance that you're interested in. - -The C test is very dubious -- it tests the speed of compilation. If you're -running the exact same compiler on each system, OK; but otherwise, the -results should probably be discarded. A slower compilation doesn't say -anything about the speed of your system, since the compiler may simply be -spending more time to super-optimise the code, which would actually make it -faster. - -This will be particularly true on architectures like IA-64 (Itanium etc.) -where the compiler spends huge amounts of effort scheduling instructions -to run in parallel, with a resultant significant gain in execution speed. - -Some tests are even more dubious in terms of host-dependency -- for example, -the "dc" test uses the host's version of dc (a calculator program). 
The -version of this which is available can make a huge difference to the score, -which is why it's not in the index group. Read through the release notes -for more on these kinds of issues. - -Another age-old issue is that of the benchmarks being too trivial to be -meaningful. With compilers getting ever smarter, and performing more -wide-ranging flow path analyses, the danger of parts of the benchmarks -simply being optimised out of existance is always present. - -All in all, the "index" and "gindex" tests (see above) are designed to -give a reasonable measure of overall system performance; but the results -of any test run should always be used with care. - diff --git a/rk3308/unixbench-master_32/WRITING_TESTS b/rk3308/unixbench-master_32/WRITING_TESTS deleted file mode 100755 index 28cd968..0000000 --- a/rk3308/unixbench-master_32/WRITING_TESTS +++ /dev/null @@ -1,133 +0,0 @@ -Writing a Test -============== - -Writing a test program is pretty easy. Basically, a test is configured via -a monster array in the Run script, which specifics (among other things) the -program to execute and the parameters to pass it. - -The test itself is simply a program which is given the optional parameters -on the command line, and produces logging data on stdout and its results on -stderr. - - -============================================================================ - -Test Configuration -================== - -In Run, all tests are named in the "$testList" array. This names the -individual tests, and also sets up aliases for groups of tests, eg. "index". - -The test specifications are in the "$testParams" array. This contains the -details of each individual test as a hash. The fields in the hash are: - - * "logmsg": the full name to display for this test. - * "cat": the category this test belongs to; must be configured - in $testCats. - * "prog": the name of the program to execute; defaults to the name of - the benchmark. - * "repeat": number of passes to run; either 'short' (the default), - 'long', or 'single'. For 'short' and 'long', the actual numbers of - passes are given by $shortIterCount and $longIterCount, which are - configured at the top of the script or by the "-i" flag. 'single' - means just run one pass; this should be used for test which do their - own multi-pass handling internally. - * "stdout": non-0 to add the test's stdout to the log file; defaults to 1. - Set to 0 for tests that are too wordy. - * "stdin": name of a file to send to the program's stdin; default null. - * "options": options to be put on the program's command line; default null. - - -============================================================================ - -Output Format -============= - -The results on stderr take the form of a line header and fields, separated -by "|" characters. A result line can be one of: - - COUNT|score|timebase|label - TIME|seconds - ERROR|message - -Any other text on stderr is treated as if it were: - - ERROR|text - -Any output to stdout is placed in a log file, and can be used for debugging. - -COUNT ------ - -The COUNT line is the line used to report a test score. - - * "score" is the result, typically the number of loops performed during - the run - * "timebase" is the time base used for the final report to the user. A - value of 1 reports the score as is; a value of 60, for example, divides - the time taken by 60 to get loops per minute. Atimebase of zero indicates - that the score is already a rate, ie. a count of things per second. 
- * "label" is the label to use for the score; like "lps" (loops per
-   second), etc.
-
-TIME
-----
-
-The TIME line is optionally used to report the time taken. The Run script
-normally measures this, but if your test has significant overhead outside
-the actual test loop, you should use TIME to report the time taken for the
-actual test. The argument is the time in seconds, in floating-point.
-
-ERROR
------
-
-The argument is an error message; this will abort the benchmarking run and
-display the message.
-
-Any output to stderr which is not a formatted line will be treated as an
-error message, so use of ERROR is optional.
-
-
-============================================================================
-
-Test Examples
-=============
-
-Iteration Count
----------------
-
-The simplest approach is to count the number of loops executed in a given
-time; see eg. arith.c. The utility functions in timeit.c can be used to
-implement the fixed time interval, which is generally passed in on the
-command line.
-
-The result is reported simply as the number of iterations completed:
-
-    fprintf(stderr, "COUNT|%lu|1|lps\n", iterations);
-
-The benchmark framework will measure the time taken itself. If the test
-code has significant overhead (eg. a "pump-priming" pass), then you should
-explicitly report the time taken for the test by adding a line like this:
-
-    fprintf(stderr, "TIME|%.1f\n", seconds);
-
-If you want results reported as loops per minute, then set timebase to 60:
-
-    fprintf(stderr, "COUNT|%lu|60|lpm\n", iterations);
-
-Note that this only affects the final report; all times passed to or
-from the test are still in seconds.
-
-Rate
-----
-
-The other technique is to calculate the rate (things per second) in the
-test, and report that directly. To do this, just set timebase to 0:
-
-    fprintf(stderr, "COUNT|%ld|0|KBps\n", kbytes_per_sec);
-
-Again, you can use TIME to explicitly report the time taken:
-
-    fprintf(stderr, "TIME|%.1f\n", end - start);
-
-but this isn't so important since you've already calculated the rate.
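[Editor's note: for concreteness, the sketch below shows a complete test
program written against the conventions documented above (iteration count
with timebase 1, plus an explicit TIME line). It is an illustrative
example, not one of the original src/ programs being deleted here: the
program name, the trivial work loop, and the use of signal()/alarm() in
place of the timeit.c helpers are assumptions made for this sketch; only
the COUNT/TIME output lines follow the documented format.]

    /*
     * mytest.c -- minimal, self-contained sketch of a test using the
     * iteration-count convention.  Hypothetical example; the real tests
     * in src/ use the helpers in timeit.c to arm the timer.
     *
     * Usage: mytest [duration_in_seconds]   (defaults to 10 seconds)
     */
    #include <stdio.h>
    #include <stdlib.h>
    #include <signal.h>
    #include <unistd.h>

    static volatile sig_atomic_t done = 0;

    /* SIGALRM handler: tell the main loop the timed interval is over. */
    static void stop(int sig)
    {
        (void)sig;
        done = 1;
    }

    int main(int argc, char **argv)
    {
        unsigned long iterations = 0;
        int duration = (argc > 1) ? atoi(argv[1]) : 10;
        volatile double x = 0.0;

        if (duration <= 0)
            duration = 10;

        /* Arm a one-shot timer for the requested test duration. */
        signal(SIGALRM, stop);
        alarm((unsigned)duration);

        /* Work loop: do some trivial work and count completed loops. */
        while (!done) {
            x += 1.0;
            iterations++;
        }

        /* Score line: timebase 1, label "lps" (loops per second). */
        fprintf(stderr, "COUNT|%lu|1|lps\n", iterations);

        /* Optional: report the measured interval explicitly. */
        fprintf(stderr, "TIME|%.1f\n", (double)duration);

        /* Anything written to stdout just ends up in the log file. */
        printf("accumulated %f over %d seconds\n", x, duration);

        return 0;
    }

[Such a program would then be wired into Run via a $testParams entry whose
"prog" field names the binary and whose "options" field passes the duration
on the command line, as described under Test Configuration above.]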
- diff --git a/rk3308/unixbench-master_32/pgms/arithoh b/rk3308/unixbench-master_32/pgms/arithoh deleted file mode 100755 index 21a9749f2c18c3a55bccfbb88176136b8637c8f7..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 10284 zcmeHNYit}>6~42(PVCrq*NO9J6T&1;z@^!Ty^d?AE%+5XX9jzI*O-?%aE3Kf7`3sHSN`Q9yJGYF0=SJxEil@h2o2dW0^TU|TEJ z&|Z46L5LR{G?yCzA%{Gkhm>V|fF$cEKt>T46GS==eH`_YuYgWwd1Skc#FI2}4h6|9 zPb)Evmy-i7o5*m$te5rx{w{-k8SLb<(8;u`HkN=dp>DE{e6q6LbjC?R{U>7iFnA~I z{`xNvC4^%J4w(n+EtzOC;n`ZEQa1(d| zSP{#xZzR@5;+U{q9DkOrrw{lt7DDc>)7ch9v|p_hq}11dOW}Z9Uz=De`s?hn2QpJf z!nP}7`7Wg(rM|RI7Z|G~N2ga==17TM%PgHxCUP-O(P{CiBIDXERV^!1qsSawTz0)flvAvzO7j#cN|6 z{a%cTVE@}O&@D)ppg@=@CZe4w27MV*OvnnRm<*jM%B^9FiRomD$y~=26WTZsd3mD{ zuQVPwaW-^1awc^8@R`=r2TwPi`{|id3o7`D8I%rw7kZg-)Iigf?Xs ztQM=PGe4RZ9Sx%c=euqKPHWXWZmZ55o$eg1&irJ$BcmlcZhPw`@#O2Tg*ObJJ77PD zv@|Bfj&q_J+!{E3`)KH>cJ!|o!Y8*~2>m9wDYT_J^X=*C%(3YoU@vLFp7Tkh?eW8B z+V#U{g7Jf=1Nwb0;fZ@;Wi_V#s#^Wqt<9Q*y$jPlzWS*cu-T zZ3<=)CxF zbnOkJgQI<;hsM4;^ycqQoW1ac`GZQB_Deq1!w+PI7->p`QZ^M5TDcgAt>5I_+ zUE=*%(E-%+eyp%69q8=1O6MQ5oOLqMgkS5Ca>x0?M+TZQrWVz?<&1F+7pe7!H=OT z(=sQrLR$~9VHh_lngR>vw*(i47HRF;Vr^+~nRbPCWnhK2TKkAL z66D0lE^=xRbif3+x{~~}9f`EanQs}xARhApC57C|8WDJOeSN=Vj-QKjPZ6Xl4U1t+D_i& zWVkC>Gz8f0;7V;tEEWjD5!W=D{jDvRxoaF*wV^>=LLctxk`tgu^2t)=`lK9n zGqD~S>0HrtBAAlvGiEu(3?K?tu@Fg@81LBIhdi21vy`-D90WN|rjnmvnzJ#-X3k1F z*H0EIdSp_M?6ASzGEGl#m1GN(D9H}$k#wOL!A(E8CsHaG9h57Uk$L0hVQI=15NfeR zEK|%|F*})wmG>kP@xGYrhpR1CtT?5L6Dt>!dy<)K$&dge(QTBioMqaU#KME^U?Cyd*1sgi+DKRfZpzq~)a3)h zeM{;xlEi&W>hb~MekOJ9ZixTYbVi_r;C@63y3s4SZ%LgymfX*z&d8SBSEcS=T?O|Z zsmmx*$m>whTcO;4rJcJmp3<5wA85uSNkM_&AL!MZE+ahLw@E>FSC#v=)VaH2@2%On z%gXp6)1bR6L-67TL2vi&tLaM!bK^uzgR`Ps)R!^4MnU&2Qd42)TjU}I-M5Qh=O%WM zg2ErwqyLo`jgW%kgLS%Ir?2u>fXohw*QR8-dRA z23IcoLw|gTB}soRwRdxy>rbv?B36o<>-;65tM#kllt6#SeEv$XKjGo3c{?X!Oo<<(hA)fcXfM~ip>cE;M+V-3#>{qY@o(H9C&}mom#Zw*Q$V%t@`HAgW4Z9i-_9p>do!t;I71u#m{iA&cbhTe; z*su6JsQs0vP}YA>ZT&a9_WON0`*+Nzv;1+NZoxlewj8#FSicq4=Mi_)m!PZXTgUUT zQ@%o61nqahp7q8*jQRgD^!+~ltF!D&#DmZkvHTCA--7yiUS|D8=<0c2i0Ail=zf1c zt1JHp=>G2-XP~S0hSA>hb@t}NhZ+dBlE`yaF0 z@YLT_XHP+A?E5;{*`Fxt_qYEQ==>%}qTK=BZ{H6+@A;?wF6jR8y$?F$>pbq#ehB&$ z`hx`RSt`B>LB++{{_jKYg1=4=e}q7f*q;J)wcjq}?dHF5_?qMworwI~9qsOoboKN` zCR2%cJl@llT%SmoiBw`HJP;?t86%VBbh4}EE|Ya z(U{B?C(N9Y!X3dj%*vEV7W1W?+ z`TRZ<@o5H-&Mc^vOGlXLHga_5;I@s%#_c0$rEId2XO&9{W8{|YgWEO_1Ld>0D?}J$ zTXzf%ZZ&p{j_%qxZj28OZQV$Bd_oK?akshN_tAj?|Ci4R+pc~1G^~{AnBF&4KkN>3 zk7eX7I3(OEq|jFP^Qs>tKfC&KFuw37-K)l2`~d4m?iwE6l|>h4e~3kSyJ%$0LMmsu zpL_j9jLkdX8P`6eVp}Qoop%=EV)r><@Ge`9IhsIGtP-z7atz&%!da{k0-?^F;TYmD$TuQdxhNRK^ZVkO3W3}0O{9rtKJ&#pE9P#HyqCt^0y)KcOj~zeESK} zfed_JcX7P4FgT+t5K{H3;%^UlQ>8!NZSMhY0J5?(C0_&PjgvUuc~1d%1TxD)bTK@8 zGZ@2T0dc$wAApd8tcX|=K6$%wIpTOneh~t1h^k)ZlgGh~<5A|_`E3X~WPiO-MH0*# zGjWUsu0SK$9_5cIZ`e67aYJCn4%Y#<2{P@BH!55OGTX{qJKy0BaEu8lQhrOwP&gSE z6yi<=LP{L@Q(%STohXFNN0{LeF{ICd6^>`MBj~7%47g_37p1#A9Q(^N(=p)I(cqEt zcMv-Kl|GMD{uW94Q+-jmZ%CXwemsM}3V+H*mr8yJGVfy;4)-kW{>BHgBI3wg8;mdz z$1`~tit>RhSEha(%>EL`vwiy|a4?Cdz?=)>VdK#?!xo3MU0{+fQ z5pm?-f)$Qu+CJd?zr#^u{zu>_vtFKsv%smbLso5W{tTJ^;>-jQ*C$2yFKy%(!0=Ua zGx6tJzzrw`DKVrsz)Gh69B?`XkCct!Bqh_NAsD_>fv9_OYaMPEaHE&Nt*FC&9k@?i z0@qoG`#W&D563Ykt@m(osfuNVhg13A!S!y9QalXREp@nG0*9fg5q*)E v^4U=I3~(h(WCt?RV78CtxEFA46>UNo=_%ECWGa8?-Yi_xD%`D<(TDp7LYWiP diff --git a/rk3308/unixbench-master_32/pgms/context1 b/rk3308/unixbench-master_32/pgms/context1 deleted file mode 100755 index d1a678121f04e452f223de5aa4753e53253684cf..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 10528 
zcmeHNe{fXCecyL?0)d1g0R~|ReIU__hWjB-$dVk_5)vmc0$dQSJx=HO>F%BG%<1mt z?j06WQq#l}la6N~6WV&}O#F}2p^clu4DQewi1AFObv7@V)(h_xsuJ{(QUp-PJRD#uA2M2ps~VOHi6xZipVlk9FWD zBpP~zDONzYO>|OUe0hZsFRw5pH3C8kd9(~M^Y#EqGLeDQ;OT>i$03iST+#-}B<4r5 zBt#x(hzlr-#Qf9})A$KeK+;JhJP%^I)Ccg}4t+cHq+5_l)ayDHfiI$Ll8JPZ*8Tbo zHwE!00`s?m4nyxRe*tMEUH@n)Q@nZDgQ6?>x zOTG=X8q`by0nLzUQt*D**X%>~2W{!F5=;{aT_sv{;?IcLPYkR#1M9S0a;mRZd3=2o z@g=aXi8^^xoqV`XzOPQ+5BYR!K-Qy8tQU{f>8I-CFZyH?Jlm`T>*HtvG38x#^5b=~ zzYmG$l-GfBmlhCHw(4Yo@lJAtdxbNdbwx2-bVSi9l?o+M&Q9j-oJbc+$AmpmD7m8S zmfS*4SQY^Jf|V;IZ8uxUi;`ofpiCiI*g4D^k$$?Absdq+70QlC7fac^n?^pCW|dvL zoB~;rA{J@qXi}tG7b4;TxgZmu|Goo|$dD{mmOaO{myk zo%w$2o6mp0=c6k(G{3{gcY>cDy_V3rcAu`*$2aC-)d~B&3~h6n6>#U)u9j1 zhM*6v+`2a-kp8)<=x9g`-sv}hHd@S%Cu{T1R=X0l`O{Tn%PQnKSq+@8&0nq#UL3r0 zTiUG6pOV)8pc;baPphKqgX^{VSE}0*3l{?~)aEZ$8}NIf+B_Rb7=IE1KVA)WJ$&7G zackzwyN_Kr;vG+hwr)=Zq4@$d@6Q;q@(k;jt1|6em3huqSIw#%@#}%a_5^L_fgZTt zkx>>_y&o`G-Y+Bdy&2Ip20PzHO@5;q%*1bpf^W?X-WhLYFVyC{s(*h|bUg^fn?Q_s zh;ISW?jhDyuiixOp&tT9V`lJ9rj0$qoB`C#tQxUDL(VSbG(WX|hW5XP+-JSq3v*vx zl6!}jd!@`Rssc7<=fv&LF4X3iNgF?sHa-saX!CdRW@al6b1^y|tKX|cy6y?13EWVZJKnx9g zm+cF5thoP+tzY`+2OqRu40OkH~wX9{_UF4(NS!@DqedYy2e*SdxM!Z zGS3U6S8p{A{^QNH*iRW6U&ZVTpnn5;maaxWUz0PhJyV-^=h#b6&B1n}1>E4AnYenZ zbr9S)!|yE&Hyvy~@H-=)9XY;ndE#{9OPHgp=l-H~#f<1QdX6rH9(*J*l;}&mF#5;4 z?_9rnYvJ=}ml@&wEs4%mv>23#2f!vdxmTDy>PDw?)AXE9=O*7zxW>b zB=*zag3sH4n1R0}`e7GBhM+HW2|D_z^gA~)2Hg*8g-oA^eiZ2}d`kLeBSAy@oSgwf zaPxTqL|=txZS4cmj}zEyxvQQ6tp(8sTn~z&T=FJj`W?)-2E=lIT&ooX(rbuW&)bN< zhw}cnf&Q@$S77hwA#I28U{}X>lRw@r+#N{{#3OMt95#1FyLUzVyUp(Yfp~17yHAV` z4-c4KyDQmTs#L%xx--_@7m4i*my+>tEY{t#Gd!7mpu4v>IT21}OYVv7W^be?5)(rY zqafR{C$hqzkJm5^|H_#T55;$hWi88x8jTf!mCKuht3zvywZ?tM`e3`U!MH!L$=GWA zsxcDen5out)P(p9#^3KZA4Hncpkol4M-jpKSP}xfNia6+yvA1^fjams6HT%*ghI;) zXu@bSwzaHj3bt%)Il8{hXvHW8B1W)fpy|P;U)j)RtX?+Q{)f9a7#QVXk5K9R)&u|x zb&95@Uqen<`Atwju<-%oqn1?y_cziw>_;L|^&d{+WHK2=@29hqQF@aiS#r}j!A#k1 zCX$qsTQme%@8D)*T{Id9!jPQrm{T>JtxmQyuUfo^(Cx#Wg*n{atwz8MPbG_$hmxw> z?f5Pvq;mz^4P#6m%Gjk86M)D&g?u<&$N=STk->_0;^SFj_ zL^M;Fa-!vACR#cckH`9=(hjFvv`}%26*pQcB#$LC*`lQYR(z*ba&nGcb`%yC$}VPN zGMvuBr4P$F=&8`Iu+HyY`=IiinVB?igOE#9^1KUhQYPGZ@zcjCx^$Q<3%0GY<60a$ zIQ(t~R{`Z@$tvTJDW1ger9jI?SqzSJZh48qQy@be!TgYcL&v0_8F&xJfJq=@-is+& zeIZ=alq^>)!8J|E>I>mor(~{tLg=+9=-tY3tzrmzH%wg9l+4vkt#wM~?Uh<{l`N0# za$Qogy6rg!A4$pTri^Q|lDSd~ab;0fUo`J38G`agO^8<(Wv;4fO=leRZriw~E14@V z*5yT+E3mp-WE_+$HW<&B3VJL1ql@x7!sPu4Y^-*+fZgGM&%Y9Dw zH*YYR|Ey0RQd3A;(f0Lt@#d0v-G4%?6Fb$=E#l8Z{)*4u*L`vTD;D!#_UVb|9CMj~}rL%3ky0zTGD6saZ_KX7NOwy)2?3qC#DH|}dM^M@AO+bm8azdmQ_^4fj+uR_oJ2e#P2_dh*Z>#J2%@Epz<*f%A16^twNIdhCU)&vm#(5)VVx?W6uM zWW8P)Sg+2bzC8D-&?L+MTc1q*j8A6!e&~~_U+~Ep*ylYKdvG=8?=;Kv0$0<`*fR9@ zZQ^_QJ?IzjqB!1l_%|V+^TwkU`twWh>%^be;a`VrqkNvFS$+w!{@z#Pd;c5AetU1# z<^R_@c>%KjePX5B@??wHo=v{?w4y!R>+}i8{Qh?U+XnpspWZ~;XCUkEr7f^dWPN(t zKLMHRBa_>(Dx9v_SoJw$o;5)msg*5b%bE`U|)!@A-`U~SHqLq zUZ?*>9PTFhsuEVeJ0m-L!`(f-;mK4y7K``uiu6$^KnE zy}fuJY6W~KB02ertf7Iz)eI*;h%j@qbs*{!70aF{IXk>SJE*uEp< zmL>2?=S*gKZEU$y7GCJ`P8o<)!J5n!ChVM*f*Vq{?8-FCn=0lU*GWaXWAWa;k4IVX zDY7`~m+TXklXpueM7m^8IaaDNHFW}6e3AvEy9Daw(qSgbN+u2u9oS>-IXHq^sw!)K zNxqb@Mjk&nbYR~wP(F>EB*GdUJG6Uf%sP}v9N9B&jSuY}+e35wNG!~9SzYfXb8yiA z069@EFTOwW27HOuN!hOLJ&F1iPus^FYsx_Z%W^a zV}deOm2?B{zMcA!M}~)wWYH~4-%ydiT(B~BK9zIi>#jeGweJut!yC0KWjrK!Pqa%A zce{+84C_l@eV1ZY%T+I^fshZpe6#Qa@hDu9FUtNz%Njp0?2UWLDe|RRzBl_Z@+H}y zAR^@xQ?5OM7_ZVY&S(y9s#7Y8h+NEucJB+j_N0hp>~cm#QYZ4rsA9JyB5-yi$DLA{ z@7R99f(}kJ@ro(t;QPv+i12MjL^$uED#)1;89IC*f+zIUw1_wvHR%;x&36UY{8LF- zhh=*zn?$JvSW{Loi9`gS-4wngA#K{OTgpyUTzsUFlrvG8gbh1Cc~{Kl(}hLV#6-zC 
zt|fVU;AN6up;n-uG>bQU&3b0#-t|BEp*P2!m}e#R_b}>jVRT-#ORK+0;NNBtM_-J$ zPNoJS)>~{hcy5ox(Puvk+&Q0~IMM?kdeX$vm+u5)0?WLJB!)f(Z<%-;B91=(2n3!_ zbs&}j&ztftk~sSQ-vh1_ysj71N#h{iU6Pk~0sxp`{pC`X1o0+-INlYQz_C8s9%J4d z8x2+}xp7b?WtQlox|BmFi= zx!w-iMFYTOxxD}D~K`ui(o zPq#(m{z&0u|IxR21)ZdIG^qKP!PC!V)#)S1ziR@o197AaApALo#L?Gig`jO9S@M*x zfY@HzqtCMTV{lN3*FhW$;?&=1srGUnFwhqJ=}nNfCw&%7f=7|2197BpgEWr5#iPLW zXb@uE=WlsD%jKE=S>SZv5w3Cn44(F4Ohh2APqFfsI?}r!*ec3I{9FNUPz#8OVfX<^ z^OXMsIFpPQw2s$mnx|?L9smw&5Mr%ssl(-g^Zy-|#%-#@T>|cjkFnQPhik>b+w|es z#|*nXTug}~2HY8>=|Gk==;3twYj9gTwcueW?ytkW3mm#;G0+wRV}2Vt{1CVq3|0pc m(jeB4`M4HvY<2h+F8K%aq(Y*$hi+Jc`|%bb9@k1A?!N%)&5!W_ diff --git a/rk3308/unixbench-master_32/pgms/dhry2 b/rk3308/unixbench-master_32/pgms/dhry2 deleted file mode 100755 index 7756012c6287bcbfc4d2089021b1f30125ab6759..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 11012 zcmeHN4{%(?ecpF>`e(_~NtSKd2KU*LgNV4(Nhiy)joXt>K3O13MiR-mw0%0=opcAM zyUX3loQnxgA%wI{Z4wh)aK@gJNl5FV5bTaSV+W@JLMc!aXd60E9D;*^!N_vQK8FnI z_r3S_^rSdxJMB!{8P@)3_uJpMzy166?f%}~wYe{#D2kB5CK?4%Z?PiU5MRFre;lHy zO{k&_x;3JK`jl^h5WWS9E~TJQ9HtZ_X4y6{DJlw3icGy7@i6c(>ZL3Lrm#G+bwOmN z6w!yeC@fDcIi*z;n=Yfs*mbQCte1Kl{?B_?yOV0t1t=FTMDUQl!nZJ>J5(|u_?#!~56B0EM(Xd)#vjdrw5Hki(86Tw(Q^z`+1`L$Me8+79n z$%LuXP`_=?KZ%Xi#&MTpR2z#eq;|X6<~EGWVoVmtrxp`Yfp`fDI2mKIY8hjq>KJ3v zu40TyQW;~?8W^K|BV$b9b&N6b9>y5BQlIO+r-b;RboYs~j?=C)j?)LvRG!{*x^(LM zXSyp;(Q|v=cD```+ugrBenzIRx1`&U&h?xONL`~vr&`i=mbBB7F14hEC7u5m^&z<4 zn;O_1c<>32S{3kJJY0KnT5-%=dif*&^pVWTX`8($;8Cw0nYr}bNB0Jb1642jrw{Bz ze%V_ePLv1oj$10vJB$7fiqBEBs{zy@0F6~A8_U(YSI337E#j2T0qaTTSN5D6|_zPrKs$R7D4?a=@ z%7gxkyUW2WC-zStz8&cjrQScicM_>0|MX)y#HIe}V}FQvfp^4rQCtTb6${UMDmxpF zqiuhZ7rw_4EA<;k>|Z~C)WB3%#p7p7M8$XAXtCq56%~J)7asOXO#SWUCw{T}!oXBX z%;`NSv?cKk+Aa--kxd)v;=iK0*+MW3+;uJQ3fhkcAsk9xxHQ+d+ z9C<76Jh?URc*(xSapTOT7thUHdi&g0%M{V+P{dZmwVs1#YSn{h?4CWRZR*$G#jg6Z zl{3xC@6XJJGeXG?wm{BAAY z6%3_Eu{zS2O`BXpHCRLY3u`EMPGAis&cSw|5~&-`seuz`oj#<#?|Luq#~RnyL`l>U z_zC88Q(!DGFjZ8Bwem$+t*m;v>RAfc?Ux}7)MxS)dw&F}k8T`!11n+U2+FDdnitNB zRx$OJ{2kGuC)Y%)UaESA@*v9G137Uve|K^H$0H@@OO-=WG4=U8N_jQ}7OYwk?aEhd z%tyiSi>dKr;%Tg6{b^W{>uf501ufD0XOGrI%e;>)w~uga(Qf^jT^_A@vL{mFfZC#*qYH zx74YMRnF8hy*^uG?J1>kV9IVUA1OLj{;H#~>J?jkIdrn#7140hr8kA1r|5s#PMb&R zt_ExB$ne$Ae@1+8Ps!Ir@yCvfzI=n{Xx+l@J4TL=w{$nf;z7mR6g(WP?w0npL28fR4pHwI!;^nb=d>TDWT4`R8be9-K9rg$laZLxU2ls%%%PhMPuok(#C?-in@HH z^u#Cc3bpjaq4PF@=XJ_gmg?c$m-km}{bKi>-8)v41nvtw5SY2N^25J&mYos}O54qO z$Hp51oq>+P{+@4lU3`V}cdSSek3-lN?)BW~d4ll|+}uCBYfU`7t7TU_mElo9bZ;5l zJ~TYgzggWjIM6fLx%E1AXkhDRb!d37w;#{o&6(hMWP`dUqlPEb!E7vE!PYU z7s_10X6rTm-M>liy=HJ$U;4|LyZC22CutKb-C2F%`I!2*SDxG>o<5k8(LdLkKeyRZS0zi#lz`j zGOMn|Bj*OU zPt0(oo-+RQBQi8Nnj%#6AUbB~3V&_%7bq)bS>?bDP}%Qcrplxq9FCF=G@(>0YbqC& z+bdU8-n_J0abhlQZpB`?p?qWcHOs1%nqptwLtV=h%%#0e82P=6Y@j(BM0xpdqa>_+ z9%2*hyiNJIa-o2iMmG-oZg;cs9~a#5P^dYSOpL|Gn|TKop>%dEn8{8Avr%_QU&5lu z#&+9RDodK1ZFU%fO|EOKof(`pc2!m^)G@LOqXXZB>ThW==0J5#gi@26LdL)cy=##% z7EcDVF3ifNXfPdS1{jG*GT|Ca(dV(F19?;jgQ-x)@P!~J8=agOWgO37nuGC3D7$Gq zF{!%7Ro6fU(rUzGqg*Jl#5hW_kE&}dk#ylMAG*zzN++`@mrNsbTd&_x#S(D0L_~8m zIT2~jgrd#q+q_;+N3(8+3$8ginN3Y*o72hAZJ}r^r5Oax+p47_@klTeF}Sdh$zm;r zTw^h~9bEb`|rL|0OM?_qY3G_iqL`L|!8&(}82tX7u#A##TBs1$b2Kw6=unhE={x1U? 
z&j|M}0~_8Y?p+2po)PY82IdY2f0McA*iFw9_ap|p?vcX&P{7=^j6IF9UH4WQd#i!< zBd)&x7})R{`HY7We6Cny4>t7Nk+H{OPiC;2&oq6K28II=VM_rU9vtr740e5IHTG@; zbBD$LThMc-HGGJS?fTA)gn6&Dn>%<%0WTp<_X9Du&+%#z*D<>g?E0Ie0HNobWH#9K zH;ff?ADa#KIbJ;4UpcEp$}WCu9#-e!RptsXvK`|5%zh;0df?vkh@}Me%^2D(g#}h2 z$d}_SM3s)?7Ja#~gmf#?zMLTeSEcX7=f^Aq2EWgj9C;yPS z-sn9g<~!R63&k;+Ln-pl=E;kC)^CKR z;uD!fhI}4&l^XUr|0VO*JKNJ&m|tVQc3Uv@_gV1#`C|QY|3Xhi4eU#-08=mLlP5zJ zdWku|*1+~&2fZ9G_IDGoJpaKXCj!8-|I}{>miwK8{f^HI&xb}R*XuuS!PK9$VD|4F z3#R_81t(#jK0yv^4aV;-;9VGRzG?WK;g5k^FuopB{{ZlNn7`E~-Vb~y^z9}-2rL74 z^C#!w&%vT_OW)b}#*t>%0?QU4pjd+|`R$81j(%HPvrECT9(K8L?V zdz@H$hW<4o&Hfdm{3f)YXMEzF(98X)1~-h#dG?nB%RqZg!28f1X0ttOk=Sp+ zUBK4wG{eBMzfSZw$bx2|y*u#=KVT`JnWx_i?8A6`7Tju-f5@U&k@sz2`FvN~mWZb; zdfNXXF!y_AS7R6ZF%sPW47^nQ6B0LJJiY-v$Ky@lAm)>ALgIIT7Xmk!_yb@WX#W%7 zqtNqYL4FbXL*MfCCSDAD$kM+i;BQ%Q8}MTm+=2P9&hPKSKEH!9(EcXyt@_Qt*7b4| zF#X$k6e0g}z=tjNM}QTyugdYs?jT*lv4cWb+= zrLEmH9`<@Xp0<|ITCX?g4R?6gh1QRG*9AT6*N=um>({omw|BIABJ0i5dvho=*^D!` zD}i&rJ1H8%|EF4}e_&*@vj4qi@!r8Z+^@f3WOioso6KY;t$$<26(YupgCc~_q1x?{ z;BDGi;(wtP3A92w0Os95uXM3NyEECzu`ygwHSGrfpw`zrG^}X?-*O}4u}n6S*0K{? zD4tA2GGK(0+IT!U8jNdUT>3Iva55)C$%#}vl8uDjEgo-s$CX(cZg4SNtkS`qS|pK8 z?-XO{;6y|VPfkqiL=g+pz|78}>iua&6C?347qxn818}=<);9Ndqq#=gWqOW*u+X}< z_jhjX^@C;6=xar2J$(aRoqgIsATYFfSR3x_>f20nd{#6pcD>!A3uBSchGP>Eu??3_ zt<8v9ji}xH%%r*3{Aeaq_~Px47V;zEU^ZxeWVetT=%tYXl2xrXC^Hw5e6SW!5FhAnY zRqJCg`_0~}9P;~zVi?)E-|k^ClhmTYL^vMNe=lGyqV*1-F8C3&$xI|H1|#Ff=(QNp zS|jqxpAJ}y>SMmnIxCz<(G5RWCd-wnZNoYp(t9d4V_5W=>P=+ja2rEq8Fus+KSJnA zCgTQ0AK(_DM_zorfjQkqei&NMlZ4Phsn~M(|h83)3;l?H~f#+K2HiOx0IyO3)#g^d?M@A>dVI!CrpM|l+ShAoR z9Zg4eNThp?%tHE}AQc!VndGlcOp0k%zDobYn7(4ZlX-Ha|AoJOq90iX>KN)FB@ps> z52w#jMOp@V{Aoc3U*+V{U$YmyZltB2F=ZWuUKH}^$9WCBlSs2Hk~)X?Mfw9-FL`B+KFQ5oP_?sp2eBk-O zV|%1M#ys1_A>?&I=o8-t-ZrGErypJNCXr@a`5P#nYYu^Dc#V*OAnm4*mOR7Xjd9c& z#vk%1cR(bM_k15R``8IHux$)qf=C|E^Y^2%Qb)b4_bVok{pGoMA9#mJFoWdn0fxQQ zA0ak=V`tcteUZF}4W2%JycceP3r*^1Ql`I!G<^sd4*h;t3fI{D_L2JpgkCf9cu#Z! zNE^t~)5OOi>@Rt|Z`NLc2bK5%gmXcj@moCOcX)CLX^ZW60V3_`_f046Ey$CBJjy>n zB#-x+4)9tf39;FFB*aozIgd9CUf>G6tLE_@2Je>Ope+W*d^Tiw13Vwr obUiX|g|K}r$Gw1aE5lWAeLg1_6*8qgbZ-vtZ5Y}vl@{K=1CmQJssI20 diff --git a/rk3308/unixbench-master_32/pgms/dhry2reg b/rk3308/unixbench-master_32/pgms/dhry2reg deleted file mode 100755 index c088762abb500a475e8413f5888d20bc928cbbae..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 11004 zcmeHN4{%(?ecpF>l1_gton+a_Hn`774kF@CC!H+IGHy>g`DB4C2}va5lJ@CzchVi4 z?k;yHb1o)0#UZ3+YLl4Y0vUTsCMBtdn1DO(j2)Z?2xWkpK--WB#UVHt7>q1;taHeq ze&2hyr)R}U+i7Ro&an1RyWjr4{q5hkZ}<1^uFd^^MNxzd7SSk(`id3NhPZ7l{@6rO zn@~k5bZbQe^+|7;5Z*FH7dt2vTV9G0>t(@ggQzG#Daz}uh)01(Q7>f$FoorjtqY=j zN)i32i^B5Ml4Gx;Sai7+8N03(g7s2w!QVRQ>!7Ec0j5wd+n7X|B~RwJc?|8b1XXE91A&P(fD-M8AwezBtbm0;eeVL)`9Tv_kv8{%{0@_z_=-hXVElp{v33#8Ml&KEos0)!A~2pv0c28{ zL`+NtVzERJC=^bmL?DJy78A)-G@h9tB{-E7nnpWXIul4`w5dQeE_(a>x_w%!s|~uz zsYE=l(@?+Vl7A8#tBvC>$EY?MT|(`)e4E=aF3T`k9G_ZDL$*N_HiK=6a zNxPCUCP`(CNo!z?@{No!f!8s{#Jd?|;Ot)KyH5)7zJ1s6GqzLC)3#FwPFJ4VbILyR z{nI@asOY&pZ#iDL@2#F+A3H76*PGI9NN0Ob_@%DVq*G1lI#b$VO506oVM^ydLVXC% zcV`B7`5!p!R;&Ep3x{e?%qq6|xtBlm%^ppkn6+4o{BHHCvH7{@KD@_Y?5}#!H+ygg z@=M?RV5;1ov)xj8&QbIaIdRH=aZgF(r(Vugj8#4By>Mzd>=f@iQzdMNN}fG>&Qi5D zXW2M7Q&u_n-qQZ6m%o$~C#s(5n%!LlTSZ?7@7dhvep&KN16l7a_0=aUPj<~_t!zPO z#CzezYS>fiBJ*<(eWRg{oA1}PWiJDv1Wd5{6kT1^LuBPeV)up)q^(w@rSBk zdBAsJcR85l#J<_XwF``xC@vo-ywQaUE<_EIH?{>}oiM zw*7fdcppcs)NdTKe(e#Y24}h}9zRneD!%PPi*1jss`!hXaI;rp=I<{a{?)GYgEJ+S zx0G7GV_joWU&`4}Z5$IbcVDzTY`t3f%OSs*`QpXbx#i6KDA$zrHaTAx586&CZB5y? 
zzxQbO+E;yRRB-OUXnlBp_xV?>-xjKv*>cf)K`pdQX=BUY{vO-4KJt7{B-t*ui7hhP z)0BO*`}H4-1D=TD@r-@-o$sC(yEy0EgU+3Gboyp?rGJh%i49UnOExTlaAx!>7CHokJz?YBTMx%`+JX{v3vjJom#vr z80<%|I#QTTi(ErBSVQ{@YbbkGU=793!nVH>sT8Gr5YrKZ4XpHje!aD`Ddp%BlaF6OM{j zG4tiz9g&eI)<&vcs(Oa<0Ltu!96yu0ySV_FCi^RBiQYeJq$X18d3dFDjAM&->(A`UNX-*95zE-h2zYu< zMQfS50yT6Wm3HtX=oVIG@g(4xu_%w8>#{!itGmxTBDa)UeckHPF?%xE3%-RT3BGQr zOBHJz$rXBimd4tXO5@;+)mlDQbh7+aTVvHLmilt&WWB2*p{BVvgr2A9f7wopTj{9= zYv!rZtDgUqcz<`vS4HthwhP`|gZoI`lAb%pj!m}oG(}?p#nTix6sYcz_J(o|qUh=$ z@404pL2v){{9SxMeiA+zflo%@lM(p;IszAQ-`xQDIfQ|KDZGmvv?>CJ zesLepCA_Ed-noh~WF^D_d?jMu$tZhqr{tY%pGDE{W3K^o15ZJC7vf!F8H7Q4NNR`H;tF2Y_qwuoMzuTyR8o{q*ssYD{9 zwz^w7T<%t9D(G>#-7Rgc&dK2VmiG4GxHA+@Wp-Svw!7L~ZqfAzs4fr=j7NpC4#^@P zu7lyMv&*wi6jzpX*_BdDSxJSp#Yz?Bk{P$53Rz3@{ z2zK70d{ntaz+0mmhkci;+4zr(?qo3798AO~qLa3f0%rV$6Z+oC+qVHwBG>4|vugV` zhZ)%LGI0+xux4a}Vu z`*1tDD3P*?-&=&$MR-kN52wN=&du*fEY}0~o`+2(sBgy5ZYeCV3PHXc zZy~C595d<5jU}X8k@n?$3CtJE%kdLpxo9<}uE2i`IBT-^HzsVsdSLl|CO!EF3-7`* zLCklz5tfLfGKmbfhx*4B@%Nheu7y{m&S(E?OQewsK`hrNcT~2=ZEBA`KKb>*{t@pl z*sm1p7unwo%(Do0fA){|`A{3duwPMlZ&&N~j8#p>YO!sRy%4Zm&k9ZuwD*X~-shqJ zMjp%YJZ#dFf6Ro_;2&Saf6b(4|E@Rnm*r2H^!K2=JZH)J{>`L+1bX^t*kc9H5AE@c zWrP}W2uX=~GNS#Qsr;+Z^E}5O?N^xQi}vL6&(qeM;7iOC7xk>)2v>-YWfB>3{qM9J z_Br1ri`F;W(_ffhW4?BoF!lGE@Z$Mm{c^uTPel#vORNA>FXxjdLKS+6Iltz>_Ff0Q z9542F6Rj@?=Ik77;nB=_5PC*A?Q+>dH-v#4BTepob1Is}B9|IqOo+khcxS2i;d>rH14ISI_KXG`TCE)R1TLhSc7tzN>+c&G)ii-`xZ%lYIulN5nJFz8OT@!zFhU7! zGL{$*#Iz7Dd1);$ofW~vR5BLMghQ?tx2L`1@+=KEwkR%AslW~`9M7b7h>28SDy)U3 zr>1tGhzV(6W-g)X{b@xL!?6h`wR&s)aJO&PHV^cmxklS%`Vs>nq4jJV=-S%n1Iwh* z*NV`3`v<$b`n5s7e`NEhHrmzQznSLvtY}#5db>q8#v-nbMyJAJ2$xK)&4^l!s6GFA z+v{EUnr(a&H|KPvQd+Cli_wGm@pQWIrP~)N@);`YF8VGV6C z%7^12^j!bWZsrb8$F;tAD4Z2rqrp@n9fk`cl+J&aHxn_NsLg0b{^R|nYJChAzSWzR zBR=0q6eD}-H+xu2C$vZ)9*Twa-wBwDXnljI3w{G_Ivoy);qas}dM!q@&WJqnCj;i9 z`k1db&kE;J^uVu`&TwUFLs+LHdQZh>42wQfeesMOZeyrS!;b#q#|PbsM9iS*1Kc9? z$b+voFsIuX2xl|eP$p%tIE5|J=IB;me(_-f^SP;Tl0S&he-`l@9q16kmEJLx35+Ag zPbTy@B2#clgi}f3(l^$w?mlNGFezM-Ksq8^p&fA)G~!H3xZv1uZ4al?_?&bJ(4d3E zgnT2BjKR;LkCh94Ii&d%LY2_hnX7Afs|!Ep2xWyU95Eh2gKj*123@9a4Z=Ds2d1Jy z)S7@b!wS~2aA6ae!gDQjn}JLw6&;_>V9RiY!sFAEun~w)F2HDfB2iF{kEg=hCDJ`b z`9k`hAQc!VndGlb@|3(;`6~SnWBQ2sPUgvx{uch`iT-05sAH&ylt9Siy_-Hp6=@md z@uvkDe3g?&|IA+SdXScS#+3CCdQHfq-{v*&P9V*)Na`Hk7wJ>P(MRXepR*4IQou5h z%YaXdx}TZ$=;tX$A)aMqz09YKLg<%fS_O~y5*2B4y?`PJq1T5zFL++?*dA$*G0%1} z2zlKQ`of368$z0T`p+eA8fmtbzkTAl<{)^6mk1dM(ryxI$us=i7)PC9{2`BW2SoCC zulFLepPk4DwvFM75Xs|t{ysET>Zq6XemT!$e|aw62i_qP@P-};WaMe;gBy>6OUueurAMY z8&Ig=?LnRlOj5jgp3MI)Tn-HqlF5PZWJ-G&hD&%qg`sUyY2y7mzQ8fX diff --git a/rk3308/unixbench-master_32/pgms/double b/rk3308/unixbench-master_32/pgms/double deleted file mode 100755 index 6551c5fe9daeb81c9bcc748b8ac8d76c773c50fa..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 10300 zcmeHNYit}>6~42(N$l8l?Zi!-CWL96h?Ha>_ByWZrr=lBN#i6maYQSSCga`NN3-5t zW@dvU0)&?e5>kQ+B2Z9^KnR3{3TjJ`658^EN|g{P2vHEw@URkIcA6G!sgLFR=FWA- zO8_B|Kqx0UzUSQY-E*IF=iW2>{$1lKP1A&;kk~4y6(XAGLF#V8pNLr2BXrRS+ZM5z z_R99?J*Kx(nKE|l3AWs zVj6BHhdefs;euH&?IHZ_fV~5D@@42`+Ep7Xz*kT=Sw}uu+3uNfGf@AASiTi}5cXjG z3&8NQ><+CxT97_V?Vdw+Zi?RKOnYCCiWsXe~cm=o-yb`R4 zW!N_o>mqSX*e;Gg%huBy0vW3ych~7`iz3>$DFrF@&EQIF$g8hStQGxrcG&}&sUuuiTOw|~)7YaAgy4P=G0{y-P z6T$wsW1yRnu0erTrkIF!rWo`NrkIcoOfeZcQXVDxt>A^y;C z^8Dq<#pq1r;=?m77f)SmxbmHuN;4|>-l<==KKt=sS6)3gqvU)*J_31q?5UKpO$2P? 
z0eLha?+nO20eL%Rj?mV=#~Uv+<_E8K9C_-hCTjEFozwK%{KdJUtJ9IEu7)DJ^D8%r z+iLTV&WX-tslluL_W-B0=$%Ju^WU7?nySs8o$JhN$<8CMToO;dINLffa^@SZIA}-$La3;oXtFwfRTpYV%Lb{dt)t8nN$u8)* z;hAva)Wwkg;3YhDzu8!eYtPhbzrVLh6A83AvU}Oy^|vnH7W!#qZ}=M>W2ooJBF4B3 zCN0~&d{3ye@z#&Ee&UsvUTphD*hdjVCbd2u8Sk84|LxlR^EIurv(kEAJn6#L@OWf* zIKSE}b7t)P<%YqZTx!Q&qNw3_*2WaQ0M%tc*ly_(*s&EyKQ0iQqz&yN7lbvtPQ?p8evs zE6>@pUqH?yO|Q*fe9zk1(<|9ORe$t%3nNYYoAq@7*!g-wJMpz5yxkWaQJhQ}SMRGNgHTvkTC9 zsLFtOKjMAk7BI=Xv*EnSljUx3JGc$Zd(w8)OL+rQ-kn%(HJJ6jUaOT~h5s5-w(|U*9*ycd;$1u9{at!j|3ES^(A6)- zMn(qot;5wqF=Lm@uHK#K>Wd}1qjowOO(eQ{x}&-D9bLV>>B(rOV7td}*L!0(J3A}uh0n#YiwmA1vdA%+XIjjF8nxw~Nsf0&s%MUNtp z6$5mkwP{;gRyT!P*0&s9+orW*Dnl_X+%nK~XVaV3wP~xC4|aTdc%6o+4EG3`zh`v_ zu*hc7)btjVgqOF2LxP@pQSAE#%_79gDQ>W=+SP zGC3LE3Kq*kYf=9|1Vj;zI@L0m!~>FSabphu_DmFit- zIqGI|2Qsq7vgt-KC3odbJHrehN>;fP%~lxiIM#HOaK-V2;hA zm3HsSm8yC)r$-Ms;BJ{_C%H-rr5sAKgL*VuDo1hCPalm|?6Qk;WgD4q+cP3fg%Uz7 zmWb!eQ&!wb=i~O#WHQkg_xy0R#miNQk(W-n6Mdy6cpu=6c)jl$mBC8%=~ zyGCIVjOx+<#%o4MVe!T~U9Zz`^H+e(j))g)XCSKez>VjjKndF87~1^e0&5n;tML}% zW>5ckz^(;?k;=cCFM;`Dc{P4QtP$OE>bzQs{|fXo0e@c$=pn2JmVYc@C!X(VNh<|) zwO$0?-e;8w0OkKg9sX5+eGwnK_FDAt?0?^)y;)FK>yxJhw&&4+o%*1^KG;9%5q~|h z|GjnoN1^ll!L6VDqklfslBECU#dmd^=TEL`A~uS9>-?pmtM#nm)Ifjd0{$wnpZ9e& zo)-dk;(s2{ZQy@VhkrR>Xa9Bv`pfbnFdiR4d3Cl@^|c4=55dm38;fape&~^#Gfl>hAkJN>EWpC_ze0SM$YF9^=YN=llhU?b!so8V~j-30f0ZH9vt5Xp)=Ob z<1fpffj*7?AVGW9iqAt(F><#5tI+%5Z>x`glt7=@pAvMn|E}U~=Ra^5%kc|ORQ~Ob zb@xWQdU~U|Ofr#3^mL_nB$H+`)0ey>-Jea~VJ7*H%VXW`<$C90G~02j*(@$MhH>}EL1TQ+p$Wqf_(Evq3XW^phC5}X zi{+B#0FfyhxngdF}0=0_SC^NlArVbA6+hy$9KZ;h$CM$VKxr{JI@7q7LZ_fx& z0gJangfTXLV0dWUIFL#m+BIQJ3=NO(qB}kz29~(jT>pFN;9&4e=%nK;eh4+JjOm*G zH`XBR-R4otn6luIaI2I-TfNV%L6H0m8_dD@!k=`n8guP~Y!G>9WaLl*U0nKM7Ui9? zkvB`3qUC-54Hhx>9DrwB1C6RgFk^|^f!ht4cE%qSt_qoLVFy2eLew`Z+NBK^)Ka?bpGn=M7ek$(VIIG$ zI^1V~d&hNfTkCKy1E&XY9AnZBAD57-Xve|$6!H{NBoF#HmH$Ot?=~yN$57o{hkFJ% z49z0Z7m2C&Y_R`G4dC6fZ61ZUcDMAUAz4gmkblL$07-39 z^uFg_UFTY*yZiaP_xbGp@g^D1IdjgOIWu$S%-osL!&|ot9LF&wXjnNTEF+g=^+@+N z;*XAH)H9xCgI2~$i5@(1Gh;_?=A@_x3q@TjW+J7s_28t)qX5N>6kUsSH}Ku4m*NWG z6sbJ2rHCX2aqLOdMUl!Am3Z{GP}E5A17x(9GDh_hU4y?xpcjEoaSS*`qANNEQ6`AG zDe}mtsL;N6IpPBP2Z74xBklrSU4H_36sbJKvTC2VquS>(`Mmz_9+M-~C3?)^fVmDN ziHBqmU+a!Nl(kSV!DWb5i0cSRh1Ck6N(%tzAyS`|r}B+Rmm=PXScynuQiOObB85UZ zVM|geC94o~5LY4=BPxOPs7N7ekS}; zgs23f|G9!7CH%28obr@FFxj9I2>(n$kP?0vF`?3*##jkNpHvW}gx{HlJJRs^G@Ry< z5~%zOh}Ts0-L4Rk65X4IE4D`2ME|=qI@z8Qi2k91ASL`v8eWrze=Q9sAEE>*&)`R# zl>688*VBuiM>yLO?IOOoVSU3{$JG+AHK?m4^-k_TW-Jwt* z#O!u&IN;p4(H;&v9e%KTJN*uy%pW|+Jb}=D7V&nuS;+11`2tSn=m>-&pu4<&*6EG} zy)L%j>yt`E0zT$JUYM9&Ea>PCyNNT<9bukecf=|E4KrWZ?cUG2WO?9s`=M^c1%;Ty z2dBtf-9hFFxdTDBpE)55#UBiLBW~vO1t8LsYMR|n-Cz$#9HEH4%K?R2w{NpG+iT4m z7NB~xu(K=RPjT6)^ECf~f1k&t{Wr_e>O#|md^m;WUN2mmmK7nB@(Lwf!;Hj)@FK5a zH^EucJS~L7CVMZ0vn4w$EQbF>x{S(FiiN9?QZ4*8rEsLHDTR~gDTOmEr4&xKoKiUW zDoWuDEtJBM*Hda>tdUYU*DaJn!zN0xFtkt#Mem>#3&D0uVQf1og`T@9#X_-{QaIIp zl*0M9Q;G@Uq7;jYNGTkxk5VimK}xYuMJUBY?x7S5$3v9DQTI~HGxl{#u}~eM6cgxi zO5v>gD8-`lB&ArePEZPmev(owaHlB6qWCX=@w1l|c(n>HRKd9_Sf_%o#jcnALgkyDp;?AnF>yPdVLILRqz;KuG};}YrR1jKJC4fV`Rmm zn2hUYveta{k~LPfb7-Dp?m^8f%fL$@uA+S}{k356)x`CgZ=G&K#%}2Ub37%e!cspVbb{YqUnlyhAEggt~Xl zFg_W#&E&iQH*h@b1$JC>tXWtGo_)JeuZHS9FrzNpzEHM)#v1FDbWX;nra8!2v%r}n zT4O~EoNH!?nm!CAtX_-;av_C7LQE9g+G+@5l8GQMoaB>q`+ZaRNc z^K5VU1Ej%L#@c3?0WnW=_5nfnGWYWPiTt6v6S{Y_cj)d)#)qbA^+@Ut>uYP4Szg}9r1^2lGWV3$>hY@0*<{1zo)w+6`v{v=IAz3N7>d>S#n(id^l=_F$|272Et74RSOs^Dc>U z>PzY3}0HsPkxd;h-Jdm+L2 z54{yF?5IWD1{=72nj{TLlDM_p`loHWIbN4{f%mfV_@(UPWV~>ivwSc@H7V5D_iXdJ z0@R``UZe}Zw( 
z&N#lne>`KL?GyIZ%)ynAdFTp0EoPS-)(zY*W>;#CvyX2){B)wIDf9bhvCThjgpQgeg7%oEzpx}j&|cMiW)Lb0Uvr zelHpSUH0p(`YkBUL9T1!)>l!$7WyYeZjU;h>vIn3d=O@+Wxj#jXvY5wv1KT zhxsb~8(hQQ;Vo4UjD&kK=eV2#X!|8WHw@jsK=AKIzL{WstavB%-+iTdwN7BOgT3gB zU^2ex%3!-z$R3>#2gQOG?WgRkljzNyD_UW&eOv@jXhjQ;a=)C?u9> z<-K@iQhZU&*&Ho8YvTqg9m{@hjeWh6tb!Z;Yuk;#dQRZ#`!T*bmC)RHpFm~4iGH6Q z$(hq?D;@Pctm@ej>O<{Q`nlda(2oVWr|uW^bIHa##pXnL6IXj@dr4G3$LU3}ZQ_XG zK1et_RoBA_XqQ%OFBuN6##rUB(MN>ULe3n%pX-c0OVQ@o$h`tPnvB0S#h!X$PH)&H zSQ7?TBZ%Uy!cIW&qE{XecO?wGCXv z*T!a_P<$Hvu4HwqM3a$&9X~#${zwf3~j}eZeK;XC#inDe~mX zoWqRU0N$(`^k-~}th{2%8apCMPR931ImW3?BCELNk}?OavA%Lh)U&b@Z+0=OED~}r zCF8*<&SDiZhmBs6@`zMYhmzd-zmVqK*ut#R8CSNWC5KaU-+=lJrYo91JAOJ(cn|Ye z+h>hEFX?QJ4W=}CZmLey9oG(Ul}#Ui)Y~mq^q4A|X0PN#}GvP{gyiOFRAH@^qglv`qr1sg!i z+zk7FdlD;)#Ho?2_Lq}XA{pO1St;hC=B&dqzH_olTwb;NV(yk?{F6!BYC|8J^*qYT z?Uyt{4fQF?oSCc;H;YSG<+qsWW0ZpZ0qsKwoit>D)$zZ`pt!}iMoE0ZjJpWWnFTpVFSgOuRpuTl&3dv7i;c5>+IOS*b^oahFurm%yz(es45$AP>8PBa$8~ z#oUr(S3jOl#-E&|+!IP4qQ+yBRqc$2g-U)q>v$&SAv`3;U6VulTVtiokgv7IPHse+ z{Z69r)M-I?{8FOmRQ@P5%+G<8j!DH!a}6}x?-%*{XKaISuv;s|6I*-dTMyIvQYYA2 zwxxPCZ!&i<8Lyw@EcXn{?flk6GG05$JIJqZd8BzYo2`AMaBVL7cJE}yK-SgF4VM!2 zqpykyF)TRR9gb7&M(;V(EX@b5CG>cZ6=O-q< z;BD;5DSw{o%TLDjlgks_jXCAxrbo^6Eup06V}*^ z7Ltj5*6(Q-wQHg&_U;#v!{q^Mm}vXtJMGE1FhPBDea@d~1xFV8r%0Y}L)(C78_@ce ziRRUw;YyKJwq7!N`)q9XrCzSmI&9oe@b`K(m79l+T?GI4UTx+2VPk;cr+Rgj)x*C? z^znrC^=4F-13N8YkM(9&76Tgw_NT*Hm8$^H0zNRTue=FxesQ18Kb&28BVY?)&u~s< zHsGCrzcOs7)B+CHoUoM+(@yl7WLfg)G)x!-C{PS5~PRo1VjVTMrg~Vn_;_?xampdX$ zyR}7;*ErEOo2;vIh|wQ^oMlujXLfudYK^_D(rs+qFmNfFV|*)`Z9Es%8-EhbGQJtj zH2z~W!#EVx8DEcTjc218;}4^p@l2E%e-NcM!2{|rBD!RJ$2e)dZyamW_>=Hkn1hF~ zBdY~fgG0RHwbN@46|}?8ZPFjh6AVJR$c=IhT-~F#+zS}zrNpP1Y+iZ{-hO_Vc5Gbn zOf=&=Y+ci={jh#O+0o_kt3=!UZfPG@0!vvl-V_^=+HnKwUpAhBzvbhsM0fn1MCs{D zF{8iV);nKycu>fCmi9v0r!}M5b6hsR0lO~#=+-?KHi|zK-%7CHQQQ25Wc4|AJO+PL9*VJ@l>9~Jv;zkSc@9TK0P;JInR>MUGUW2qie z;fQcl_$>bCGe5}B9%iLn{howw^PPfKXcSJg{*^5@^4eJ9(Vh&B`4Q>+iq1ml9E4*U zRu;}-IG79ZWiI}KKnEo_#G{1H@!T#x;PK$Ni0#IyACT?{PltwHemm?f;hkSufv&BNXlz9-t zoA?fgiw`*>?kYay4!a|ybeXx;f&!Lw7pGEw8F7bFIUuM7l%%ur$t) zeb2Vmo!fS9<)ve3rB9GA6;bD=wLPVR3%!cEaTXluCYgZgNSt>DTy9w}L9$r%w_?sI z+a(+5@?tb$;qqYSEFKFIU8T$s#`c6Ao$gK49#=Q)%Ny|Lust@rZQG8PZM)F^vT(J0 z4o|SJn>DxX*|GZ}Ko8YCf#U(jwyHYV5#iA>vNRs7GzhZYVa2q-~Fk9V^1TcKz}LH{p66IlkP9y#@QF$Ez-R+-J8<=Dvt75K5)9D)umz|?v!z4 z&rZTGXCl3*;iNm(%@ByAdFh^j?kg$as2@g5cNL5^flha{bU#aXPrX^31h*rl`yd_> zNAyy?pCyw43-XglNzYlN=hYW_$or23x*u1<+i2&1<*4W>r>B}IZ7MHW$I~BaHkzHA z>dbY#$;7X(u32Bbv4*eNxT(&vsb(W8y4J{2p&-HQnjqA}`{>Qr+c_x235$+%zbP0BL{JWUB$H?7wq}{+_2a3Bn^lW}E_ZduT*Gw~zKsGA|^ABQQj#2|F)`&m@^(iiCEVl=7*kB~YFW zQ}ob_6O>}ze!AJk>W52V_Q!{*dhsaP?T~H9?#WI=hdb7Yh}+k&Gh`1t4!BtYqjwE4 zi-lQJOKI`5glOO=Id%_5QVKM5l=W{0dh0_0HXx<+_D06#2||8T#wD*P`Ar#@CkXjd z87J?`nBwoWsh2MoU)9Vr0ugkc!9g=*XjFXqfc?a%0C}>j_ zO8#HQ=`{;uzgWQK2~F>zC}^b@IJnbXz;WSa9TaC2i#6yM*wN1RqT3}R$9f>IIV8igH~GI)VTVk2Fb%k zuPSJzmpRunw2Z%w0MfGJlm*DyIyRR)0Z_3Aay$d75=5^?*XkAwESC|#(%%Z^t!259=ZkoLE!Z&c`vAN4eWu+?^Mx=pY{oI$YF#l_Cl|p z&L|`>kUm5|m&QMwvM1s<%R?=RQ`$eQs*n0Zu}|8ikUnQp<4^4&T!*_dIV_j(^9%Cx zSc5_$KIqc|oc2Ft$<#iQPm@{>vi#hIy}}Yno@~{`v4-tUljj7k*fWP+3(4E7(mx3L zeJNb&PoIiT{5>i>1pb3*{Cz4qwfALJ`>Fg%75y71uk7uV`cA9p1EAC0BDI*q?u+Cp z^OIilya2j_({7C9zpN_%bI@rINkNf+PDLkq%KWDt-oJuh!D*LAbgEwt%h~T05E;t& zKfW-2@^}oXaMI_T3Qr#olCSt9+JWjZKNOq;PIP6wXqQQEP!ycTPaQ}fBj`$hP3`z~-=zle?MdnJwkr@-|pecnl6c2>b{6Sn9u%Y4pDZo>2AI5O8{%u_eVnoW}n_8a|(fn_(Yn`@12ny_caVy$Pp4 z`tYFdgT2c7FK6Yz+g11v()zm^bhZ4ir`5j!^lPg2Yy*DX{+F{o!0G*o1u9VeUk0uO z>hA}DyHxcD)9Afv_|Y`{xitJ0;P1kI>HD7Q`w?*ET?&o&Pl11c{@9eF|1U7mvESCBj=X%@{)wvq$HaItW>NYqm8#i`1og3HJH#9UhSlk;^Upv*# 
zaCbGnNlZBTb(;gM)b(FVGwBs6sTKLZQ;WXS=!5r-ILAb-TSy5boK!+rHas+rE{=(G0P}#3gkZKIMJVt82W8 zZfa7VCD+SuqSYC8dq+6D@HiP4B`!5pe%!q%hu&AKGu-$tr)v&oce@-BN9qB$nuDHi zt8wZ1wK~HYg#M(4+wVfxNN=#!bUH{U6>+kzJOfvg@mgDr(((EtJmd)CJ-7VcTutzJ zLoguRQluV#FQQ4u{^}e*-CfX&bG#N;=fkqlY`F(a%v5f^r%OF*6S0%m;8Ah-;!nh#fQ>@DaZZ z1bsM{lLpv~LuA0@_CN|qv)XLkb+;K-=IUW)wCRVhWO}QlD-%A0#@p!e2Tjf>F$PLI}hpC{*I}BXmqwmkRae+p9D?#CV3>m~n?WOOcUx4pr>iASp z_?`d`zThKB<=>ym@|3nHeEl+?B#+KH`fy~a&`6{LKM$DByr?QVuaJJ%30MilNAWZw zHIMk{+#~;%>oSlf@etoxL}~}|(fLW?=kS5V{sEE3g81a$smi}mB|D%%vZ$ZlLR92Q z=O?_Z3$mr4==aaS6h8V6KFu?>QD%|;Dt-PgFoLOG`raPran5E0k_rmn1>hu)Y?A`< zHA;~DhiDW(M1(9d8z~eQGuD)XDIp#TA0sL-;STWe1f+sO!&F?vcNl!T6egq!UtSvD zzkpBp9KO|Qd@In_`#*=TJdG~`K3>H~{Y+tficjg6C&Bjw@{~Xz#ikUWl3!A~_&2Fa zk-O6PTEUmE6iUe=l=w9Sq{3eC1>wL-fOjI2epHTp0gbH^&Q&wk0H2@)rCz#YR`Y$< Mfb&>|2$}-^A0JEQg#Z8m diff --git a/rk3308/unixbench-master_32/pgms/float b/rk3308/unixbench-master_32/pgms/float deleted file mode 100755 index 3a6bf12c7ef3aa47b039ae5705af35a57bac06a2..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 10300 zcmeHNYit}>6~42(PMp|v*NK})6T-Aks7tdCdmYy{E%+5XXVq={an&XxER5>35A7fWEWqjo8p0K~=62dWnnJNGiHUN2>iO$&H2cnNqZSP{#x zZzR@5;+U{q9DkN=q__AomO<`m(AgG6v|pnXq}11g%i(}qU%OZ-1{&X`jxlGZ_3v;Sef#Ou0QXWals#JuT$54uNxsoe7X<-;F zY1ocgag2hQD~gHjTSrEXo@g&@nL?@P*$mVe@cmPoTuB^LH3l8I+$FSb^V%3kzc0f? zu>Tzx=vJgFP$0|{6VbsGgT9I>CS)yBOoq-B<<>LB#B?#mWNu`N32h#VytrA2mzocs zI1@SCofn0--J0rB{k8 zYIEP46`f7vL+8401Ws$yJ8!GaJuurfUYq;=Y-d(Wbl&#*Y4O-AGvUF}vxn{9BP~w| zvFog80k;Jn-8UY3Kzrc1`S9aA=0iUZZV7Fx&3$#YHg|mX#U@QOVbA#v(vJAur#kez zPX*&gPX_c)pT-mShjq1>_S0JJkGHmHB98e9ZE4!J=JG|?1fC6T3*NtK0`)wW$M}}O zq^2#4wgx(vT>i=Mov*+8O8fmm4@C@_6n-$Yy>oiaBel6_YFcM!IebDq=D^ncU}#G) zyWA~v*Tji4%|pLA-GP0C)chdEB!Dpq44*3pkp`Dx+~(c!Se32K?VJ@Dxv^RJ9&dy6 zwpo4r#F_9Aq{p87Q`^2k=fgANmc=v22DL=`n)#X2cU(2|%{y+N*?h~9nGO36%zS*` z*{Azvjv(i%JKltK<;=0A>>KNjyf{DFva@x^2gcq%c3{oo@lTB3iIIP1=ArPCr^I@# zckg`Y9XE^*kN1z?HSvv+KRaRe@oz{L@`I0FA~BXAkc zoqKWqd=pIKUvewh32Fit1MxC2yr=TsxrQnDa&Q>>TBN*_k&od{$$Q!1fac!Ko`TK; zRtn7f5$_w9fl2P24QEcCF1LU?!0lk(lQyDW%4?DG?!)sR_=+?Uj1{3kY?g23| zIy$I#jZ|~_RHamM^qzQke>C0`sU#DTc)Yu}Cz45C+uhffoQkA!73bj9dSA3R8W+Rw zLRF?^PUVEQ31ZVIZid!$WH_-&ENWXk+^j7LEM43hTozicb!eAqD}$@F)!OBOwc3^1 z+qJPECq{OWQ-h!bCb-R&jVbzxDP-vz;J=KPL zmWX9b1uJGJv$4wlL?YfFbNz6&#Y$DDTyLC=JKMO1k&w1dWTVP?~e_@I=i78$>=QaVBu;_pvWY~d^!27DW~N1kVeM{={0pWfob?$CLsQo7BMc%j{k%DgYO72@y=Z+=!GpRGOCHGaSyVp;_ zy+`UYiWKrX6!cao_g`t}ZY;!WbzMHtj7O4!0>M8}S6!D89`4(upu4NeeOv0>U9tDp z?c8N${E%tT-IXDDaf6_@d-vD%6@0t^fIsW4C*q^$UyClD{g2n}t%AB*pFAb7J@@$R)Q7zF!TwR7uiM*1 zUxWWK=sbULC9{9@&xcx)^xs;4SGT+V*c*GS;rXFIo~tA+ z6Zb+?I!{CN-{C9&OW1jaBPst~K0E!X=btC6-vO_5p0a3X{gPISHC!!h8htf6Zw5$2zDUWewrE~uL#P(bXyBZJnCjniZ&-irq@3>EA`A2=a1^jQV+gX8k4T>iJ)W z=lLP%et$n~DE}+y{_h;8psV(V(cW_n_)F1#KHpcPzINCd|5v0#*Z^HU5A6Z$Z$3NQ zI|7~iA+y`@)Zf@(PeEra{5sg#pD610xBn&R{62hp`O5@I>U_ zo@h^Bq`S8-l1U}v@px}{a#JEqKlia0k$OJaTMf2w8f=PNeL`S+T|b1N?ZfB%GPAGF<*aMiB3-zRYd&9P!rB+Y!@ zxZN`M8|mVIp%q26!n-^+++HqpE=IF$rt=k|3TJ|+%r15CxO~)wWXVyYB+T`R;ST;9a&Hb2NpbSS4PEP<<-D)Upb`@z}oTtw}I1;?C1im%XanpF~RcUGk=qVAqDJhC<7m>Chxns!!1 zQwNJEC{w2*qPQbQ4_Fl&U(*&S25h){5-){v9#=-UA5p|NAae!6RC4DmI=p*F6ko_w z(;{kRU8T^#Q)4A$rhgjBt%_*(?tTZb4?{>nRzxfbpTONX9dW!fzW{+ZMpZBK$&+Bl^(gZ${RRXb zvcF!aA_?XVnmEP=*P;<@kMhTqH|{)`xDha8i5r320-1Kk9~G_&nQi6oNcawSfMbkE zk@8zchQi4>p%5QcAf&{R-v?GW-jPDcJi`o+h#`FltZ+Q5y^M~^NP%m1eNj5%;n-iE znO*~KBMlxYe@CIiU-`pG(xLs34E<;v6_1+%}z@oe950US)?2{7k^IQbhU`THfV0}_34oSpb3^l 
zPk_VF)QP@GO!;gmdIq?|n8;3KWWa16%W*H@+$y>WVWlTjx!5MSIFjWDgjW(-HUh%4;Iz(uO=v%Z8qlv`KSvwmmKBX|imaY`onxhj!Clr;pPl4GSgVK(HknCqZsG8}09R z?;ZJCC4HRTJ=;GR7|;CX`{tXO@0*!BFMWFT29KgBLOP466GRVAS40cKz*79NiGmiP zifN!N6t$$sw#*V@%Pd9n#bBYg8NLu9^R|GKUPT7_0)uWv*a6&ua_O%Jre}U6YaW3S zQ^cJpi=O#OB~S4U^cKy37YVN|5Q62BZo%KVpw9)J{yZ=}>9US7Zf&lIs__^@ZuXLq|u&n9ubBx5<2$^>#{Iy2fhXpJ<-%UdT^;QB;`%u?2 z;EUj;lkF$I4jv}?Jy&l^VO>6-hu7s{S$`&ze^Va)OL_Q{d3b&vZp*{ewRDu<0Y9i( zH2sxBdp(kfkn~6%{`)-qtO=`#A24C&r~afP{+t8|iG6vvDG!J8@FRJ6VIICa4-51? zKYQC!5!@3_idZlnkH$qJ+!OKjiv9@t!c?}~63L&qY%Y3L^en_q{4LEBfrNHY-5Muswm==c>3(edXp#Ne985Q9Kv zh>l;&5QCwPAqL}33^8b&3^5Q}7-GOIV~CPhF~mT1F+{oT3{knY3^7nQFhqyg!Vm+g zgCPd}c7_;ScQVAF?qrBT5@3jlpx9-9r&STd#RD}1wVi(KZ0YF-P8W~;^Vyb4 zH0b3A&XoV;{xdDVd;YA9-(-r{n&R_J@d{IXhAD1Ce9xLekCZj>+ex`&rucbN{In_l znkhbHik~pW2Tk$gruZ>a{ApACaZ~(=DSp@#KWK^%Agubnz?9jh(5t-^3i8J1Mo@n?=lY59iR zeR9V{t&?)z9&(QlS|P{UI_^>j#}!c(T34+s$Y%D8TGVXjkx`{(dbL;pev!>MN1dJj zdCr_tH#*??<&>29qcPtrNYQiLJ}T;d^hWErQW^NsAn^)H<#$Fr9WpB&u82jOX%*j*>^uW}A98g1|Zu?31_>fjFWvs}u z)omHO)^d-#?nR5IyiUCP?E#BN)cjkA()_6V@~em4twW!yeClFZLHqnCEA7)tZz{N` z6l}gwFu(Ej$J2*bo=Wx9pGt*RPH#Ujx}|e)wBz*P==Q;Oed8*&D0Jv$ai~qc0b`Q?`0{%Chpz&!1bJ zXtxfr9{0L0|Jrrwu14rfnFecYPsba?*nI=L@%O-hzhOtNPpt5?j<;xhb3^D0=|XUS zAwAh+-d;c%b3+QW?(=6e_oS`h*k|In-{9E%-as6FRyGsUctR$g<0dKO=+rnqmTn!d zaMGsP%uQ+bnoUTlHBxG|l=if!5sPno?_=NDc+R4VvB3%c{n~F`9x)dGoej0uE!pu* zHuFJBS}>cLNKxL6>1^gbQ))V;da{|yG;<%Fuz5~tGQT`Qy!*E{&#%sj7dahsp5|Hu z)2+CeE?dzyHjwxY!q^%iHjjxC_)^RBpYqtgr+n|!MA_iR3ENMtYi;YZnKx4CQR!RC z74c3X)`18soR6KYP#-&Mbv|&~qCR{MtFyP}XB(8aKgeEiUZ)6tPi<=p)>l;))>~e; zt+zficMZyU@p;UrFT&FmtSww;shL)JU)h6~-@RD=jMd;G4?SJkv$hR2d#e5^n|Up( z)YQbvUJx(71X}U4wzbyKEG^HGH7}emcKzyH1=gI>xyMgvvx8Dy*~xN?pOrerXv*o1 z`LdbykZFHsY;%Ffp0XXf?^rhTPzw2{VI)rDT;fFQzmQ^VaZ1#>F8Q>cRjU4YLLhfV zst($(O)c;gKAp|DQiVrfpAen4hl`FDJS-fpv8M(ws*5122(yMZx-sgCfr~LKXmzwc zyfY-~Yft`iLL3vd*PhO1JSl5v`o)3iw)(>1${TmTs??w?=gyGQ@`StWo@MCQ zD^kpHk7xVpv#(l@DlJu^`RzMHPq@=>it2KWM)T$SAQ~P+gBCBDADFNq?DXY73}+2`iYW8^vGUq{y}Qh$yDLK z9-Cse^>fo{kD<0^*>HmSN&J|H+TOuCHDO&W!aa_>>BxpkQ!q)P(N!9C0UyXbD^WtfATU7-yDpPi#okM}}I*j~OkUt+liWIpU3@s3862K!ry|Wxe=|JQPFsduuv6!-Zm0dj zwnv*omWK)+eVlgE#`2_{hBQ0DUL~+siS?m%o^6J`>V|7VN8J3LtPGj0)dy?sgtgu_ ztmPVJznpCI*F$fGlm)Ze#bwJ_W9PBU?~V;Pi-*hV!~*u3mSNp;u0PRot+3n-ZRMel zx18m^@TXhO2Frc_LUq_|xzgXWTp=uX^c-#U+y%}Wu${kMquv@&yO zMaZ_e;?-}2sw&Tf&_9*M?FE;uV@gdkT0A~h+|Cw%_S~;0gxVZB+`jEx>o`6m+L!R@ z3)EM)$^H{bh5Z2pp;kmMDL!wq`xnsh%@^;R1o+E3Xa$kD& zh4T~l+DaAiyK8WE!1)~OajwM`VG)7;zP`OqtkoGd3elb5=4geg@0Eo3IQL9yAR0m1 z!rp)yj;ISAE!{V%34b&mWYToew)u`t9s3d8@7%xM9ZPVJ-?47v>ir8HjokoJrnWr; zZ7a5Ee?(+1k3SmQs~Q=l94*bLU>oUwxO$Vav`wNAcO-m0XpSyU{iNN$-yYv@-~HE! 
zcB!kn`nwb1eZgA^`_xr^zCGS9>@#;Jw3MW}DiQQYBLP6RQ`QN&x`T0*In}=YL{jYv zQdeHH|H{iUav>Bog!?q9XNJVCx|)xu{G)I19R3x}jwPzyt}bb4T+*<- zQEgnlqS?8kak*I2*0w^ebN7dP1Mz4ysWv$qmpPnG_PD>hH4S zJ~O%ZBkEE|i^C~eZ$nYOpsy<|lqHB3wBa%|xX0ewyhIe17PS^D(=5}AW>{z1W+@fQ zb;=y;T;+PD(lSq}Ru(JmR$5xu8Eq}Z0D28B`f_d)Vmd`P$BZ>-8%=9=7QR1F=F7aw zS8oT^`WYswjB3tiD_TLl;GBD5>8uiKX;tYRbIO%6^bLzcv6ikVSy{5^`f_Dvp=<6} z+}A7U8`c(~r>~o30h_H>l$6|xoRIQyc#B};Ez0H6YXn|>Xu=`i;b_qRusH{%x-9~SRn{9IZn_Qvp5IYUpnI&4I?S1 z9lUi=+Zr2n8>sd^f2{u&oO)34zGfJ>ue&$uOWI+TTSC5gfC=D;1fvmqcZ_c+b}d61 z$v$7qpU~e{ASD^<@9SdNn}C^py+MESmYztzYVT3)n-lO&df45?juMXaASc_X+PfoB zJ8q->JMFP}G>LrCI1;z4Ytu<#UX=$$Lnzu8Y)JS+4e_1L&CX>FnjChxhG>5>)}L&M z<2=J33dg)UgSWZK8xQsdeTksX3kiuN`l8?79mXBJUF(Gg3GK4W{Kj(;RGxh^)Xi5U z$b~z5Ufu_JT$B_aZmEW;hVptL+nWd`y|f-K+>;^P$|wABZ^E}LC_V!N9)vFv3E^t5 zUGNyem*5q#jb@{xk(u-d9pCTJp&Mu+-_z(=A0wO#b*#;Ff^(sc^)bRZQpcPzg^+WY zRhv15;2cM1HD01{F4Qq+L4A(YF<-FgbG44Oqa1BM(=lgWyaU3$5uMfOp`6ooj7xjC zwK=ShX})oyvl_2zw&yTsWBpse(5lVU`nN#GoUt*V8}qBxcr7F6TdREGqw0FHYA@R$obzG9bAmty=<8L?m z6ZsvJpIVKd?e8?!8=OH|{#u+X>u!c1eY**hK7q4mGhJLei8XrJBxZiu-=+M@qN#MJ z2{V7p)PCYUCVdj0$l*C!eK+LVS1NAKYu{>MuBkYZs9)BHlZx*2_Rh$yKguUI18*Ck5k)^4>Pdy9@NQ2A1}mFwx0xGwC}4{sIF_{&^-k+nda_ceYkuor(TA zhVdz}0!+HJ7gw%x zK$n>IGdtF&26U+pu7X>D<$Q^oesK%1Y#-@cfVp<2Q!syWd~*G&yArMZS4^1nArof% zYK`v!SLUSGnQ%Ac^Q{3(n2G+oi{%-Pv-=nE36t}+iuvY7{S&5fkbV&S-#6-42K#?G zFaJ}(98dfb@-5;p@M)Lc9`b)@O8(j6xxD-*^6=|bDH~Jp%k7 z%0B@+vHZvK_@4(p4Ec*Sea;sDYND%1`Y{{iQ3f zzWt=TCi~NF;9r<<67pX&;m?9@?hjuC{tV>n?SsC7ci{WOaiQrO*xaAL3CwpmMpVuxMG z2g-(H{yD(>e#!RyZ!G3}_=?r8|7~(KEwwkcEVcIpnw?H(OQU~DbF;5Gu&nuJ|MKqU zn|;pZ%e(yk;=aA!U?dseE4t&pzMxloaDXf(p z+s;I-lAf)t8&`W*Z)!&^^(xEwlzaiq4=aG5At7ZKhy8#cRJH+VODJlj@x zcsp9%8&*>shlm$_T&u3}%-rQNKlkoRByz94`5?he#co%BY3>aMd`X}2bluFMy-GJH zXwS^e30^Nt2u1>Eo+ivp)E~i{Q~cPjN4)(p{pq`zqCcNEr)X~&VC#hOEZ$7hU&WhK z%n#(v>F5M#o2~<~+shXOQgM?n3G@cF_y1<1 z_R7Pny)_Zub(>K*&O^NYiC_RtiN?JCXg^LEjHmnNj3$e+59mJun94BqCj@3%54tww zdZ9@#9(JGwx>;y;qcRvBdi!(TLTEn_;M){~#jmqWJ|Kadj*V?b59SXH%wK9j#k8I`A8gz#icU zhV-$mv+KiJXP5C$`5_&WeSKj+N{vFAE(JOj4$Llnm@tGU)0a%f!(IK*Ie3Cy{XLMu zr!_ekj&w(Jq^_=baF;~dk(iN5n`|TjdXiDR?~zeMto$bbv5+SeTpkr(B4Tm&*+V z8V~oSJUhiwN8{tU)eyi-z|xVIZVf!ozb4?fmM%45jWuwH}DjgWT-6CfN%oOIqdNWOl=SywKXxPIcgjpw)0 zk&iA0FZuL)0sQVoHWD=Q(cf+2<9Wk*2;QzAzk#CHjo|@!DUbUS7cM&VQ#?|2Sqwi9 zPd@5}>+QAVBQTt--vhwli`|1z|1F{}Pqsz!J*x9*^0+_Uj;*JpQKXDNi8$Btv@!RY z+Hd?2myUe&>_hlTkH*LSY8j5kqzoi$apLFU*$&F%zPI8Ed?1N`fu}9V$NesUyLcXP z_5(V~qMm*NFXd_bURAd_k~K#@`u~8JeB3|9R3UChnsid&JYYGhmOL8*vD613Q}GD2EQi QYYJZpuElmpB4{%D-}`nreE