From: weiyu
Date: Wed, 16 Dec 2020 08:51:31 +0000 (-0800)
Subject: update scripts
X-Git-Url: http://plrg.eecs.uci.edu/git/?a=commitdiff_plain;h=05bf65cb10758d1f2143ef41123cef7db057c885;p=tsan11-tsan11rec-docker.git

update scripts
---
diff --git a/scripts/app_assertion_test.sh b/scripts/app_assertion_test.sh
new file mode 100755
index 0000000..3d59e27
--- /dev/null
+++ b/scripts/app_assertion_test.sh
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+MABAINLIB="../src"
+MABAINDIR="mabain/examples"
+
+TESTS="silo mabain"
+
+TOTAL_RUN=$1
+
+if [ -z "$1" ]; then
+    TOTAL_RUN=10
+fi
+
+function run_silo_test {
+    COUNT_ASSERT=0
+    EXE='./dbtest --verbose -t 5'
+
+    cd 'silo/out-perf.debug.check.masstree/benchmarks/'
+    for i in `seq 1 1 $TOTAL_RUN`
+    do
+        OUTPUT="$($EXE 2>&1)"
+        ASSERT="$(echo "$OUTPUT" | grep "Assert")"
+        if [ -n "$ASSERT" ] ; then
+            ((++COUNT_ASSERT))
+        fi
+    done
+
+    cd ../../..
+
+    AVG_ASSERT=$(echo "${COUNT_ASSERT} * 100 / ${TOTAL_RUN}" | bc -l | xargs printf "%.1f")
+    echo "Runs: ${TOTAL_RUN} | Assertions: ${COUNT_ASSERT} | Assertion rate: ${AVG_ASSERT}%"
+}
+
+function run_mabain_test {
+    export LD_LIBRARY_PATH="${MABAINLIB}"
+
+    COUNT_ASSERT=0
+    EXE='./mb_multi_thread_insert_test_assert'
+
+    cd ${MABAINDIR}
+    for i in `seq 1 1 $TOTAL_RUN`
+    do
+        OUTPUT="$(/usr/bin/time -f "time: %E" $EXE 2>&1)"
+        ASSERT="$(echo "$OUTPUT" | grep "Assert")"
+        if [ -n "$ASSERT" ] ; then
+            ((++COUNT_ASSERT))
+        fi
+
+        rm ./multi_test/* 2> /dev/null
+    done
+
+    cd ../..
+
+    AVG_ASSERT=$(echo "${COUNT_ASSERT} * 100 / ${TOTAL_RUN}" | bc -l | xargs printf "%.1f")
+    echo "Runs: ${TOTAL_RUN} | Assertions: ${COUNT_ASSERT} | Assertion rate: ${AVG_ASSERT}%"
+}
+
+echo "** Assertion test for some application benchmarks: ${TESTS} **"
+for t in ${TESTS}
+do
+    echo -n "${t} "
+    run_${t}_test
+done
diff --git a/scripts/app_test_all.sh b/scripts/app_test_all.sh
new file mode 100755
index 0000000..b21c36c
--- /dev/null
+++ b/scripts/app_test_all.sh
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+TOTAL_RUN=$1
+
+if [ -z "$1" ]; then
+    TOTAL_RUN=10
+fi
+
+# Clear data
+rm *.log 2> /dev/null
+
+echo "** Performance test for application benchmarks **"
+# Run in all-core configuration
+if [ ! -d "all-core" ]; then
+    mkdir all-core
+fi
+
+echo "Running each benchmark with multiple cores for ${TOTAL_RUN} times"
+#rm all-core/*.log 2> /dev/null
+./run.sh $TOTAL_RUN
+mv *.log all-core
+echo "Done"
+python calculator.py all-core
+
+# Run in single-core configuration
+if [ ! -d "single-core" ]; then
+    mkdir single-core
+fi
+
+echo "Running each benchmark with a single core for ${TOTAL_RUN} times"
+#rm single-core/*.log 2> /dev/null
+taskset -c 0 ./run.sh $TOTAL_RUN
+mv *.log single-core
+echo "Done"
+python calculator.py single-core
+
diff --git a/scripts/assertion_test.sh b/scripts/assertion_test.sh
deleted file mode 100644
index 8ee5011..0000000
--- a/scripts/assertion_test.sh
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/bin/bash
-
-MABAINLIB="../src"
-MABAINDIR="mabain/examples"
-
-TESTS="silo"
-
-TOTAL_RUN=$1
-
-if [ -z "$1" ]; then
-    TOTAL_RUN=10
-fi
-
-function run_silo_test {
-    echo "Silo assertion test"
-    COUNT_ASSERT=0
-    EXE='./dbtest --verbose -t 5'
-
-    cd 'silo/out-perf.debug.check.masstree/benchmarks/'
-    for i in `seq 1 1 $TOTAL_RUN`
-    do
-        OUTPUT="$($EXE 2>&1)"
-        ASSERT="$(echo "$OUTPUT" | grep "Assert")"
-        if [ -n "$ASSERT" ] ; then
-            ((++COUNT_ASSERT))
-        fi
-    done
-
-    cd ../../..
-
-    AVG_ASSERT=$(echo "${COUNT_ASSERT} * 100 / ${TOTAL_RUN}" | bc -l | xargs printf "%.1f")
-    echo "Runs: ${TOTAL_RUN} | Assertion rate: ${AVG_ASSERT}%"
-}
-
-function run_mabain_test {
-    export LD_LIBRARY_PATH="$${MABAINLIB}"
-
-    echo "Mabain assertion test"
-    COUNT_ASSERT=0
-    EXE='./mb_multi_thread_insert_test_assert'
-
-    cd ${MABAINDIR}
-    for i in `seq 1 1 $TOTAL_RUN`
-    do
-        OUTPUT="$(/usr/bin/time -f "time: %E" $EXE 2>&1)"
-        ASSERT="$(echo "$OUTPUT" | grep "Assert")"
-        if [ -n "$ASSERT" ] ; then
-            ((++COUNT_ASSERT))
-        fi
-
-        rm ./multi_test/* 2> /dev/null
-    done
-
-    cd ../..
-
-    AVG_ASSERT=$(echo "${COUNT_ASSERT} * 100 / ${TOTAL_RUN}" | bc -l | xargs printf "%.1f")
-    echo "Runs: ${TOTAL_RUN} | Assertion rate: ${AVG_ASSERT}%"
-}
-
-#function run_all_tests {
-#    for t in ${TESTS}
-#    do
-#        echo "running ${t}"
-#        (run_${t}_test 2>&1) > "${t}.log"
-#        run_${t}_test &> "${t}.log"
-#    done
-#}
-
-run_silo_test
-run_mabain_test
diff --git a/scripts/calculator.py b/scripts/calculator.py
index 8989701..e2a85b8 100644
--- a/scripts/calculator.py
+++ b/scripts/calculator.py
@@ -3,10 +3,19 @@ import re
 import statistics
 import sys
 
+def checkRace(content):
+    has_race = re.search(r'race', content)
+    if has_race:
+        print 'data race detected'
+    else:
+        print 'data race not detected'
+
 def GdaxStatistics(filename):
     gdax_data = []
     with open(filename, 'r') as f:
         content = f.read()
+        checkRace(content)
+
         allruns = re.findall(r'(0-0\.49.*?19.99 s: \d+)', content, flags=re.DOTALL)
         for run in allruns:
             iterations = 0
@@ -23,6 +32,7 @@ def SiloStatistics(filename):
     data = []
     with open(filename, 'r') as f:
         content = f.read()
+        checkRace(content)
         allruns = re.findall(r'agg_throughput: (\d+\.?\d*) ops', content)
         data = [float(x) for x in allruns]
 
@@ -32,6 +42,7 @@ def TimeStatistics(filename):
     data = []
     with open(filename, 'r') as f:
         content = f.read()
+        checkRace(content)
         allruns = re.findall(r'real.*?(\d+)m(\d+\.\d+)s', content)
         for run in allruns:
             (minute,second) = run
@@ -43,6 +54,7 @@ def JsbenchStatistics(filename):
     with open(filename, 'r') as f:
         content = f.read()
+        checkRace(content)
         result = re.search(r'(Final results.*?runs)', content, flags=re.DOTALL)
         print(result.group(0))
 
diff --git a/scripts/do_test_all.sh b/scripts/do_test_all.sh
new file mode 100755
index 0000000..7c806e7
--- /dev/null
+++ b/scripts/do_test_all.sh
@@ -0,0 +1,20 @@
+#!/bin/sh
+
+# Test application benchmarks
+./app_test_all.sh
+echo ""
+
+# Test CDSChecker data structure benchmarks
+cd cdschecker_modified_benchmarks
+./test_all.sh
+cd ..
+echo ""
+
+# Test data structures with bugs that tsan11/tsan11rec cannot detect
+cd tsan11-missingbug
+./test_all.sh
+cd ..
+echo ""
+
+# Test assertion failures in Silo and Mabain
+./app_assertion_test.sh
diff --git a/scripts/run.sh b/scripts/run.sh
index 865c44a..b6be652 100755
--- a/scripts/run.sh
+++ b/scripts/run.sh
@@ -62,17 +62,12 @@ function run_jsbench_test {
     cd ..
 }
 
-function run_all_tests {
-    for t in ${TESTS}
-    do
-        echo "running ${t}"
-        (run_${t}_test 2>&1) > "${t}.log"
-#        run_${t}_test &> "${t}.log"
-    done
-}
-
-# Remove previous output files
-rm *.log 2> /dev/null
 rm $REDUNDANT 2> /dev/null
 
-run_all_tests
+echo "Benchmarks: ${TESTS}"
+for t in ${TESTS}
+do
+    rm "${t}.log" 2> /dev/null
+    echo "Running ${t}"
+    (run_${t}_test 2>&1) > "${t}.log"
+done
diff --git a/scripts/setup.sh b/scripts/setup.sh
index 9cfd1fa..ab007ea 100755
--- a/scripts/setup.sh
+++ b/scripts/setup.sh
@@ -7,8 +7,10 @@ git clone git://plrg.eecs.uci.edu/c11concurrency-benchmarks.git tsan11-benchmark
 cd tsan11-benchmarks
 git checkout tsan11-docker
 cp /data/scripts/build.sh .
+cp /data/scripts/do_test_all.sh .
+cp /data/scripts/app_assertion_test.sh .
+cp /data/scripts/app_test_all.sh .
 cp /data/scripts/run.sh .
-cp /data/scripts/test_all.sh .
 cp /data/scripts/calculator.py .
 ./build.sh
 cd ..
@@ -19,8 +21,10 @@ git clone git://plrg.eecs.uci.edu/c11concurrency-benchmarks.git tsan11rec-benchm
 cd tsan11rec-benchmarks
 git checkout tsan11-docker
 cp /data/scripts/build.sh .
+cp /data/scripts/do_test_all.sh .
+cp /data/scripts/app_assertion_test.sh .
+cp /data/scripts/app_test_all.sh .
 cp /data/scripts/run.sh .
-cp /data/scripts/test_all.sh .
 cp /data/scripts/calculator.py .
 sed -i "s/tsan11/tsan11rec/g" clang clang++ gcc g++ build.sh run.sh
 ./build.sh
diff --git a/scripts/test_all.sh b/scripts/test_all.sh
deleted file mode 100755
index 4ac1597..0000000
--- a/scripts/test_all.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/bash
-
-TOTAL_RUN=$1
-
-if [ -z "$1" ]; then
-    TOTAL_RUN=10
-fi
-
-# Clear data
-rm *.log 2> /dev/null
-
-echo "test application benchmarks"
-# Run in all-core configuration
-if [ ! -d "all-core" ]; then
-    mkdir all-core
-fi
-
-echo "running each benchmark with multiple cores for ${TOTAL_RUN} times"
-rm all-core/*.log 2> /dev/null
-./run.sh $TOTAL_RUN
-mv *.log all-core
-echo "done"
-python calculator.py all-core
-
-# Run in single-core configuration
-if [ ! -d "single-core" ]; then
-    mkdir single-core
-fi
-
-echo "running each benchmark with a single core for ${TOTAL_RUN} times"
-rm single-core/*.log 2> /dev/null
-taskset -c 0 ./run.sh $TOTAL_RUN
-mv *.log single-core
-echo "done"
-python calculator.py single-core
-