author     Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>   2020-10-21 17:58:53 +0300
committer  Max Reitz <mreitz@redhat.com>                             2020-12-18 12:35:55 +0100
commit     f52e1af0b08af93b5354fe2648eccaec6bb8a2b2
tree       fc362dde3b0fb1fa48e5f0cab0ac570b50d5ce7c /scripts
parent     4a44554a65dfc5ccd5dad428981d0ab2959b4b8f
scripts/simplebench: use standard deviation for +- error
Reporting the standard deviation after '+-' is more conventional than
the maximum deviation reported so far.
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-Id: <20201021145859.11201-16-vsementsov@virtuozzo.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
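
For illustration, here is a minimal sketch (not part of the patch; the
timings are made up) contrasting the old and new error metrics on a
hypothetical set of per-run results:

    import statistics

    runs = [10.1, 9.8, 10.3, 9.9, 12.0]           # hypothetical per-run seconds

    avg = statistics.mean(runs)                   # 'average' is unchanged: 10.42
    max_delta = max(abs(r - avg) for r in runs)   # old '+-' value: 1.58
    stdev = statistics.stdev(runs)                # new '+-' value: ~0.90

    print('{:.2f} +- {:.2f} (old: maximum deviation)'.format(avg, max_delta))
    print('{:.2f} +- {:.2f} (new: standard deviation)'.format(avg, stdev))

A single outlier run dominates the maximum deviation, while the standard
deviation reflects the spread of all runs, which is why the latter is the
more informative '+-' figure.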
Diffstat (limited to 'scripts')
-rw-r--r--   scripts/simplebench/simplebench.py   11
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/scripts/simplebench/simplebench.py b/scripts/simplebench/simplebench.py
index 2251cd34ea..55ec1ad5db 100644
--- a/scripts/simplebench/simplebench.py
+++ b/scripts/simplebench/simplebench.py
@@ -18,6 +18,8 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #
 
+import statistics
+
 
 def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
     """Benchmark one test-case
@@ -40,7 +42,7 @@ def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
         'dimension':  dimension of results, may be 'seconds' or 'iops'
         'average':    average value (iops or seconds) per run (exists only if
                       at least one run succeeded)
-        'delta':      maximum delta between test_func result and the average
+        'stdev':      standard deviation of results
                       (exists only if at least one run succeeded)
         'n-failed':   number of failed runs (exists only if at least one run
                       failed)
@@ -67,10 +69,9 @@ def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
             assert all('seconds' in r for r in succeeded)
             assert all('iops' not in r for r in succeeded)
             dim = 'seconds'
-        avg = sum(r[dim] for r in succeeded) / len(succeeded)
         result['dimension'] = dim
-        result['average'] = avg
-        result['delta'] = max(abs(r[dim] - avg) for r in succeeded)
+        result['average'] = statistics.mean(r[dim] for r in succeeded)
+        result['stdev'] = statistics.stdev(r[dim] for r in succeeded)
 
     if len(succeeded) < count:
         result['n-failed'] = count - len(succeeded)
@@ -81,7 +82,7 @@ def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
 def ascii_one(result):
     """Return ASCII representation of bench_one() returned dict."""
     if 'average' in result:
-        s = '{:.2f} +- {:.2f}'.format(result['average'], result['delta'])
+        s = '{:.2f} +- {:.2f}'.format(result['average'], result['stdev'])
         if 'n-failed' in result:
             s += '\n({} failed)'.format(result['n-failed'])
         return s
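
As a usage sketch, the patched helpers could be exercised as below; this
assumes scripts/simplebench/simplebench.py is importable, and test_func is a
hypothetical stand-in workload, not a benchmark from the QEMU tree:

    import random

    import simplebench

    def test_func(env, case):
        # Hypothetical workload: each run 'takes' about 10 seconds with jitter.
        return {'seconds': 10 + random.uniform(-0.5, 0.5)}

    result = simplebench.bench_one(test_func, test_env={}, test_case={}, count=5)
    print(simplebench.ascii_one(result))      # e.g. '10.03 +- 0.31'

Note that statistics.stdev() needs at least two data points, so at least two
runs must succeed for the 'stdev' field to be computed.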