author     Peter Maydell <peter.maydell@linaro.org>  2018-01-16 15:45:15 +0000
committer  Peter Maydell <peter.maydell@linaro.org>  2018-01-16 15:45:15 +0000
commit     c1d5b9add7b04661bedef9a3379a8b82547b53db (patch)
tree       7c064b14e4a3645aedbd7d7f0009b9ee71ad9952 /scripts
parent     aae39d24a387a273deab3eb930dbf730aa379e22 (diff)
parent     b5976c2e46e86b36b01d8ac380a182e22209a7cd (diff)
Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging
* QemuMutex tracing improvements (Alex)
* ram_addr_t optimization (David)
* SCSI fixes (Fam, Stefan, me)
* do {} while (0) fixes (Eric)
* KVM fix for PMU (Jan)
* memory leak fixes from ASAN (Marc-André)
* migration fix for HPET, icount, loadvm (Maria, Pavel)
* hflags fixes (me, Tao)
* block/iscsi uninitialized variable (Peter L.)
* full support for GMainContexts in character devices (Peter Xu)
* more boot-serial-test (Thomas)
* Memory leak fix (Zhecheng)

# gpg: Signature made Tue 16 Jan 2018 14:15:45 GMT
# gpg:                using RSA key 0xBFFBD25F78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>"
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4 E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C 7682 BFFB D25F 78C7 AE83

* remotes/bonzini/tags/for-upstream: (51 commits)
  scripts/analyse-locks-simpletrace.py: script to analyse lock times
  util/qemu-thread-*: add qemu_lock, locked and unlock trace events
  cpu: flush TB cache when loading VMState
  block/iscsi: fix initialization of iTask in iscsi_co_get_block_status
  find_ram_offset: Align ram_addr_t allocation on long boundaries
  find_ram_offset: Add comments and tracing
  cpu_physical_memory_sync_dirty_bitmap: Another alignment fix
  checkpatch: Enforce proper do/while (0) style
  maint: Fix macros with broken 'do/while(0); ' usage
  tests: Avoid 'do/while(false); ' in vhost-user-bridge
  chardev: Clean up previous patch indentation
  chardev: Use goto/label instead of do/break/while(0)
  mips: Tweak location of ';' in macros
  net: Drop unusual use of do { } while (0);
  irq: fix memory leak
  cpus: unify qemu_*_wait_io_event
  icount: fixed saving/restoring of icount warp timers
  scripts/qemu-gdb/timers.py: new helper to dump timer state
  scripts/qemu-gdb: add simple tcg lock status helper
  target-i386: update hflags on Hypervisor.framework
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'scripts')
-rwxr-xr-x  scripts/analyse-locks-simpletrace.py   99
-rwxr-xr-x  scripts/checkpatch.pl                   5
-rw-r--r--  scripts/qemu-gdb.py                     4
-rw-r--r--  scripts/qemugdb/tcg.py                 46
-rw-r--r--  scripts/qemugdb/timers.py              54
5 files changed, 207 insertions(+), 1 deletion(-)
diff --git a/scripts/analyse-locks-simpletrace.py b/scripts/analyse-locks-simpletrace.py
new file mode 100755
index 0000000000..101e84dea5
--- /dev/null
+++ b/scripts/analyse-locks-simpletrace.py
@@ -0,0 +1,99 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Analyse lock events and compute statistics
+#
+# Author: Alex Bennée <alex.bennee@linaro.org>
+#
+
+import os
+import simpletrace
+import argparse
+import numpy as np
+
+class MutexAnalyser(simpletrace.Analyzer):
+ "A simpletrace Analyser for checking locks."
+
+ def __init__(self):
+ self.locks = 0
+ self.locked = 0
+ self.unlocks = 0
+ self.mutex_records = {}
+
+ def _get_mutex(self, mutex):
+ if not mutex in self.mutex_records:
+ self.mutex_records[mutex] = {"locks": 0,
+ "lock_time": 0,
+ "acquire_times": [],
+ "locked": 0,
+ "locked_time": 0,
+ "held_times": [],
+ "unlocked": 0}
+
+ return self.mutex_records[mutex]
+
+ def qemu_mutex_lock(self, timestamp, mutex, filename, line):
+ self.locks += 1
+ rec = self._get_mutex(mutex)
+ rec["locks"] += 1
+ rec["lock_time"] = timestamp[0]
+ rec["lock_loc"] = (filename, line)
+
+ def qemu_mutex_locked(self, timestamp, mutex, filename, line):
+ self.locked += 1
+ rec = self._get_mutex(mutex)
+ rec["locked"] += 1
+ rec["locked_time"] = timestamp[0]
+ acquire_time = rec["locked_time"] - rec["lock_time"]
+ rec["locked_loc"] = (filename, line)
+ rec["acquire_times"].append(acquire_time)
+
+ def qemu_mutex_unlock(self, timestamp, mutex, filename, line):
+ self.unlocks += 1
+ rec = self._get_mutex(mutex)
+ rec["unlocked"] += 1
+ held_time = timestamp[0] - rec["locked_time"]
+ rec["held_times"].append(held_time)
+ rec["unlock_loc"] = (filename, line)
+
+
+def get_args():
+ "Grab options"
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--output", "-o", type=str, help="Render plot to file")
+ parser.add_argument("events", type=str, help='trace file read from')
+ parser.add_argument("tracefile", type=str, help='trace file read from')
+ return parser.parse_args()
+
+if __name__ == '__main__':
+ args = get_args()
+
+ # Gather data from the trace
+ analyser = MutexAnalyser()
+ simpletrace.process(args.events, args.tracefile, analyser)
+
+ print ("Total locks: %d, locked: %d, unlocked: %d" %
+ (analyser.locks, analyser.locked, analyser.unlocks))
+
+ # Now dump the individual lock stats
+ for key, val in sorted(analyser.mutex_records.iteritems(),
+ key=lambda (k,v): v["locks"]):
+ print ("Lock: %#x locks: %d, locked: %d, unlocked: %d" %
+ (key, val["locks"], val["locked"], val["unlocked"]))
+
+ acquire_times = np.array(val["acquire_times"])
+ if len(acquire_times) > 0:
+ print (" Acquire Time: min:%d median:%d avg:%.2f max:%d" %
+ (acquire_times.min(), np.median(acquire_times),
+ acquire_times.mean(), acquire_times.max()))
+
+ held_times = np.array(val["held_times"])
+ if len(held_times) > 0:
+ print (" Held Time: min:%d median:%d avg:%.2f max:%d" %
+ (held_times.min(), np.median(held_times),
+ held_times.mean(), held_times.max()))
+
+ # Check if any locks still held
+ if val["locks"] > val["locked"]:
+ print (" LOCK HELD (%s:%s)" % (val["locked_loc"]))
+ print (" BLOCKED (%s:%s)" % (val["lock_loc"]))
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 3dc27d9656..accba24a31 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -1622,6 +1622,11 @@ sub process {
			}
		}

+# 'do ... while (0/false)' only makes sense in macros, without trailing ';'
+		if ($line =~ /while\s*\((0|false)\);/) {
+			ERROR("suspicious ; after while (0)\n" . $herecurr);
+		}
+
# Check relative indent for conditionals and blocks.
		if ($line =~ /\b(?:(?:if|while|for)\s*\(|do\b)/ && $line !~ /^.\s*#/ && $line !~ /\}\s*while\s*/) {
			my ($s, $c) = ($stat, $cond);
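The trailing ';' matters because a macro defined as 'do { ... } while (0);' expands to a double semicolon at the call site, and the extra empty statement breaks 'if (x) MACRO(); else ...'. A rough re-creation of the new check in Python, with made-up sample lines, to show what it flags:

    import re

    # Same regular expression as the checkpatch rule above.
    pattern = re.compile(r'while\s*\((0|false)\);')

    samples = ['#define A() do { f(); } while (0);',  # flagged: trailing ';'
               '#define B() do { f(); } while (0)']   # clean
    for line in samples:
        print('%s -> %s' % (line, 'ERROR' if pattern.search(line) else 'ok'))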
diff --git a/scripts/qemu-gdb.py b/scripts/qemu-gdb.py
index b3f8e04f77..690827e6fc 100644
--- a/scripts/qemu-gdb.py
+++ b/scripts/qemu-gdb.py
@@ -26,7 +26,7 @@ import os, sys
sys.path.append(os.path.dirname(__file__))
-from qemugdb import aio, mtree, coroutine
+from qemugdb import aio, mtree, coroutine, tcg, timers
class QemuCommand(gdb.Command):
    '''Prefix for QEMU debug support commands'''
@@ -38,6 +38,8 @@ QemuCommand()
coroutine.CoroutineCommand()
mtree.MtreeCommand()
aio.HandlersCommand()
+tcg.TCGLockStatusCommand()
+timers.TimersCommand()
coroutine.CoroutineSPFunction()
coroutine.CoroutinePCFunction()
diff --git a/scripts/qemugdb/tcg.py b/scripts/qemugdb/tcg.py
new file mode 100644
index 0000000000..8c7f1d7454
--- /dev/null
+++ b/scripts/qemugdb/tcg.py
@@ -0,0 +1,46 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# GDB debugging support, TCG status
+#
+# Copyright 2016 Linaro Ltd
+#
+# Authors:
+# Alex Bennée <alex.bennee@linaro.org>
+#
+# This work is licensed under the terms of the GNU GPL, version 2. See
+# the COPYING file in the top-level directory.
+#
+# Contributions after 2012-01-13 are licensed under the terms of the
+# GNU GPL, version 2 or (at your option) any later version.
+
+# 'qemu tcg-lock-status' -- display the TCG lock status across threads
+
+import gdb
+
+class TCGLockStatusCommand(gdb.Command):
+    '''Display TCG Execution Status'''
+    def __init__(self):
+        gdb.Command.__init__(self, 'qemu tcg-lock-status', gdb.COMMAND_DATA,
+                             gdb.COMPLETE_NONE)
+
+    def invoke(self, arg, from_tty):
+        gdb.write("Thread, BQL (iothread_mutex), Replay, Blocked?\n")
+        for thread in gdb.inferiors()[0].threads():
+            thread.switch()
+
+            iothread = gdb.parse_and_eval("iothread_locked")
+            replay = gdb.parse_and_eval("replay_locked")
+
+            frame = gdb.selected_frame()
+            if frame.name() == "__lll_lock_wait":
+                frame.older().select()
+                mutex = gdb.parse_and_eval("mutex")
+                owner = gdb.parse_and_eval("mutex->__data.__owner")
+                blocked = ("__lll_lock_wait waiting on %s from %d" %
+                           (mutex, owner))
+            else:
+                blocked = "not blocked"
+
+            gdb.write("%d/%d, %s, %s, %s\n" % (thread.num, thread.ptid[1],
+                                               iothread, replay, blocked))
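For reference, the command is run from a gdb session attached to QEMU after sourcing scripts/qemu-gdb.py; the header line below is printed by the code above, while the per-thread values are purely illustrative:

    (gdb) source scripts/qemu-gdb.py
    (gdb) qemu tcg-lock-status
    Thread, BQL (iothread_mutex), Replay, Blocked?
    1/4964, true, false, not blocked
    2/4965, false, false, not blocked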
diff --git a/scripts/qemugdb/timers.py b/scripts/qemugdb/timers.py
new file mode 100644
index 0000000000..be71a001e3
--- /dev/null
+++ b/scripts/qemugdb/timers.py
@@ -0,0 +1,54 @@
+#!/usr/bin/python
+# GDB debugging support
+#
+# Copyright 2017 Linaro Ltd
+#
+# Author: Alex Bennée <alex.bennee@linaro.org>
+#
+# This work is licensed under the terms of the GNU GPL, version 2. See
+# the COPYING file in the top-level directory.
+
+# 'qemu timers' -- display the current timerlists
+
+import gdb
+
+class TimersCommand(gdb.Command):
+    '''Display the current QEMU timers'''
+
+    def __init__(self):
+        'Register the class as a gdb command'
+        gdb.Command.__init__(self, 'qemu timers', gdb.COMMAND_DATA,
+                             gdb.COMPLETE_NONE)
+
+    def dump_timers(self, timer):
+        "Follow a timer and recursively dump each one in the list."
+        # timer should be of type QEMUTimer
+        gdb.write("    timer %s/%s (cb:%s,opq:%s)\n" % (
+            timer['expire_time'],
+            timer['scale'],
+            timer['cb'],
+            timer['opaque']))
+
+        if int(timer['next']) > 0:
+            self.dump_timers(timer['next'])
+
+
+    def process_timerlist(self, tlist, ttype):
+        gdb.write("Processing %s timers\n" % (ttype))
+        gdb.write("  clock %s is enabled:%s, last:%s\n" % (
+            tlist['clock']['type'],
+            tlist['clock']['enabled'],
+            tlist['clock']['last']))
+        if int(tlist['active_timers']) > 0:
+            self.dump_timers(tlist['active_timers'])
+
+
+    def invoke(self, arg, from_tty):
+        'Run the command'
+        main_timers = gdb.parse_and_eval("main_loop_tlg")
+
+        # This will break if QEMUClockType in timer.h is redefined
+        self.process_timerlist(main_timers['tl'][0], "Realtime")
+        self.process_timerlist(main_timers['tl'][1], "Virtual")
+        self.process_timerlist(main_timers['tl'][2], "Host")
+        self.process_timerlist(main_timers['tl'][3], "Virtual RT")
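As with the lock-status helper, 'qemu timers' is issued from a gdb session once qemu-gdb.py has been sourced; the output shape follows the gdb.write() calls above, with clock and timer values that are purely illustrative:

    (gdb) qemu timers
    Processing Realtime timers
      clock QEMU_CLOCK_REALTIME is enabled:true, last:...
    Processing Virtual timers
      clock QEMU_CLOCK_VIRTUAL is enabled:true, last:...
        timer .../1 (cb:...,opq:...)
    Processing Host timers
      clock QEMU_CLOCK_HOST is enabled:true, last:...
    Processing Virtual RT timers
      clock QEMU_CLOCK_VIRTUAL_RT is enabled:true, last:...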