author     Peter Maydell <peter.maydell@linaro.org>   2020-04-14 17:27:00 +0100
committer  Peter Maydell <peter.maydell@linaro.org>   2020-04-14 17:27:00 +0100
commit     a457215ed2aaa9598bd4ebbc6745d2a494ba9990
tree       db6967ddbd90a7c9862ce25449d0556b36985c48
parent     14e5526b51910efd62cd31cd95b49baca975c83f
parent     84f82ddcbb4ac4ed04c8675e85155329f23184f0
Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20200414' into staging
patch queue:
 * Fix some problems that trip up Coverity's scanner
 * run-coverity-scan: New script automating the scan-and-upload process
 * docs: Improve our gdbstub documentation
 * configure: Honour --disable-werror for Sphinx
 * docs: Fix errors produced when building with Sphinx 3.0
 * docs: Require Sphinx 1.6 or better
 * Add deprecation notice for KVM support on AArch32 hosts

# gpg: Signature made Tue 14 Apr 2020 17:25:22 BST
# gpg:                using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg:                issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>" [ultimate]
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [ultimate]
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE

* remotes/pmaydell/tags/pull-target-arm-20200414:
  Deprecate KVM support for AArch32
  docs: Require Sphinx 1.6 or better
  kernel-doc: Use c:struct for Sphinx 3.0 and later
  scripts/kernel-doc: Add missing close-paren in c:function directives
  configure: Honour --disable-werror for Sphinx
  docs: Improve our gdbstub documentation
  scripts/coverity-scan: Add Docker support
  scripts/run-coverity-scan: Script to run Coverity Scan build
  linux-user/flatload.c: Use "" for include of QEMU header target_flat.h
  thread.h: Remove trailing semicolons from Coverity qemu_mutex_lock() etc
  thread.h: Fix Coverity version of qemu_cond_timedwait()
  osdep.h: Drop no-longer-needed Coverity workarounds

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
-rw-r--r--  MAINTAINERS                                   5
-rw-r--r--  Makefile                                      2
-rwxr-xr-x  configure                                     9
-rw-r--r--  docs/conf.py                                  6
-rw-r--r--  docs/sphinx/kerneldoc.py                      1
-rw-r--r--  docs/system/deprecated.rst                    8
-rw-r--r--  docs/system/gdb.rst                          24
-rw-r--r--  include/qemu/osdep.h                         14
-rw-r--r--  include/qemu/thread.h                        12
-rw-r--r--  linux-user/flatload.c                         2
-rw-r--r--  qemu-options.hx                              24
-rw-r--r--  scripts/coverity-scan/coverity-scan.docker  131
-rwxr-xr-x  scripts/coverity-scan/run-coverity-scan     401
-rwxr-xr-x  scripts/kernel-doc                           18
14 files changed, 616 insertions, 41 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 5f93e8c01d..8cbc1fac2b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2038,6 +2038,11 @@ M: Markus Armbruster <armbru@redhat.com>
S: Supported
F: scripts/coverity-model.c
+Coverity Scan integration
+M: Peter Maydell <peter.maydell@linaro.org>
+S: Maintained
+F: scripts/coverity-scan/
+
Device Tree
M: Alistair Francis <alistair.francis@wdc.com>
R: David Gibson <david@gibson.dropbear.id.au>
diff --git a/Makefile b/Makefile
index 84ef881600..8a9113e666 100644
--- a/Makefile
+++ b/Makefile
@@ -1076,7 +1076,7 @@ sphinxdocs: $(MANUAL_BUILDDIR)/devel/index.html \
# Note the use of different doctree for each (manual, builder) tuple;
# this works around Sphinx not handling parallel invocation on
# a single doctree: https://github.com/sphinx-doc/sphinx/issues/2946
-build-manual = $(call quiet-command,CONFDIR="$(qemu_confdir)" $(SPHINX_BUILD) $(if $(V),,-q) -W -b $2 -D version=$(VERSION) -D release="$(FULL_VERSION)" -d .doctrees/$1-$2 $(SRC_PATH)/docs/$1 $(MANUAL_BUILDDIR)/$1 ,"SPHINX","$(MANUAL_BUILDDIR)/$1")
+build-manual = $(call quiet-command,CONFDIR="$(qemu_confdir)" $(SPHINX_BUILD) $(if $(V),,-q) $(SPHINX_WERROR) -b $2 -D version=$(VERSION) -D release="$(FULL_VERSION)" -d .doctrees/$1-$2 $(SRC_PATH)/docs/$1 $(MANUAL_BUILDDIR)/$1 ,"SPHINX","$(MANUAL_BUILDDIR)/$1")
# We assume all RST files in the manual's directory are used in it
manual-deps = $(wildcard $(SRC_PATH)/docs/$1/*.rst $(SRC_PATH)/docs/$1/*/*.rst) \
$(SRC_PATH)/docs/defs.rst.inc \
diff --git a/configure b/configure
index 233c671aaa..9b1f5b33e4 100755
--- a/configure
+++ b/configure
@@ -4928,6 +4928,12 @@ if check_include sys/kcov.h ; then
kcov=yes
fi
+# If we're making warnings fatal, apply this to Sphinx runs as well
+sphinx_werror=""
+if test "$werror" = "yes"; then
+ sphinx_werror="-W"
+fi
+
# Check we have a new enough version of sphinx-build
has_sphinx_build() {
# This is a bit awkward but works: create a trivial document and
@@ -4936,7 +4942,7 @@ has_sphinx_build() {
# sphinx-build doesn't exist at all or if it is too old.
mkdir -p "$TMPDIR1/sphinx"
touch "$TMPDIR1/sphinx/index.rst"
- "$sphinx_build" -c "$source_path/docs" -b html "$TMPDIR1/sphinx" "$TMPDIR1/sphinx/out" >/dev/null 2>&1
+ "$sphinx_build" $sphinx_werror -c "$source_path/docs" -b html "$TMPDIR1/sphinx" "$TMPDIR1/sphinx/out" >/dev/null 2>&1
}
# Check if tools are available to build documentation.
@@ -7631,6 +7637,7 @@ echo "INSTALL_PROG=$install -c -m 0755" >> $config_host_mak
echo "INSTALL_LIB=$install -c -m 0644" >> $config_host_mak
echo "PYTHON=$python" >> $config_host_mak
echo "SPHINX_BUILD=$sphinx_build" >> $config_host_mak
+echo "SPHINX_WERROR=$sphinx_werror" >> $config_host_mak
echo "GENISOIMAGE=$genisoimage" >> $config_host_mak
echo "CC=$cc" >> $config_host_mak
if $iasl -h > /dev/null 2>&1; then
diff --git a/docs/conf.py b/docs/conf.py
index 7768611e89..d6e173ef77 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -59,8 +59,10 @@ sys.path.insert(0, os.path.join(qemu_docdir, "sphinx"))
# If your documentation needs a minimal Sphinx version, state it here.
#
-# 1.3 is where the 'alabaster' theme was shipped with Sphinx.
-needs_sphinx = '1.3'
+# Sphinx 1.5 and earlier can't build our docs because they are too
+# picky about the syntax of the argument to the option:: directive
+# (see Sphinx bugs #646, #3366).
+needs_sphinx = '1.6'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
diff --git a/docs/sphinx/kerneldoc.py b/docs/sphinx/kerneldoc.py
index 1159405cb9..3e87940206 100644
--- a/docs/sphinx/kerneldoc.py
+++ b/docs/sphinx/kerneldoc.py
@@ -99,6 +99,7 @@ class KernelDocDirective(Directive):
env.note_dependency(os.path.abspath(f))
cmd += ['-export-file', f]
+ cmd += ['-sphinx-version', sphinx.__version__]
cmd += [filename]
try:
diff --git a/docs/system/deprecated.rst b/docs/system/deprecated.rst
index c633fe2bef..3142fac386 100644
--- a/docs/system/deprecated.rst
+++ b/docs/system/deprecated.rst
@@ -336,6 +336,14 @@ The ``compat`` property used to set backwards compatibility modes for
the processor has been deprecated. The ``max-cpu-compat`` property of
the ``pseries`` machine type should be used instead.
+KVM guest support on 32-bit Arm hosts (since 5.0)
+'''''''''''''''''''''''''''''''''''''''''''''''''
+
+The Linux kernel has dropped support for allowing 32-bit Arm systems
+to host KVM guests as of the 5.7 kernel. Accordingly, QEMU is deprecating
+its support for this configuration and will remove it in a future version.
+Running 32-bit guests on a 64-bit Arm host remains supported.
+
System emulator devices
-----------------------
diff --git a/docs/system/gdb.rst b/docs/system/gdb.rst
index 639f814b32..a40145fcf8 100644
--- a/docs/system/gdb.rst
+++ b/docs/system/gdb.rst
@@ -3,17 +3,25 @@
GDB usage
---------
-QEMU has a primitive support to work with gdb, so that you can do
-'Ctrl-C' while the virtual machine is running and inspect its state.
-
-In order to use gdb, launch QEMU with the '-s' option. It will wait for
-a gdb connection:
+QEMU supports working with gdb via gdb's remote-connection facility
+(the "gdbstub"). This allows you to debug guest code in the same
+way that you might with a low-level debug facility like JTAG
+on real hardware. You can stop and start the virtual machine,
+examine state like registers and memory, and set breakpoints and
+watchpoints.
+
+In order to use gdb, launch QEMU with the ``-s`` and ``-S`` options.
+The ``-s`` option will make QEMU listen for an incoming connection
+from gdb on TCP port 1234, and ``-S`` will make QEMU not start the
+guest until you tell it to from gdb. (If you want to specify which
+TCP port to use or to use something other than TCP for the gdbstub
+connection, use the ``-gdb dev`` option instead of ``-s``.)
.. parsed-literal::
- |qemu_system| -s -kernel bzImage -hda rootdisk.img -append "root=/dev/hda"
- Connected to host network interface: tun0
- Waiting gdb connection on port 1234
+ |qemu_system| -s -S -kernel bzImage -hda rootdisk.img -append "root=/dev/hda"
+
+QEMU will launch but will silently wait for gdb to connect.
Then launch gdb on the 'vmlinux' executable::
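
To make the gdb.rst workflow above concrete, a minimal session might look like the following (an illustrative sketch, not part of the patch: the x86-64 system binary, kernel images and the start_kernel breakpoint are example choices, and port 1234 is simply the -s default):

    $ qemu-system-x86_64 -s -S -kernel bzImage -hda rootdisk.img -append "root=/dev/hda"

    # in a second terminal
    $ gdb vmlinux
    (gdb) target remote localhost:1234
    (gdb) break start_kernel
    (gdb) continue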
diff --git a/include/qemu/osdep.h b/include/qemu/osdep.h
index 9bd3dcfd13..20f5c5f197 100644
--- a/include/qemu/osdep.h
+++ b/include/qemu/osdep.h
@@ -33,20 +33,6 @@
#else
#include "exec/poison.h"
#endif
-#ifdef __COVERITY__
-/* Coverity does not like the new _Float* types that are used by
- * recent glibc, and croaks on every single file that includes
- * stdlib.h. These typedefs are enough to please it.
- *
- * Note that these fix parse errors so they cannot be placed in
- * scripts/coverity-model.c.
- */
-typedef float _Float32;
-typedef double _Float32x;
-typedef double _Float64;
-typedef __float80 _Float64x;
-typedef __float128 _Float128;
-#endif
#include "qemu/compiler.h"
diff --git a/include/qemu/thread.h b/include/qemu/thread.h
index 047db0307e..d22848138e 100644
--- a/include/qemu/thread.h
+++ b/include/qemu/thread.h
@@ -57,17 +57,17 @@ extern QemuCondTimedWaitFunc qemu_cond_timedwait_func;
* hide them.
*/
#define qemu_mutex_lock(m) \
- qemu_mutex_lock_impl(m, __FILE__, __LINE__);
+ qemu_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_mutex_trylock(m) \
- qemu_mutex_trylock_impl(m, __FILE__, __LINE__);
+ qemu_mutex_trylock_impl(m, __FILE__, __LINE__)
#define qemu_rec_mutex_lock(m) \
- qemu_rec_mutex_lock_impl(m, __FILE__, __LINE__);
+ qemu_rec_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_rec_mutex_trylock(m) \
- qemu_rec_mutex_trylock_impl(m, __FILE__, __LINE__);
+ qemu_rec_mutex_trylock_impl(m, __FILE__, __LINE__)
#define qemu_cond_wait(c, m) \
- qemu_cond_wait_impl(c, m, __FILE__, __LINE__);
+ qemu_cond_wait_impl(c, m, __FILE__, __LINE__)
#define qemu_cond_timedwait(c, m, ms) \
- qemu_cond_wait_impl(c, m, ms, __FILE__, __LINE__);
+ qemu_cond_timedwait_impl(c, m, ms, __FILE__, __LINE__)
#else
#define qemu_mutex_lock(m) ({ \
QemuMutexLockFunc _f = atomic_read(&qemu_mutex_lock_func); \
diff --git a/linux-user/flatload.c b/linux-user/flatload.c
index 0122ab3afe..66901f39cc 100644
--- a/linux-user/flatload.c
+++ b/linux-user/flatload.c
@@ -37,7 +37,7 @@
#include "qemu.h"
#include "flat.h"
-#include <target_flat.h>
+#include "target_flat.h"
//#define DEBUG
diff --git a/qemu-options.hx b/qemu-options.hx
index 16debd03cb..292d4e7c0c 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -3680,14 +3680,26 @@ SRST
ERST
DEF("gdb", HAS_ARG, QEMU_OPTION_gdb, \
- "-gdb dev wait for gdb connection on 'dev'\n", QEMU_ARCH_ALL)
+ "-gdb dev accept gdb connection on 'dev'. (QEMU defaults to starting\n"
+ " the guest without waiting for gdb to connect; use -S too\n"
+ " if you want it to not start execution.)\n",
+ QEMU_ARCH_ALL)
SRST
``-gdb dev``
- Wait for gdb connection on device dev (see
- :ref:`gdb_005fusage`). Typical connections will likely be
- TCP-based, but also UDP, pseudo TTY, or even stdio are reasonable
- use case. The latter is allowing to start QEMU from within gdb and
- establish the connection via a pipe:
+ Accept a gdb connection on device dev (see
+ :ref:`gdb_005fusage`). Note that this option does not pause QEMU
+ execution -- if you want QEMU to not start the guest until you
+ connect with gdb and issue a ``continue`` command, you will need to
+ also pass the ``-S`` option to QEMU.
+
+ The most usual configuration is to listen on a local TCP socket::
+
+ -gdb tcp::3117
+
+ but you can specify other backends; UDP, pseudo TTY, or even stdio
+ are all reasonable use cases. For example, a stdio connection
+ allows you to start QEMU from within gdb and establish the
+ connection via a pipe:
.. parsed-literal::
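
As an illustration of the stdio use case mentioned above (a sketch, not part of the patch context; the binary name and kernel arguments are example choices), QEMU can be started from inside gdb so that the gdbstub connection runs over a pipe:

    $ gdb vmlinux
    (gdb) target remote | exec qemu-system-x86_64 -gdb stdio -S -kernel bzImage -append "root=/dev/hda"
    (gdb) continue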
diff --git a/scripts/coverity-scan/coverity-scan.docker b/scripts/coverity-scan/coverity-scan.docker
new file mode 100644
index 0000000000..a4f64d1283
--- /dev/null
+++ b/scripts/coverity-scan/coverity-scan.docker
@@ -0,0 +1,131 @@
+# syntax=docker/dockerfile:1.0.0-experimental
+#
+# Docker setup for running the "Coverity Scan" tools over the source
+# tree and uploading them to the website, as per
+# https://scan.coverity.com/projects/qemu/builds/new
+# We do this on a fixed config (currently Fedora 30 with a known
+# set of dependencies and a configure command that enables a specific
+# set of options) so that random changes don't result in our accidentally
+# dropping some files from the scan.
+#
+# We don't build on top of the fedora.docker file because we don't
+# want to accidentally change or break the scan config when that
+# is updated.
+
+# The work of actually doing the build is handled by the
+# run-coverity-scan script.
+
+FROM fedora:30
+ENV PACKAGES \
+ alsa-lib-devel \
+ bc \
+ bison \
+ brlapi-devel \
+ bzip2 \
+ bzip2-devel \
+ ccache \
+ clang \
+ curl \
+ cyrus-sasl-devel \
+ dbus-daemon \
+ device-mapper-multipath-devel \
+ findutils \
+ flex \
+ gcc \
+ gcc-c++ \
+ gettext \
+ git \
+ glib2-devel \
+ glusterfs-api-devel \
+ gnutls-devel \
+ gtk3-devel \
+ hostname \
+ libaio-devel \
+ libasan \
+ libattr-devel \
+ libblockdev-mpath-devel \
+ libcap-devel \
+ libcap-ng-devel \
+ libcurl-devel \
+ libepoxy-devel \
+ libfdt-devel \
+ libgbm-devel \
+ libiscsi-devel \
+ libjpeg-devel \
+ libpmem-devel \
+ libnfs-devel \
+ libpng-devel \
+ librbd-devel \
+ libseccomp-devel \
+ libssh-devel \
+ libubsan \
+ libudev-devel \
+ libusbx-devel \
+ libxml2-devel \
+ libzstd-devel \
+ llvm \
+ lzo-devel \
+ make \
+ mingw32-bzip2 \
+ mingw32-curl \
+ mingw32-glib2 \
+ mingw32-gmp \
+ mingw32-gnutls \
+ mingw32-gtk3 \
+ mingw32-libjpeg-turbo \
+ mingw32-libpng \
+ mingw32-libtasn1 \
+ mingw32-nettle \
+ mingw32-nsis \
+ mingw32-pixman \
+ mingw32-pkg-config \
+ mingw32-SDL2 \
+ mingw64-bzip2 \
+ mingw64-curl \
+ mingw64-glib2 \
+ mingw64-gmp \
+ mingw64-gnutls \
+ mingw64-gtk3 \
+ mingw64-libjpeg-turbo \
+ mingw64-libpng \
+ mingw64-libtasn1 \
+ mingw64-nettle \
+ mingw64-pixman \
+ mingw64-pkg-config \
+ mingw64-SDL2 \
+ ncurses-devel \
+ nettle-devel \
+ nss-devel \
+ numactl-devel \
+ perl \
+ perl-Test-Harness \
+ pixman-devel \
+ pulseaudio-libs-devel \
+ python3 \
+ python3-sphinx \
+ PyYAML \
+ rdma-core-devel \
+ SDL2-devel \
+ snappy-devel \
+ sparse \
+ spice-server-devel \
+ systemd-devel \
+ systemtap-sdt-devel \
+ tar \
+ texinfo \
+ usbredir-devel \
+ virglrenderer-devel \
+ vte291-devel \
+ wget \
+ which \
+ xen-devel \
+ xfsprogs-devel \
+ zlib-devel
+ENV QEMU_CONFIGURE_OPTS --python=/usr/bin/python3
+
+RUN dnf install -y $PACKAGES
+RUN rpm -q $PACKAGES | sort > /packages.txt
+ENV PATH $PATH:/usr/libexec/python3-sphinx/
+ENV COVERITY_TOOL_BASE=/coverity-tools
+COPY run-coverity-scan run-coverity-scan
+RUN --mount=type=secret,id=coverity.token,required ./run-coverity-scan --update-tools-only --tokenfile /run/secrets/coverity.token
diff --git a/scripts/coverity-scan/run-coverity-scan b/scripts/coverity-scan/run-coverity-scan
new file mode 100755
index 0000000000..2e067ef5cf
--- /dev/null
+++ b/scripts/coverity-scan/run-coverity-scan
@@ -0,0 +1,401 @@
+#!/bin/sh -e
+
+# Upload a created tarball to Coverity Scan, as per
+# https://scan.coverity.com/projects/qemu/builds/new
+
+# This work is licensed under the terms of the GNU GPL version 2,
+# or (at your option) any later version.
+# See the COPYING file in the top-level directory.
+#
+# Copyright (c) 2017-2020 Linaro Limited
+# Written by Peter Maydell
+
+# Note that this script will automatically download and
+# run the (closed-source) coverity build tools, so don't
+# use it if you don't trust them!
+
+# This script assumes that you're running it from a QEMU source
+# tree, and that tree is a fresh clean one, because we do an in-tree
+# build. (This is necessary so that the filenames that the Coverity
+# Scan server sees are relative paths that match up with the component
+# regular expressions it uses; an out-of-tree build won't work for this.)
+# The host machine should have as many of QEMU's dependencies
+# installed as possible, for maximum coverity coverage.
+
+# To do an upload you need to be a maintainer in the Coverity online
+# service, and you will need to know the "Coverity token", which is a
+# secret 8 digit hex string. You can find that from the web UI in the
+# project settings, if you have maintainer access there.
+
+# Command line options:
+# --dry-run : run the tools, but don't actually do the upload
+# --docker : create and work inside a docker container
+# --update-tools-only : update the cached copy of the tools, but don't run them
+# --tokenfile : file to read Coverity token from
+# --version ver : specify version being analyzed (default: ask git)
+# --description desc : specify description of this version (default: ask git)
+# --srcdir : QEMU source tree to analyze (default: current working dir)
+# --results-tarball : path to copy the results tarball to (default: don't
+# copy it anywhere, just upload it)
+# --src-tarball : tarball to untar into src dir (default: none); this
+# is intended mainly for internal use by the Docker support
+#
+# User-specifiable environment variables:
+# COVERITY_TOKEN -- Coverity token
+# COVERITY_EMAIL -- the email address to use for uploads (default:
+# looks at your git user.email config)
+# COVERITY_BUILD_CMD -- make command (default: 'make -jN' where N is
+# number of CPUs as determined by 'nproc')
+# COVERITY_TOOL_BASE -- set to directory to put coverity tools
+# (default: /tmp/coverity-tools)
+#
+# You must specify the token, either by environment variable or by
+# putting it in a file and using --tokenfile. Everything else has
+# a reasonable default if this is run from a git tree.
+
+check_upload_permissions() {
+ # Check whether we can do an upload to the server; will exit the script
+ # with status 1 if the check failed (usually a bad token);
+ # will exit the script with status 0 if the check indicated that we
+ # can't upload yet (ie we are at quota)
+ # Assumes that PROJTOKEN, PROJNAME and DRYRUN have been initialized.
+
+ echo "Checking upload permissions..."
+
+ if ! up_perm="$(wget https://scan.coverity.com/api/upload_permitted --post-data "token=$PROJTOKEN&project=$PROJNAME" -q -O -)"; then
+ echo "Coverity Scan API access denied: bad token?"
+ exit 1
+ fi
+
+ # Really up_perm is a JSON response with either
+ # {upload_permitted:true} or {next_upload_permitted_at:<date>}
+ # We do some hacky string parsing instead of properly parsing it.
+ case "$up_perm" in
+ *upload_permitted*true*)
+ echo "Coverity Scan: upload permitted"
+ ;;
+ *next_upload_permitted_at*)
+ if [ "$DRYRUN" = yes ]; then
+ echo "Coverity Scan: upload quota reached, continuing dry run"
+ else
+ echo "Coverity Scan: upload quota reached; stopping here"
+ # Exit success as this isn't a build error.
+ exit 0
+ fi
+ ;;
+ *)
+ echo "Coverity Scan upload check: unexpected result $up_perm"
+ exit 1
+ ;;
+ esac
+}
+
+
+update_coverity_tools () {
+ # Check for whether we need to download the Coverity tools
+ # (either because we don't have a copy, or because it's out of date)
+ # Assumes that COVERITY_TOOL_BASE, PROJTOKEN and PROJNAME are set.
+
+ mkdir -p "$COVERITY_TOOL_BASE"
+ cd "$COVERITY_TOOL_BASE"
+
+ echo "Checking for new version of coverity build tools..."
+ wget https://scan.coverity.com/download/linux64 --post-data "token=$PROJTOKEN&project=$PROJNAME&md5=1" -O coverity_tool.md5.new
+
+ if ! cmp -s coverity_tool.md5 coverity_tool.md5.new; then
+ # out of date md5 or no md5: download new build tool
+ # blow away the old build tool
+ echo "Downloading coverity build tools..."
+ rm -rf coverity_tool coverity_tool.tgz
+ wget https://scan.coverity.com/download/linux64 --post-data "token=$PROJTOKEN&project=$PROJNAME" -O coverity_tool.tgz
+ if ! (cat coverity_tool.md5.new; echo " coverity_tool.tgz") | md5sum -c --status; then
+ echo "Downloaded tarball didn't match md5sum!"
+ exit 1
+ fi
+ # extract the new one, keeping it corralled in a 'coverity_tool' directory
+ echo "Unpacking coverity build tools..."
+ mkdir -p coverity_tool
+ cd coverity_tool
+ tar xf ../coverity_tool.tgz
+ cd ..
+ mv coverity_tool.md5.new coverity_tool.md5
+ fi
+
+ rm -f coverity_tool.md5.new
+}
+
+
+# Check user-provided environment variables and arguments
+DRYRUN=no
+UPDATE_ONLY=no
+DOCKER=no
+
+while [ "$#" -ge 1 ]; do
+ case "$1" in
+ --dry-run)
+ shift
+ DRYRUN=yes
+ ;;
+ --update-tools-only)
+ shift
+ UPDATE_ONLY=yes
+ ;;
+ --version)
+ shift
+ if [ $# -eq 0 ]; then
+ echo "--version needs an argument"
+ exit 1
+ fi
+ VERSION="$1"
+ shift
+ ;;
+ --description)
+ shift
+ if [ $# -eq 0 ]; then
+ echo "--description needs an argument"
+ exit 1
+ fi
+ DESCRIPTION="$1"
+ shift
+ ;;
+ --tokenfile)
+ shift
+ if [ $# -eq 0 ]; then
+ echo "--tokenfile needs an argument"
+ exit 1
+ fi
+ COVERITY_TOKEN="$(cat "$1")"
+ shift
+ ;;
+ --srcdir)
+ shift
+ if [ $# -eq 0 ]; then
+ echo "--srcdir needs an argument"
+ exit 1
+ fi
+ SRCDIR="$1"
+ shift
+ ;;
+ --results-tarball)
+ shift
+ if [ $# -eq 0 ]; then
+ echo "--results-tarball needs an argument"
+ exit 1
+ fi
+ RESULTSTARBALL="$1"
+ shift
+ ;;
+ --src-tarball)
+ shift
+ if [ $# -eq 0 ]; then
+ echo "--src-tarball needs an argument"
+ exit 1
+ fi
+ SRCTARBALL="$1"
+ shift
+ ;;
+ --docker)
+ DOCKER=yes
+ shift
+ ;;
+ *)
+ echo "Unexpected argument '$1'"
+ exit 1
+ ;;
+ esac
+done
+
+if [ -z "$COVERITY_TOKEN" ]; then
+ echo "COVERITY_TOKEN environment variable not set"
+ exit 1
+fi
+
+if [ -z "$COVERITY_BUILD_CMD" ]; then
+ NPROC=$(nproc)
+ COVERITY_BUILD_CMD="make -j$NPROC"
+ echo "COVERITY_BUILD_CMD: using default '$COVERITY_BUILD_CMD'"
+fi
+
+if [ -z "$COVERITY_TOOL_BASE" ]; then
+ echo "COVERITY_TOOL_BASE: using default /tmp/coverity-tools"
+ COVERITY_TOOL_BASE=/tmp/coverity-tools
+fi
+
+if [ -z "$SRCDIR" ]; then
+ SRCDIR="$PWD"
+fi
+
+PROJTOKEN="$COVERITY_TOKEN"
+PROJNAME=QEMU
+TARBALL=cov-int.tar.xz
+
+if [ "$UPDATE_ONLY" = yes ] && [ "$DOCKER" = yes ]; then
+ echo "Combining --docker and --update-only is not supported"
+ exit 1
+fi
+
+if [ "$UPDATE_ONLY" = yes ]; then
+ # Just do the tools update; we don't need to check whether
+ # we are in a source tree or have upload rights for this,
+ # so do it before some of the command line and source tree checks.
+ update_coverity_tools
+ exit 0
+fi
+
+if [ ! -e "$SRCDIR" ]; then
+ mkdir "$SRCDIR"
+fi
+
+cd "$SRCDIR"
+
+if [ ! -z "$SRCTARBALL" ]; then
+ echo "Untarring source tarball into $SRCDIR..."
+ tar xvf "$SRCTARBALL"
+fi
+
+echo "Checking this is a QEMU source tree..."
+if ! [ -e "$SRCDIR/VERSION" ]; then
+ echo "Not in a QEMU source tree?"
+ exit 1
+fi
+
+# Fill in defaults used by the non-update-only process
+if [ -z "$VERSION" ]; then
+ VERSION="$(git describe --always HEAD)"
+fi
+
+if [ -z "$DESCRIPTION" ]; then
+ DESCRIPTION="$(git rev-parse HEAD)"
+fi
+
+if [ -z "$COVERITY_EMAIL" ]; then
+ COVERITY_EMAIL="$(git config user.email)"
+fi
+
+# Run ourselves inside docker if that's what the user wants
+if [ "$DOCKER" = yes ]; then
+ # build docker container including the coverity-scan tools
+ # Put the Coverity token into a temporary file that only
+ # we have read access to, and then pass it to docker build
+ # using --secret. This requires at least Docker 18.09.
+ # Mostly what we are trying to do here is ensure we don't leak
+ # the token into the Docker image.
+ umask 077
+ SECRETDIR=$(mktemp -d)
+ if [ -z "$SECRETDIR" ]; then
+ echo "Failed to create temporary directory"
+ exit 1
+ fi
+ trap 'rm -rf "$SECRETDIR"' INT TERM EXIT
+ echo "Created temporary directory $SECRETDIR"
+ SECRET="$SECRETDIR/token"
+ echo "$COVERITY_TOKEN" > "$SECRET"
+ echo "Building docker container..."
+ # TODO: This re-downloads the tools every time, rather than
+ # caching and reusing the image produced with the downloaded tools.
+ # Not sure why.
+ # TODO: how do you get 'docker build' to print the output of the
+ # commands it is running to its stdout? This would be useful for debug.
+ DOCKER_BUILDKIT=1 docker build -t coverity-scanner \
+ --secret id=coverity.token,src="$SECRET" \
+ -f scripts/coverity-scan/coverity-scan.docker \
+ scripts/coverity-scan
+ echo "Archiving sources to be analyzed..."
+ ./scripts/archive-source.sh "$SECRETDIR/qemu-sources.tgz"
+ if [ "$DRYRUN" = yes ]; then
+ DRYRUNARG=--dry-run
+ fi
+ echo "Running scanner..."
+ # If we need to capture the output tarball, get the inner run to
+ # save it to the secrets directory so we can copy it out before the
+ # directory is cleaned up.
+ if [ ! -z "$RESULTSTARBALL" ]; then
+ RTARGS="--results-tarball /work/cov-int.tar.xz"
+ else
+ RTARGS=""
+ fi
+ # Arrange for this docker run to get access to the sources with -v.
+ # We pass through all the configuration from the outer script to the inner.
+ export COVERITY_EMAIL COVERITY_BUILD_CMD
+ docker run -it --env COVERITY_EMAIL --env COVERITY_BUILD_CMD \
+ -v "$SECRETDIR:/work" coverity-scanner \
+ ./run-coverity-scan --version "$VERSION" \
+ --description "$DESCRIPTION" $DRYRUNARG --tokenfile /work/token \
+ --srcdir /qemu --src-tarball /work/qemu-sources.tgz $RTARGS
+ if [ ! -z "$RESULTSTARBALL" ]; then
+ echo "Copying results tarball to $RESULTSTARBALL..."
+ cp "$SECRETDIR/cov-int.tar.xz" "$RESULTSTARBALL"
+ fi
+ echo "Docker work complete."
+ exit 0
+fi
+
+# Otherwise, continue with the full build and upload process.
+
+check_upload_permissions
+
+update_coverity_tools
+
+TOOLBIN="$(cd "$COVERITY_TOOL_BASE" && echo $PWD/coverity_tool/cov-analysis-*/bin)"
+
+if ! test -x "$TOOLBIN/cov-build"; then
+ echo "Couldn't find cov-build in the coverity build-tool directory??"
+ exit 1
+fi
+
+export PATH="$TOOLBIN:$PATH"
+
+cd "$SRCDIR"
+
+echo "Doing make distclean..."
+make distclean
+
+echo "Configuring..."
+# We configure with a fixed set of enables here to ensure that we don't
+# accidentally reduce the scope of the analysis by doing the build on
+# the system that's missing a dependency that we need to build part of
+# the codebase.
+./configure --disable-modules --enable-sdl --enable-gtk \
+ --enable-opengl --enable-vte --enable-gnutls \
+ --enable-nettle --enable-curses --enable-curl \
+ --audio-drv-list=oss,alsa,sdl,pa --enable-virtfs \
+ --enable-vnc --enable-vnc-sasl --enable-vnc-jpeg --enable-vnc-png \
+ --enable-xen --enable-brlapi \
+ --enable-linux-aio --enable-attr \
+ --enable-cap-ng --enable-trace-backends=log --enable-spice --enable-rbd \
+ --enable-xfsctl --enable-libusb --enable-usb-redir \
+ --enable-libiscsi --enable-libnfs --enable-seccomp \
+ --enable-tpm --enable-libssh --enable-lzo --enable-snappy --enable-bzip2 \
+ --enable-numa --enable-rdma --enable-smartcard --enable-virglrenderer \
+ --enable-mpath --enable-libxml2 --enable-glusterfs \
+ --enable-virtfs --enable-zstd
+
+echo "Making libqemustub.a..."
+make libqemustub.a
+
+echo "Running cov-build..."
+rm -rf cov-int
+mkdir cov-int
+cov-build --dir cov-int $COVERITY_BUILD_CMD
+
+echo "Creating results tarball..."
+tar cvf - cov-int | xz > "$TARBALL"
+
+if [ ! -z "$RESULTSTARBALL" ]; then
+ echo "Copying results tarball to $RESULTSTARBALL..."
+ cp "$TARBALL" "$RESULTSTARBALL"
+fi
+
+echo "Uploading results tarball..."
+
+if [ "$DRYRUN" = yes ]; then
+ echo "Dry run only, not uploading $TARBALL"
+ exit 0
+fi
+
+curl --form token="$PROJTOKEN" --form email="$COVERITY_EMAIL" \
+ --form file=@"$TARBALL" --form version="$VERSION" \
+ --form description="$DESCRIPTION" \
+ https://scan.coverity.com/builds?project="$PROJNAME"
+
+echo "Done."
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index af470eb321..030b5c8691 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -71,6 +71,8 @@ Output selection (mutually exclusive):
DOC: sections. May be specified multiple times.
Output selection modifiers:
+ -sphinx-version VER Generate rST syntax for the specified Sphinx version.
+ Only works with reStructuredTextFormat.
-no-doc-sections Do not output DOC: sections.
-enable-lineno Enable output of #define LINENO lines. Only works with
reStructuredText format.
@@ -286,6 +288,7 @@ use constant {
};
my $output_selection = OUTPUT_ALL;
my $show_not_found = 0; # No longer used
+my $sphinx_version = "0.0"; # if not specified, assume old
my @export_file_list;
@@ -436,6 +439,8 @@ while ($ARGV[0] =~ m/^--?(.*)/) {
$enable_lineno = 1;
} elsif ($cmd eq 'show-not-found') {
$show_not_found = 1; # A no-op but don't fail
+ } elsif ($cmd eq 'sphinx-version') {
+ $sphinx_version = shift @ARGV;
} else {
# Unknown argument
usage();
@@ -853,7 +858,7 @@ sub output_function_rst(%) {
if ($type =~ m/([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)/) {
# pointer-to-function
- print $1 . $parameter . ") (" . $2;
+ print $1 . $parameter . ") (" . $2 . ")";
} else {
print $type . " " . $parameter;
}
@@ -963,7 +968,16 @@ sub output_struct_rst(%) {
my $oldprefix = $lineprefix;
my $name = $args{'type'} . " " . $args{'struct'};
- print "\n\n.. c:type:: " . $name . "\n\n";
+ # Sphinx 3.0 and up will emit warnings for "c:type:: struct Foo".
+ # It wants to see "c:struct:: Foo" (and will add the word 'struct' in
+ # the rendered output).
+ if ((split(/\./, $sphinx_version))[0] >= 3) {
+ my $sname = $name;
+ $sname =~ s/^struct //;
+ print "\n\n.. c:struct:: " . $sname . "\n\n";
+ } else {
+ print "\n\n.. c:type:: " . $name . "\n\n";
+ }
print_lineno($declaration_start_line);
$lineprefix = " ";
output_highlight_rst($args{'purpose'});
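
As an example of the effect of the new -sphinx-version handling (the struct name is purely illustrative), a kernel-doc comment for "struct QEMUTimer" would now be emitted as

    .. c:struct:: QEMUTimer

when kernel-doc is invoked with -sphinx-version 3.0 or later, and as

    .. c:type:: struct QEMUTimer

for older Sphinx versions, matching the behaviour described in the comment above.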