author     Peter Maydell <peter.maydell@linaro.org>   2020-06-12 23:06:22 +0100
committer  Peter Maydell <peter.maydell@linaro.org>   2020-06-12 23:06:22 +0100
commit     7d3660e79830a069f1848bb4fa1cdf8f666424fb (patch)
tree       5651ddf02414086c31d0bfca7713e1800d4f0fc8 /scripts
parent     9e3903136d9acde2fb2dd9e967ba928050a6cb4a (diff)
parent     3575b0aea983ad57804c9af739ed8ff7bc168393 (diff)
Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging
* Miscellaneous fixes and feature enablement (many)
* SEV refactoring (David)
* Hyper-V initial support (Jon)
* i386 TCG fixes (x87 and SSE, Joseph)
* vmport cleanup and improvements (Philippe, Liran)
* Use-after-free with vCPU hot-unplug (Nengyuan)
* run-coverity-scan improvements (myself)
* Record/replay fixes (Pavel)
* -machine kernel_irqchip=split improvements for INTx (Peter)
* Code cleanups (Philippe)
* Crash and security fixes (PJP)
* HVF cleanups (Roman)

# gpg: Signature made Fri 12 Jun 2020 16:57:04 BST
# gpg:                using RSA key F13338574B662389866C7682BFFBD25F78C7AE83
# gpg:                issuer "pbonzini@redhat.com"
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>" [full]
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>" [full]
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4 E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C 7682 BFFB D25F 78C7 AE83

* remotes/bonzini/tags/for-upstream: (116 commits)
  target/i386: Remove obsolete TODO file
  stubs: move Xen stubs to accel/
  replay: fix replay shutdown for console mode
  exec/cpu-common: Move MUSB specific typedefs to 'hw/usb/hcd-musb.h'
  hw/usb: Move device-specific declarations to new 'hcd-musb.h' header
  exec/memory: Remove unused MemoryRegionMmio type
  checkpatch: reversed logic with acpi test checks
  target/i386: sev: Unify SEVState and SevGuestState
  target/i386: sev: Remove redundant handle field
  target/i386: sev: Remove redundant policy field
  target/i386: sev: Remove redundant cbitpos and reduced_phys_bits fields
  target/i386: sev: Partial cleanup to sev_state global
  target/i386: sev: Embed SEVState in SevGuestState
  target/i386: sev: Rename QSevGuestInfo
  target/i386: sev: Move local structure definitions into .c file
  target/i386: sev: Remove unused QSevGuestInfoClass
  xen: fix build without pci passthrough
  i386: hvf: Drop HVFX86EmulatorState
  i386: hvf: Move mmio_buf into CPUX86State
  i386: hvf: Move lazy_flags into CPUX86State
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

# Conflicts:
#   hw/i386/acpi-build.c
Diffstat (limited to 'scripts')
-rwxr-xr-x  scripts/checkpatch.pl                         2
-rw-r--r--  scripts/coverity-scan/coverity-scan.docker    3
-rwxr-xr-x  scripts/coverity-scan/run-coverity-scan     139
3 files changed, 89 insertions, 55 deletions
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 0ba213e9f2..2d2e922d89 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -1267,7 +1267,7 @@ sub checkfilename {
# files and when changing tests.
if ($name =~ m#^tests/data/acpi/# and not $name =~ m#^\.sh$#) {
$$acpi_testexpected = $name;
- } elsif ($name =~ m#^tests/qtest/bios-tables-test-allowed-diff.h$#) {
+ } elsif ($name !~ m#^tests/qtest/bios-tables-test-allowed-diff.h$#) {
$$acpi_nontestexpected = $name;
}
if (defined $$acpi_testexpected and defined $$acpi_nontestexpected) {
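
Note: the checkpatch change above is the "reversed logic with acpi test
checks" fix from the commit list. With '=~', only the allowed-diff header
itself was ever recorded as a non-test-expected file; with '!~', every
changed file other than tests/qtest/bios-tables-test-allowed-diff.h (and
other than the expected ACPI data under tests/data/acpi/) is recorded, so
mixing ACPI table updates with unrelated changes is warned about again.
A minimal shell sketch of the intended classification (file names below
are illustrative, this is not checkpatch itself):

    #!/bin/sh
    # Mirror of the fixed classification logic, for illustration only.
    classify() {
        case "$1" in
        tests/data/acpi/*)                            # expected ACPI data
            echo "$1: acpi_testexpected" ;;
        tests/qtest/bios-tables-test-allowed-diff.h)  # the one exempt file
            echo "$1: ignored" ;;
        *)                                            # everything else
            echo "$1: acpi_nontestexpected" ;;
        esac
    }
    classify tests/data/acpi/q35/DSDT
    classify tests/qtest/bios-tables-test-allowed-diff.h
    classify hw/i386/acpi-build.c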
diff --git a/scripts/coverity-scan/coverity-scan.docker b/scripts/coverity-scan/coverity-scan.docker
index ad4d64c0f8..018c03de6d 100644
--- a/scripts/coverity-scan/coverity-scan.docker
+++ b/scripts/coverity-scan/coverity-scan.docker
@@ -125,5 +125,6 @@ RUN dnf install -y $PACKAGES
RUN rpm -q $PACKAGES | sort > /packages.txt
ENV PATH $PATH:/usr/libexec/python3-sphinx/
ENV COVERITY_TOOL_BASE=/coverity-tools
+COPY coverity_tool.tgz coverity_tool.tgz
+RUN mkdir -p /coverity-tools/coverity_tool && cd /coverity-tools/coverity_tool && tar xf /coverity_tool.tgz
COPY run-coverity-scan run-coverity-scan
-RUN --mount=type=secret,id=coverity.token,required ./run-coverity-scan --update-tools-only --tokenfile /run/secrets/coverity.token
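
Note: with the two added lines the image build no longer needs BuildKit
secrets at all; the pre-downloaded coverity_tool.tgz is COPYed into the
image and unpacked at build time, and the Coverity token is only supplied
later, at run time. A rough manual equivalent of the build the script
drives through tests/docker/docker.py (staging the tarball by hand here is
an approximation of what --extra-files does):

    # Stage the tarball next to the Dockerfile, then build as usual.
    cp "$COVERITY_TOOL_BASE/coverity_tool.tgz" scripts/coverity-scan/
    docker build -t coverity-scanner \
        -f scripts/coverity-scan/coverity-scan.docker \
        scripts/coverity-scan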
diff --git a/scripts/coverity-scan/run-coverity-scan b/scripts/coverity-scan/run-coverity-scan
index 2e067ef5cf..03a791dec9 100755
--- a/scripts/coverity-scan/run-coverity-scan
+++ b/scripts/coverity-scan/run-coverity-scan
@@ -29,8 +29,11 @@
# Command line options:
# --dry-run : run the tools, but don't actually do the upload
-# --docker : create and work inside a docker container
+# --docker : create and work inside a container
+# --docker-engine : specify the container engine to use (docker/podman/auto);
+# implies --docker
# --update-tools-only : update the cached copy of the tools, but don't run them
+# --no-update-tools : do not update the cached copy of the tools
# --tokenfile : file to read Coverity token from
# --version ver : specify version being analyzed (default: ask git)
# --description desc : specify description of this version (default: ask git)
@@ -41,9 +44,10 @@
# is intended mainly for internal use by the Docker support
#
# User-specifiable environment variables:
-# COVERITY_TOKEN -- Coverity token
+# COVERITY_TOKEN -- Coverity token (default: looks at your
+# coverity.token config)
# COVERITY_EMAIL -- the email address to use for uploads (default:
-# looks at your git user.email config)
+# looks at your git coverity.email or user.email config)
# COVERITY_BUILD_CMD -- make command (default: 'make -jN' where N is
# number of CPUs as determined by 'nproc')
# COVERITY_TOOL_BASE -- set to directory to put coverity tools
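
Note: with the new fallbacks a one-time git configuration is enough; for
example (the token value below is a placeholder):

    # The script reads these when COVERITY_TOKEN / COVERITY_EMAIL are unset.
    git config coverity.token 0123456789abcdef
    git config coverity.email you@example.com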
@@ -58,11 +62,11 @@ check_upload_permissions() {
# with status 1 if the check failed (usually a bad token);
# will exit the script with status 0 if the check indicated that we
# can't upload yet (ie we are at quota)
- # Assumes that PROJTOKEN, PROJNAME and DRYRUN have been initialized.
+ # Assumes that COVERITY_TOKEN, PROJNAME and DRYRUN have been initialized.
echo "Checking upload permissions..."
- if ! up_perm="$(wget https://scan.coverity.com/api/upload_permitted --post-data "token=$PROJTOKEN&project=$PROJNAME" -q -O -)"; then
+ if ! up_perm="$(wget https://scan.coverity.com/api/upload_permitted --post-data "token=$COVERITY_TOKEN&project=$PROJNAME" -q -O -)"; then
echo "Coverity Scan API access denied: bad token?"
exit 1
fi
@@ -91,43 +95,62 @@ check_upload_permissions() {
}
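
Note: run on its own, the permission probe is just a POST of the token and
project name; checking the reply looks roughly like this (the exact JSON
field names are an assumption, not taken from this patch):

    up_perm=$(wget https://scan.coverity.com/api/upload_permitted \
        --post-data "token=$COVERITY_TOKEN&project=QEMU" -q -O -) || exit 1
    # Assumed reply shape: {"upload_permitted": true, ...}
    echo "$up_perm" | grep -q '"upload_permitted": *true' && echo "OK to upload"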
+build_docker_image() {
+ # build docker container including the coverity-scan tools
+ echo "Building docker container..."
+ # TODO: This re-unpacks the tools every time, rather than caching
+ # and reusing the image produced by the COPY of the .tgz file.
+ # Not sure why.
+ tests/docker/docker.py --engine ${DOCKER_ENGINE} build \
+ -t coverity-scanner -f scripts/coverity-scan/coverity-scan.docker \
+ --extra-files scripts/coverity-scan/run-coverity-scan \
+ "$COVERITY_TOOL_BASE"/coverity_tool.tgz
+}
+
update_coverity_tools () {
# Check for whether we need to download the Coverity tools
# (either because we don't have a copy, or because it's out of date)
- # Assumes that COVERITY_TOOL_BASE, PROJTOKEN and PROJNAME are set.
+ # Assumes that COVERITY_TOOL_BASE, COVERITY_TOKEN and PROJNAME are set.
mkdir -p "$COVERITY_TOOL_BASE"
cd "$COVERITY_TOOL_BASE"
echo "Checking for new version of coverity build tools..."
- wget https://scan.coverity.com/download/linux64 --post-data "token=$PROJTOKEN&project=$PROJNAME&md5=1" -O coverity_tool.md5.new
+ wget https://scan.coverity.com/download/linux64 --post-data "token=$COVERITY_TOKEN&project=$PROJNAME&md5=1" -O coverity_tool.md5.new
if ! cmp -s coverity_tool.md5 coverity_tool.md5.new; then
# out of date md5 or no md5: download new build tool
# blow away the old build tool
echo "Downloading coverity build tools..."
rm -rf coverity_tool coverity_tool.tgz
- wget https://scan.coverity.com/download/linux64 --post-data "token=$PROJTOKEN&project=$PROJNAME" -O coverity_tool.tgz
+ wget https://scan.coverity.com/download/linux64 --post-data "token=$COVERITY_TOKEN&project=$PROJNAME" -O coverity_tool.tgz
if ! (cat coverity_tool.md5.new; echo " coverity_tool.tgz") | md5sum -c --status; then
echo "Downloaded tarball didn't match md5sum!"
exit 1
fi
- # extract the new one, keeping it corralled in a 'coverity_tool' directory
- echo "Unpacking coverity build tools..."
- mkdir -p coverity_tool
- cd coverity_tool
- tar xf ../coverity_tool.tgz
- cd ..
- mv coverity_tool.md5.new coverity_tool.md5
- fi
+ if [ "$DOCKER" != yes ]; then
+ # extract the new one, keeping it corralled in a 'coverity_tool' directory
+ echo "Unpacking coverity build tools..."
+ mkdir -p coverity_tool
+ cd coverity_tool
+ tar xf ../coverity_tool.tgz
+ cd ..
+ mv coverity_tool.md5.new coverity_tool.md5
+ fi
+ fi
rm -f coverity_tool.md5.new
+ cd "$SRCDIR"
+
+ if [ "$DOCKER" = yes ]; then
+ build_docker_image
+ fi
}
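
Note: the tarball verification relies on the "checksum  filename" input
format of md5sum -c; the server's .md5 file contains only the bare
checksum (and, judging by the concatenation, no trailing newline), so the
file name is appended before piping. In isolation:

    # md5sum -c expects "<md5> <filename>" lines on stdin.
    (cat coverity_tool.md5.new; echo " coverity_tool.tgz") \
        | md5sum -c --status || echo "Downloaded tarball didn't match md5sum!"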
# Check user-provided environment variables and arguments
DRYRUN=no
-UPDATE_ONLY=no
+UPDATE=yes
DOCKER=no
while [ "$#" -ge 1 ]; do
@@ -136,9 +159,13 @@ while [ "$#" -ge 1 ]; do
shift
DRYRUN=yes
;;
+ --no-update-tools)
+ shift
+ UPDATE=no
+ ;;
--update-tools-only)
shift
- UPDATE_ONLY=yes
+ UPDATE=only
;;
--version)
shift
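
Note: UPDATE now has three states (yes/no/only) instead of the old
UPDATE_ONLY boolean; typical invocations:

    ./run-coverity-scan                       # default: refresh tools, then build and upload
    ./run-coverity-scan --update-tools-only   # only refresh the cached tools, then exit
    ./run-coverity-scan --no-update-tools     # skip the refresh (the outer script passes
                                              # this to the run inside the container)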
@@ -196,6 +223,17 @@ while [ "$#" -ge 1 ]; do
;;
--docker)
DOCKER=yes
+ DOCKER_ENGINE=auto
+ shift
+ ;;
+ --docker-engine)
+ shift
+ if [ $# -eq 0 ]; then
+ echo "--docker-engine needs an argument"
+ exit 1
+ fi
+ DOCKER=yes
+ DOCKER_ENGINE="$1"
shift
;;
*)
@@ -206,6 +244,9 @@ while [ "$#" -ge 1 ]; do
done
if [ -z "$COVERITY_TOKEN" ]; then
+ COVERITY_TOKEN="$(git config coverity.token)"
+fi
+if [ -z "$COVERITY_TOKEN" ]; then
echo "COVERITY_TOKEN environment variable not set"
exit 1
fi
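
Note: --docker-engine hands the engine choice to tests/docker/docker.py
and implies --docker; for example:

    ./run-coverity-scan --docker                    # auto-detect docker or podman
    ./run-coverity-scan --docker-engine podman      # force podman (implies --docker)
    ./run-coverity-scan --docker-engine docker --dry-run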
@@ -225,19 +266,19 @@ if [ -z "$SRCDIR" ]; then
SRCDIR="$PWD"
fi
-PROJTOKEN="$COVERITY_TOKEN"
PROJNAME=QEMU
TARBALL=cov-int.tar.xz
-if [ "$UPDATE_ONLY" = yes ] && [ "$DOCKER" = yes ]; then
- echo "Combining --docker and --update-only is not supported"
- exit 1
-fi
-
-if [ "$UPDATE_ONLY" = yes ]; then
+if [ "$UPDATE" = only ]; then
# Just do the tools update; we don't need to check whether
# we are in a source tree or have upload rights for this,
# so do it before some of the command line and source tree checks.
+
+ if [ "$DOCKER" = yes ] && [ ! -z "$SRCTARBALL" ]; then
+ echo --update-tools-only --docker is incompatible with --src-tarball.
+ exit 1
+ fi
+
update_coverity_tools
exit 0
fi
@@ -269,17 +310,26 @@ if [ -z "$DESCRIPTION" ]; then
fi
if [ -z "$COVERITY_EMAIL" ]; then
+ COVERITY_EMAIL="$(git config coverity.email)"
+fi
+if [ -z "$COVERITY_EMAIL" ]; then
COVERITY_EMAIL="$(git config user.email)"
fi
+# Otherwise, continue with the full build and upload process.
+
+check_upload_permissions
+
+if [ "$UPDATE" != no ]; then
+ update_coverity_tools
+fi
+
# Run ourselves inside docker if that's what the user wants
if [ "$DOCKER" = yes ]; then
- # build docker container including the coverity-scan tools
# Put the Coverity token into a temporary file that only
# we have read access to, and then pass it to docker build
- # using --secret. This requires at least Docker 18.09.
- # Mostly what we are trying to do here is ensure we don't leak
- # the token into the Docker image.
+ # using a volume. A volume is enough for the token not to
+ # leak into the Docker image.
umask 077
SECRETDIR=$(mktemp -d)
if [ -z "$SECRETDIR" ]; then
@@ -290,38 +340,27 @@ if [ "$DOCKER" = yes ]; then
echo "Created temporary directory $SECRETDIR"
SECRET="$SECRETDIR/token"
echo "$COVERITY_TOKEN" > "$SECRET"
- echo "Building docker container..."
- # TODO: This re-downloads the tools every time, rather than
- # caching and reusing the image produced with the downloaded tools.
- # Not sure why.
- # TODO: how do you get 'docker build' to print the output of the
- # commands it is running to its stdout? This would be useful for debug.
- DOCKER_BUILDKIT=1 docker build -t coverity-scanner \
- --secret id=coverity.token,src="$SECRET" \
- -f scripts/coverity-scan/coverity-scan.docker \
- scripts/coverity-scan
echo "Archiving sources to be analyzed..."
./scripts/archive-source.sh "$SECRETDIR/qemu-sources.tgz"
+ ARGS="--no-update-tools"
if [ "$DRYRUN" = yes ]; then
- DRYRUNARG=--dry-run
+ ARGS="$ARGS --dry-run"
fi
echo "Running scanner..."
# If we need to capture the output tarball, get the inner run to
# save it to the secrets directory so we can copy it out before the
# directory is cleaned up.
if [ ! -z "$RESULTSTARBALL" ]; then
- RTARGS="--results-tarball /work/cov-int.tar.xz"
- else
- RTARGS=""
+ ARGS="$ARGS --results-tarball /work/cov-int.tar.xz"
fi
# Arrange for this docker run to get access to the sources with -v.
# We pass through all the configuration from the outer script to the inner.
export COVERITY_EMAIL COVERITY_BUILD_CMD
- docker run -it --env COVERITY_EMAIL --env COVERITY_BUILD_CMD \
+ tests/docker/docker.py run -it --env COVERITY_EMAIL --env COVERITY_BUILD_CMD \
-v "$SECRETDIR:/work" coverity-scanner \
./run-coverity-scan --version "$VERSION" \
- --description "$DESCRIPTION" $DRYRUNARG --tokenfile /work/token \
- --srcdir /qemu --src-tarball /work/qemu-sources.tgz $RTARGS
+ --description "$DESCRIPTION" $ARGS --tokenfile /work/token \
+ --srcdir /qemu --src-tarball /work/qemu-sources.tgz
if [ ! -z "$RESULTSTARBALL" ]; then
echo "Copying results tarball to $RESULTSTARBALL..."
cp "$SECRETDIR/cov-int.tar.xz" "$RESULTSTARBALL"
@@ -330,12 +369,6 @@ if [ "$DOCKER" = yes ]; then
exit 0
fi
-# Otherwise, continue with the full build and upload process.
-
-check_upload_permissions
-
-update_coverity_tools
-
TOOLBIN="$(cd "$COVERITY_TOOL_BASE" && echo $PWD/coverity_tool/cov-analysis-*/bin)"
if ! test -x "$TOOLBIN/cov-build"; then
@@ -393,7 +426,7 @@ if [ "$DRYRUN" = yes ]; then
exit 0
fi
-curl --form token="$PROJTOKEN" --form email="$COVERITY_EMAIL" \
+curl --form token="$COVERITY_TOKEN" --form email="$COVERITY_EMAIL" \
--form file=@"$TARBALL" --form version="$VERSION" \
--form description="$DESCRIPTION" \
https://scan.coverity.com/builds?project="$PROJNAME"