about summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--docs/system/arm/emulation.rst2
-rw-r--r--hmp-commands.hx6
-rw-r--r--hw/adc/zynq-xadc.c4
-rw-r--r--hw/arm/boot.c2
-rw-r--r--hw/arm/musicpal.c2
-rw-r--r--hw/arm/virt.c4
-rw-r--r--hw/core/machine.c4
-rw-r--r--hw/display/artist.c168
-rw-r--r--hw/dma/xilinx_axidma.c2
-rw-r--r--hw/dma/xlnx_csu_dma.c2
-rw-r--r--hw/intc/arm_gicv3_common.c5
-rw-r--r--hw/intc/arm_gicv3_cpuif.c225
-rw-r--r--hw/intc/arm_gicv3_kvm.c16
-rw-r--r--hw/m68k/mcf5206.c2
-rw-r--r--hw/m68k/mcf5208.c2
-rw-r--r--hw/net/can/xlnx-zynqmp-can.c2
-rw-r--r--hw/net/fsl_etsec/etsec.c2
-rw-r--r--hw/net/lan9118.c2
-rw-r--r--hw/net/tulip.c4
-rw-r--r--hw/rtc/exynos4210_rtc.c4
-rw-r--r--hw/timer/allwinner-a10-pit.c2
-rw-r--r--hw/timer/altera_timer.c2
-rw-r--r--hw/timer/arm_timer.c2
-rw-r--r--hw/timer/digic-timer.c2
-rw-r--r--hw/timer/etraxfs_timer.c6
-rw-r--r--hw/timer/exynos4210_mct.c6
-rw-r--r--hw/timer/exynos4210_pwm.c2
-rw-r--r--hw/timer/grlib_gptimer.c2
-rw-r--r--hw/timer/imx_epit.c4
-rw-r--r--hw/timer/imx_gpt.c2
-rw-r--r--hw/timer/mss-timer.c2
-rw-r--r--hw/timer/sh_timer.c2
-rw-r--r--hw/timer/slavio_timer.c2
-rw-r--r--hw/timer/xilinx_timer.c2
-rw-r--r--include/hw/adc/zynq-xadc.h3
-rw-r--r--include/hw/intc/arm_gicv3_common.h8
-rw-r--r--include/hw/ptimer.h16
-rw-r--r--meson.build16
-rw-r--r--meson_options.txt2
-rw-r--r--net/clients.h11
-rw-r--r--net/meson.build7
-rw-r--r--net/net.c10
-rw-r--r--net/vmnet-bridged.m152
-rw-r--r--net/vmnet-common.m378
-rw-r--r--net/vmnet-host.c128
-rw-r--r--net/vmnet-shared.c114
-rw-r--r--net/vmnet_int.h63
-rw-r--r--pc-bios/hppa-firmware.imgbin701964 -> 719040 bytes
-rw-r--r--qapi/net.json133
-rw-r--r--qemu-options.hx25
m---------roms/seabios-hppa0
-rw-r--r--scripts/meson-buildoptions.sh1
-rw-r--r--target/arm/cpregs.h24
-rw-r--r--target/arm/cpu.c11
-rw-r--r--target/arm/cpu.h76
-rw-r--r--target/arm/cpu64.c30
-rw-r--r--target/arm/cpu_tcg.c6
-rw-r--r--target/arm/helper.c348
-rw-r--r--target/arm/internals.h11
-rw-r--r--target/arm/kvm64.c12
-rw-r--r--target/arm/op_helper.c9
-rw-r--r--target/arm/translate-a64.c36
-rw-r--r--target/arm/translate-a64.h9
-rw-r--r--tests/unit/ptimer-test.c6
64 files changed, 1866 insertions, 277 deletions
diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst
index 8ed466bf68..3e95bba0d2 100644
--- a/docs/system/arm/emulation.rst
+++ b/docs/system/arm/emulation.rst
@@ -31,6 +31,7 @@ the following architecture extensions:
- FEAT_FlagM2 (Enhancements to flag manipulation instructions)
- FEAT_HPDS (Hierarchical permission disables)
- FEAT_I8MM (AArch64 Int8 matrix multiplication instructions)
+- FEAT_IDST (ID space trap handling)
- FEAT_IESB (Implicit error synchronization event)
- FEAT_JSCVT (JavaScript conversion instructions)
- FEAT_LOR (Limited ordering regions)
@@ -52,6 +53,7 @@ the following architecture extensions:
- FEAT_RAS (Reliability, availability, and serviceability)
- FEAT_RDM (Advanced SIMD rounding double multiply accumulate instructions)
- FEAT_RNG (Random number generator)
+- FEAT_S2FWB (Stage 2 forced Write-Back)
- FEAT_SB (Speculation Barrier)
- FEAT_SEL2 (Secure EL2)
- FEAT_SHA1 (SHA1 instructions)
diff --git a/hmp-commands.hx b/hmp-commands.hx
index 03e6a73d1f..564f1de364 100644
--- a/hmp-commands.hx
+++ b/hmp-commands.hx
@@ -1269,7 +1269,11 @@ ERST
{
.name = "netdev_add",
.args_type = "netdev:O",
- .params = "[user|tap|socket|vde|bridge|hubport|netmap|vhost-user],id=str[,prop=value][,...]",
+ .params = "[user|tap|socket|vde|bridge|hubport|netmap|vhost-user"
+#ifdef CONFIG_VMNET
+ "|vmnet-host|vmnet-shared|vmnet-bridged"
+#endif
+ "],id=str[,prop=value][,...]",
.help = "add host network device",
.cmd = hmp_netdev_add,
.command_completion = netdev_add_completion,
diff --git a/hw/adc/zynq-xadc.c b/hw/adc/zynq-xadc.c
index cfc7bab065..032e19cbd0 100644
--- a/hw/adc/zynq-xadc.c
+++ b/hw/adc/zynq-xadc.c
@@ -86,7 +86,7 @@ static void zynq_xadc_update_ints(ZynqXADCState *s)
s->regs[INT_STS] |= INT_DFIFO_GTH;
}
- qemu_set_irq(s->qemu_irq, !!(s->regs[INT_STS] & ~s->regs[INT_MASK]));
+ qemu_set_irq(s->irq, !!(s->regs[INT_STS] & ~s->regs[INT_MASK]));
}
static void zynq_xadc_reset(DeviceState *d)
@@ -262,7 +262,7 @@ static void zynq_xadc_init(Object *obj)
memory_region_init_io(&s->iomem, obj, &xadc_ops, s, "zynq-xadc",
ZYNQ_XADC_MMIO_SIZE);
sysbus_init_mmio(sbd, &s->iomem);
- sysbus_init_irq(sbd, &s->qemu_irq);
+ sysbus_init_irq(sbd, &s->irq);
}
static const VMStateDescription vmstate_zynq_xadc = {
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
index a47f38dfc9..a8de33fd64 100644
--- a/hw/arm/boot.c
+++ b/hw/arm/boot.c
@@ -761,7 +761,7 @@ static void do_cpu_reset(void *opaque)
env->cp15.scr_el3 |= SCR_ATA;
}
if (cpu_isar_feature(aa64_sve, cpu)) {
- env->cp15.cptr_el[3] |= CPTR_EZ;
+ env->cp15.cptr_el[3] |= R_CPTR_EL3_EZ_MASK;
}
/* AArch64 kernels never boot in secure mode */
assert(!info->secure_boot);
diff --git a/hw/arm/musicpal.c b/hw/arm/musicpal.c
index 7c840fb428..b65c020115 100644
--- a/hw/arm/musicpal.c
+++ b/hw/arm/musicpal.c
@@ -464,7 +464,7 @@ static void mv88w8618_timer_init(SysBusDevice *dev, mv88w8618_timer_state *s,
sysbus_init_irq(dev, &s->irq);
s->freq = freq;
- s->ptimer = ptimer_init(mv88w8618_timer_tick, s, PTIMER_POLICY_DEFAULT);
+ s->ptimer = ptimer_init(mv88w8618_timer_tick, s, PTIMER_POLICY_LEGACY);
}
static uint64_t mv88w8618_pit_read(void *opaque, hwaddr offset,
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 1a45f44435..097238faa7 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -925,8 +925,6 @@ static void create_gpio_keys(char *fdt, DeviceState *pl061_dev,
qemu_fdt_add_subnode(fdt, "/gpio-keys");
qemu_fdt_setprop_string(fdt, "/gpio-keys", "compatible", "gpio-keys");
- qemu_fdt_setprop_cell(fdt, "/gpio-keys", "#size-cells", 0);
- qemu_fdt_setprop_cell(fdt, "/gpio-keys", "#address-cells", 1);
qemu_fdt_add_subnode(fdt, "/gpio-keys/poweroff");
qemu_fdt_setprop_string(fdt, "/gpio-keys/poweroff",
@@ -1195,7 +1193,7 @@ static void virt_flash_fdt(VirtMachineState *vms,
qemu_fdt_setprop_string(ms->fdt, nodename, "secure-status", "okay");
g_free(nodename);
- nodename = g_strdup_printf("/flash@%" PRIx64, flashbase);
+ nodename = g_strdup_printf("/flash@%" PRIx64, flashbase + flashsize);
qemu_fdt_add_subnode(ms->fdt, nodename);
qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "cfi-flash");
qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
diff --git a/hw/core/machine.c b/hw/core/machine.c
index b03d9192ba..bb0dc8f6a9 100644
--- a/hw/core/machine.c
+++ b/hw/core/machine.c
@@ -41,7 +41,9 @@
#include "hw/virtio/virtio-pci.h"
#include "qom/object_interfaces.h"
-GlobalProperty hw_compat_7_0[] = {};
+GlobalProperty hw_compat_7_0[] = {
+ { "arm-gicv3-common", "force-8-bit-prio", "on" },
+};
const size_t hw_compat_7_0_len = G_N_ELEMENTS(hw_compat_7_0);
GlobalProperty hw_compat_6_2[] = {
diff --git a/hw/display/artist.c b/hw/display/artist.c
index 39fc0c4ca5..eadaef0d46 100644
--- a/hw/display/artist.c
+++ b/hw/display/artist.c
@@ -1,7 +1,8 @@
/*
* QEMU HP Artist Emulation
*
- * Copyright (c) 2019 Sven Schnelle <svens@stackframe.org>
+ * Copyright (c) 2019-2022 Sven Schnelle <svens@stackframe.org>
+ * Copyright (c) 2022 Helge Deller <deller@gmx.de>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
*/
@@ -81,9 +82,10 @@ struct ARTISTState {
uint32_t plane_mask;
uint32_t reg_100080;
- uint32_t reg_300200;
- uint32_t reg_300208;
- uint32_t reg_300218;
+ uint32_t horiz_backporch;
+ uint32_t active_lines_low;
+ uint32_t misc_video;
+ uint32_t misc_ctrl;
uint32_t dst_bm_access;
uint32_t src_bm_access;
@@ -98,6 +100,9 @@ struct ARTISTState {
int draw_line_pattern;
};
+/* hardware allows up to 64x64, but we emulate 32x32 only. */
+#define NGLE_MAX_SPRITE_SIZE 32
+
typedef enum {
ARTIST_BUFFER_AP = 1,
ARTIST_BUFFER_OVERLAY = 2,
@@ -135,8 +140,14 @@ typedef enum {
BG_COLOR = 0x118014,
PLANE_MASK = 0x118018,
IMAGE_BITMAP_OP = 0x11801c,
- CURSOR_POS = 0x300100,
- CURSOR_CTRL = 0x300104,
+ CURSOR_POS = 0x300100, /* reg17 */
+ CURSOR_CTRL = 0x300104, /* reg18 */
+ MISC_VIDEO = 0x300218, /* reg21 */
+ MISC_CTRL = 0x300308, /* reg27 */
+ HORIZ_BACKPORCH = 0x300200, /* reg19 */
+ ACTIVE_LINES_LOW = 0x300208,/* reg20 */
+ FIFO1 = 0x300008, /* reg34 */
+ FIFO2 = 0x380008,
} artist_reg_t;
typedef enum {
@@ -174,17 +185,25 @@ static const char *artist_reg_name(uint64_t addr)
REG_NAME(SRC_BM_ACCESS);
REG_NAME(CURSOR_POS);
REG_NAME(CURSOR_CTRL);
+ REG_NAME(HORIZ_BACKPORCH);
+ REG_NAME(ACTIVE_LINES_LOW);
+ REG_NAME(MISC_VIDEO);
+ REG_NAME(MISC_CTRL);
REG_NAME(LINE_XY);
REG_NAME(PATTERN_LINE_START);
REG_NAME(LINE_SIZE);
REG_NAME(LINE_END);
REG_NAME(FONT_WRITE_INCR_Y);
REG_NAME(FONT_WRITE_START);
+ REG_NAME(FIFO1);
+ REG_NAME(FIFO2);
}
return "";
}
#undef REG_NAME
+static void artist_invalidate(void *opaque);
+
/* artist has a fixed line length of 2048 bytes. */
#define ADDR_TO_Y(addr) extract32(addr, 11, 11)
#define ADDR_TO_X(addr) extract32(addr, 0, 11)
@@ -295,19 +314,15 @@ static void artist_rop8(ARTISTState *s, struct vram_buffer *buf,
static void artist_get_cursor_pos(ARTISTState *s, int *x, int *y)
{
/*
- * Don't know whether these magic offset values are configurable via
- * some register. They seem to be the same for all resolutions.
- * The cursor values provided in the registers are:
- * X-value: -295 (for HP-UX 11) and 338 (for HP-UX 10.20) up to 2265
- * Y-value: 1146 down to 0
* The emulated Artist graphic is like a CRX graphic, and as such
* it's usually fixed at 1280x1024 pixels.
- * Because of the maximum Y-value of 1146 you can not choose a higher
- * vertical resolution on HP-UX (unless you disable the mouse).
+ * Other resolutions may work, but no guarantee.
*/
- static int offset = 338;
- int lx;
+ unsigned int hbp_times_vi, horizBackPorch;
+ int16_t xHi, xLo;
+ const int videoInterleave = 4;
+ const int pipelineDelay = 4;
/* ignore if uninitialized */
if (s->cursor_pos == 0) {
@@ -315,16 +330,24 @@ static void artist_get_cursor_pos(ARTISTState *s, int *x, int *y)
return;
}
- lx = artist_get_x(s->cursor_pos);
- if (lx < offset) {
- offset = lx;
- }
- *x = (lx - offset) / 2;
+ /*
+ * Calculate X position based on backporch and interleave values.
+ * Based on code from Xorg X11R6.6
+ */
+ horizBackPorch = ((s->horiz_backporch & 0xff0000) >> 16) +
+ ((s->horiz_backporch & 0xff00) >> 8) + 2;
+ hbp_times_vi = horizBackPorch * videoInterleave;
+ xHi = s->cursor_pos >> 19;
+ *x = ((xHi + pipelineDelay) * videoInterleave) - hbp_times_vi;
- *y = 1146 - artist_get_y(s->cursor_pos);
+ xLo = (s->cursor_pos >> 16) & 0x07;
+ *x += ((xLo - hbp_times_vi) & (videoInterleave - 1)) + 8 - 1;
/* subtract cursor offset from cursor control register */
*x -= (s->cursor_cntrl & 0xf0) >> 4;
+
+ /* Calculate Y position */
+ *y = s->height - artist_get_y(s->cursor_pos);
*y -= (s->cursor_cntrl & 0x0f);
if (*x > s->width) {
@@ -336,10 +359,20 @@ static void artist_get_cursor_pos(ARTISTState *s, int *x, int *y)
}
}
+static inline bool cursor_visible(ARTISTState *s)
+{
+ /* cursor is visible if bit 0x80 is set in cursor_cntrl */
+ return s->cursor_cntrl & 0x80;
+}
+
static void artist_invalidate_cursor(ARTISTState *s)
{
int x, y;
+ if (!cursor_visible(s)) {
+ return;
+ }
+
artist_get_cursor_pos(s, &x, &y);
artist_invalidate_lines(&s->vram_buffer[ARTIST_BUFFER_AP],
y, s->cursor_height);
@@ -876,6 +909,7 @@ static void artist_reg_write(void *opaque, hwaddr addr, uint64_t val,
{
ARTISTState *s = opaque;
int width, height;
+ uint64_t oldval;
trace_artist_reg_write(size, addr, artist_reg_name(addr & ~3ULL), val);
@@ -1025,16 +1059,33 @@ static void artist_reg_write(void *opaque, hwaddr addr, uint64_t val,
combine_write_reg(addr, val, size, &s->transfer_data);
break;
- case 0x300200:
- combine_write_reg(addr, val, size, &s->reg_300200);
+ case HORIZ_BACKPORCH:
+ /* overwrite HP-UX settings to fix X cursor position. */
+ val = (NGLE_MAX_SPRITE_SIZE << 16) + (NGLE_MAX_SPRITE_SIZE << 8);
+ combine_write_reg(addr, val, size, &s->horiz_backporch);
+ break;
+
+ case ACTIVE_LINES_LOW:
+ combine_write_reg(addr, val, size, &s->active_lines_low);
break;
- case 0x300208:
- combine_write_reg(addr, val, size, &s->reg_300208);
+ case MISC_VIDEO:
+ oldval = s->misc_video;
+ combine_write_reg(addr, val, size, &s->misc_video);
+ /* Invalidate and hide screen if graphics signal is turned off. */
+ if (((oldval & 0x0A000000) == 0x0A000000) &&
+ ((val & 0x0A000000) != 0x0A000000)) {
+ artist_invalidate(s);
+ }
+ /* Invalidate and redraw screen if graphics signal is turned back on. */
+ if (((oldval & 0x0A000000) != 0x0A000000) &&
+ ((val & 0x0A000000) == 0x0A000000)) {
+ artist_invalidate(s);
+ }
break;
- case 0x300218:
- combine_write_reg(addr, val, size, &s->reg_300218);
+ case MISC_CTRL:
+ combine_write_reg(addr, val, size, &s->misc_ctrl);
break;
case CURSOR_POS:
@@ -1119,12 +1170,11 @@ static uint64_t artist_reg_read(void *opaque, hwaddr addr, unsigned size)
case 0x100000:
case 0x300000:
case 0x300004:
- case 0x300308:
case 0x380000:
break;
- case 0x300008:
- case 0x380008:
+ case FIFO1:
+ case FIFO2:
/*
* FIFO ready flag. we're not emulating the FIFOs
* so we're always ready
@@ -1132,16 +1182,28 @@ static uint64_t artist_reg_read(void *opaque, hwaddr addr, unsigned size)
val = 0x10;
break;
- case 0x300200:
- val = s->reg_300200;
+ case HORIZ_BACKPORCH:
+ val = s->horiz_backporch;
+ break;
+
+ case ACTIVE_LINES_LOW:
+ val = s->active_lines_low;
+ /* activeLinesLo for cursor is in reg20.b.b0 */
+ val &= ~(0xff << 24);
+ val |= (s->height & 0xff) << 24;
break;
- case 0x300208:
- val = s->reg_300208;
+ case MISC_VIDEO:
+ /* emulate V-blank */
+ s->misc_video ^= 0x00040000;
+ /* activeLinesHi for cursor is in reg21.b.b2 */
+ val = s->misc_video;
+ val &= ~0xff00UL;
+ val |= (s->height & 0xff00);
break;
- case 0x300218:
- val = s->reg_300218;
+ case MISC_CTRL:
+ val = s->misc_ctrl;
break;
case 0x30023c:
@@ -1186,6 +1248,10 @@ static void artist_draw_cursor(ARTISTState *s)
struct vram_buffer *cursor0, *cursor1 , *buf;
int cx, cy, cursor_pos_x, cursor_pos_y;
+ if (!cursor_visible(s)) {
+ return;
+ }
+
cursor0 = &s->vram_buffer[ARTIST_BUFFER_CURSOR1];
cursor1 = &s->vram_buffer[ARTIST_BUFFER_CURSOR2];
buf = &s->vram_buffer[ARTIST_BUFFER_AP];
@@ -1217,6 +1283,12 @@ static void artist_draw_cursor(ARTISTState *s)
}
}
+static bool artist_screen_enabled(ARTISTState *s)
+{
+ /* We could check for (s->misc_ctrl & 0x00800000) too... */
+ return ((s->misc_video & 0x0A000000) == 0x0A000000);
+}
+
static void artist_draw_line(void *opaque, uint8_t *d, const uint8_t *src,
int width, int pitch)
{
@@ -1224,6 +1296,12 @@ static void artist_draw_line(void *opaque, uint8_t *d, const uint8_t *src,
uint32_t *cmap, *data = (uint32_t *)d;
int x;
+ if (!artist_screen_enabled(s)) {
+ /* clear screen */
+ memset(data, 0, s->width * sizeof(uint32_t));
+ return;
+ }
+
cmap = (uint32_t *)(s->vram_buffer[ARTIST_BUFFER_CMAP].data + 0x400);
for (x = 0; x < s->width; x++) {
@@ -1325,11 +1403,10 @@ static void artist_realizefn(DeviceState *dev, Error **errp)
framebuffer_update_memory_section(&s->fbsection, &buf->mr, 0,
buf->width, buf->height);
/*
- * no idea whether the cursor is fixed size or not, so assume 32x32 which
- * seems sufficient for HP-UX X11.
+ * Artist cursor max size
*/
- s->cursor_height = 32;
- s->cursor_width = 32;
+ s->cursor_height = NGLE_MAX_SPRITE_SIZE;
+ s->cursor_width = NGLE_MAX_SPRITE_SIZE;
/*
* These two registers are not initialized by seabios's STI implementation.
@@ -1339,6 +1416,10 @@ static void artist_realizefn(DeviceState *dev, Error **errp)
s->image_bitmap_op = 0x23000300;
s->plane_mask = 0xff;
+ /* enable screen */
+ s->misc_video |= 0x0A000000;
+ s->misc_ctrl |= 0x00800000;
+
s->con = graphic_console_init(dev, 0, &artist_ops, s);
qemu_console_resize(s->con, s->width, s->height);
}
@@ -1377,9 +1458,10 @@ static const VMStateDescription vmstate_artist = {
VMSTATE_UINT32(cursor_width, ARTISTState),
VMSTATE_UINT32(plane_mask, ARTISTState),
VMSTATE_UINT32(reg_100080, ARTISTState),
- VMSTATE_UINT32(reg_300200, ARTISTState),
- VMSTATE_UINT32(reg_300208, ARTISTState),
- VMSTATE_UINT32(reg_300218, ARTISTState),
+ VMSTATE_UINT32(horiz_backporch, ARTISTState),
+ VMSTATE_UINT32(active_lines_low, ARTISTState),
+ VMSTATE_UINT32(misc_video, ARTISTState),
+ VMSTATE_UINT32(misc_ctrl, ARTISTState),
VMSTATE_UINT32(dst_bm_access, ARTISTState),
VMSTATE_UINT32(src_bm_access, ARTISTState),
VMSTATE_UINT32(control_plane, ARTISTState),
diff --git a/hw/dma/xilinx_axidma.c b/hw/dma/xilinx_axidma.c
index bc383f53cc..cbb8f0f169 100644
--- a/hw/dma/xilinx_axidma.c
+++ b/hw/dma/xilinx_axidma.c
@@ -552,7 +552,7 @@ static void xilinx_axidma_realize(DeviceState *dev, Error **errp)
st->dma = s;
st->nr = i;
- st->ptimer = ptimer_init(timer_hit, st, PTIMER_POLICY_DEFAULT);
+ st->ptimer = ptimer_init(timer_hit, st, PTIMER_POLICY_LEGACY);
ptimer_transaction_begin(st->ptimer);
ptimer_set_freq(st->ptimer, s->freqhz);
ptimer_transaction_commit(st->ptimer);
diff --git a/hw/dma/xlnx_csu_dma.c b/hw/dma/xlnx_csu_dma.c
index 60ada3286b..1ce52ea5a2 100644
--- a/hw/dma/xlnx_csu_dma.c
+++ b/hw/dma/xlnx_csu_dma.c
@@ -666,7 +666,7 @@ static void xlnx_csu_dma_realize(DeviceState *dev, Error **errp)
sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq);
s->src_timer = ptimer_init(xlnx_csu_dma_src_timeout_hit,
- s, PTIMER_POLICY_DEFAULT);
+ s, PTIMER_POLICY_LEGACY);
s->attr = MEMTXATTRS_UNSPECIFIED;
diff --git a/hw/intc/arm_gicv3_common.c b/hw/intc/arm_gicv3_common.c
index 5634c6fc78..351843db4a 100644
--- a/hw/intc/arm_gicv3_common.c
+++ b/hw/intc/arm_gicv3_common.c
@@ -563,6 +563,11 @@ static Property arm_gicv3_common_properties[] = {
DEFINE_PROP_UINT32("revision", GICv3State, revision, 3),
DEFINE_PROP_BOOL("has-lpi", GICv3State, lpi_enable, 0),
DEFINE_PROP_BOOL("has-security-extensions", GICv3State, security_extn, 0),
+ /*
+ * Compatibility property: force 8 bits of physical priority, even
+ * if the CPU being emulated should have fewer.
+ */
+ DEFINE_PROP_BOOL("force-8-bit-prio", GICv3State, force_8bit_prio, 0),
DEFINE_PROP_ARRAY("redist-region-count", GICv3State, nb_redist_regions,
redist_region_count, qdev_prop_uint32, uint32_t),
DEFINE_PROP_LINK("sysmem", GICv3State, dma, TYPE_MEMORY_REGION,
diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c
index 9efba798f8..8867e2e496 100644
--- a/hw/intc/arm_gicv3_cpuif.c
+++ b/hw/intc/arm_gicv3_cpuif.c
@@ -49,6 +49,14 @@ static inline int icv_min_vbpr(GICv3CPUState *cs)
return 7 - cs->vprebits;
}
+static inline int ich_num_aprs(GICv3CPUState *cs)
+{
+ /* Return the number of virtual APR registers (1, 2, or 4) */
+ int aprmax = 1 << (cs->vprebits - 5);
+ assert(aprmax <= ARRAY_SIZE(cs->ich_apr[0]));
+ return aprmax;
+}
+
/* Simple accessor functions for LR fields */
static uint32_t ich_lr_vintid(uint64_t lr)
{
@@ -145,9 +153,7 @@ static int ich_highest_active_virt_prio(GICv3CPUState *cs)
* in the ICH Active Priority Registers.
*/
int i;
- int aprmax = 1 << (cs->vprebits - 5);
-
- assert(aprmax <= ARRAY_SIZE(cs->ich_apr[0]));
+ int aprmax = ich_num_aprs(cs);
for (i = 0; i < aprmax; i++) {
uint32_t apr = cs->ich_apr[GICV3_G0][i] |
@@ -657,7 +663,7 @@ static uint64_t icv_ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
* should match the ones reported in ich_vtr_read().
*/
value = ICC_CTLR_EL1_A3V | (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
- (7 << ICC_CTLR_EL1_PRIBITS_SHIFT);
+ ((cs->vpribits - 1) << ICC_CTLR_EL1_PRIBITS_SHIFT);
if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM) {
value |= ICC_CTLR_EL1_EOIMODE;
@@ -787,6 +793,36 @@ static uint64_t icv_iar_read(CPUARMState *env, const ARMCPRegInfo *ri)
return intid;
}
+static uint32_t icc_fullprio_mask(GICv3CPUState *cs)
+{
+ /*
+ * Return a mask word which clears the unimplemented priority bits
+ * from a priority value for a physical interrupt. (Not to be confused
+ * with the group priority, whose mask depends on the value of BPR
+ * for the interrupt group.)
+ */
+ return ~0U << (8 - cs->pribits);
+}
+
+static inline int icc_min_bpr(GICv3CPUState *cs)
+{
+ /* The minimum BPR for the physical interface. */
+ return 7 - cs->prebits;
+}
+
+static inline int icc_min_bpr_ns(GICv3CPUState *cs)
+{
+ return icc_min_bpr(cs) + 1;
+}
+
+static inline int icc_num_aprs(GICv3CPUState *cs)
+{
+ /* Return the number of APR registers (1, 2, or 4) */
+ int aprmax = 1 << MAX(cs->prebits - 5, 0);
+ assert(aprmax <= ARRAY_SIZE(cs->icc_apr[0]));
+ return aprmax;
+}
+
static int icc_highest_active_prio(GICv3CPUState *cs)
{
/* Calculate the current running priority based on the set bits
@@ -794,14 +830,14 @@ static int icc_highest_active_prio(GICv3CPUState *cs)
*/
int i;
- for (i = 0; i < ARRAY_SIZE(cs->icc_apr[0]); i++) {
+ for (i = 0; i < icc_num_aprs(cs); i++) {
uint32_t apr = cs->icc_apr[GICV3_G0][i] |
cs->icc_apr[GICV3_G1][i] | cs->icc_apr[GICV3_G1NS][i];
if (!apr) {
continue;
}
- return (i * 32 + ctz32(apr)) << (GIC_MIN_BPR + 1);
+ return (i * 32 + ctz32(apr)) << (icc_min_bpr(cs) + 1);
}
/* No current active interrupts: return idle priority */
return 0xff;
@@ -980,7 +1016,7 @@ static void icc_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri,
trace_gicv3_icc_pmr_write(gicv3_redist_affid(cs), value);
- value &= 0xff;
+ value &= icc_fullprio_mask(cs);
if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) &&
(env->cp15.scr_el3 & SCR_FIQ)) {
@@ -1004,7 +1040,7 @@ static void icc_activate_irq(GICv3CPUState *cs, int irq)
*/
uint32_t mask = icc_gprio_mask(cs, cs->hppi.grp);
int prio = cs->hppi.prio & mask;
- int aprbit = prio >> 1;
+ int aprbit = prio >> (8 - cs->prebits);
int regno = aprbit / 32;
int regbit = aprbit % 32;
@@ -1162,7 +1198,7 @@ static void icc_drop_prio(GICv3CPUState *cs, int grp)
*/
int i;
- for (i = 0; i < ARRAY_SIZE(cs->icc_apr[grp]); i++) {
+ for (i = 0; i < icc_num_aprs(cs); i++) {
uint64_t *papr = &cs->icc_apr[grp][i];
if (!*papr) {
@@ -1303,9 +1339,7 @@ static int icv_drop_prio(GICv3CPUState *cs)
* 32 bits are actually relevant.
*/
int i;
- int aprmax = 1 << (cs->vprebits - 5);
-
- assert(aprmax <= ARRAY_SIZE(cs->ich_apr[0]));
+ int aprmax = ich_num_aprs(cs);
for (i = 0; i < aprmax; i++) {
uint64_t *papr0 = &cs->ich_apr[GICV3_G0][i];
@@ -1590,7 +1624,7 @@ static void icc_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
return;
}
- minval = (grp == GICV3_G1NS) ? GIC_MIN_BPR_NS : GIC_MIN_BPR;
+ minval = (grp == GICV3_G1NS) ? icc_min_bpr_ns(cs) : icc_min_bpr(cs);
if (value < minval) {
value = minval;
}
@@ -2171,19 +2205,19 @@ static void icc_reset(CPUARMState *env, const ARMCPRegInfo *ri)
cs->icc_ctlr_el1[GICV3_S] = ICC_CTLR_EL1_A3V |
(1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
- (7 << ICC_CTLR_EL1_PRIBITS_SHIFT);
+ ((cs->pribits - 1) << ICC_CTLR_EL1_PRIBITS_SHIFT);
cs->icc_ctlr_el1[GICV3_NS] = ICC_CTLR_EL1_A3V |
(1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
- (7 << ICC_CTLR_EL1_PRIBITS_SHIFT);
+ ((cs->pribits - 1) << ICC_CTLR_EL1_PRIBITS_SHIFT);
cs->icc_pmr_el1 = 0;
- cs->icc_bpr[GICV3_G0] = GIC_MIN_BPR;
- cs->icc_bpr[GICV3_G1] = GIC_MIN_BPR;
- cs->icc_bpr[GICV3_G1NS] = GIC_MIN_BPR_NS;
+ cs->icc_bpr[GICV3_G0] = icc_min_bpr(cs);
+ cs->icc_bpr[GICV3_G1] = icc_min_bpr(cs);
+ cs->icc_bpr[GICV3_G1NS] = icc_min_bpr_ns(cs);
memset(cs->icc_apr, 0, sizeof(cs->icc_apr));
memset(cs->icc_igrpen, 0, sizeof(cs->icc_igrpen));
cs->icc_ctlr_el3 = ICC_CTLR_EL3_NDS | ICC_CTLR_EL3_A3V |
(1 << ICC_CTLR_EL3_IDBITS_SHIFT) |
- (7 << ICC_CTLR_EL3_PRIBITS_SHIFT);
+ ((cs->pribits - 1) << ICC_CTLR_EL3_PRIBITS_SHIFT);
memset(cs->ich_apr, 0, sizeof(cs->ich_apr));
cs->ich_hcr_el2 = 0;
@@ -2238,27 +2272,6 @@ static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
.readfn = icc_ap_read,
.writefn = icc_ap_write,
},
- { .name = "ICC_AP0R1_EL1", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 5,
- .type = ARM_CP_IO | ARM_CP_NO_RAW,
- .access = PL1_RW, .accessfn = gicv3_fiq_access,
- .readfn = icc_ap_read,
- .writefn = icc_ap_write,
- },
- { .name = "ICC_AP0R2_EL1", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 6,
- .type = ARM_CP_IO | ARM_CP_NO_RAW,
- .access = PL1_RW, .accessfn = gicv3_fiq_access,
- .readfn = icc_ap_read,
- .writefn = icc_ap_write,
- },
- { .name = "ICC_AP0R3_EL1", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 7,
- .type = ARM_CP_IO | ARM_CP_NO_RAW,
- .access = PL1_RW, .accessfn = gicv3_fiq_access,
- .readfn = icc_ap_read,
- .writefn = icc_ap_write,
- },
/* All the ICC_AP1R*_EL1 registers are banked */
{ .name = "ICC_AP1R0_EL1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 0,
@@ -2267,27 +2280,6 @@ static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
.readfn = icc_ap_read,
.writefn = icc_ap_write,
},
- { .name = "ICC_AP1R1_EL1", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 1,
- .type = ARM_CP_IO | ARM_CP_NO_RAW,
- .access = PL1_RW, .accessfn = gicv3_irq_access,
- .readfn = icc_ap_read,
- .writefn = icc_ap_write,
- },
- { .name = "ICC_AP1R2_EL1", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 2,
- .type = ARM_CP_IO | ARM_CP_NO_RAW,
- .access = PL1_RW, .accessfn = gicv3_irq_access,
- .readfn = icc_ap_read,
- .writefn = icc_ap_write,
- },
- { .name = "ICC_AP1R3_EL1", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 3,
- .type = ARM_CP_IO | ARM_CP_NO_RAW,
- .access = PL1_RW, .accessfn = gicv3_irq_access,
- .readfn = icc_ap_read,
- .writefn = icc_ap_write,
- },
{ .name = "ICC_DIR_EL1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 1,
.type = ARM_CP_IO | ARM_CP_NO_RAW,
@@ -2430,6 +2422,54 @@ static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
},
};
+static const ARMCPRegInfo gicv3_cpuif_icc_apxr1_reginfo[] = {
+ { .name = "ICC_AP0R1_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 5,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL1_RW, .accessfn = gicv3_fiq_access,
+ .readfn = icc_ap_read,
+ .writefn = icc_ap_write,
+ },
+ { .name = "ICC_AP1R1_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 1,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL1_RW, .accessfn = gicv3_irq_access,
+ .readfn = icc_ap_read,
+ .writefn = icc_ap_write,
+ },
+};
+
+static const ARMCPRegInfo gicv3_cpuif_icc_apxr23_reginfo[] = {
+ { .name = "ICC_AP0R2_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 6,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL1_RW, .accessfn = gicv3_fiq_access,
+ .readfn = icc_ap_read,
+ .writefn = icc_ap_write,
+ },
+ { .name = "ICC_AP0R3_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 7,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL1_RW, .accessfn = gicv3_fiq_access,
+ .readfn = icc_ap_read,
+ .writefn = icc_ap_write,
+ },
+ { .name = "ICC_AP1R2_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 2,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL1_RW, .accessfn = gicv3_irq_access,
+ .readfn = icc_ap_read,
+ .writefn = icc_ap_write,
+ },
+ { .name = "ICC_AP1R3_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 3,
+ .type = ARM_CP_IO | ARM_CP_NO_RAW,
+ .access = PL1_RW, .accessfn = gicv3_irq_access,
+ .readfn = icc_ap_read,
+ .writefn = icc_ap_write,
+ },
+};
+
static uint64_t ich_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
GICv3CPUState *cs = icc_cs_from_env(env);
@@ -2755,6 +2795,16 @@ void gicv3_init_cpuif(GICv3State *s)
ARMCPU *cpu = ARM_CPU(qemu_get_cpu(i));
GICv3CPUState *cs = &s->cpu[i];
+ /*
+ * If the CPU doesn't define a GICv3 configuration, probably because
+ * in real hardware it doesn't have one, then we use default values
+ * matching the one used by most Arm CPUs. This applies to:
+ * cpu->gic_num_lrs
+ * cpu->gic_vpribits
+ * cpu->gic_vprebits
+ * cpu->gic_pribits
+ */
+
/* Note that we can't just use the GICv3CPUState as an opaque pointer
* in define_arm_cp_regs_with_opaque(), because when we're called back
* it might be with code translated by CPU 0 but run by CPU 1, in
@@ -2763,13 +2813,56 @@ void gicv3_init_cpuif(GICv3State *s)
* get back to the GICv3CPUState from the CPUARMState.
*/
define_arm_cp_regs(cpu, gicv3_cpuif_reginfo);
- if (arm_feature(&cpu->env, ARM_FEATURE_EL2)
- && cpu->gic_num_lrs) {
+
+ /*
+ * The CPU implementation specifies the number of supported
+ * bits of physical priority. For backwards compatibility
+ * of migration, we have a compat property that forces use
+ * of 8 priority bits regardless of what the CPU really has.
+ */
+ if (s->force_8bit_prio) {
+ cs->pribits = 8;
+ } else {
+ cs->pribits = cpu->gic_pribits ?: 5;
+ }
+
+ /*
+ * The GICv3 has separate ID register fields for virtual priority
+ * and preemption bit values, but only a single ID register field
+ * for the physical priority bits. The preemption bit count is
+ * always the same as the priority bit count, except that 8 bits
+ * of priority means 7 preemption bits. We precalculate the
+ * preemption bits because it simplifies the code and makes the
+ * parallels between the virtual and physical bits of the GIC
+ * a bit clearer.
+ */
+ cs->prebits = cs->pribits;
+ if (cs->prebits == 8) {
+ cs->prebits--;
+ }
+ /*
+ * Check that CPU code defining pribits didn't violate
+ * architectural constraints our implementation relies on.
+ */
+ g_assert(cs->pribits >= 4 && cs->pribits <= 8);
+
+ /*
+ * gicv3_cpuif_reginfo[] defines ICC_AP*R0_EL1; add definitions
+ * for ICC_AP*R{1,2,3}_EL1 if the prebits value requires them.
+ */
+ if (cs->prebits >= 6) {
+ define_arm_cp_regs(cpu, gicv3_cpuif_icc_apxr1_reginfo);
+ }
+ if (cs->prebits == 7) {
+ define_arm_cp_regs(cpu, gicv3_cpuif_icc_apxr23_reginfo);
+ }
+
+ if (arm_feature(&cpu->env, ARM_FEATURE_EL2)) {
int j;
- cs->num_list_regs = cpu->gic_num_lrs;
- cs->vpribits = cpu->gic_vpribits;
- cs->vprebits = cpu->gic_vprebits;
+ cs->num_list_regs = cpu->gic_num_lrs ?: 4;
+ cs->vpribits = cpu->gic_vpribits ?: 5;
+ cs->vprebits = cpu->gic_vprebits ?: 5;
/* Check against architectural constraints: getting these
* wrong would be a bug in the CPU code defining these,
diff --git a/hw/intc/arm_gicv3_kvm.c b/hw/intc/arm_gicv3_kvm.c
index 2922c516e5..3ca643ecba 100644
--- a/hw/intc/arm_gicv3_kvm.c
+++ b/hw/intc/arm_gicv3_kvm.c
@@ -673,9 +673,19 @@ static void arm_gicv3_icc_reset(CPUARMState *env, const ARMCPRegInfo *ri)
s = c->gic;
c->icc_pmr_el1 = 0;
- c->icc_bpr[GICV3_G0] = GIC_MIN_BPR;
- c->icc_bpr[GICV3_G1] = GIC_MIN_BPR;
- c->icc_bpr[GICV3_G1NS] = GIC_MIN_BPR;
+ /*
+ * Architecturally the reset value of the ICC_BPR registers
+ * is UNKNOWN. We set them all to 0 here; when the kernel
+ * uses these values to program the ICH_VMCR_EL2 fields that
+ * determine the guest-visible ICC_BPR register values, the
+ * hardware's "writing a value less than the minimum sets
+ * the field to the minimum value" behaviour will result in
+ * them effectively resetting to the correct minimum value
+ * for the host GIC.
+ */
+ c->icc_bpr[GICV3_G0] = 0;
+ c->icc_bpr[GICV3_G1] = 0;
+ c->icc_bpr[GICV3_G1NS] = 0;
c->icc_sre_el1 = 0x7;
memset(c->icc_apr, 0, sizeof(c->icc_apr));
diff --git a/hw/m68k/mcf5206.c b/hw/m68k/mcf5206.c
index 6d93d761a5..2ab1b4f059 100644
--- a/hw/m68k/mcf5206.c
+++ b/hw/m68k/mcf5206.c
@@ -152,7 +152,7 @@ static m5206_timer_state *m5206_timer_init(qemu_irq irq)
m5206_timer_state *s;
s = g_new0(m5206_timer_state, 1);
- s->timer = ptimer_init(m5206_timer_trigger, s, PTIMER_POLICY_DEFAULT);
+ s->timer = ptimer_init(m5206_timer_trigger, s, PTIMER_POLICY_LEGACY);
s->irq = irq;
m5206_timer_reset(s);
return s;
diff --git a/hw/m68k/mcf5208.c b/hw/m68k/mcf5208.c
index 655207e393..be1033f84f 100644
--- a/hw/m68k/mcf5208.c
+++ b/hw/m68k/mcf5208.c
@@ -197,7 +197,7 @@ static void mcf5208_sys_init(MemoryRegion *address_space, qemu_irq *pic)
/* Timers. */
for (i = 0; i < 2; i++) {
s = g_new0(m5208_timer_state, 1);
- s->timer = ptimer_init(m5208_timer_trigger, s, PTIMER_POLICY_DEFAULT);
+ s->timer = ptimer_init(m5208_timer_trigger, s, PTIMER_POLICY_LEGACY);
memory_region_init_io(&s->iomem, NULL, &m5208_timer_ops, s,
"m5208-timer", 0x00004000);
memory_region_add_subregion(address_space, 0xfc080000 + 0x4000 * i,
diff --git a/hw/net/can/xlnx-zynqmp-can.c b/hw/net/can/xlnx-zynqmp-can.c
index 22bb8910fa..82ac48cee2 100644
--- a/hw/net/can/xlnx-zynqmp-can.c
+++ b/hw/net/can/xlnx-zynqmp-can.c
@@ -1079,7 +1079,7 @@ static void xlnx_zynqmp_can_realize(DeviceState *dev, Error **errp)
/* Allocate a new timer. */
s->can_timer = ptimer_init(xlnx_zynqmp_can_ptimer_cb, s,
- PTIMER_POLICY_DEFAULT);
+ PTIMER_POLICY_LEGACY);
ptimer_transaction_begin(s->can_timer);
diff --git a/hw/net/fsl_etsec/etsec.c b/hw/net/fsl_etsec/etsec.c
index 6d50c39543..4e6cc708de 100644
--- a/hw/net/fsl_etsec/etsec.c
+++ b/hw/net/fsl_etsec/etsec.c
@@ -393,7 +393,7 @@ static void etsec_realize(DeviceState *dev, Error **errp)
object_get_typename(OBJECT(dev)), dev->id, etsec);
qemu_format_nic_info_str(qemu_get_queue(etsec->nic), etsec->conf.macaddr.a);
- etsec->ptimer = ptimer_init(etsec_timer_hit, etsec, PTIMER_POLICY_DEFAULT);
+ etsec->ptimer = ptimer_init(etsec_timer_hit, etsec, PTIMER_POLICY_LEGACY);
ptimer_transaction_begin(etsec->ptimer);
ptimer_set_freq(etsec->ptimer, 100);
ptimer_transaction_commit(etsec->ptimer);
diff --git a/hw/net/lan9118.c b/hw/net/lan9118.c
index 6aff424cbe..456ae38107 100644
--- a/hw/net/lan9118.c
+++ b/hw/net/lan9118.c
@@ -1363,7 +1363,7 @@ static void lan9118_realize(DeviceState *dev, Error **errp)
s->pmt_ctrl = 1;
s->txp = &s->tx_packet;
- s->timer = ptimer_init(lan9118_tick, s, PTIMER_POLICY_DEFAULT);
+ s->timer = ptimer_init(lan9118_tick, s, PTIMER_POLICY_LEGACY);
ptimer_transaction_begin(s->timer);
ptimer_set_freq(s->timer, 10000);
ptimer_set_limit(s->timer, 0xffff, 1);
diff --git a/hw/net/tulip.c b/hw/net/tulip.c
index d5b6cc5ee6..097e905bec 100644
--- a/hw/net/tulip.c
+++ b/hw/net/tulip.c
@@ -967,6 +967,8 @@ static void pci_tulip_realize(PCIDevice *pci_dev, Error **errp)
pci_conf = s->dev.config;
pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */
+ qemu_macaddr_default_if_unset(&s->c.macaddr);
+
s->eeprom = eeprom93xx_new(&pci_dev->qdev, 64);
tulip_fill_eeprom(s);
@@ -981,8 +983,6 @@ static void pci_tulip_realize(PCIDevice *pci_dev, Error **errp)
s->irq = pci_allocate_irq(&s->dev);
- qemu_macaddr_default_if_unset(&s->c.macaddr);
-
s->nic = qemu_new_nic(&net_tulip_info, &s->c,
object_get_typename(OBJECT(pci_dev)),
pci_dev->qdev.id, s);
diff --git a/hw/rtc/exynos4210_rtc.c b/hw/rtc/exynos4210_rtc.c
index ae67641de6..d1620c7a2a 100644
--- a/hw/rtc/exynos4210_rtc.c
+++ b/hw/rtc/exynos4210_rtc.c
@@ -564,14 +564,14 @@ static void exynos4210_rtc_init(Object *obj)
Exynos4210RTCState *s = EXYNOS4210_RTC(obj);
SysBusDevice *dev = SYS_BUS_DEVICE(obj);
- s->ptimer = ptimer_init(exynos4210_rtc_tick, s, PTIMER_POLICY_DEFAULT);
+ s->ptimer = ptimer_init(exynos4210_rtc_tick, s, PTIMER_POLICY_LEGACY);
ptimer_transaction_begin(s->ptimer);
ptimer_set_freq(s->ptimer, RTC_BASE_FREQ);
exynos4210_rtc_update_freq(s, 0);
ptimer_transaction_commit(s->ptimer);
s->ptimer_1Hz = ptimer_init(exynos4210_rtc_1Hz_tick,
- s, PTIMER_POLICY_DEFAULT);
+ s, PTIMER_POLICY_LEGACY);
ptimer_transaction_begin(s->ptimer_1Hz);
ptimer_set_freq(s->ptimer_1Hz, RTC_BASE_FREQ);
ptimer_transaction_commit(s->ptimer_1Hz);
diff --git a/hw/timer/allwinner-a10-pit.c b/hw/timer/allwinner-a10-pit.c
index c3fc2a4daa..971f78462a 100644
--- a/hw/timer/allwinner-a10-pit.c
+++ b/hw/timer/allwinner-a10-pit.c
@@ -275,7 +275,7 @@ static void a10_pit_init(Object *obj)
tc->container = s;
tc->index = i;
- s->timer[i] = ptimer_init(a10_pit_timer_cb, tc, PTIMER_POLICY_DEFAULT);
+ s->timer[i] = ptimer_init(a10_pit_timer_cb, tc, PTIMER_POLICY_LEGACY);
}
}
diff --git a/hw/timer/altera_timer.c b/hw/timer/altera_timer.c
index c6e02d2b5a..0f1f54206a 100644
--- a/hw/timer/altera_timer.c
+++ b/hw/timer/altera_timer.c
@@ -185,7 +185,7 @@ static void altera_timer_realize(DeviceState *dev, Error **errp)
return;
}
- t->ptimer = ptimer_init(timer_hit, t, PTIMER_POLICY_DEFAULT);
+ t->ptimer = ptimer_init(timer_hit, t, PTIMER_POLICY_LEGACY);
ptimer_transaction_begin(t->ptimer);
ptimer_set_freq(t->ptimer, t->freq_hz);
ptimer_transaction_commit(t->ptimer);
diff --git a/hw/timer/arm_timer.c b/hw/timer/arm_timer.c
index 84cf2726bb..69c8863472 100644
--- a/hw/timer/arm_timer.c
+++ b/hw/timer/arm_timer.c
@@ -180,7 +180,7 @@ static arm_timer_state *arm_timer_init(uint32_t freq)
s->freq = freq;
s->control = TIMER_CTRL_IE;
- s->timer = ptimer_init(arm_timer_tick, s, PTIMER_POLICY_DEFAULT);
+ s->timer = ptimer_init(arm_timer_tick, s, PTIMER_POLICY_LEGACY);
vmstate_register(NULL, VMSTATE_INSTANCE_ID_ANY, &vmstate_arm_timer, s);
return s;
}
diff --git a/hw/timer/digic-timer.c b/hw/timer/digic-timer.c
index e3aae4a45a..d5186f4454 100644
--- a/hw/timer/digic-timer.c
+++ b/hw/timer/digic-timer.c
@@ -139,7 +139,7 @@ static void digic_timer_init(Object *obj)
{
DigicTimerState *s = DIGIC_TIMER(obj);
- s->ptimer = ptimer_init(digic_timer_tick, NULL, PTIMER_POLICY_DEFAULT);
+ s->ptimer = ptimer_init(digic_timer_tick, NULL, PTIMER_POLICY_LEGACY);
/*
* FIXME: there is no documentation on Digic timer
diff --git a/hw/timer/etraxfs_timer.c b/hw/timer/etraxfs_timer.c
index 139e5b86a4..ecc2831baf 100644
--- a/hw/timer/etraxfs_timer.c
+++ b/hw/timer/etraxfs_timer.c
@@ -370,9 +370,9 @@ static void etraxfs_timer_realize(DeviceState *dev, Error **errp)
ETRAXTimerState *t = ETRAX_TIMER(dev);
SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
- t->ptimer_t0 = ptimer_init(timer0_hit, t, PTIMER_POLICY_DEFAULT);
- t->ptimer_t1 = ptimer_init(timer1_hit, t, PTIMER_POLICY_DEFAULT);
- t->ptimer_wd = ptimer_init(watchdog_hit, t, PTIMER_POLICY_DEFAULT);
+ t->ptimer_t0 = ptimer_init(timer0_hit, t, PTIMER_POLICY_LEGACY);
+ t->ptimer_t1 = ptimer_init(timer1_hit, t, PTIMER_POLICY_LEGACY);
+ t->ptimer_wd = ptimer_init(watchdog_hit, t, PTIMER_POLICY_LEGACY);
sysbus_init_irq(sbd, &t->irq);
sysbus_init_irq(sbd, &t->nmi);
diff --git a/hw/timer/exynos4210_mct.c b/hw/timer/exynos4210_mct.c
index d0e5343996..e175a9f5b9 100644
--- a/hw/timer/exynos4210_mct.c
+++ b/hw/timer/exynos4210_mct.c
@@ -1503,17 +1503,17 @@ static void exynos4210_mct_init(Object *obj)
/* Global timer */
s->g_timer.ptimer_frc = ptimer_init(exynos4210_gfrc_event, s,
- PTIMER_POLICY_DEFAULT);
+ PTIMER_POLICY_LEGACY);
memset(&s->g_timer.reg, 0, sizeof(struct gregs));
/* Local timers */
for (i = 0; i < 2; i++) {
s->l_timer[i].tick_timer.ptimer_tick =
ptimer_init(exynos4210_ltick_event, &s->l_timer[i],
- PTIMER_POLICY_DEFAULT);
+ PTIMER_POLICY_LEGACY);
s->l_timer[i].ptimer_frc =
ptimer_init(exynos4210_lfrc_event, &s->l_timer[i],
- PTIMER_POLICY_DEFAULT);
+ PTIMER_POLICY_LEGACY);
s->l_timer[i].id = i;
}
diff --git a/hw/timer/exynos4210_pwm.c b/hw/timer/exynos4210_pwm.c
index 220088120e..02924a9e5b 100644
--- a/hw/timer/exynos4210_pwm.c
+++ b/hw/timer/exynos4210_pwm.c
@@ -400,7 +400,7 @@ static void exynos4210_pwm_init(Object *obj)
sysbus_init_irq(dev, &s->timer[i].irq);
s->timer[i].ptimer = ptimer_init(exynos4210_pwm_tick,
&s->timer[i],
- PTIMER_POLICY_DEFAULT);
+ PTIMER_POLICY_LEGACY);
s->timer[i].id = i;
s->timer[i].parent = s;
}
diff --git a/hw/timer/grlib_gptimer.c b/hw/timer/grlib_gptimer.c
index d511890109..5c4923c1e0 100644
--- a/hw/timer/grlib_gptimer.c
+++ b/hw/timer/grlib_gptimer.c
@@ -383,7 +383,7 @@ static void grlib_gptimer_realize(DeviceState *dev, Error **errp)
timer->unit = unit;
timer->ptimer = ptimer_init(grlib_gptimer_hit, timer,
- PTIMER_POLICY_DEFAULT);
+ PTIMER_POLICY_LEGACY);
timer->id = i;
/* One IRQ line for each timer */
diff --git a/hw/timer/imx_epit.c b/hw/timer/imx_epit.c
index ebd58254d1..2bf8c754b2 100644
--- a/hw/timer/imx_epit.c
+++ b/hw/timer/imx_epit.c
@@ -347,9 +347,9 @@ static void imx_epit_realize(DeviceState *dev, Error **errp)
0x00001000);
sysbus_init_mmio(sbd, &s->iomem);
- s->timer_reload = ptimer_init(imx_epit_reload, s, PTIMER_POLICY_DEFAULT);
+ s->timer_reload = ptimer_init(imx_epit_reload, s, PTIMER_POLICY_LEGACY);
- s->timer_cmp = ptimer_init(imx_epit_cmp, s, PTIMER_POLICY_DEFAULT);
+ s->timer_cmp = ptimer_init(imx_epit_cmp, s, PTIMER_POLICY_LEGACY);
}
static void imx_epit_class_init(ObjectClass *klass, void *data)
diff --git a/hw/timer/imx_gpt.c b/hw/timer/imx_gpt.c
index 5c0d9a269c..80b8302639 100644
--- a/hw/timer/imx_gpt.c
+++ b/hw/timer/imx_gpt.c
@@ -505,7 +505,7 @@ static void imx_gpt_realize(DeviceState *dev, Error **errp)
0x00001000);
sysbus_init_mmio(sbd, &s->iomem);
- s->timer = ptimer_init(imx_gpt_timeout, s, PTIMER_POLICY_DEFAULT);
+ s->timer = ptimer_init(imx_gpt_timeout, s, PTIMER_POLICY_LEGACY);
}
static void imx_gpt_class_init(ObjectClass *klass, void *data)
diff --git a/hw/timer/mss-timer.c b/hw/timer/mss-timer.c
index fe0ca905f3..ee7438f168 100644
--- a/hw/timer/mss-timer.c
+++ b/hw/timer/mss-timer.c
@@ -232,7 +232,7 @@ static void mss_timer_init(Object *obj)
for (i = 0; i < NUM_TIMERS; i++) {
struct Msf2Timer *st = &t->timers[i];
- st->ptimer = ptimer_init(timer_hit, st, PTIMER_POLICY_DEFAULT);
+ st->ptimer = ptimer_init(timer_hit, st, PTIMER_POLICY_LEGACY);
ptimer_transaction_begin(st->ptimer);
ptimer_set_freq(st->ptimer, t->freq_hz);
ptimer_transaction_commit(st->ptimer);
diff --git a/hw/timer/sh_timer.c b/hw/timer/sh_timer.c
index c72c327bfa..7788939766 100644
--- a/hw/timer/sh_timer.c
+++ b/hw/timer/sh_timer.c
@@ -239,7 +239,7 @@ static void *sh_timer_init(uint32_t freq, int feat, qemu_irq irq)
s->enabled = 0;
s->irq = irq;
- s->timer = ptimer_init(sh_timer_tick, s, PTIMER_POLICY_DEFAULT);
+ s->timer = ptimer_init(sh_timer_tick, s, PTIMER_POLICY_LEGACY);
sh_timer_write(s, OFFSET_TCOR >> 2, s->tcor);
sh_timer_write(s, OFFSET_TCNT >> 2, s->tcnt);
diff --git a/hw/timer/slavio_timer.c b/hw/timer/slavio_timer.c
index 90fdce4c44..8c4f6eb06b 100644
--- a/hw/timer/slavio_timer.c
+++ b/hw/timer/slavio_timer.c
@@ -405,7 +405,7 @@ static void slavio_timer_init(Object *obj)
tc->timer_index = i;
s->cputimer[i].timer = ptimer_init(slavio_timer_irq, tc,
- PTIMER_POLICY_DEFAULT);
+ PTIMER_POLICY_LEGACY);
ptimer_transaction_begin(s->cputimer[i].timer);
ptimer_set_period(s->cputimer[i].timer, TIMER_PERIOD);
ptimer_transaction_commit(s->cputimer[i].timer);
diff --git a/hw/timer/xilinx_timer.c b/hw/timer/xilinx_timer.c
index 1eb927eb84..c7f17cd646 100644
--- a/hw/timer/xilinx_timer.c
+++ b/hw/timer/xilinx_timer.c
@@ -223,7 +223,7 @@ static void xilinx_timer_realize(DeviceState *dev, Error **errp)
xt->parent = t;
xt->nr = i;
- xt->ptimer = ptimer_init(timer_hit, xt, PTIMER_POLICY_DEFAULT);
+ xt->ptimer = ptimer_init(timer_hit, xt, PTIMER_POLICY_LEGACY);
ptimer_transaction_begin(xt->ptimer);
ptimer_set_freq(xt->ptimer, t->freq_hz);
ptimer_transaction_commit(xt->ptimer);
diff --git a/include/hw/adc/zynq-xadc.h b/include/hw/adc/zynq-xadc.h
index 2017b7a803..c10cc4c379 100644
--- a/include/hw/adc/zynq-xadc.h
+++ b/include/hw/adc/zynq-xadc.h
@@ -39,8 +39,7 @@ struct ZynqXADCState {
uint16_t xadc_dfifo[ZYNQ_XADC_FIFO_DEPTH];
uint16_t xadc_dfifo_entries;
- struct IRQState *qemu_irq;
-
+ qemu_irq irq;
};
#endif /* ZYNQ_XADC_H */
diff --git a/include/hw/intc/arm_gicv3_common.h b/include/hw/intc/arm_gicv3_common.h
index 4e41610055..ab5182a28a 100644
--- a/include/hw/intc/arm_gicv3_common.h
+++ b/include/hw/intc/arm_gicv3_common.h
@@ -51,11 +51,6 @@
/* Maximum number of list registers (architectural limit) */
#define GICV3_LR_MAX 16
-/* Minimum BPR for Secure, or when security not enabled */
-#define GIC_MIN_BPR 0
-/* Minimum BPR for Nonsecure when security is enabled */
-#define GIC_MIN_BPR_NS (GIC_MIN_BPR + 1)
-
/* For some distributor fields we want to model the array of 32-bit
* register values which hold various bitmaps corresponding to enabled,
* pending, etc bits. These macros and functions facilitate that; the
@@ -206,6 +201,8 @@ struct GICv3CPUState {
int num_list_regs;
int vpribits; /* number of virtual priority bits */
int vprebits; /* number of virtual preemption bits */
+ int pribits; /* number of physical priority bits */
+ int prebits; /* number of physical preemption bits */
/* Current highest priority pending interrupt for this CPU.
* This is cached information that can be recalculated from the
@@ -251,6 +248,7 @@ struct GICv3State {
uint32_t revision;
bool lpi_enable;
bool security_extn;
+ bool force_8bit_prio;
bool irq_reset_nonsecure;
bool gicd_no_migration_shift_bug;
diff --git a/include/hw/ptimer.h b/include/hw/ptimer.h
index c443218475..4dc02b0de4 100644
--- a/include/hw/ptimer.h
+++ b/include/hw/ptimer.h
@@ -33,9 +33,17 @@
* to stderr when the guest attempts to enable the timer.
*/
-/* The default ptimer policy retains backward compatibility with the legacy
- * timers. Custom policies are adjusting the default one. Consider providing
- * a correct policy for your timer.
+/*
+ * The 'legacy' ptimer policy retains backward compatibility with the
+ * traditional ptimer behaviour from before policy flags were introduced.
+ * It has several weird behaviours which don't match typical hardware
+ * timer behaviour. For a new device using ptimers, you should not
+ * use PTIMER_POLICY_LEGACY, but instead check the actual behaviour
+ * that you need and specify the right set of policy flags to get that.
+ *
+ * If you are overhauling an existing device that uses PTIMER_POLICY_LEGACY
+ * and are in a position to check or test the real hardware behaviour,
+ * consider updating it to specify the right policy flags.
*
* The rough edges of the default policy:
* - Starting to run with a period = 0 emits error message and stops the
@@ -54,7 +62,7 @@
* since the last period, effectively restarting the timer with a
* counter = counter value at the moment of change (.i.e. one less).
*/
-#define PTIMER_POLICY_DEFAULT 0
+#define PTIMER_POLICY_LEGACY 0
/* Periodic timer counter stays with "0" for a one period before wrapping
* around. */
diff --git a/meson.build b/meson.build
index c4fb8fa5d0..9ebc00f032 100644
--- a/meson.build
+++ b/meson.build
@@ -580,6 +580,18 @@ if cocoa.found() and get_option('gtk').enabled()
error('Cocoa and GTK+ cannot be enabled at the same time')
endif
+vmnet = dependency('appleframeworks', modules: 'vmnet', required: get_option('vmnet'))
+if vmnet.found() and not cc.has_header_symbol('vmnet/vmnet.h',
+ 'VMNET_BRIDGED_MODE',
+ dependencies: vmnet)
+ vmnet = not_found
+ if get_option('vmnet').enabled()
+ error('vmnet.framework API is outdated')
+ else
+ warning('vmnet.framework API is outdated, disabling')
+ endif
+endif
+
seccomp = not_found
if not get_option('seccomp').auto() or have_system or have_tools
seccomp = dependency('libseccomp', version: '>=2.3.0',
@@ -1741,6 +1753,7 @@ config_host_data.set('CONFIG_VHOST_KERNEL', have_vhost_kernel)
config_host_data.set('CONFIG_VHOST_USER', have_vhost_user)
config_host_data.set('CONFIG_VHOST_CRYPTO', have_vhost_user_crypto)
config_host_data.set('CONFIG_VHOST_VDPA', have_vhost_vdpa)
+config_host_data.set('CONFIG_VMNET', vmnet.found())
config_host_data.set('CONFIG_VHOST_USER_BLK_SERVER', have_vhost_user_blk_server)
config_host_data.set('CONFIG_PNG', png.found())
config_host_data.set('CONFIG_VNC', vnc.found())
@@ -3794,7 +3807,8 @@ summary(summary_info, bool_yn: true, section: 'Crypto')
# Libraries
summary_info = {}
if targetos == 'darwin'
- summary_info += {'Cocoa support': cocoa}
+ summary_info += {'Cocoa support': cocoa}
+ summary_info += {'vmnet.framework support': vmnet}
endif
summary_info += {'SDL support': sdl}
summary_info += {'SDL image support': sdl_image}
diff --git a/meson_options.txt b/meson_options.txt
index fcdc43f7db..2de94af037 100644
--- a/meson_options.txt
+++ b/meson_options.txt
@@ -197,6 +197,8 @@ option('netmap', type : 'feature', value : 'auto',
description: 'netmap network backend support')
option('vde', type : 'feature', value : 'auto',
description: 'vde network backend support')
+option('vmnet', type : 'feature', value : 'auto',
+ description: 'vmnet.framework network backend support')
option('virglrenderer', type : 'feature', value : 'auto',
description: 'virgl rendering support')
option('png', type : 'feature', value : 'auto',
diff --git a/net/clients.h b/net/clients.h
index 92f9b59aed..c9157789f2 100644
--- a/net/clients.h
+++ b/net/clients.h
@@ -63,4 +63,15 @@ int net_init_vhost_user(const Netdev *netdev, const char *name,
int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
NetClientState *peer, Error **errp);
+#ifdef CONFIG_VMNET
+int net_init_vmnet_host(const Netdev *netdev, const char *name,
+ NetClientState *peer, Error **errp);
+
+int net_init_vmnet_shared(const Netdev *netdev, const char *name,
+ NetClientState *peer, Error **errp);
+
+int net_init_vmnet_bridged(const Netdev *netdev, const char *name,
+ NetClientState *peer, Error **errp);
+#endif /* CONFIG_VMNET */
+
#endif /* QEMU_NET_CLIENTS_H */
diff --git a/net/meson.build b/net/meson.build
index c965e83b26..754e2d1d40 100644
--- a/net/meson.build
+++ b/net/meson.build
@@ -44,4 +44,11 @@ if have_vhost_net_vdpa
softmmu_ss.add(files('vhost-vdpa.c'))
endif
+vmnet_files = files(
+ 'vmnet-common.m',
+ 'vmnet-bridged.m',
+ 'vmnet-host.c',
+ 'vmnet-shared.c'
+)
+softmmu_ss.add(when: vmnet, if_true: vmnet_files)
subdir('can')
diff --git a/net/net.c b/net/net.c
index a094cf1d29..2db160e063 100644
--- a/net/net.c
+++ b/net/net.c
@@ -1020,6 +1020,11 @@ static int (* const net_client_init_fun[NET_CLIENT_DRIVER__MAX])(
#ifdef CONFIG_L2TPV3
[NET_CLIENT_DRIVER_L2TPV3] = net_init_l2tpv3,
#endif
+#ifdef CONFIG_VMNET
+ [NET_CLIENT_DRIVER_VMNET_HOST] = net_init_vmnet_host,
+ [NET_CLIENT_DRIVER_VMNET_SHARED] = net_init_vmnet_shared,
+ [NET_CLIENT_DRIVER_VMNET_BRIDGED] = net_init_vmnet_bridged,
+#endif /* CONFIG_VMNET */
};
@@ -1106,6 +1111,11 @@ void show_netdevs(void)
#ifdef CONFIG_VHOST_VDPA
"vhost-vdpa",
#endif
+#ifdef CONFIG_VMNET
+ "vmnet-host",
+ "vmnet-shared",
+ "vmnet-bridged",
+#endif
};
qemu_printf("Available netdev backend types:\n");
diff --git a/net/vmnet-bridged.m b/net/vmnet-bridged.m
new file mode 100644
index 0000000000..46d2282863
--- /dev/null
+++ b/net/vmnet-bridged.m
@@ -0,0 +1,152 @@
+/*
+ * vmnet-bridged.m
+ *
+ * Copyright(c) 2022 Vladislav Yaroshchuk <vladislav.yaroshchuk@jetbrains.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/qapi-types-net.h"
+#include "qapi/error.h"
+#include "clients.h"
+#include "vmnet_int.h"
+
+#include <vmnet/vmnet.h>
+
+
+static bool validate_ifname(const char *ifname)
+{
+ xpc_object_t shared_if_list = vmnet_copy_shared_interface_list();
+ bool match = false;
+ if (!xpc_array_get_count(shared_if_list)) {
+ goto done;
+ }
+
+ match = !xpc_array_apply(
+ shared_if_list,
+ ^bool(size_t index, xpc_object_t value) {
+ return strcmp(xpc_string_get_string_ptr(value), ifname) != 0;
+ });
+
+done:
+ xpc_release(shared_if_list);
+ return match;
+}
+
+
+static char *get_valid_ifnames(void)
+{
+ xpc_object_t shared_if_list = vmnet_copy_shared_interface_list();
+ __block char *if_list = NULL;
+ __block char *if_list_prev = NULL;
+
+ if (!xpc_array_get_count(shared_if_list)) {
+ goto done;
+ }
+
+ xpc_array_apply(
+ shared_if_list,
+ ^bool(size_t index, xpc_object_t value) {
+ /* build list of strings like "en0 en1 en2 " */
+ if_list = g_strconcat(xpc_string_get_string_ptr(value),
+ " ",
+ if_list_prev,
+ NULL);
+ g_free(if_list_prev);
+ if_list_prev = if_list;
+ return true;
+ });
+
+done:
+ xpc_release(shared_if_list);
+ return if_list;
+}
+
+
+static bool validate_options(const Netdev *netdev, Error **errp)
+{
+ const NetdevVmnetBridgedOptions *options = &(netdev->u.vmnet_bridged);
+ char *if_list;
+
+ if (!validate_ifname(options->ifname)) {
+ if_list = get_valid_ifnames();
+ if (if_list) {
+ error_setg(errp,
+ "unsupported ifname '%s', expected one of [ %s]",
+ options->ifname,
+ if_list);
+ g_free(if_list);
+ } else {
+ error_setg(errp,
+ "unsupported ifname '%s', no supported "
+ "interfaces available",
+ options->ifname);
+ }
+ return false;
+ }
+
+#if !defined(MAC_OS_VERSION_11_0) || \
+ MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_VERSION_11_0
+ if (options->has_isolated) {
+ error_setg(errp,
+ "vmnet-bridged.isolated feature is "
+ "unavailable: outdated vmnet.framework API");
+ return false;
+ }
+#endif
+ return true;
+}
+
+
+static xpc_object_t build_if_desc(const Netdev *netdev)
+{
+ const NetdevVmnetBridgedOptions *options = &(netdev->u.vmnet_bridged);
+ xpc_object_t if_desc = xpc_dictionary_create(NULL, NULL, 0);
+
+ xpc_dictionary_set_uint64(if_desc,
+ vmnet_operation_mode_key,
+ VMNET_BRIDGED_MODE
+ );
+
+ xpc_dictionary_set_string(if_desc,
+ vmnet_shared_interface_name_key,
+ options->ifname);
+
+#if defined(MAC_OS_VERSION_11_0) && \
+ MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_VERSION_11_0
+ xpc_dictionary_set_bool(if_desc,
+ vmnet_enable_isolation_key,
+ options->isolated);
+#endif
+ return if_desc;
+}
+
+
+static NetClientInfo net_vmnet_bridged_info = {
+ .type = NET_CLIENT_DRIVER_VMNET_BRIDGED,
+ .size = sizeof(VmnetState),
+ .receive = vmnet_receive_common,
+ .cleanup = vmnet_cleanup_common,
+};
+
+
+int net_init_vmnet_bridged(const Netdev *netdev, const char *name,
+ NetClientState *peer, Error **errp)
+{
+ NetClientState *nc = qemu_new_net_client(&net_vmnet_bridged_info,
+ peer, "vmnet-bridged", name);
+ xpc_object_t if_desc;
+ int result = -1;
+
+ if (!validate_options(netdev, errp)) {
+ return result;
+ }
+
+ if_desc = build_if_desc(netdev);
+ result = vmnet_if_create(nc, if_desc, errp);
+ xpc_release(if_desc);
+ return result;
+}
diff --git a/net/vmnet-common.m b/net/vmnet-common.m
new file mode 100644
index 0000000000..2cb60b9ddd
--- /dev/null
+++ b/net/vmnet-common.m
@@ -0,0 +1,378 @@
+/*
+ * vmnet-common.m - network client wrapper for Apple vmnet.framework
+ *
+ * Copyright(c) 2022 Vladislav Yaroshchuk <vladislav.yaroshchuk@jetbrains.com>
+ * Copyright(c) 2021 Phillip Tennen <phillip@axleos.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
+#include "qemu/log.h"
+#include "qapi/qapi-types-net.h"
+#include "vmnet_int.h"
+#include "clients.h"
+#include "qemu/error-report.h"
+#include "qapi/error.h"
+
+#include <vmnet/vmnet.h>
+#include <dispatch/dispatch.h>
+
+
+static void vmnet_send_completed(NetClientState *nc, ssize_t len);
+
+
+const char *vmnet_status_map_str(vmnet_return_t status)
+{
+ switch (status) {
+ case VMNET_SUCCESS:
+ return "success";
+ case VMNET_FAILURE:
+ return "general failure (possibly not enough privileges)";
+ case VMNET_MEM_FAILURE:
+ return "memory allocation failure";
+ case VMNET_INVALID_ARGUMENT:
+ return "invalid argument specified";
+ case VMNET_SETUP_INCOMPLETE:
+ return "interface setup is not complete";
+ case VMNET_INVALID_ACCESS:
+ return "invalid access, permission denied";
+ case VMNET_PACKET_TOO_BIG:
+ return "packet size is larger than MTU";
+ case VMNET_BUFFER_EXHAUSTED:
+ return "buffers exhausted in kernel";
+ case VMNET_TOO_MANY_PACKETS:
+ return "packet count exceeds limit";
+#if defined(MAC_OS_VERSION_11_0) && \
+ MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_VERSION_11_0
+ case VMNET_SHARING_SERVICE_BUSY:
+ return "conflict, sharing service is in use";
+#endif
+ default:
+ return "unknown vmnet error";
+ }
+}
+
+
+/**
+ * Write packets from QEMU to vmnet interface.
+ *
+ * vmnet.framework supports iov, but writing more than
+ * one iov into vmnet interface fails with
+ * 'VMNET_INVALID_ARGUMENT'. Collecting provided iovs into
+ * one and passing it to vmnet works fine. That's the
+ * reason why receive_iov() is left unimplemented. But it still
+ * works with good performance having .receive() only.
+ */
+ssize_t vmnet_receive_common(NetClientState *nc,
+ const uint8_t *buf,
+ size_t size)
+{
+ VmnetState *s = DO_UPCAST(VmnetState, nc, nc);
+ struct vmpktdesc packet;
+ struct iovec iov;
+ int pkt_cnt;
+ vmnet_return_t if_status;
+
+ if (size > s->max_packet_size) {
+ warn_report("vmnet: packet is too big, %zu > %" PRIu64,
+ size,
+ s->max_packet_size);
+ return -1;
+ }
+
+ iov.iov_base = (char *) buf;
+ iov.iov_len = size;
+
+ packet.vm_pkt_iovcnt = 1;
+ packet.vm_flags = 0;
+ packet.vm_pkt_size = size;
+ packet.vm_pkt_iov = &iov;
+ pkt_cnt = 1;
+
+ if_status = vmnet_write(s->vmnet_if, &packet, &pkt_cnt);
+ if (if_status != VMNET_SUCCESS) {
+ error_report("vmnet: write error: %s",
+ vmnet_status_map_str(if_status));
+ return -1;
+ }
+
+ if (pkt_cnt) {
+ return size;
+ }
+ return 0;
+}
+
+
+/**
+ * Read packets from vmnet interface and write them
+ * to temporary buffers in VmnetState.
+ *
+ * Returns read packets number (may be 0) on success,
+ * -1 on error
+ */
+static int vmnet_read_packets(VmnetState *s)
+{
+ assert(s->packets_send_current_pos == s->packets_send_end_pos);
+
+ struct vmpktdesc *packets = s->packets_buf;
+ vmnet_return_t status;
+ int i;
+
+ /* Read as many packets as present */
+ s->packets_send_current_pos = 0;
+ s->packets_send_end_pos = VMNET_PACKETS_LIMIT;
+ for (i = 0; i < s->packets_send_end_pos; ++i) {
+ packets[i].vm_pkt_size = s->max_packet_size;
+ packets[i].vm_pkt_iovcnt = 1;
+ packets[i].vm_flags = 0;
+ }
+
+ status = vmnet_read(s->vmnet_if, packets, &s->packets_send_end_pos);
+ if (status != VMNET_SUCCESS) {
+ error_printf("vmnet: read failed: %s\n",
+ vmnet_status_map_str(status));
+ s->packets_send_current_pos = 0;
+ s->packets_send_end_pos = 0;
+ return -1;
+ }
+ return s->packets_send_end_pos;
+}
+
+
+/**
+ * Write packets from temporary buffers in VmnetState
+ * to QEMU.
+ */
+static void vmnet_write_packets_to_qemu(VmnetState *s)
+{
+ while (s->packets_send_current_pos < s->packets_send_end_pos) {
+ ssize_t size = qemu_send_packet_async(&s->nc,
+ s->iov_buf[s->packets_send_current_pos].iov_base,
+ s->packets_buf[s->packets_send_current_pos].vm_pkt_size,
+ vmnet_send_completed);
+
+ if (size == 0) {
+ /* QEMU is not ready to consume more packets -
+ * stop and wait for completion callback call */
+ return;
+ }
+ ++s->packets_send_current_pos;
+ }
+}
+
+
+/**
+ * Bottom half callback that transfers packets from vmnet interface
+ * to QEMU.
+ *
+ * The process of transferring packets is three-staged:
+ * 1. Handle vmnet event;
+ * 2. Read packets from vmnet interface into temporary buffer;
+ * 3. Write packets from temporary buffer to QEMU.
+ *
+ * QEMU may suspend this process on the last stage, returning 0 from
+ * qemu_send_packet_async function. If this happens, we should
+ * respectfully wait until it is ready to consume more packets,
+ * write left ones in temporary buffer and only after this
+ * continue reading more packets from vmnet interface.
+ *
+ * Packets to be transferred are stored into packets_buf,
+ * in the window [packets_send_current_pos..packets_send_end_pos)
+ * including current_pos, excluding end_pos.
+ *
+ * Thus, if QEMU is not ready, buffer is not read and
+ * packets_send_current_pos < packets_send_end_pos.
+ */
+static void vmnet_send_bh(void *opaque)
+{
+ NetClientState *nc = (NetClientState *) opaque;
+ VmnetState *s = DO_UPCAST(VmnetState, nc, nc);
+
+ /*
+ * Do nothing if QEMU is not ready - wait
+ * for completion callback invocation
+ */
+ if (s->packets_send_current_pos < s->packets_send_end_pos) {
+ return;
+ }
+
+ /* Read packets from vmnet interface */
+ if (vmnet_read_packets(s) > 0) {
+ /* Send them to QEMU */
+ vmnet_write_packets_to_qemu(s);
+ }
+}
+
+
+/**
+ * Completion callback to be invoked by QEMU when it becomes
+ * ready to consume more packets.
+ */
+static void vmnet_send_completed(NetClientState *nc, ssize_t len)
+{
+ VmnetState *s = DO_UPCAST(VmnetState, nc, nc);
+
+ /* Callback is invoked when a queued packet has been sent */
+ ++s->packets_send_current_pos;
+
+ /* Complete sending packets left in VmnetState buffers */
+ vmnet_write_packets_to_qemu(s);
+
+ /* And read new ones from vmnet if VmnetState buffer is ready */
+ if (s->packets_send_current_pos < s->packets_send_end_pos) {
+ qemu_bh_schedule(s->send_bh);
+ }
+}
+
+
+static void vmnet_bufs_init(VmnetState *s)
+{
+ struct vmpktdesc *packets = s->packets_buf;
+ struct iovec *iov = s->iov_buf;
+ int i;
+
+ for (i = 0; i < VMNET_PACKETS_LIMIT; ++i) {
+ iov[i].iov_len = s->max_packet_size;
+ iov[i].iov_base = g_malloc0(iov[i].iov_len);
+ packets[i].vm_pkt_iov = iov + i;
+ }
+}
+
+
+int vmnet_if_create(NetClientState *nc,
+ xpc_object_t if_desc,
+ Error **errp)
+{
+ VmnetState *s = DO_UPCAST(VmnetState, nc, nc);
+ dispatch_semaphore_t if_created_sem = dispatch_semaphore_create(0);
+ __block vmnet_return_t if_status;
+
+ s->if_queue = dispatch_queue_create(
+ "org.qemu.vmnet.if_queue",
+ DISPATCH_QUEUE_SERIAL
+ );
+
+ xpc_dictionary_set_bool(
+ if_desc,
+ vmnet_allocate_mac_address_key,
+ false
+ );
+
+#ifdef DEBUG
+ qemu_log("vmnet.start.interface_desc:\n");
+ xpc_dictionary_apply(if_desc,
+ ^bool(const char *k, xpc_object_t v) {
+ char *desc = xpc_copy_description(v);
+ qemu_log(" %s=%s\n", k, desc);
+ free(desc);
+ return true;
+ });
+#endif /* DEBUG */
+
+ s->vmnet_if = vmnet_start_interface(
+ if_desc,
+ s->if_queue,
+ ^(vmnet_return_t status, xpc_object_t interface_param) {
+ if_status = status;
+ if (status != VMNET_SUCCESS || !interface_param) {
+ dispatch_semaphore_signal(if_created_sem);
+ return;
+ }
+
+#ifdef DEBUG
+ qemu_log("vmnet.start.interface_param:\n");
+ xpc_dictionary_apply(interface_param,
+ ^bool(const char *k, xpc_object_t v) {
+ char *desc = xpc_copy_description(v);
+ qemu_log(" %s=%s\n", k, desc);
+ free(desc);
+ return true;
+ });
+#endif /* DEBUG */
+
+ s->mtu = xpc_dictionary_get_uint64(
+ interface_param,
+ vmnet_mtu_key);
+ s->max_packet_size = xpc_dictionary_get_uint64(
+ interface_param,
+ vmnet_max_packet_size_key);
+
+ dispatch_semaphore_signal(if_created_sem);
+ });
+
+ if (s->vmnet_if == NULL) {
+ dispatch_release(s->if_queue);
+ dispatch_release(if_created_sem);
+ error_setg(errp,
+ "unable to create interface with requested params");
+ return -1;
+ }
+
+ dispatch_semaphore_wait(if_created_sem, DISPATCH_TIME_FOREVER);
+ dispatch_release(if_created_sem);
+
+ if (if_status != VMNET_SUCCESS) {
+ dispatch_release(s->if_queue);
+ error_setg(errp,
+ "cannot create vmnet interface: %s",
+ vmnet_status_map_str(if_status));
+ return -1;
+ }
+
+ s->send_bh = aio_bh_new(qemu_get_aio_context(), vmnet_send_bh, nc);
+ vmnet_bufs_init(s);
+
+ s->packets_send_current_pos = 0;
+ s->packets_send_end_pos = 0;
+
+ vmnet_interface_set_event_callback(
+ s->vmnet_if,
+ VMNET_INTERFACE_PACKETS_AVAILABLE,
+ s->if_queue,
+ ^(interface_event_t event_id, xpc_object_t event) {
+ assert(event_id == VMNET_INTERFACE_PACKETS_AVAILABLE);
+ /*
+ * This function is being called from a non qemu thread, so
+ * we only schedule a BH, and do the rest of the io completion
+ * handling from vmnet_send_bh() which runs in a qemu context.
+ */
+ qemu_bh_schedule(s->send_bh);
+ });
+
+ return 0;
+}
+
+
+void vmnet_cleanup_common(NetClientState *nc)
+{
+ VmnetState *s = DO_UPCAST(VmnetState, nc, nc);
+ dispatch_semaphore_t if_stopped_sem;
+
+ if (s->vmnet_if == NULL) {
+ return;
+ }
+
+ if_stopped_sem = dispatch_semaphore_create(0);
+ vmnet_stop_interface(
+ s->vmnet_if,
+ s->if_queue,
+ ^(vmnet_return_t status) {
+ assert(status == VMNET_SUCCESS);
+ dispatch_semaphore_signal(if_stopped_sem);
+ });
+ dispatch_semaphore_wait(if_stopped_sem, DISPATCH_TIME_FOREVER);
+
+ qemu_purge_queued_packets(nc);
+
+ qemu_bh_delete(s->send_bh);
+ dispatch_release(if_stopped_sem);
+ dispatch_release(s->if_queue);
+
+ for (int i = 0; i < VMNET_PACKETS_LIMIT; ++i) {
+ g_free(s->iov_buf[i].iov_base);
+ }
+}
diff --git a/net/vmnet-host.c b/net/vmnet-host.c
new file mode 100644
index 0000000000..05f8d78864
--- /dev/null
+++ b/net/vmnet-host.c
@@ -0,0 +1,128 @@
+/*
+ * vmnet-host.c
+ *
+ * Copyright(c) 2022 Vladislav Yaroshchuk <vladislav.yaroshchuk@jetbrains.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/uuid.h"
+#include "qapi/qapi-types-net.h"
+#include "qapi/error.h"
+#include "clients.h"
+#include "vmnet_int.h"
+
+#include <vmnet/vmnet.h>
+
+
+static bool validate_options(const Netdev *netdev, Error **errp)
+{
+ const NetdevVmnetHostOptions *options = &(netdev->u.vmnet_host);
+
+#if defined(MAC_OS_VERSION_11_0) && \
+ MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_VERSION_11_0
+
+ QemuUUID net_uuid;
+ if (options->has_net_uuid &&
+ qemu_uuid_parse(options->net_uuid, &net_uuid) < 0) {
+ error_setg(errp, "Invalid UUID provided in 'net-uuid'");
+ return false;
+ }
+#else
+ if (options->has_isolated) {
+ error_setg(errp,
+ "vmnet-host.isolated feature is "
+ "unavailable: outdated vmnet.framework API");
+ return false;
+ }
+
+ if (options->has_net_uuid) {
+ error_setg(errp,
+ "vmnet-host.net-uuid feature is "
+ "unavailable: outdated vmnet.framework API");
+ return false;
+ }
+#endif
+
+ if ((options->has_start_address ||
+ options->has_end_address ||
+ options->has_subnet_mask) &&
+ !(options->has_start_address &&
+ options->has_end_address &&
+ options->has_subnet_mask)) {
+ error_setg(errp,
+ "'start-address', 'end-address', 'subnet-mask' "
+ "should be provided together");
+ return false;
+ }
+
+ return true;
+}
+
+static xpc_object_t build_if_desc(const Netdev *netdev)
+{
+ const NetdevVmnetHostOptions *options = &(netdev->u.vmnet_host);
+ xpc_object_t if_desc = xpc_dictionary_create(NULL, NULL, 0);
+
+ xpc_dictionary_set_uint64(if_desc,
+ vmnet_operation_mode_key,
+ VMNET_HOST_MODE);
+
+#if defined(MAC_OS_VERSION_11_0) && \
+ MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_VERSION_11_0
+
+ xpc_dictionary_set_bool(if_desc,
+ vmnet_enable_isolation_key,
+ options->isolated);
+
+ QemuUUID net_uuid;
+ if (options->has_net_uuid) {
+ qemu_uuid_parse(options->net_uuid, &net_uuid);
+ xpc_dictionary_set_uuid(if_desc,
+ vmnet_network_identifier_key,
+ net_uuid.data);
+ }
+#endif
+
+ if (options->has_start_address) {
+ xpc_dictionary_set_string(if_desc,
+ vmnet_start_address_key,
+ options->start_address);
+ xpc_dictionary_set_string(if_desc,
+ vmnet_end_address_key,
+ options->end_address);
+ xpc_dictionary_set_string(if_desc,
+ vmnet_subnet_mask_key,
+ options->subnet_mask);
+ }
+
+ return if_desc;
+}
+
+static NetClientInfo net_vmnet_host_info = {
+ .type = NET_CLIENT_DRIVER_VMNET_HOST,
+ .size = sizeof(VmnetState),
+ .receive = vmnet_receive_common,
+ .cleanup = vmnet_cleanup_common,
+};
+
+int net_init_vmnet_host(const Netdev *netdev, const char *name,
+ NetClientState *peer, Error **errp)
+{
+ NetClientState *nc = qemu_new_net_client(&net_vmnet_host_info,
+ peer, "vmnet-host", name);
+ xpc_object_t if_desc;
+ int result = -1;
+
+ if (!validate_options(netdev, errp)) {
+ return result;
+ }
+
+ if_desc = build_if_desc(netdev);
+ result = vmnet_if_create(nc, if_desc, errp);
+ xpc_release(if_desc);
+ return result;
+}
diff --git a/net/vmnet-shared.c b/net/vmnet-shared.c
new file mode 100644
index 0000000000..18cadc72bd
--- /dev/null
+++ b/net/vmnet-shared.c
@@ -0,0 +1,114 @@
+/*
+ * vmnet-shared.c
+ *
+ * Copyright(c) 2022 Vladislav Yaroshchuk <vladislav.yaroshchuk@jetbrains.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/qapi-types-net.h"
+#include "qapi/error.h"
+#include "vmnet_int.h"
+#include "clients.h"
+
+#include <vmnet/vmnet.h>
+
+
+static bool validate_options(const Netdev *netdev, Error **errp)
+{
+ const NetdevVmnetSharedOptions *options = &(netdev->u.vmnet_shared);
+
+#if !defined(MAC_OS_VERSION_11_0) || \
+ MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_VERSION_11_0
+ if (options->has_isolated) {
+ error_setg(errp,
+ "vmnet-shared.isolated feature is "
+ "unavailable: outdated vmnet.framework API");
+ return false;
+ }
+#endif
+
+ if ((options->has_start_address ||
+ options->has_end_address ||
+ options->has_subnet_mask) &&
+ !(options->has_start_address &&
+ options->has_end_address &&
+ options->has_subnet_mask)) {
+ error_setg(errp,
+ "'start-address', 'end-address', 'subnet-mask' "
+ "should be provided together"
+ );
+ return false;
+ }
+
+ return true;
+}
+
+static xpc_object_t build_if_desc(const Netdev *netdev)
+{
+ const NetdevVmnetSharedOptions *options = &(netdev->u.vmnet_shared);
+ xpc_object_t if_desc = xpc_dictionary_create(NULL, NULL, 0);
+
+ xpc_dictionary_set_uint64(
+ if_desc,
+ vmnet_operation_mode_key,
+ VMNET_SHARED_MODE
+ );
+
+ if (options->has_nat66_prefix) {
+ xpc_dictionary_set_string(if_desc,
+ vmnet_nat66_prefix_key,
+ options->nat66_prefix);
+ }
+
+ if (options->has_start_address) {
+ xpc_dictionary_set_string(if_desc,
+ vmnet_start_address_key,
+ options->start_address);
+ xpc_dictionary_set_string(if_desc,
+ vmnet_end_address_key,
+ options->end_address);
+ xpc_dictionary_set_string(if_desc,
+ vmnet_subnet_mask_key,
+ options->subnet_mask);
+ }
+
+#if defined(MAC_OS_VERSION_11_0) && \
+ MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_VERSION_11_0
+ xpc_dictionary_set_bool(
+ if_desc,
+ vmnet_enable_isolation_key,
+ options->isolated
+ );
+#endif
+
+ return if_desc;
+}
+
+static NetClientInfo net_vmnet_shared_info = {
+ .type = NET_CLIENT_DRIVER_VMNET_SHARED,
+ .size = sizeof(VmnetState),
+ .receive = vmnet_receive_common,
+ .cleanup = vmnet_cleanup_common,
+};
+
+int net_init_vmnet_shared(const Netdev *netdev, const char *name,
+ NetClientState *peer, Error **errp)
+{
+ NetClientState *nc = qemu_new_net_client(&net_vmnet_shared_info,
+ peer, "vmnet-shared", name);
+ xpc_object_t if_desc;
+ int result = -1;
+
+ if (!validate_options(netdev, errp)) {
+ return result;
+ }
+
+ if_desc = build_if_desc(netdev);
+ result = vmnet_if_create(nc, if_desc, errp);
+ xpc_release(if_desc);
+ return result;
+}
diff --git a/net/vmnet_int.h b/net/vmnet_int.h
new file mode 100644
index 0000000000..adf6e8c20d
--- /dev/null
+++ b/net/vmnet_int.h
@@ -0,0 +1,63 @@
+/*
+ * vmnet_int.h
+ *
+ * Copyright(c) 2022 Vladislav Yaroshchuk <vladislav.yaroshchuk@jetbrains.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+#ifndef VMNET_INT_H
+#define VMNET_INT_H
+
+#include "qemu/osdep.h"
+#include "vmnet_int.h"
+#include "clients.h"
+
+#include <vmnet/vmnet.h>
+#include <dispatch/dispatch.h>
+
+/**
+ * From vmnet.framework documentation
+ *
+ * Each read/write call allows up to 200 packets to be
+ * read or written for a maximum of 256KB.
+ *
+ * Each packet written should be a complete
+ * ethernet frame.
+ *
+ * https://developer.apple.com/documentation/vmnet
+ */
+#define VMNET_PACKETS_LIMIT 200
+
+typedef struct VmnetState {
+ NetClientState nc;
+ interface_ref vmnet_if;
+
+ uint64_t mtu;
+ uint64_t max_packet_size;
+
+ dispatch_queue_t if_queue;
+
+ QEMUBH *send_bh;
+
+ struct vmpktdesc packets_buf[VMNET_PACKETS_LIMIT];
+ int packets_send_current_pos;
+ int packets_send_end_pos;
+
+ struct iovec iov_buf[VMNET_PACKETS_LIMIT];
+} VmnetState;
+
+const char *vmnet_status_map_str(vmnet_return_t status);
+
+int vmnet_if_create(NetClientState *nc,
+ xpc_object_t if_desc,
+ Error **errp);
+
+ssize_t vmnet_receive_common(NetClientState *nc,
+ const uint8_t *buf,
+ size_t size);
+
+void vmnet_cleanup_common(NetClientState *nc);
+
+#endif /* VMNET_INT_H */
diff --git a/pc-bios/hppa-firmware.img b/pc-bios/hppa-firmware.img
index 0ec2d96b8f..392bce072b 100644
--- a/pc-bios/hppa-firmware.img
+++ b/pc-bios/hppa-firmware.img
Binary files differ
diff --git a/qapi/net.json b/qapi/net.json
index b92f3f5fb4..d6f7cfd4d6 100644
--- a/qapi/net.json
+++ b/qapi/net.json
@@ -453,6 +453,120 @@
'*queues': 'int' } }
##
+# @NetdevVmnetHostOptions:
+#
+# vmnet (host mode) network backend.
+#
+# Allows the vmnet interface to communicate with other vmnet
+# interfaces that are in host mode and also with the host.
+#
+# @start-address: The starting IPv4 address to use for the interface.
+# Must be in the private IP range (RFC 1918). Must be
+# specified along with @end-address and @subnet-mask.
+# This address is used as the gateway address. The
+# subsequent address up to and including end-address are
+# placed in the DHCP pool.
+#
+# @end-address: The DHCP IPv4 range end address to use for the
+# interface. Must be in the private IP range (RFC 1918).
+# Must be specified along with @start-address and
+# @subnet-mask.
+#
+# @subnet-mask: The IPv4 subnet mask to use on the interface. Must
+# be specified along with @start-address and @end-address.
+#
+# @isolated: Enable isolation for this interface. Interface isolation
+# ensures that vmnet interface is not able to communicate
+# with any other vmnet interfaces. Only communication with
+# host is allowed. Requires at least macOS Big Sur 11.0.
+#
+# @net-uuid: The identifier (UUID) to uniquely identify the isolated
+# network vmnet interface should be added to. If
+# set, no DHCP service is provided for this interface and
+# network communication is allowed only with other interfaces
+# added to this network identified by the UUID. Requires
+# at least macOS Big Sur 11.0.
+#
+# Since: 7.1
+##
+{ 'struct': 'NetdevVmnetHostOptions',
+ 'data': {
+ '*start-address': 'str',
+ '*end-address': 'str',
+ '*subnet-mask': 'str',
+ '*isolated': 'bool',
+ '*net-uuid': 'str' },
+ 'if': 'CONFIG_VMNET' }
+
+##
+# @NetdevVmnetSharedOptions:
+#
+# vmnet (shared mode) network backend.
+#
+# Allows traffic originating from the vmnet interface to reach the
+# Internet through a network address translator (NAT).
+# The vmnet interface can communicate with the host and with
+# other shared mode interfaces on the same subnet. If no DHCP
+# settings, subnet mask and IPv6 prefix are specified, the interface can
+# communicate with any of other interfaces in shared mode.
+#
+# @start-address: The starting IPv4 address to use for the interface.
+# Must be in the private IP range (RFC 1918). Must be
+# specified along with @end-address and @subnet-mask.
+# This address is used as the gateway address. The
+# subsequent address up to and including end-address are
+# placed in the DHCP pool.
+#
+# @end-address: The DHCP IPv4 range end address to use for the
+# interface. Must be in the private IP range (RFC 1918).
+# Must be specified along with @start-address and @subnet-mask.
+#
+# @subnet-mask: The IPv4 subnet mask to use on the interface. Must
+# be specified along with @start-address and @end-address.
+#
+# @isolated: Enable isolation for this interface. Interface isolation
+# ensures that vmnet interface is not able to communicate
+# with any other vmnet interfaces. Only communication with
+# host is allowed. Requires at least macOS Big Sur 11.0.
+#
+# @nat66-prefix: The IPv6 prefix to use in the guest network. Must be a
+# unique local address i.e. start with fd00::/8 and have
+# length of 64.
+#
+# Since: 7.1
+##
+{ 'struct': 'NetdevVmnetSharedOptions',
+ 'data': {
+ '*start-address': 'str',
+ '*end-address': 'str',
+ '*subnet-mask': 'str',
+ '*isolated': 'bool',
+ '*nat66-prefix': 'str' },
+ 'if': 'CONFIG_VMNET' }
+
+##
+# @NetdevVmnetBridgedOptions:
+#
+# vmnet (bridged mode) network backend.
+#
+# Bridges the vmnet interface with a physical network interface.
+#
+# @ifname: The name of the physical interface to be bridged.
+#
+# @isolated: Enable isolation for this interface. Interface isolation
+# ensures that vmnet interface is not able to communicate
+# with any other vmnet interfaces. Only communication with
+# host is allowed. Requires at least macOS Big Sur 11.0.
+#
+# Since: 7.1
+##
+{ 'struct': 'NetdevVmnetBridgedOptions',
+ 'data': {
+ 'ifname': 'str',
+ '*isolated': 'bool' },
+ 'if': 'CONFIG_VMNET' }
+
+##
# @NetClientDriver:
#
# Available netdev drivers.
@@ -460,10 +574,16 @@
# Since: 2.7
#
# @vhost-vdpa since 5.1
+# @vmnet-host since 7.1
+# @vmnet-shared since 7.1
+# @vmnet-bridged since 7.1
##
{ 'enum': 'NetClientDriver',
'data': [ 'none', 'nic', 'user', 'tap', 'l2tpv3', 'socket', 'vde',
- 'bridge', 'hubport', 'netmap', 'vhost-user', 'vhost-vdpa' ] }
+ 'bridge', 'hubport', 'netmap', 'vhost-user', 'vhost-vdpa',
+ { 'name': 'vmnet-host', 'if': 'CONFIG_VMNET' },
+ { 'name': 'vmnet-shared', 'if': 'CONFIG_VMNET' },
+ { 'name': 'vmnet-bridged', 'if': 'CONFIG_VMNET' }] }
##
# @Netdev:
@@ -477,6 +597,9 @@
# Since: 1.2
#
# 'l2tpv3' - since 2.1
+# 'vmnet-host' - since 7.1
+# 'vmnet-shared' - since 7.1
+# 'vmnet-bridged' - since 7.1
##
{ 'union': 'Netdev',
'base': { 'id': 'str', 'type': 'NetClientDriver' },
@@ -492,7 +615,13 @@
'hubport': 'NetdevHubPortOptions',
'netmap': 'NetdevNetmapOptions',
'vhost-user': 'NetdevVhostUserOptions',
- 'vhost-vdpa': 'NetdevVhostVDPAOptions' } }
+ 'vhost-vdpa': 'NetdevVhostVDPAOptions',
+ 'vmnet-host': { 'type': 'NetdevVmnetHostOptions',
+ 'if': 'CONFIG_VMNET' },
+ 'vmnet-shared': { 'type': 'NetdevVmnetSharedOptions',
+ 'if': 'CONFIG_VMNET' },
+ 'vmnet-bridged': { 'type': 'NetdevVmnetBridgedOptions',
+ 'if': 'CONFIG_VMNET' } } }
##
# @RxState:
diff --git a/qemu-options.hx b/qemu-options.hx
index b484640067..a664baaa18 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -2796,6 +2796,25 @@ DEF("netdev", HAS_ARG, QEMU_OPTION_netdev,
"-netdev vhost-vdpa,id=str,vhostdev=/path/to/dev\n"
" configure a vhost-vdpa network,Establish a vhost-vdpa netdev\n"
#endif
+#ifdef CONFIG_VMNET
+ "-netdev vmnet-host,id=str[,isolated=on|off][,net-uuid=uuid]\n"
+ " [,start-address=addr,end-address=addr,subnet-mask=mask]\n"
+ " configure a vmnet network backend in host mode with ID 'str',\n"
+ " isolate this interface from others with 'isolated',\n"
+ " configure the address range and choose a subnet mask,\n"
+ " specify network UUID 'uuid' to disable DHCP and interact with\n"
+ " vmnet-host interfaces within this isolated network\n"
+ "-netdev vmnet-shared,id=str[,isolated=on|off][,nat66-prefix=addr]\n"
+ " [,start-address=addr,end-address=addr,subnet-mask=mask]\n"
+ " configure a vmnet network backend in shared mode with ID 'str',\n"
+ " configure the address range and choose a subnet mask,\n"
+ " set IPv6 ULA prefix (of length 64) to use for internal network,\n"
+ " isolate this interface from others with 'isolated'\n"
+ "-netdev vmnet-bridged,id=str,ifname=name[,isolated=on|off]\n"
+ " configure a vmnet network backend in bridged mode with ID 'str',\n"
+ " use 'ifname=name' to select a physical network interface to be bridged,\n"
+ " isolate this interface from others with 'isolated'\n"
+#endif
"-netdev hubport,id=str,hubid=n[,netdev=nd]\n"
" configure a hub port on the hub with ID 'n'\n", QEMU_ARCH_ALL)
DEF("nic", HAS_ARG, QEMU_OPTION_nic,
@@ -2815,6 +2834,9 @@ DEF("nic", HAS_ARG, QEMU_OPTION_nic,
#ifdef CONFIG_POSIX
"vhost-user|"
#endif
+#ifdef CONFIG_VMNET
+ "vmnet-host|vmnet-shared|vmnet-bridged|"
+#endif
"socket][,option][,...][mac=macaddr]\n"
" initialize an on-board / default host NIC (using MAC address\n"
" macaddr) and connect it to the given host network backend\n"
@@ -2837,6 +2859,9 @@ DEF("net", HAS_ARG, QEMU_OPTION_net,
#ifdef CONFIG_NETMAP
"netmap|"
#endif
+#ifdef CONFIG_VMNET
+ "vmnet-host|vmnet-shared|vmnet-bridged|"
+#endif
"socket][,option][,option][,...]\n"
" old way to initialize a host network interface\n"
" (use the -netdev option if possible instead)\n", QEMU_ARCH_ALL)
diff --git a/roms/seabios-hppa b/roms/seabios-hppa
-Subproject bf3404006fd2c832857eb57e6f853862f97dace
+Subproject 17ca7a9998c1755d42321cfc0afb5f480f5a58f
diff --git a/scripts/meson-buildoptions.sh b/scripts/meson-buildoptions.sh
index 7fc56d3f4a..731e5ea1cf 100644
--- a/scripts/meson-buildoptions.sh
+++ b/scripts/meson-buildoptions.sh
@@ -156,6 +156,7 @@ meson_options_help() {
printf "%s\n" ' vhost-kernel vhost kernel backend support'
printf "%s\n" ' vhost-net vhost-net kernel acceleration support'
printf "%s\n" ' vhost-user vhost-user backend support'
+ printf "%s\n" ' vmnet vmnet.framework network backend support'
printf "%s\n" ' vhost-user-blk-server'
printf "%s\n" ' build vhost-user-blk server'
printf "%s\n" ' vhost-vdpa vhost-vdpa kernel backend support'
diff --git a/target/arm/cpregs.h b/target/arm/cpregs.h
index db03d6a7e1..d9b678c2f1 100644
--- a/target/arm/cpregs.h
+++ b/target/arm/cpregs.h
@@ -461,4 +461,28 @@ static inline bool cp_access_ok(int current_el,
/* Raw read of a coprocessor register (as needed for migration, etc) */
uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri);
+/*
+ * Return true if the cp register encoding is in the "feature ID space" as
+ * defined by FEAT_IDST (and thus should be reported with ESR_ELx.EC
+ * as EC_SYSTEMREGISTERTRAP rather than EC_UNCATEGORIZED).
+ */
+static inline bool arm_cpreg_encoding_in_idspace(uint8_t opc0, uint8_t opc1,
+ uint8_t opc2,
+ uint8_t crn, uint8_t crm)
+{
+ return opc0 == 3 && (opc1 == 0 || opc1 == 1 || opc1 == 3) &&
+ crn == 0 && crm < 8;
+}
+
+/*
+ * As arm_cpreg_encoding_in_idspace(), but take the encoding from an
+ * ARMCPRegInfo.
+ */
+static inline bool arm_cpreg_in_idspace(const ARMCPRegInfo *ri)
+{
+ return ri->state == ARM_CP_STATE_AA64 &&
+ arm_cpreg_encoding_in_idspace(ri->opc0, ri->opc1, ri->opc2,
+ ri->crn, ri->crm);
+}
+
#endif /* TARGET_ARM_CPREGS_H */
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 029f644768..d2bd74c2ed 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -201,9 +201,11 @@ static void arm_cpu_reset(DeviceState *dev)
/* Trap on btype=3 for PACIxSP. */
env->cp15.sctlr_el[1] |= SCTLR_BT0;
/* and to the FP/Neon instructions */
- env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 20, 2, 3);
+ env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
+ CPACR_EL1, FPEN, 3);
/* and to the SVE instructions */
- env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 16, 2, 3);
+ env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
+ CPACR_EL1, ZEN, 3);
/* with reasonable vector length */
if (cpu_isar_feature(aa64_sve, cpu)) {
env->vfp.zcr_el[1] =
@@ -252,7 +254,10 @@ static void arm_cpu_reset(DeviceState *dev)
} else {
#if defined(CONFIG_USER_ONLY)
/* Userspace expects access to cp10 and cp11 for FP/Neon */
- env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 20, 4, 0xf);
+ env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
+ CPACR, CP10, 3);
+ env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
+ CPACR, CP11, 3);
#endif
}
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 18ca61e8e2..c1865ad5da 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -362,6 +362,7 @@ typedef struct CPUArchState {
uint32_t pmsav5_data_ap; /* PMSAv5 MPU data access permissions */
uint32_t pmsav5_insn_ap; /* PMSAv5 MPU insn access permissions */
uint64_t hcr_el2; /* Hypervisor configuration register */
+ uint64_t hcrx_el2; /* Extended Hypervisor configuration register */
uint64_t scr_el3; /* Secure configuration register. */
union { /* Fault status registers. */
struct {
@@ -965,6 +966,7 @@ struct ArchCPU {
uint64_t id_aa64dfr0;
uint64_t id_aa64dfr1;
uint64_t id_aa64zfr0;
+ uint64_t reset_pmcr_el0;
} isar;
uint64_t midr;
uint32_t revidr;
@@ -1002,6 +1004,7 @@ struct ArchCPU {
int gic_num_lrs; /* number of list registers */
int gic_vpribits; /* number of virtual priority bits */
int gic_vprebits; /* number of virtual preemption bits */
+ int gic_pribits; /* number of physical priority bits */
/* Whether the cfgend input is high (i.e. this CPU should reset into
* big-endian mode). This setting isn't used directly: instead it modifies
@@ -1258,11 +1261,45 @@ void pmu_init(ARMCPU *cpu);
#define SCTLR_SPINTMASK (1ULL << 62) /* FEAT_NMI */
#define SCTLR_TIDCP (1ULL << 63) /* FEAT_TIDCP1 */
-#define CPTR_TCPAC (1U << 31)
-#define CPTR_TTA (1U << 20)
-#define CPTR_TFP (1U << 10)
-#define CPTR_TZ (1U << 8) /* CPTR_EL2 */
-#define CPTR_EZ (1U << 8) /* CPTR_EL3 */
+/* Bit definitions for CPACR (AArch32 only) */
+FIELD(CPACR, CP10, 20, 2)
+FIELD(CPACR, CP11, 22, 2)
+FIELD(CPACR, TRCDIS, 28, 1) /* matches CPACR_EL1.TTA */
+FIELD(CPACR, D32DIS, 30, 1) /* up to v7; RAZ in v8 */
+FIELD(CPACR, ASEDIS, 31, 1)
+
+/* Bit definitions for CPACR_EL1 (AArch64 only) */
+FIELD(CPACR_EL1, ZEN, 16, 2)
+FIELD(CPACR_EL1, FPEN, 20, 2)
+FIELD(CPACR_EL1, SMEN, 24, 2)
+FIELD(CPACR_EL1, TTA, 28, 1) /* matches CPACR.TRCDIS */
+
+/* Bit definitions for HCPTR (AArch32 only) */
+FIELD(HCPTR, TCP10, 10, 1)
+FIELD(HCPTR, TCP11, 11, 1)
+FIELD(HCPTR, TASE, 15, 1)
+FIELD(HCPTR, TTA, 20, 1)
+FIELD(HCPTR, TAM, 30, 1) /* matches CPTR_EL2.TAM */
+FIELD(HCPTR, TCPAC, 31, 1) /* matches CPTR_EL2.TCPAC */
+
+/* Bit definitions for CPTR_EL2 (AArch64 only) */
+FIELD(CPTR_EL2, TZ, 8, 1) /* !E2H */
+FIELD(CPTR_EL2, TFP, 10, 1) /* !E2H, matches HCPTR.TCP10 */
+FIELD(CPTR_EL2, TSM, 12, 1) /* !E2H */
+FIELD(CPTR_EL2, ZEN, 16, 2) /* E2H */
+FIELD(CPTR_EL2, FPEN, 20, 2) /* E2H */
+FIELD(CPTR_EL2, SMEN, 24, 2) /* E2H */
+FIELD(CPTR_EL2, TTA, 28, 1)
+FIELD(CPTR_EL2, TAM, 30, 1) /* matches HCPTR.TAM */
+FIELD(CPTR_EL2, TCPAC, 31, 1) /* matches HCPTR.TCPAC */
+
+/* Bit definitions for CPTR_EL3 (AArch64 only) */
+FIELD(CPTR_EL3, EZ, 8, 1)
+FIELD(CPTR_EL3, TFP, 10, 1)
+FIELD(CPTR_EL3, ESM, 12, 1)
+FIELD(CPTR_EL3, TTA, 20, 1)
+FIELD(CPTR_EL3, TAM, 30, 1)
+FIELD(CPTR_EL3, TCPAC, 31, 1)
#define MDCR_EPMAD (1U << 21)
#define MDCR_EDAD (1U << 20)
@@ -1543,6 +1580,19 @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
#define HCR_TWEDEN (1ULL << 59)
#define HCR_TWEDEL MAKE_64BIT_MASK(60, 4)
+#define HCRX_ENAS0 (1ULL << 0)
+#define HCRX_ENALS (1ULL << 1)
+#define HCRX_ENASR (1ULL << 2)
+#define HCRX_FNXS (1ULL << 3)
+#define HCRX_FGTNXS (1ULL << 4)
+#define HCRX_SMPME (1ULL << 5)
+#define HCRX_TALLINT (1ULL << 6)
+#define HCRX_VINMI (1ULL << 7)
+#define HCRX_VFNMI (1ULL << 8)
+#define HCRX_CMOW (1ULL << 9)
+#define HCRX_MCE2 (1ULL << 10)
+#define HCRX_MSCEN (1ULL << 11)
+
#define HPFAR_NS (1ULL << 63)
#define SCR_NS (1U << 0)
@@ -2310,6 +2360,7 @@ static inline bool arm_is_el2_enabled(CPUARMState *env)
* Not included here is HCR_RW.
*/
uint64_t arm_hcr_el2_eff(CPUARMState *env);
+uint64_t arm_hcrx_el2_eff(CPUARMState *env);
/* Return true if the specified exception level is running in AArch64 state. */
static inline bool arm_el_is_aa64(CPUARMState *env, int el)
@@ -3931,6 +3982,11 @@ static inline bool isar_feature_aa64_ats1e1(const ARMISARegisters *id)
return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, PAN) >= 2;
}
+static inline bool isar_feature_aa64_hcx(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, HCX) != 0;
+}
+
static inline bool isar_feature_aa64_uao(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, UAO) != 0;
@@ -3941,6 +3997,16 @@ static inline bool isar_feature_aa64_st(const ARMISARegisters *id)
return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, ST) != 0;
}
+static inline bool isar_feature_aa64_fwb(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, FWB) != 0;
+}
+
+static inline bool isar_feature_aa64_ids(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, IDS) != 0;
+}
+
static inline bool isar_feature_aa64_bti(const ARMISARegisters *id)
{
return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, BT) != 0;
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index 04427e073f..3ff9219ca3 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -79,6 +79,7 @@ static void aarch64_a57_initfn(Object *obj)
cpu->isar.id_aa64isar0 = 0x00011120;
cpu->isar.id_aa64mmfr0 = 0x00001124;
cpu->isar.dbgdidr = 0x3516d000;
+ cpu->isar.reset_pmcr_el0 = 0x41013000;
cpu->clidr = 0x0a200023;
cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
@@ -87,6 +88,7 @@ static void aarch64_a57_initfn(Object *obj)
cpu->gic_num_lrs = 4;
cpu->gic_vpribits = 5;
cpu->gic_vprebits = 5;
+ cpu->gic_pribits = 5;
define_cortex_a72_a57_a53_cp_reginfo(cpu);
}
@@ -132,6 +134,7 @@ static void aarch64_a53_initfn(Object *obj)
cpu->isar.id_aa64isar0 = 0x00011120;
cpu->isar.id_aa64mmfr0 = 0x00001122; /* 40 bit physical addr */
cpu->isar.dbgdidr = 0x3516d000;
+ cpu->isar.reset_pmcr_el0 = 0x41033000;
cpu->clidr = 0x0a200023;
cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */
cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */
@@ -140,6 +143,7 @@ static void aarch64_a53_initfn(Object *obj)
cpu->gic_num_lrs = 4;
cpu->gic_vpribits = 5;
cpu->gic_vprebits = 5;
+ cpu->gic_pribits = 5;
define_cortex_a72_a57_a53_cp_reginfo(cpu);
}
@@ -183,6 +187,7 @@ static void aarch64_a72_initfn(Object *obj)
cpu->isar.id_aa64isar0 = 0x00011120;
cpu->isar.id_aa64mmfr0 = 0x00001124;
cpu->isar.dbgdidr = 0x3516d000;
+ cpu->isar.reset_pmcr_el0 = 0x41023000;
cpu->clidr = 0x0a200023;
cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
@@ -191,6 +196,7 @@ static void aarch64_a72_initfn(Object *obj)
cpu->gic_num_lrs = 4;
cpu->gic_vpribits = 5;
cpu->gic_vprebits = 5;
+ cpu->gic_pribits = 5;
define_cortex_a72_a57_a53_cp_reginfo(cpu);
}
@@ -252,11 +258,15 @@ static void aarch64_a76_initfn(Object *obj)
cpu->gic_num_lrs = 4;
cpu->gic_vpribits = 5;
cpu->gic_vprebits = 5;
+ cpu->gic_pribits = 5;
/* From B5.1 AdvSIMD AArch64 register summary */
cpu->isar.mvfr0 = 0x10110222;
cpu->isar.mvfr1 = 0x13211111;
cpu->isar.mvfr2 = 0x00000043;
+
+ /* From D5.1 AArch64 PMU register summary */
+ cpu->isar.reset_pmcr_el0 = 0x410b3000;
}
static void aarch64_neoverse_n1_initfn(Object *obj)
@@ -317,11 +327,15 @@ static void aarch64_neoverse_n1_initfn(Object *obj)
cpu->gic_num_lrs = 4;
cpu->gic_vpribits = 5;
cpu->gic_vprebits = 5;
+ cpu->gic_pribits = 5;
/* From B5.1 AdvSIMD AArch64 register summary */
cpu->isar.mvfr0 = 0x10110222;
cpu->isar.mvfr1 = 0x13211111;
cpu->isar.mvfr2 = 0x00000043;
+
+ /* From D5.1 AArch64 PMU register summary */
+ cpu->isar.reset_pmcr_el0 = 0x410c3000;
}
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
@@ -812,6 +826,7 @@ static void aarch64_max_initfn(Object *obj)
{
ARMCPU *cpu = ARM_CPU(obj);
uint64_t t;
+ uint32_t u;
if (kvm_enabled() || hvf_enabled()) {
/* With KVM or HVF, '-cpu max' is identical to '-cpu host' */
@@ -842,6 +857,15 @@ static void aarch64_max_initfn(Object *obj)
t = FIELD_DP64(t, MIDR_EL1, REVISION, 0);
cpu->midr = t;
+ /*
+ * We're going to set FEAT_S2FWB, which mandates that CLIDR_EL1.{LoUU,LoUIS}
+ * are zero.
+ */
+ u = cpu->clidr;
+ u = FIELD_DP32(u, CLIDR_EL1, LOUIS, 0);
+ u = FIELD_DP32(u, CLIDR_EL1, LOUU, 0);
+ cpu->clidr = u;
+
t = cpu->isar.id_aa64isar0;
t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2); /* FEAT_PMULL */
t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1); /* FEAT_SHA1 */
@@ -910,6 +934,7 @@ static void aarch64_max_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1); /* FEAT_LOR */
t = FIELD_DP64(t, ID_AA64MMFR1, PAN, 2); /* FEAT_PAN2 */
t = FIELD_DP64(t, ID_AA64MMFR1, XNX, 1); /* FEAT_XNX */
+ t = FIELD_DP64(t, ID_AA64MMFR1, HCX, 1); /* FEAT_HCX */
cpu->isar.id_aa64mmfr1 = t;
t = cpu->isar.id_aa64mmfr2;
@@ -918,6 +943,8 @@ static void aarch64_max_initfn(Object *obj)
t = FIELD_DP64(t, ID_AA64MMFR2, IESB, 1); /* FEAT_IESB */
t = FIELD_DP64(t, ID_AA64MMFR2, VARANGE, 1); /* FEAT_LVA */
t = FIELD_DP64(t, ID_AA64MMFR2, ST, 1); /* FEAT_TTST */
+ t = FIELD_DP64(t, ID_AA64MMFR2, IDS, 1); /* FEAT_IDST */
+ t = FIELD_DP64(t, ID_AA64MMFR2, FWB, 1); /* FEAT_S2FWB */
t = FIELD_DP64(t, ID_AA64MMFR2, TTL, 1); /* FEAT_TTL */
t = FIELD_DP64(t, ID_AA64MMFR2, BBM, 2); /* FEAT_BBM at level 2 */
cpu->isar.id_aa64mmfr2 = t;
@@ -996,6 +1023,7 @@ static void aarch64_a64fx_initfn(Object *obj)
cpu->gic_num_lrs = 4;
cpu->gic_vpribits = 5;
cpu->gic_vprebits = 5;
+ cpu->gic_pribits = 5;
/* Suppport of A64FX's vector length are 128,256 and 512bit only */
aarch64_add_sve_properties(obj);
@@ -1004,6 +1032,8 @@ static void aarch64_a64fx_initfn(Object *obj)
set_bit(1, cpu->sve_vq_supported); /* 256bit */
set_bit(3, cpu->sve_vq_supported); /* 512bit */
+ cpu->isar.reset_pmcr_el0 = 0x46014040;
+
/* TODO: Add A64FX specific HPC extension registers */
}
diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c
index ea4eccddc3..b751a19c8a 100644
--- a/target/arm/cpu_tcg.c
+++ b/target/arm/cpu_tcg.c
@@ -425,6 +425,7 @@ static void cortex_a8_initfn(Object *obj)
cpu->ccsidr[1] = 0x2007e01a; /* 16k L1 icache. */
cpu->ccsidr[2] = 0xf0000000; /* No L2 icache. */
cpu->reset_auxcr = 2;
+ cpu->isar.reset_pmcr_el0 = 0x41002000;
define_arm_cp_regs(cpu, cortexa8_cp_reginfo);
}
@@ -496,6 +497,7 @@ static void cortex_a9_initfn(Object *obj)
cpu->clidr = (1 << 27) | (1 << 24) | 3;
cpu->ccsidr[0] = 0xe00fe019; /* 16k L1 dcache. */
cpu->ccsidr[1] = 0x200fe019; /* 16k L1 icache. */
+ cpu->isar.reset_pmcr_el0 = 0x41093000;
define_arm_cp_regs(cpu, cortexa9_cp_reginfo);
}
@@ -565,6 +567,7 @@ static void cortex_a7_initfn(Object *obj)
cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */
cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */
+ cpu->isar.reset_pmcr_el0 = 0x41072000;
define_arm_cp_regs(cpu, cortexa15_cp_reginfo); /* Same as A15 */
}
@@ -607,6 +610,7 @@ static void cortex_a15_initfn(Object *obj)
cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */
cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */
+ cpu->isar.reset_pmcr_el0 = 0x410F3000;
define_arm_cp_regs(cpu, cortexa15_cp_reginfo);
}
@@ -835,6 +839,7 @@ static void cortex_r5_initfn(Object *obj)
cpu->isar.id_isar6 = 0x0;
cpu->mp_is_up = true;
cpu->pmsav7_dregion = 16;
+ cpu->isar.reset_pmcr_el0 = 0x41151800;
define_arm_cp_regs(cpu, cortexr5_cp_reginfo);
}
@@ -1093,6 +1098,7 @@ static void arm_max_initfn(Object *obj)
cpu->isar.id_isar5 = 0x00011121;
cpu->isar.id_isar6 = 0;
cpu->isar.dbgdidr = 0x3516d000;
+ cpu->isar.reset_pmcr_el0 = 0x41013000;
cpu->clidr = 0x0a200023;
cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 432bd81919..40da63913c 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -39,7 +39,6 @@
#include "cpregs.h"
#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
-#define PMCR_NUM_COUNTERS 4 /* QEMU IMPDEF choice */
#ifndef CONFIG_USER_ONLY
@@ -767,11 +766,14 @@ static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
*/
if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
/* VFP coprocessor: cp10 & cp11 [23:20] */
- mask |= (1 << 31) | (1 << 30) | (0xf << 20);
+ mask |= R_CPACR_ASEDIS_MASK |
+ R_CPACR_D32DIS_MASK |
+ R_CPACR_CP11_MASK |
+ R_CPACR_CP10_MASK;
if (!arm_feature(env, ARM_FEATURE_NEON)) {
/* ASEDIS [31] bit is RAO/WI */
- value |= (1 << 31);
+ value |= R_CPACR_ASEDIS_MASK;
}
/* VFPv3 and upwards with NEON implement 32 double precision
@@ -779,7 +781,7 @@ static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
*/
if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) {
/* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
- value |= (1 << 30);
+ value |= R_CPACR_D32DIS_MASK;
}
}
value &= mask;
@@ -791,8 +793,8 @@ static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
*/
if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
!arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
- value &= ~(0xf << 20);
- value |= env->cp15.cpacr_el1 & (0xf << 20);
+ mask = R_CPACR_CP11_MASK | R_CPACR_CP10_MASK;
+ value = (value & ~mask) | (env->cp15.cpacr_el1 & mask);
}
env->cp15.cpacr_el1 = value;
@@ -808,7 +810,7 @@ static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)
if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
!arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
- value &= ~(0xf << 20);
+ value &= ~(R_CPACR_CP11_MASK | R_CPACR_CP10_MASK);
}
return value;
}
@@ -828,11 +830,11 @@ static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
if (arm_feature(env, ARM_FEATURE_V8)) {
/* Check if CPACR accesses are to be trapped to EL2 */
if (arm_current_el(env) == 1 && arm_is_el2_enabled(env) &&
- (env->cp15.cptr_el[2] & CPTR_TCPAC)) {
+ FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TCPAC)) {
return CP_ACCESS_TRAP_EL2;
/* Check if CPACR accesses are to be trapped to EL3 */
} else if (arm_current_el(env) < 3 &&
- (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
+ FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) {
return CP_ACCESS_TRAP_EL3;
}
}
@@ -844,7 +846,8 @@ static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
/* Check if CPTR accesses are set to trap to EL3 */
- if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
+ if (arm_current_el(env) == 2 &&
+ FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) {
return CP_ACCESS_TRAP_EL3;
}
@@ -3187,6 +3190,12 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
&prot, &page_size, &fi, &cacheattrs);
+ /*
+ * ATS operations only do S1 or S1+S2 translations, so we never
+ * have to deal with the ARMCacheAttrs format for S2 only.
+ */
+ assert(!cacheattrs.is_s2_format);
+
if (ret) {
/*
* Some kinds of translation fault must cause exceptions rather
@@ -5155,6 +5164,9 @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
if (cpu_isar_feature(aa64_scxtnum, cpu)) {
valid_mask |= HCR_ENSCXT;
}
+ if (cpu_isar_feature(aa64_fwb, cpu)) {
+ valid_mask |= HCR_FWB;
+ }
}
/* Clear RES0 bits. */
@@ -5166,8 +5178,10 @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
* HCR_PTW forbids certain page-table setups
* HCR_DC disables stage1 and enables stage2 translation
* HCR_DCT enables tagging on (disabled) stage1 translation
+ * HCR_FWB changes the interpretation of stage2 descriptor bits
*/
- if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT)) {
+ if ((env->cp15.hcr_el2 ^ value) &
+ (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT | HCR_FWB)) {
tlb_flush(CPU(cpu));
}
env->cp15.hcr_el2 = value;
@@ -5278,6 +5292,52 @@ uint64_t arm_hcr_el2_eff(CPUARMState *env)
return ret;
}
+static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ uint64_t valid_mask = 0;
+
+ /* No features adding bits to HCRX are implemented. */
+
+ /* Clear RES0 bits. */
+ env->cp15.hcrx_el2 = value & valid_mask;
+}
+
+static CPAccessResult access_hxen(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_current_el(env) < 3
+ && arm_feature(env, ARM_FEATURE_EL3)
+ && !(env->cp15.scr_el3 & SCR_HXEN)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ return CP_ACCESS_OK;
+}
+
+static const ARMCPRegInfo hcrx_el2_reginfo = {
+ .name = "HCRX_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 2,
+ .access = PL2_RW, .writefn = hcrx_write, .accessfn = access_hxen,
+ .fieldoffset = offsetof(CPUARMState, cp15.hcrx_el2),
+};
+
+/* Return the effective value of HCRX_EL2. */
+uint64_t arm_hcrx_el2_eff(CPUARMState *env)
+{
+ /*
+ * The bits in this register behave as 0 for all purposes other than
+ * direct reads of the register if:
+ * - EL2 is not enabled in the current security state,
+ * - SCR_EL3.HXEn is 0.
+ */
+ if (!arm_is_el2_enabled(env)
+ || (arm_feature(env, ARM_FEATURE_EL3)
+ && !(env->cp15.scr_el3 & SCR_HXEN))) {
+ return 0;
+ }
+ return env->cp15.hcrx_el2;
+}
+
static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
@@ -5287,8 +5347,8 @@ static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
*/
if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
!arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
- value &= ~(0x3 << 10);
- value |= env->cp15.cptr_el[2] & (0x3 << 10);
+ uint64_t mask = R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK;
+ value = (value & ~mask) | (env->cp15.cptr_el[2] & mask);
}
env->cp15.cptr_el[2] = value;
}
@@ -5303,7 +5363,7 @@ static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri)
if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
!arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
- value |= 0x3 << 10;
+ value |= R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK;
}
return value;
}
@@ -5533,13 +5593,6 @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
.resetvalue = 0,
.writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
#endif
- /* The only field of MDCR_EL2 that has a defined architectural reset value
- * is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N.
- */
- { .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
- .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
- .access = PL2_RW, .resetvalue = PMCR_NUM_COUNTERS,
- .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
{ .name = "HPFAR", .state = ARM_CP_STATE_AA32,
.cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
.access = PL2_RW, .accessfn = access_el3_aa32ns,
@@ -6098,8 +6151,7 @@ int sve_exception_el(CPUARMState *env, int el)
uint64_t hcr_el2 = arm_hcr_el2_eff(env);
if (el <= 1 && (hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
- /* Check CPACR.ZEN. */
- switch (extract32(env->cp15.cpacr_el1, 16, 2)) {
+ switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, ZEN)) {
case 1:
if (el != 0) {
break;
@@ -6112,7 +6164,7 @@ int sve_exception_el(CPUARMState *env, int el)
}
/* Check CPACR.FPEN. */
- switch (extract32(env->cp15.cpacr_el1, 20, 2)) {
+ switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, FPEN)) {
case 1:
if (el != 0) {
break;
@@ -6129,8 +6181,7 @@ int sve_exception_el(CPUARMState *env, int el)
*/
if (el <= 2) {
if (hcr_el2 & HCR_E2H) {
- /* Check CPTR_EL2.ZEN. */
- switch (extract32(env->cp15.cptr_el[2], 16, 2)) {
+ switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, ZEN)) {
case 1:
if (el != 0 || !(hcr_el2 & HCR_TGE)) {
break;
@@ -6141,8 +6192,7 @@ int sve_exception_el(CPUARMState *env, int el)
return 2;
}
- /* Check CPTR_EL2.FPEN. */
- switch (extract32(env->cp15.cptr_el[2], 20, 2)) {
+ switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, FPEN)) {
case 1:
if (el == 2 || !(hcr_el2 & HCR_TGE)) {
break;
@@ -6153,10 +6203,10 @@ int sve_exception_el(CPUARMState *env, int el)
return 0;
}
} else if (arm_is_el2_enabled(env)) {
- if (env->cp15.cptr_el[2] & CPTR_TZ) {
+ if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TZ)) {
return 2;
}
- if (env->cp15.cptr_el[2] & CPTR_TFP) {
+ if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TFP)) {
return 0;
}
}
@@ -6164,7 +6214,7 @@ int sve_exception_el(CPUARMState *env, int el)
/* CPTR_EL3. Since EZ is negative we must check for EL3. */
if (arm_feature(env, ARM_FEATURE_EL3)
- && !(env->cp15.cptr_el[3] & CPTR_EZ)) {
+ && !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, EZ)) {
return 3;
}
#endif
@@ -6529,7 +6579,6 @@ static void define_debug_regs(ARMCPU *cpu)
define_one_arm_cp_reg(cpu, &dbgdidr);
}
- /* Note that all these register fields hold "number of Xs minus 1". */
brps = arm_num_brps(cpu);
wrps = arm_num_wrps(cpu);
ctx_cmps = arm_num_ctx_cmps(cpu);
@@ -6543,14 +6592,16 @@ static void define_debug_regs(ARMCPU *cpu)
}
for (i = 0; i < brps; i++) {
+ char *dbgbvr_el1_name = g_strdup_printf("DBGBVR%d_EL1", i);
+ char *dbgbcr_el1_name = g_strdup_printf("DBGBCR%d_EL1", i);
ARMCPRegInfo dbgregs[] = {
- { .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
+ { .name = dbgbvr_el1_name, .state = ARM_CP_STATE_BOTH,
.cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
.access = PL1_RW, .accessfn = access_tda,
.fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
.writefn = dbgbvr_write, .raw_writefn = raw_write
},
- { .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
+ { .name = dbgbcr_el1_name, .state = ARM_CP_STATE_BOTH,
.cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
.access = PL1_RW, .accessfn = access_tda,
.fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
@@ -6558,17 +6609,21 @@ static void define_debug_regs(ARMCPU *cpu)
},
};
define_arm_cp_regs(cpu, dbgregs);
+ g_free(dbgbvr_el1_name);
+ g_free(dbgbcr_el1_name);
}
for (i = 0; i < wrps; i++) {
+ char *dbgwvr_el1_name = g_strdup_printf("DBGWVR%d_EL1", i);
+ char *dbgwcr_el1_name = g_strdup_printf("DBGWCR%d_EL1", i);
ARMCPRegInfo dbgregs[] = {
- { .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
+ { .name = dbgwvr_el1_name, .state = ARM_CP_STATE_BOTH,
.cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
.access = PL1_RW, .accessfn = access_tda,
.fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
.writefn = dbgwvr_write, .raw_writefn = raw_write
},
- { .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
+ { .name = dbgwcr_el1_name, .state = ARM_CP_STATE_BOTH,
.cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
.access = PL1_RW, .accessfn = access_tda,
.fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
@@ -6576,6 +6631,8 @@ static void define_debug_regs(ARMCPU *cpu)
},
};
define_arm_cp_regs(cpu, dbgregs);
+ g_free(dbgwvr_el1_name);
+ g_free(dbgwcr_el1_name);
}
}
@@ -6586,7 +6643,7 @@ static void define_pmu_regs(ARMCPU *cpu)
* field as main ID register, and we implement four counters in
* addition to the cycle count register.
*/
- unsigned int i, pmcrn = PMCR_NUM_COUNTERS;
+ unsigned int i, pmcrn = pmu_num_counters(&cpu->env);
ARMCPRegInfo pmcr = {
.name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
.access = PL0_RW,
@@ -6601,10 +6658,10 @@ static void define_pmu_regs(ARMCPU *cpu)
.access = PL0_RW, .accessfn = pmreg_access,
.type = ARM_CP_IO,
.fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
- .resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT) |
- PMCRLC,
+ .resetvalue = cpu->isar.reset_pmcr_el0,
.writefn = pmcr_write, .raw_writefn = raw_write,
};
+
define_one_arm_cp_reg(cpu, &pmcr);
define_one_arm_cp_reg(cpu, &pmcr64);
for (i = 0; i < pmcrn; i++) {
@@ -6758,7 +6815,7 @@ static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
int el = arm_current_el(env);
if (el < 2 &&
- arm_feature(env, ARM_FEATURE_EL2) &&
+ arm_is_el2_enabled(env) &&
!(arm_hcr_el2_eff(env) & HCR_APK)) {
return CP_ACCESS_TRAP_EL2;
}
@@ -7961,6 +8018,17 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.type = ARM_CP_EL3_NO_EL2_C_NZ,
.fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
};
+ /*
+ * The only field of MDCR_EL2 that has a defined architectural reset
+ * value is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N.
+ */
+ ARMCPRegInfo mdcr_el2 = {
+ .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
+ .access = PL2_RW, .resetvalue = pmu_num_counters(env),
+ .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2),
+ };
+ define_one_arm_cp_reg(cpu, &mdcr_el2);
define_arm_cp_regs(cpu, vpidr_regs);
define_arm_cp_regs(cpu, el2_cp_reginfo);
if (arm_feature(env, ARM_FEATURE_V8)) {
@@ -8384,6 +8452,10 @@ void register_cp_regs_for_features(ARMCPU *cpu)
define_arm_cp_regs(cpu, zcr_reginfo);
}
+ if (cpu_isar_feature(aa64_hcx, cpu)) {
+ define_one_arm_cp_reg(cpu, &hcrx_el2_reginfo);
+ }
+
#ifdef TARGET_AARCH64
if (cpu_isar_feature(aa64_pauth, cpu)) {
define_arm_cp_regs(cpu, pauth_reginfo);
@@ -10717,6 +10789,25 @@ static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
return true;
}
+static bool ptw_attrs_are_device(CPUARMState *env, ARMCacheAttrs cacheattrs)
+{
+ /*
+ * For an S1 page table walk, the stage 1 attributes are always
+ * some form of "this is Normal memory". The combined S1+S2
+ * attributes are therefore only Device if stage 2 specifies Device.
+ * With HCR_EL2.FWB == 0 this is when descriptor bits [5:4] are 0b00,
+ * ie when cacheattrs.attrs bits [3:2] are 0b00.
+ * With HCR_EL2.FWB == 1 this is when descriptor bit [4] is 0, ie
+ * when cacheattrs.attrs bit [2] is 0.
+ */
+ assert(cacheattrs.is_s2_format);
+ if (arm_hcr_el2_eff(env) & HCR_FWB) {
+ return (cacheattrs.attrs & 0x4) == 0;
+ } else {
+ return (cacheattrs.attrs & 0xc) == 0;
+ }
+}
+
/* Translate a S1 pagetable walk through S2 if needed. */
static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
hwaddr addr, bool *is_secure,
@@ -10745,7 +10836,7 @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
return ~0;
}
if ((arm_hcr_el2_eff(env) & HCR_PTW) &&
- (cacheattrs.attrs & 0xf0) == 0) {
+ ptw_attrs_are_device(env, cacheattrs)) {
/*
* PTW set and S1 walk touched S2 Device memory:
* generate Permission fault.
@@ -11817,12 +11908,14 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
}
if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
- cacheattrs->attrs = convert_stage2_attrs(env, extract32(attrs, 0, 4));
+ cacheattrs->is_s2_format = true;
+ cacheattrs->attrs = extract32(attrs, 0, 4);
} else {
/* Index into MAIR registers for cache attributes */
uint8_t attrindx = extract32(attrs, 0, 3);
uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
assert(attrindx <= 7);
+ cacheattrs->is_s2_format = false;
cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
}
@@ -12557,28 +12650,130 @@ static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
}
}
+/*
+ * Combine the memory type and cacheability attributes of
+ * s1 and s2 for the HCR_EL2.FWB == 0 case, returning the
+ * combined attributes in MAIR_EL1 format.
+ */
+static uint8_t combined_attrs_nofwb(CPUARMState *env,
+ ARMCacheAttrs s1, ARMCacheAttrs s2)
+{
+ uint8_t s1lo, s2lo, s1hi, s2hi, s2_mair_attrs, ret_attrs;
+
+ s2_mair_attrs = convert_stage2_attrs(env, s2.attrs);
+
+ s1lo = extract32(s1.attrs, 0, 4);
+ s2lo = extract32(s2_mair_attrs, 0, 4);
+ s1hi = extract32(s1.attrs, 4, 4);
+ s2hi = extract32(s2_mair_attrs, 4, 4);
+
+ /* Combine memory type and cacheability attributes */
+ if (s1hi == 0 || s2hi == 0) {
+ /* Device has precedence over normal */
+ if (s1lo == 0 || s2lo == 0) {
+ /* nGnRnE has precedence over anything */
+ ret_attrs = 0;
+ } else if (s1lo == 4 || s2lo == 4) {
+ /* non-Reordering has precedence over Reordering */
+ ret_attrs = 4; /* nGnRE */
+ } else if (s1lo == 8 || s2lo == 8) {
+ /* non-Gathering has precedence over Gathering */
+ ret_attrs = 8; /* nGRE */
+ } else {
+ ret_attrs = 0xc; /* GRE */
+ }
+ } else { /* Normal memory */
+ /* Outer/inner cacheability combine independently */
+ ret_attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
+ | combine_cacheattr_nibble(s1lo, s2lo);
+ }
+ return ret_attrs;
+}
+
+static uint8_t force_cacheattr_nibble_wb(uint8_t attr)
+{
+ /*
+ * Given the 4 bits specifying the outer or inner cacheability
+ * in MAIR format, return a value specifying Normal Write-Back,
+ * with the allocation and transient hints taken from the input
+ * if the input specified some kind of cacheable attribute.
+ */
+ if (attr == 0 || attr == 4) {
+ /*
+ * 0 == an UNPREDICTABLE encoding
+ * 4 == Non-cacheable
+ * Either way, force Write-Back RW allocate non-transient
+ */
+ return 0xf;
+ }
+ /* Change WriteThrough to WriteBack, keep allocation and transient hints */
+ return attr | 4;
+}
+
+/*
+ * Combine the memory type and cacheability attributes of
+ * s1 and s2 for the HCR_EL2.FWB == 1 case, returning the
+ * combined attributes in MAIR_EL1 format.
+ */
+static uint8_t combined_attrs_fwb(CPUARMState *env,
+ ARMCacheAttrs s1, ARMCacheAttrs s2)
+{
+ switch (s2.attrs) {
+ case 7:
+ /* Use stage 1 attributes */
+ return s1.attrs;
+ case 6:
+ /*
+ * Force Normal Write-Back. Note that if S1 is Normal cacheable
+ * then we take the allocation hints from it; otherwise it is
+ * RW allocate, non-transient.
+ */
+ if ((s1.attrs & 0xf0) == 0) {
+ /* S1 is Device */
+ return 0xff;
+ }
+ /* Need to check the Inner and Outer nibbles separately */
+ return force_cacheattr_nibble_wb(s1.attrs & 0xf) |
+ force_cacheattr_nibble_wb(s1.attrs >> 4) << 4;
+ case 5:
+ /* If S1 attrs are Device, use them; otherwise Normal Non-cacheable */
+ if ((s1.attrs & 0xf0) == 0) {
+ return s1.attrs;
+ }
+ return 0x44;
+ case 0 ... 3:
+ /* Force Device, of subtype specified by S2 */
+ return s2.attrs << 2;
+ default:
+ /*
+ * RESERVED values (including RES0 descriptor bit [5] being nonzero);
+ * arbitrarily force Device.
+ */
+ return 0;
+ }
+}
+
/* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
* and CombineS1S2Desc()
*
+ * @env: CPUARMState
* @s1: Attributes from stage 1 walk
* @s2: Attributes from stage 2 walk
*/
-static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
+static ARMCacheAttrs combine_cacheattrs(CPUARMState *env,
+ ARMCacheAttrs s1, ARMCacheAttrs s2)
{
- uint8_t s1lo, s2lo, s1hi, s2hi;
ARMCacheAttrs ret;
bool tagged = false;
+ assert(s2.is_s2_format && !s1.is_s2_format);
+ ret.is_s2_format = false;
+
if (s1.attrs == 0xf0) {
tagged = true;
s1.attrs = 0xff;
}
- s1lo = extract32(s1.attrs, 0, 4);
- s2lo = extract32(s2.attrs, 0, 4);
- s1hi = extract32(s1.attrs, 4, 4);
- s2hi = extract32(s2.attrs, 4, 4);
-
/* Combine shareability attributes (table D4-43) */
if (s1.shareability == 2 || s2.shareability == 2) {
/* if either are outer-shareable, the result is outer-shareable */
@@ -12592,37 +12787,22 @@ static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
}
/* Combine memory type and cacheability attributes */
- if (s1hi == 0 || s2hi == 0) {
- /* Device has precedence over normal */
- if (s1lo == 0 || s2lo == 0) {
- /* nGnRnE has precedence over anything */
- ret.attrs = 0;
- } else if (s1lo == 4 || s2lo == 4) {
- /* non-Reordering has precedence over Reordering */
- ret.attrs = 4; /* nGnRE */
- } else if (s1lo == 8 || s2lo == 8) {
- /* non-Gathering has precedence over Gathering */
- ret.attrs = 8; /* nGRE */
- } else {
- ret.attrs = 0xc; /* GRE */
- }
+ if (arm_hcr_el2_eff(env) & HCR_FWB) {
+ ret.attrs = combined_attrs_fwb(env, s1, s2);
+ } else {
+ ret.attrs = combined_attrs_nofwb(env, s1, s2);
+ }
- /* Any location for which the resultant memory type is any
- * type of Device memory is always treated as Outer Shareable.
- */
+ /*
+ * Any location for which the resultant memory type is any
+ * type of Device memory is always treated as Outer Shareable.
+ * Any location for which the resultant memory type is Normal
+ * Inner Non-cacheable, Outer Non-cacheable is always treated
+ * as Outer Shareable.
+ * TODO: FEAT_XS adds another value (0x40) also meaning iNCoNC
+ */
+ if ((ret.attrs & 0xf0) == 0 || ret.attrs == 0x44) {
ret.shareability = 2;
- } else { /* Normal memory */
- /* Outer/inner cacheability combine independently */
- ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
- | combine_cacheattr_nibble(s1lo, s2lo);
-
- if (ret.attrs == 0x44) {
- /* Any location for which the resultant memory type is Normal
- * Inner Non-cacheable, Outer Non-cacheable is always treated
- * as Outer Shareable.
- */
- ret.shareability = 2;
- }
}
/* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */
@@ -12731,7 +12911,7 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
}
cacheattrs->shareability = 0;
}
- *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
+ *cacheattrs = combine_cacheattrs(env, *cacheattrs, cacheattrs2);
/* Check if IPA translates to secure or non-secure PA space. */
if (arm_is_secure_below_el3(env)) {
@@ -12849,6 +13029,7 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
/* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
hcr = arm_hcr_el2_eff(env);
cacheattrs->shareability = 0;
+ cacheattrs->is_s2_format = false;
if (hcr & HCR_DC) {
if (hcr & HCR_DCT) {
memattr = 0xf0; /* Tagged, Normal, WB, RWA */
@@ -13216,7 +13397,7 @@ int fp_exception_el(CPUARMState *env, int cur_el)
* This register is ignored if E2H+TGE are both set.
*/
if ((hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
- int fpen = extract32(env->cp15.cpacr_el1, 20, 2);
+ int fpen = FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, FPEN);
switch (fpen) {
case 0:
@@ -13262,8 +13443,7 @@ int fp_exception_el(CPUARMState *env, int cur_el)
*/
if (cur_el <= 2) {
if (hcr_el2 & HCR_E2H) {
- /* Check CPTR_EL2.FPEN. */
- switch (extract32(env->cp15.cptr_el[2], 20, 2)) {
+ switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, FPEN)) {
case 1:
if (cur_el != 0 || !(hcr_el2 & HCR_TGE)) {
break;
@@ -13274,14 +13454,14 @@ int fp_exception_el(CPUARMState *env, int cur_el)
return 2;
}
} else if (arm_is_el2_enabled(env)) {
- if (env->cp15.cptr_el[2] & CPTR_TFP) {
+ if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TFP)) {
return 2;
}
}
}
/* CPTR_EL3 : present in v8 */
- if (env->cp15.cptr_el[3] & CPTR_TFP) {
+ if (FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TFP)) {
/* Trap all FP ops to EL3 */
return 3;
}
diff --git a/target/arm/internals.h b/target/arm/internals.h
index 6ca0e95746..b654bee468 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -1149,8 +1149,13 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
- unsigned int attrs:8; /* as in the MAIR register encoding */
+ /*
+ * If is_s2_format is true, attrs is the S2 descriptor bits [5:2]
+ * Otherwise, attrs is the same as the MAIR_EL1 8-bit format
+ */
+ unsigned int attrs:8;
unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
+ bool is_s2_format:1;
} ARMCacheAttrs;
bool get_phys_addr(CPUARMState *env, target_ulong address,
@@ -1299,7 +1304,9 @@ enum MVEECIState {
static inline uint32_t pmu_num_counters(CPUARMState *env)
{
- return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
+ ARMCPU *cpu = env_archcpu(env);
+
+ return (cpu->isar.reset_pmcr_el0 & PMCRN_MASK) >> PMCRN_SHIFT;
}
/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index b8cfaf5782..363032da90 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -505,6 +505,7 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
*/
int fdarray[3];
bool sve_supported;
+ bool pmu_supported = false;
uint64_t features = 0;
uint64_t t;
int err;
@@ -537,6 +538,11 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
1 << KVM_ARM_VCPU_PTRAUTH_GENERIC);
}
+ if (kvm_arm_pmu_supported()) {
+ init.features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
+ pmu_supported = true;
+ }
+
if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
return false;
}
@@ -659,6 +665,12 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
dbgdidr |= (1 << 15); /* RES1 bit */
ahcf->isar.dbgdidr = dbgdidr;
}
+
+ if (pmu_supported) {
+ /* PMCR_EL0 is only accessible if the vCPU has feature PMU_V3 */
+ err |= read_sys_reg64(fdarray[2], &ahcf->isar.reset_pmcr_el0,
+ ARM64_SYS_REG(3, 3, 9, 12, 0));
+ }
}
sve_supported = ioctl(fdarray[0], KVM_CHECK_EXTENSION, KVM_CAP_ARM_SVE) > 0;
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
index 390b6578a8..c4bd668870 100644
--- a/target/arm/op_helper.c
+++ b/target/arm/op_helper.c
@@ -631,6 +631,7 @@ uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
uint32_t isread)
{
+ ARMCPU *cpu = env_archcpu(env);
const ARMCPRegInfo *ri = rip;
CPAccessResult res = CP_ACCESS_OK;
int target_el;
@@ -674,6 +675,14 @@ void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
case CP_ACCESS_TRAP:
break;
case CP_ACCESS_TRAP_UNCATEGORIZED:
+ if (cpu_isar_feature(aa64_ids, cpu) && isread &&
+ arm_cpreg_in_idspace(ri)) {
+ /*
+ * FEAT_IDST says this should be reported as EC_SYSTEMREGISTERTRAP,
+ * not EC_UNCATEGORIZED
+ */
+ break;
+ }
syndrome = syn_uncategorized();
break;
default:
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 6a27234a5c..f502545307 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -1795,6 +1795,30 @@ static void gen_set_nzcv(TCGv_i64 tcg_rt)
tcg_temp_free_i32(nzcv);
}
+static void gen_sysreg_undef(DisasContext *s, bool isread,
+ uint8_t op0, uint8_t op1, uint8_t op2,
+ uint8_t crn, uint8_t crm, uint8_t rt)
+{
+ /*
+ * Generate code to emit an UNDEF with correct syndrome
+ * information for a failed system register access.
+ * This is EC_UNCATEGORIZED (ie a standard UNDEF) in most cases,
+ * but if FEAT_IDST is implemented then read accesses to registers
+ * in the feature ID space are reported with the EC_SYSTEMREGISTERTRAP
+ * syndrome.
+ */
+ uint32_t syndrome;
+
+ if (isread && dc_isar_feature(aa64_ids, s) &&
+ arm_cpreg_encoding_in_idspace(op0, op1, op2, crn, crm)) {
+ syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
+ } else {
+ syndrome = syn_uncategorized();
+ }
+ gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syndrome,
+ default_exception_el(s));
+}
+
/* MRS - move from system register
* MSR (register) - move to system register
* SYS
@@ -1820,13 +1844,13 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 "
"system register op0:%d op1:%d crn:%d crm:%d op2:%d\n",
isread ? "read" : "write", op0, op1, crn, crm, op2);
- unallocated_encoding(s);
+ gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt);
return;
}
/* Check access permissions */
if (!cp_access_ok(s->current_el, ri, isread)) {
- unallocated_encoding(s);
+ gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt);
return;
}
@@ -2103,13 +2127,13 @@ static void disas_exc(DisasContext *s, uint32_t insn)
* with our 32-bit semihosting).
*/
if (s->current_el == 0) {
- unsupported_encoding(s, insn);
+ unallocated_encoding(s);
break;
}
#endif
gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
} else {
- unsupported_encoding(s, insn);
+ unallocated_encoding(s);
}
break;
case 5:
@@ -2118,7 +2142,7 @@ static void disas_exc(DisasContext *s, uint32_t insn)
break;
}
/* DCPS1, DCPS2, DCPS3 */
- unsupported_encoding(s, insn);
+ unallocated_encoding(s);
break;
default:
unallocated_encoding(s);
@@ -2283,7 +2307,7 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
if (op3 != 0 || op4 != 0 || rn != 0x1f) {
goto do_unallocated;
} else {
- unsupported_encoding(s, insn);
+ unallocated_encoding(s);
}
return;
diff --git a/target/arm/translate-a64.h b/target/arm/translate-a64.h
index 38884158aa..f2e8ee0ee1 100644
--- a/target/arm/translate-a64.h
+++ b/target/arm/translate-a64.h
@@ -18,15 +18,6 @@
#ifndef TARGET_ARM_TRANSLATE_A64_H
#define TARGET_ARM_TRANSLATE_A64_H
-#define unsupported_encoding(s, insn) \
- do { \
- qemu_log_mask(LOG_UNIMP, \
- "%s:%d: unsupported instruction encoding 0x%08x " \
- "at pc=%016" PRIx64 "\n", \
- __FILE__, __LINE__, insn, s->pc_curr); \
- unallocated_encoding(s); \
- } while (0)
-
TCGv_i64 new_tmp_a64(DisasContext *s);
TCGv_i64 new_tmp_a64_local(DisasContext *s);
TCGv_i64 new_tmp_a64_zero(DisasContext *s);
diff --git a/tests/unit/ptimer-test.c b/tests/unit/ptimer-test.c
index 9176b96c1c..a80ef5aff4 100644
--- a/tests/unit/ptimer-test.c
+++ b/tests/unit/ptimer-test.c
@@ -768,8 +768,8 @@ static void add_ptimer_tests(uint8_t policy)
char policy_name[256] = "";
char *tmp;
- if (policy == PTIMER_POLICY_DEFAULT) {
- g_sprintf(policy_name, "default");
+ if (policy == PTIMER_POLICY_LEGACY) {
+ g_sprintf(policy_name, "legacy");
}
if (policy & PTIMER_POLICY_WRAP_AFTER_ONE_PERIOD) {
@@ -862,7 +862,7 @@ static void add_ptimer_tests(uint8_t policy)
static void add_all_ptimer_policies_comb_tests(void)
{
int last_policy = PTIMER_POLICY_TRIGGER_ONLY_ON_DECREMENT;
- int policy = PTIMER_POLICY_DEFAULT;
+ int policy = PTIMER_POLICY_LEGACY;
for (; policy < (last_policy << 1); policy++) {
if ((policy & PTIMER_POLICY_TRIGGER_ONLY_ON_DECREMENT) &&