Update the definitions in dm_op.h from the Xen public header.
Signed-off-by: Viresh Kumar <viresh.kumar(a)linaro.org>
---
V1->V2:
- New commit.
include/xen/interface/hvm/dm_op.h | 445 ++++++++++++++++++++++++++++++
1 file changed, 445 insertions(+)
diff --git a/include/xen/interface/hvm/dm_op.h b/include/xen/interface/hvm/dm_op.h
index 08d972f87c7b..bc6948fd1815 100644
--- a/include/xen/interface/hvm/dm_op.h
+++ b/include/xen/interface/hvm/dm_op.h
@@ -6,6 +6,451 @@
#ifndef __XEN_PUBLIC_HVM_DM_OP_H__
#define __XEN_PUBLIC_HVM_DM_OP_H__
+#include <xen/interface/xen.h>
+#include <xen/interface/event_channel.h>
+
+#ifndef uint64_aligned_t
+#define uint64_aligned_t uint64_t
+#endif
+
+/*
+ * IOREQ Servers
+ *
+ * The interface between an I/O emulator and Xen is called an IOREQ Server.
+ * A domain supports a single 'legacy' IOREQ Server which is instantiated if
+ * parameter...
+ *
+ * HVM_PARAM_IOREQ_PFN is read (to get the gfn containing the synchronous
+ * ioreq structures), or...
+ * HVM_PARAM_BUFIOREQ_PFN is read (to get the gfn containing the buffered
+ * ioreq ring), or...
+ * HVM_PARAM_BUFIOREQ_EVTCHN is read (to get the event channel that Xen uses
+ * to request buffered I/O emulation).
+ *
+ * The following hypercalls facilitate the creation of IOREQ Servers for
+ * 'secondary' emulators which are invoked to implement port I/O, memory, or
+ * PCI config space ranges which they explicitly register.
+ */
+
+typedef uint16_t ioservid_t;
+
+/*
+ * XEN_DMOP_create_ioreq_server: Instantiate a new IOREQ Server for a
+ * secondary emulator.
+ *
+ * The <id> handed back is unique for the target domain. The value of
+ * <handle_bufioreq> should be one of HVM_IOREQSRV_BUFIOREQ_* defined in
+ * hvm_op.h. If the value is HVM_IOREQSRV_BUFIOREQ_OFF then the buffered
+ * ioreq ring will not be allocated and hence all emulation requests to
+ * this server will be synchronous.
+ */
+#define XEN_DMOP_create_ioreq_server 1
+
+struct xen_dm_op_create_ioreq_server {
+ /* IN - should server handle buffered ioreqs */
+ uint8_t handle_bufioreq;
+ uint8_t pad[3];
+ /* OUT - server id */
+ ioservid_t id;
+};
+
+/*
+ * XEN_DMOP_get_ioreq_server_info: Get all the information necessary to
+ * access IOREQ Server <id>.
+ *
+ * If the IOREQ Server is handling buffered emulation requests, the
+ * emulator needs to bind to event channel <bufioreq_port> to listen for
+ * them. (The event channels used for synchronous emulation requests are
+ * specified in the per-CPU ioreq structures).
+ * In addition, if the XENMEM_acquire_resource memory op cannot be used,
+ * the emulator will need to map the synchronous ioreq structures and
+ * buffered ioreq ring (if it exists) from guest memory. If <flags> does
+ * not contain XEN_DMOP_no_gfns then these pages will be made available and
+ * the frame numbers passed back in gfns <ioreq_gfn> and <bufioreq_gfn>
+ * respectively. (If the IOREQ Server is not handling buffered emulation,
+ * only <ioreq_gfn> will be valid).
+ *
+ * NOTE: To access the synchronous ioreq structures and buffered ioreq
+ * ring, it is preferable to use the XENMEM_acquire_resource memory
+ * op specifying resource type XENMEM_resource_ioreq_server.
+ */
+#define XEN_DMOP_get_ioreq_server_info 2
+
+struct xen_dm_op_get_ioreq_server_info {
+ /* IN - server id */
+ ioservid_t id;
+ /* IN - flags */
+ uint16_t flags;
+
+#define _XEN_DMOP_no_gfns 0
+#define XEN_DMOP_no_gfns (1u << _XEN_DMOP_no_gfns)
+
+ /* OUT - buffered ioreq port */
+ evtchn_port_t bufioreq_port;
+ /* OUT - sync ioreq gfn (see block comment above) */
+ uint64_aligned_t ioreq_gfn;
+ /* OUT - buffered ioreq gfn (see block comment above) */
+ uint64_aligned_t bufioreq_gfn;
+};
+
+/*
+ * XEN_DMOP_map_io_range_to_ioreq_server: Register an I/O range for
+ * emulation by the client of
+ * IOREQ Server <id>.
+ * XEN_DMOP_unmap_io_range_from_ioreq_server: Deregister an I/O range
+ * previously registered for
+ * emulation by the client of
+ * IOREQ Server <id>.
+ *
+ * There are three types of I/O that can be emulated: port I/O, memory
+ * accesses and PCI config space accesses. The <type> field denotes which
+ * type of range the <start> and <end> (inclusive) fields are specifying.
+ * PCI config space ranges are specified by segment/bus/device/function
+ * values which should be encoded using the DMOP_PCI_SBDF helper macro
+ * below.
+ *
+ * NOTE: unless an emulation request falls entirely within a range mapped
+ * by a secondary emulator, it will not be passed to that emulator.
+ */
+#define XEN_DMOP_map_io_range_to_ioreq_server 3
+#define XEN_DMOP_unmap_io_range_from_ioreq_server 4
+
+struct xen_dm_op_ioreq_server_range {
+ /* IN - server id */
+ ioservid_t id;
+ uint16_t pad;
+ /* IN - type of range */
+ uint32_t type;
+# define XEN_DMOP_IO_RANGE_PORT 0 /* I/O port range */
+# define XEN_DMOP_IO_RANGE_MEMORY 1 /* MMIO range */
+# define XEN_DMOP_IO_RANGE_PCI 2 /* PCI segment/bus/dev/func range */
+ /* IN - inclusive start and end of range */
+ uint64_aligned_t start, end;
+};
+
+#define XEN_DMOP_PCI_SBDF(s,b,d,f) \
+ ((((s) & 0xffff) << 16) | \
+ (((b) & 0xff) << 8) | \
+ (((d) & 0x1f) << 3) | \
+ ((f) & 0x07))
+
+/*
+ * XEN_DMOP_set_ioreq_server_state: Enable or disable the IOREQ Server <id>
+ *
+ * The IOREQ Server will not be passed any emulation requests until it is
+ * in the enabled state.
+ * Note that the contents of the ioreq_gfn and bufioreq_gfn (see
+ * XEN_DMOP_get_ioreq_server_info) are not meaningful until the IOREQ Server
+ * is in the enabled state.
+ */
+#define XEN_DMOP_set_ioreq_server_state 5
+
+struct xen_dm_op_set_ioreq_server_state {
+ /* IN - server id */
+ ioservid_t id;
+ /* IN - enabled? */
+ uint8_t enabled;
+ uint8_t pad;
+};
+
+/*
+ * XEN_DMOP_destroy_ioreq_server: Destroy the IOREQ Server <id>.
+ *
+ * Any registered I/O ranges will be automatically deregistered.
+ */
+#define XEN_DMOP_destroy_ioreq_server 6
+
+struct xen_dm_op_destroy_ioreq_server {
+ /* IN - server id */
+ ioservid_t id;
+ uint16_t pad;
+};
+
+/*
+ * XEN_DMOP_track_dirty_vram: Track modifications to the specified pfn
+ * range.
+ *
+ * NOTE: The bitmap passed back to the caller is passed in a
+ * secondary buffer.
+ */
+#define XEN_DMOP_track_dirty_vram 7
+
+struct xen_dm_op_track_dirty_vram {
+ /* IN - number of pages to be tracked */
+ uint32_t nr;
+ uint32_t pad;
+ /* IN - first pfn to track */
+ uint64_aligned_t first_pfn;
+};
+
+/*
+ * XEN_DMOP_set_pci_intx_level: Set the logical level of one of a domain's
+ * PCI INTx pins.
+ */
+#define XEN_DMOP_set_pci_intx_level 8
+
+struct xen_dm_op_set_pci_intx_level {
+ /* IN - PCI INTx identification (domain:bus:device:intx) */
+ uint16_t domain;
+ uint8_t bus, device, intx;
+ /* IN - Level: 0 -> deasserted, 1 -> asserted */
+ uint8_t level;
+};
+
+/*
+ * XEN_DMOP_set_isa_irq_level: Set the logical level of one of a domain's
+ * ISA IRQ lines.
+ */
+#define XEN_DMOP_set_isa_irq_level 9
+
+struct xen_dm_op_set_isa_irq_level {
+ /* IN - ISA IRQ (0-15) */
+ uint8_t isa_irq;
+ /* IN - Level: 0 -> deasserted, 1 -> asserted */
+ uint8_t level;
+};
+
+/*
+ * XEN_DMOP_set_pci_link_route: Map a PCI INTx line to an IRQ line.
+ */
+#define XEN_DMOP_set_pci_link_route 10
+
+struct xen_dm_op_set_pci_link_route {
+ /* PCI INTx line (0-3) */
+ uint8_t link;
+ /* ISA IRQ (1-15) or 0 -> disable link */
+ uint8_t isa_irq;
+};
+
+/*
+ * XEN_DMOP_modified_memory: Notify that a set of pages were modified by
+ * an emulator.
+ *
+ * DMOP buf 1 contains an array of xen_dm_op_modified_memory_extent with
+ * @nr_extents entries.
+ *
+ * On error, @nr_extents will contain the index+1 of the extent that
+ * had the error. It is not defined if or which pages may have been
+ * marked as dirty, in this event.
+ */
+#define XEN_DMOP_modified_memory 11
+
+struct xen_dm_op_modified_memory {
+ /*
+ * IN - Number of extents to be processed
+ * OUT - returns n+1 for the failing extent
+ */
+ uint32_t nr_extents;
+ /* IN/OUT - Must be set to 0 */
+ uint32_t opaque;
+};
+
+struct xen_dm_op_modified_memory_extent {
+ /* IN - number of contiguous pages modified */
+ uint32_t nr;
+ uint32_t pad;
+ /* IN - first pfn modified */
+ uint64_aligned_t first_pfn;
+};
+
+/*
+ * XEN_DMOP_set_mem_type: Notify that a region of memory is to be treated
+ * in a specific way. (See definition of
+ * hvmmem_type_t).
+ *
+ * NOTE: In the event of a continuation (return code -ERESTART), the
+ * @first_pfn is set to the value of the pfn of the remaining
+ * region and @nr reduced to the size of the remaining region.
+ */
+#define XEN_DMOP_set_mem_type 12
+
+struct xen_dm_op_set_mem_type {
+ /* IN - number of contiguous pages */
+ uint32_t nr;
+ /* IN - new hvmmem_type_t of region */
+ uint16_t mem_type;
+ uint16_t pad;
+ /* IN - first pfn in region */
+ uint64_aligned_t first_pfn;
+};
+
+/*
+ * XEN_DMOP_inject_event: Inject an event into a VCPU, which will
+ * get taken up when it is next scheduled.
+ *
+ * Note that the caller should know enough of the state of the CPU before
+ * injecting, to know what the effect of injecting the event will be.
+ */
+#define XEN_DMOP_inject_event 13
+
+struct xen_dm_op_inject_event {
+ /* IN - index of vCPU */
+ uint32_t vcpuid;
+ /* IN - interrupt vector */
+ uint8_t vector;
+ /* IN - event type (DMOP_EVENT_* ) */
+ uint8_t type;
+/* NB. This enumeration precisely matches hvm.h:X86_EVENTTYPE_* */
+# define XEN_DMOP_EVENT_ext_int 0 /* external interrupt */
+# define XEN_DMOP_EVENT_nmi 2 /* nmi */
+# define XEN_DMOP_EVENT_hw_exc 3 /* hardware exception */
+# define XEN_DMOP_EVENT_sw_int 4 /* software interrupt (CD nn) */
+# define XEN_DMOP_EVENT_pri_sw_exc 5 /* ICEBP (F1) */
+# define XEN_DMOP_EVENT_sw_exc 6 /* INT3 (CC), INTO (CE) */
+ /* IN - instruction length */
+ uint8_t insn_len;
+ uint8_t pad0;
+ /* IN - error code (or ~0 to skip) */
+ uint32_t error_code;
+ uint32_t pad1;
+ /* IN - type-specific extra data (%cr2 for #PF, pending_dbg for #DB) */
+ uint64_aligned_t cr2;
+};
+
+/*
+ * XEN_DMOP_inject_msi: Inject an MSI for an emulated device.
+ */
+#define XEN_DMOP_inject_msi 14
+
+struct xen_dm_op_inject_msi {
+ /* IN - MSI data (lower 32 bits) */
+ uint32_t data;
+ uint32_t pad;
+ /* IN - MSI address (0xfeexxxxx) */
+ uint64_aligned_t addr;
+};
+
+/*
+ * XEN_DMOP_map_mem_type_to_ioreq_server : map or unmap the IOREQ Server <id>
+ * to specific memory type <type>
+ * for specific accesses <flags>
+ *
+ * For now, flags only accept the value of XEN_DMOP_IOREQ_MEM_ACCESS_WRITE,
+ * which means only write operations are to be forwarded to an ioreq server.
+ * Support for the emulation of read operations can be added when an ioreq
+ * server has such a requirement in the future.
+ */
+#define XEN_DMOP_map_mem_type_to_ioreq_server 15
+
+struct xen_dm_op_map_mem_type_to_ioreq_server {
+ ioservid_t id; /* IN - ioreq server id */
+ uint16_t type; /* IN - memory type */
+ uint32_t flags; /* IN - types of accesses to be forwarded to the
+ ioreq server. flags with 0 means to unmap the
+ ioreq server */
+
+#define XEN_DMOP_IOREQ_MEM_ACCESS_READ (1u << 0)
+#define XEN_DMOP_IOREQ_MEM_ACCESS_WRITE (1u << 1)
+
+ uint64_t opaque; /* IN/OUT - only used for hypercall continuation,
+ has to be set to zero by the caller */
+};
+
+/*
+ * XEN_DMOP_remote_shutdown : Declare a shutdown for another domain
+ * Identical to SCHEDOP_remote_shutdown
+ */
+#define XEN_DMOP_remote_shutdown 16
+
+struct xen_dm_op_remote_shutdown {
+ uint32_t reason; /* SHUTDOWN_* => enum sched_shutdown_reason */
+ /* (Other reason values are not blocked) */
+};
+
+/*
+ * XEN_DMOP_relocate_memory : Relocate GFNs for the specified guest.
+ * Identical to XENMEM_add_to_physmap with
+ * space == XENMAPSPACE_gmfn_range.
+ */
+#define XEN_DMOP_relocate_memory 17
+
+struct xen_dm_op_relocate_memory {
+ /* All fields are IN/OUT, with their OUT state undefined. */
+ /* Number of GFNs to process. */
+ uint32_t size;
+ uint32_t pad;
+ /* Starting GFN to relocate. */
+ uint64_aligned_t src_gfn;
+ /* Starting GFN where GFNs should be relocated. */
+ uint64_aligned_t dst_gfn;
+};
+
+/*
+ * XEN_DMOP_pin_memory_cacheattr : Pin caching type of RAM space.
+ * Identical to XEN_DOMCTL_pin_mem_cacheattr.
+ */
+#define XEN_DMOP_pin_memory_cacheattr 18
+
+struct xen_dm_op_pin_memory_cacheattr {
+ uint64_aligned_t start; /* Start gfn. */
+ uint64_aligned_t end; /* End gfn. */
+/* Caching types: these happen to be the same as x86 MTRR/PAT type codes. */
+#define XEN_DMOP_MEM_CACHEATTR_UC 0
+#define XEN_DMOP_MEM_CACHEATTR_WC 1
+#define XEN_DMOP_MEM_CACHEATTR_WT 4
+#define XEN_DMOP_MEM_CACHEATTR_WP 5
+#define XEN_DMOP_MEM_CACHEATTR_WB 6
+#define XEN_DMOP_MEM_CACHEATTR_UCM 7
+#define XEN_DMOP_DELETE_MEM_CACHEATTR (~(uint32_t)0)
+ uint32_t type; /* XEN_DMOP_MEM_CACHEATTR_* */
+ uint32_t pad;
+};
+
+/*
+ * XEN_DMOP_set_irq_level: Set the logical level of one of a domain's
+ * IRQ lines (currently Arm only).
+ * Only SPIs are supported.
+ */
+#define XEN_DMOP_set_irq_level 19
+
+struct xen_dm_op_set_irq_level {
+ uint32_t irq;
+ /* IN - Level: 0 -> deasserted, 1 -> asserted */
+ uint8_t level;
+ uint8_t pad[3];
+};
+
+/*
+ * XEN_DMOP_nr_vcpus: Query the number of vCPUs a domain has.
+ *
+ * This is the number of vcpu objects allocated in Xen for the domain, and is
+ * fixed from creation time. This bound is applicable to e.g. the vcpuid
+ * parameter of XEN_DMOP_inject_event, or number of struct ioreq objects
+ * mapped via XENMEM_acquire_resource.
+ */
+#define XEN_DMOP_nr_vcpus 20
+
+struct xen_dm_op_nr_vcpus {
+ uint32_t vcpus; /* OUT */
+};
+
+struct xen_dm_op {
+ uint32_t op;
+ uint32_t pad;
+ union {
+ struct xen_dm_op_create_ioreq_server create_ioreq_server;
+ struct xen_dm_op_get_ioreq_server_info get_ioreq_server_info;
+ struct xen_dm_op_ioreq_server_range map_io_range_to_ioreq_server;
+ struct xen_dm_op_ioreq_server_range unmap_io_range_from_ioreq_server;
+ struct xen_dm_op_set_ioreq_server_state set_ioreq_server_state;
+ struct xen_dm_op_destroy_ioreq_server destroy_ioreq_server;
+ struct xen_dm_op_track_dirty_vram track_dirty_vram;
+ struct xen_dm_op_set_pci_intx_level set_pci_intx_level;
+ struct xen_dm_op_set_isa_irq_level set_isa_irq_level;
+ struct xen_dm_op_set_irq_level set_irq_level;
+ struct xen_dm_op_set_pci_link_route set_pci_link_route;
+ struct xen_dm_op_modified_memory modified_memory;
+ struct xen_dm_op_set_mem_type set_mem_type;
+ struct xen_dm_op_inject_event inject_event;
+ struct xen_dm_op_inject_msi inject_msi;
+ struct xen_dm_op_map_mem_type_to_ioreq_server map_mem_type_to_ioreq_server;
+ struct xen_dm_op_remote_shutdown remote_shutdown;
+ struct xen_dm_op_relocate_memory relocate_memory;
+ struct xen_dm_op_pin_memory_cacheattr pin_memory_cacheattr;
+ struct xen_dm_op_nr_vcpus nr_vcpus;
+ } u;
+};
+
struct xen_dm_op_buf {
GUEST_HANDLE(void) h;
xen_ulong_t size;
--
2.31.1.272.g89b43f80a514
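To see how these pieces fit together, here is a rough sketch (not part of the
patch) of creating and enabling an IOREQ server with the structures above. It
assumes the per-arch HYPERVISOR_dm_op() wrapper and the set_xen_guest_handle()
helper are available to the caller; in practice a userspace device model would
normally reach the hypercall through privcmd rather than calling it directly,
but the marshalling into xen_dm_op / xen_dm_op_buf is the same.

static int example_create_ioreq_server(domid_t domid, ioservid_t *id)
{
	struct xen_dm_op op = {
		.op = XEN_DMOP_create_ioreq_server,
		/* 0 == HVM_IOREQSRV_BUFIOREQ_OFF: no buffered ioreq ring. */
		.u.create_ioreq_server.handle_bufioreq = 0,
	};
	struct xen_dm_op_buf buf = { .size = sizeof(op) };
	int rc;

	set_xen_guest_handle(buf.h, &op);

	/* A single buffer; ops that take extents pass them in extra buffers. */
	rc = HYPERVISOR_dm_op(domid, 1, &buf);
	if (rc)
		return rc;

	*id = op.u.create_ioreq_server.id;
	return 0;
}

static int example_enable_ioreq_server(domid_t domid, ioservid_t id)
{
	struct xen_dm_op op = {
		.op = XEN_DMOP_set_ioreq_server_state,
		.u.set_ioreq_server_state = { .id = id, .enabled = 1 },
	};
	struct xen_dm_op_buf buf = { .size = sizeof(op) };

	set_xen_guest_handle(buf.h, &op);
	return HYPERVISOR_dm_op(domid, 1, &buf);
}

A real device model would then map the ioreq pages (preferably via
XENMEM_acquire_resource, per the note in the header) and register its I/O
ranges with XEN_DMOP_map_io_range_to_ioreq_server before serving requests.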
On 13-07-23, 14:40, Oleksandr Tyshchenko wrote:
> Viresh, great work!
Thanks Oleksandr.
> Do you perhaps have corresponding users-space (virtio backend) example
> adopted for that feature (I would like to take a look at it if possible)?
This is taken care of by the xen-vhost-frontend Rust crate in our case
(which was initially designed based on virtio-disk but has deviated a
lot from it now). Here is the commit of interest. The backends remain
unmodified though.
https://github.com/vireshk/xen-vhost-frontend/commit/d79c419f14c1f54240b314…
And I have updated the commit with the CONFIG_ARM64 thingy.
--
viresh
Hi,
This patchset intends to make grant mapping usage configurable for virtio
devices. Currently, grant mappings are force-enabled for backends running in
non-Dom0 domains. This patchset adds a new `grant_usage` parameter for virtio
devices, which can be used to enable or disable grant mappings irrespective of
the backend domain, while still preserving the default behavior in the absence
of the parameter.
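For illustration only (the exact key name and accepted values are those defined
by this series), a domain config could then select it per device along the
lines of virtio = [ "type=virtio,device22, transport=mmio, grant_usage=1" ],
with grant_usage taking the usual libxl_defbool style values and simply being
omitted to keep the default behavior.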
V2->V3:
- Patch 2/3 is new and fixes ordering issues with default values.
- Reuse `libxl_defbool` instead of defining a new type; it can take the values
0 and 1.
- Improved commit logs and comments.
V1->V2:
- Instead of just 0 or 1, the argument can take multiple values now and control
the functionality in a better way.
- Update .gen.go files as well.
- Don't add nodes under frontend path.
Viresh Kumar (3):
libxl: virtio: Remove unused frontend nodes
libxl: Call libxl__virtio_devtype.set_default() early enough
libxl: arm: Add grant_usage parameter for virtio devices
docs/man/xl.cfg.5.pod.in | 8 +++++++
tools/golang/xenlight/helpers.gen.go | 6 +++++
tools/golang/xenlight/types.gen.go | 1 +
tools/libs/light/libxl_arm.c | 22 +++++++++++--------
tools/libs/light/libxl_create.c | 11 +++++++++-
tools/libs/light/libxl_types.idl | 1 +
tools/libs/light/libxl_virtio.c | 33 ++++++++++++++++++++++------
tools/xl/xl_parse.c | 2 ++
8 files changed, 67 insertions(+), 17 deletions(-)
--
2.31.1.272.g89b43f80a514
The libxl_virtio file works only for PV devices, and the nodes under the
frontend path are not used by anyone. Remove them.
While at it, also add a comment to the file describing what devices this
file is used for.
Signed-off-by: Viresh Kumar <viresh.kumar(a)linaro.org>
---
V2: New patch.
tools/libs/light/libxl_virtio.c | 8 +++-----
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/tools/libs/light/libxl_virtio.c b/tools/libs/light/libxl_virtio.c
index faada49e184e..eadcb7124c3f 100644
--- a/tools/libs/light/libxl_virtio.c
+++ b/tools/libs/light/libxl_virtio.c
@@ -10,6 +10,9 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
+ *
+ * This is used for PV (paravirtualized) devices only and the frontend isn't
+ * aware of the xenstore.
*/
#include "libxl_internal.h"
@@ -49,11 +52,6 @@ static int libxl__set_xenstore_virtio(libxl__gc *gc, uint32_t domid,
flexarray_append_pair(back, "type", GCSPRINTF("%s", virtio->type));
flexarray_append_pair(back, "transport", GCSPRINTF("%s", transport));
- flexarray_append_pair(front, "irq", GCSPRINTF("%u", virtio->irq));
- flexarray_append_pair(front, "base", GCSPRINTF("%#"PRIx64, virtio->base));
- flexarray_append_pair(front, "type", GCSPRINTF("%s", virtio->type));
- flexarray_append_pair(front, "transport", GCSPRINTF("%s", transport));
-
return 0;
}
--
2.31.1.272.g89b43f80a514
For generic virtio devices, where we don't need to add compatible or
other special DT properties, the type field is set to "virtio,device".
But this misses the case where the user also encodes a valid virtio
device id in the type, like "virtio,device1a" for a file system device.
The complete list of virtio device ids is mentioned here:
https://docs.oasis-open.org/virtio/virtio/v1.2/cs01/virtio-v1.2-cs01.html#x…
Update the documentation to cover that case as well.
Fixes: dd54ea500be8 ("docs: add documentation for generic virtio devices")
Signed-off-by: Viresh Kumar <viresh.kumar(a)linaro.org>
Reviewed-by: Oleksandr Tyshchenko <oleksandr_tyshchenko(a)epam.com>
---
V2->V3:
- Updated commit log and clarified / fixed doc.
- Tag from Oleksandr.
docs/man/xl.cfg.5.pod.in | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/docs/man/xl.cfg.5.pod.in b/docs/man/xl.cfg.5.pod.in
index 10f37990be57..24ac92718288 100644
--- a/docs/man/xl.cfg.5.pod.in
+++ b/docs/man/xl.cfg.5.pod.in
@@ -1608,8 +1608,11 @@ example, "type=virtio,device22" for the I2C device, whose device-tree binding is
L<https://www.kernel.org/doc/Documentation/devicetree/bindings/i2c/i2c-virtio…>
-For generic virtio devices, where we don't need to set special or compatible
-properties in the Device Tree, the type field must be set to "virtio,device".
+For other generic virtio devices, where we don't need to set special or
+compatible properties in the Device Tree, the type field must be set to
+"virtio,device" or "virtio,device<N>", where "N" is the virtio device id in
+hexadecimal format, without the "0x" prefix and all in lower case, like
+"virtio,device1a" for the file system device.
=item B<transport=STRING>
--
2.31.1.272.g89b43f80a514
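For illustration, a guest config entry using the generic form documented above
might then look like virtio = [ "type=virtio,device1a, transport=mmio" ];
other keys such as the backend domain may be needed depending on the setup.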
Since the driver doesn't support interrupts, we must return early when
index is set to VIRTIO_CONFIG_IRQ_IDX.
Fixes: 544f0278afca ("virtio: introduce macro VIRTIO_CONFIG_IRQ_IDX")
Signed-off-by: Viresh Kumar <viresh.kumar(a)linaro.org>
---
V2: Simplify and fix comments.
hw/virtio/vhost-user-i2c.c | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
diff --git a/hw/virtio/vhost-user-i2c.c b/hw/virtio/vhost-user-i2c.c
index 60eaf0d95be0..4eef3f063376 100644
--- a/hw/virtio/vhost-user-i2c.c
+++ b/hw/virtio/vhost-user-i2c.c
@@ -128,6 +128,14 @@ static void vu_i2c_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
{
VHostUserI2C *i2c = VHOST_USER_I2C(vdev);
+ /*
+ * We don't support interrupts, return early if index is set to
+ * VIRTIO_CONFIG_IRQ_IDX.
+ */
+ if (idx == VIRTIO_CONFIG_IRQ_IDX) {
+ return;
+ }
+
vhost_virtqueue_mask(&i2c->vhost_dev, vdev, idx, mask);
}
@@ -135,6 +143,14 @@ static bool vu_i2c_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
VHostUserI2C *i2c = VHOST_USER_I2C(vdev);
+ /*
+ * We don't support interrupts, return early if index is set to
+ * VIRTIO_CONFIG_IRQ_IDX.
+ */
+ if (idx == VIRTIO_CONFIG_IRQ_IDX) {
+ return false;
+ }
+
return vhost_virtqueue_pending(&i2c->vhost_dev, idx);
}
--
2.31.1.272.g89b43f80a514