Diffstat (limited to '0004-virt-vbox-Add-support-for-the-new-VBG_IOCTL_ACQUIRE_.patch')
-rw-r--r-- | 0004-virt-vbox-Add-support-for-the-new-VBG_IOCTL_ACQUIRE_.patch | 737 |
1 file changed, 737 insertions, 0 deletions
diff --git a/0004-virt-vbox-Add-support-for-the-new-VBG_IOCTL_ACQUIRE_.patch b/0004-virt-vbox-Add-support-for-the-new-VBG_IOCTL_ACQUIRE_.patch new file mode 100644 index 0000000..76c9fad --- /dev/null +++ b/0004-virt-vbox-Add-support-for-the-new-VBG_IOCTL_ACQUIRE_.patch @@ -0,0 +1,737 @@ +From 0fe1b29d2826794bfd880944496b433c80da0173 Mon Sep 17 00:00:00 2001 +From: "Jan Alexander Steffens (heftig)" <heftig@archlinux.org> +Date: Wed, 15 Jul 2020 22:42:07 +0200 +Subject: virt: vbox: Add support for the new + VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES ioctl + +https://lore.kernel.org/patchwork/cover/1270301/ + +Needed for https://bugs.archlinux.org/task/67253 + +Squashed commit of the following: + +commit 0d7bec556896815f4920a5ed87339732c697175c +Author: Hans de Goede <hdegoede@redhat.com> +Date: Thu Jul 9 14:08:58 2020 +0200 + + virt: vbox: Fix some comments which talk about the "session spinlock" + + The session lock is a mutex, not a spinlock, fix the comments to match. + + Acked-by: Arnd Bergmann <arnd@arndb.de> + Signed-off-by: Hans de Goede <hdegoede@redhat.com> + +commit fed91350029e0c3bf626bd55a148e766587ec439 +Author: Hans de Goede <hdegoede@redhat.com> +Date: Thu Jul 9 14:08:57 2020 +0200 + + virt: vbox: Log unknown ioctl requests as error + + Every now and then upstream adds new ioctls without notifying us, + log unknown ioctl requests as an error to catch these. + + Acked-by: Arnd Bergmann <arnd@arndb.de> + Signed-off-by: Hans de Goede <hdegoede@redhat.com> + +commit 245d06f4c1b8f84cde07d14c09296a4fe90a26f0 +Author: Hans de Goede <hdegoede@redhat.com> +Date: Thu Jul 9 14:08:56 2020 +0200 + + virt: vbox: Add a few new vmmdev request types to the userspace whitelist + + Upstream VirtualBox has defined and is using a few new request types for + vmmdev requests passed through /dev/vboxguest to the hypervisor. + + Add the defines for these to vbox_vmmdev_types.h and add add them to the + whitelists of vmmdev requests which userspace is allowed to make. + + BugLink: https://bugzilla.redhat.com/show_bug.cgi?id=1789545 + Acked-by: Arnd Bergmann <arnd@arndb.de> + Signed-off-by: Hans de Goede <hdegoede@redhat.com> + +commit 2f24f5446915322bbbb1ccf4ee2297512feab942 +Author: Hans de Goede <hdegoede@redhat.com> +Date: Thu Jul 9 14:08:55 2020 +0200 + + virt: vbox: Add support for the new VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES ioctl + + Add support for the new VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES ioctl, this + is necessary for automatic resizing of the guest resolution to match the + VM-window size to work with the new VMSVGA virtual GPU which is now the + new default in VirtualBox. + + BugLink: https://bugzilla.redhat.com/show_bug.cgi?id=1789545 + Acked-by: Arnd Bergmann <arnd@arndb.de> + Signed-off-by: Hans de Goede <hdegoede@redhat.com> + +commit 2fc9822453e8dd215765cd2b28e5e6e26338829e +Author: Hans de Goede <hdegoede@redhat.com> +Date: Thu Jul 9 14:08:54 2020 +0200 + + virt: vbox: Add vbg_set_host_capabilities() helper function + + Add vbg_set_host_capabilities() helper function, this is a preparation + patch for adding support for the VBGL_IOCTL_GUEST_CAPS_ACQUIRE ioctl. 
+ + Acked-by: Arnd Bergmann <arnd@arndb.de> + Signed-off-by: Hans de Goede <hdegoede@redhat.com> + +commit 1594daf929c8691849c00e49e4a1ed977048addc +Author: Hans de Goede <hdegoede@redhat.com> +Date: Thu Jul 9 14:08:53 2020 +0200 + + virt: vbox: Rename guest_caps struct members to set_guest_caps + + Rename guest_caps[_tracker] struct members to set_guest_caps[_tracker] + this is a preparation patch for adding support for the + VBGL_IOCTL_GUEST_CAPS_ACQUIRE ioctl. + + Acked-by: Arnd Bergmann <arnd@arndb.de> + Signed-off-by: Hans de Goede <hdegoede@redhat.com> + +commit d7f3b7cec66d171cf5194ad37f647f4562da6ccc +Author: Hans de Goede <hdegoede@redhat.com> +Date: Thu Jul 9 14:08:52 2020 +0200 + + virt: vbox: Fix guest capabilities mask check + + Check the passed in capabilities against VMMDEV_GUEST_CAPABILITIES_MASK + instead of against VMMDEV_EVENT_VALID_EVENT_MASK. + This tightens the allowed mask from 0x7ff to 0x7. + + Fixes: 0ba002bc4393 ("virt: Add vboxguest driver for Virtual Box Guest integration") + Cc: stable@vger.kernel.org + Acked-by: Arnd Bergmann <arnd@arndb.de> + Signed-off-by: Hans de Goede <hdegoede@redhat.com> + +commit f6694fa90e2183874def2d022d097795e2269a15 +Author: Hans de Goede <hdegoede@redhat.com> +Date: Thu Jul 9 14:08:51 2020 +0200 + + virt: vbox: Fix VBGL_IOCTL_VMMDEV_REQUEST_BIG and _LOG req numbers to match upstream + + Until this commit the mainline kernel version (this version) of the + vboxguest module contained a bug where it defined + VBGL_IOCTL_VMMDEV_REQUEST_BIG and VBGL_IOCTL_LOG using + _IOC(_IOC_READ | _IOC_WRITE, 'V', ...) instead of + _IO(V, ...) as the out of tree VirtualBox upstream version does. + + Since the VirtualBox userspace bits are always built against VirtualBox + upstream's headers, this means that so far the mainline kernel version + of the vboxguest module has been failing these 2 ioctls with -ENOTTY. + I guess that VBGL_IOCTL_VMMDEV_REQUEST_BIG is never used causing us to + not hit that one and sofar the vboxguest driver has failed to actually + log any log messages passed it through VBGL_IOCTL_LOG. + + This commit changes the VBGL_IOCTL_VMMDEV_REQUEST_BIG and VBGL_IOCTL_LOG + defines to match the out of tree VirtualBox upstream vboxguest version, + while keeping compatibility with the old wrong request defines so as + to not break the kernel ABI in case someone has been using the old + request defines. + + Fixes: f6ddd094f579 ("virt: Add vboxguest driver for Virtual Box Guest integration UAPI") + Cc: stable@vger.kernel.org + Acked-by: Arnd Bergmann <arnd@arndb.de> + Reviewed-by: Arnd Bergmann <arnd@arndb.de> + Signed-off-by: Hans de Goede <hdegoede@redhat.com> +--- + drivers/virt/vboxguest/vboxguest_core.c | 272 +++++++++++++++++++++++++------ + drivers/virt/vboxguest/vboxguest_core.h | 38 ++++- + drivers/virt/vboxguest/vboxguest_linux.c | 3 +- + drivers/virt/vboxguest/vboxguest_utils.c | 1 + + drivers/virt/vboxguest/vmmdev.h | 2 + + include/linux/vbox_utils.h | 1 + + include/uapi/linux/vbox_vmmdev_types.h | 3 + + include/uapi/linux/vboxguest.h | 28 +++- + 8 files changed, 294 insertions(+), 54 deletions(-) + +diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c +index b690a8a4bf9e..0b43efddea22 100644 +--- a/drivers/virt/vboxguest/vboxguest_core.c ++++ b/drivers/virt/vboxguest/vboxguest_core.c +@@ -559,7 +559,7 @@ static int vbg_reset_host_event_filter(struct vbg_dev *gdev, + * Changes the event filter mask for the given session. 
+ * + * This is called in response to VBG_IOCTL_CHANGE_FILTER_MASK as well as to +- * do session cleanup. Takes the session spinlock. ++ * do session cleanup. Takes the session mutex. + * + * Return: 0 or negative errno value. + * @gdev: The Guest extension device. +@@ -662,7 +662,156 @@ static int vbg_reset_host_capabilities(struct vbg_dev *gdev) + } + + /** +- * Sets the guest capabilities for a session. Takes the session spinlock. ++ * Set guest capabilities on the host. ++ * Must be called with gdev->session_mutex hold. ++ * Return: 0 or negative errno value. ++ * @gdev: The Guest extension device. ++ * @session: The session. ++ * @session_termination: Set if we're called by the session cleanup code. ++ */ ++static int vbg_set_host_capabilities(struct vbg_dev *gdev, ++ struct vbg_session *session, ++ bool session_termination) ++{ ++ struct vmmdev_mask *req; ++ u32 caps; ++ int rc; ++ ++ WARN_ON(!mutex_is_locked(&gdev->session_mutex)); ++ ++ caps = gdev->acquired_guest_caps | gdev->set_guest_caps_tracker.mask; ++ ++ if (gdev->guest_caps_host == caps) ++ return 0; ++ ++ /* On termination the requestor is the kernel, as we're cleaning up. */ ++ req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES, ++ session_termination ? VBG_KERNEL_REQUEST : ++ session->requestor); ++ if (!req) { ++ gdev->guest_caps_host = U32_MAX; ++ return -ENOMEM; ++ } ++ ++ req->or_mask = caps; ++ req->not_mask = ~caps; ++ rc = vbg_req_perform(gdev, req); ++ vbg_req_free(req, sizeof(*req)); ++ ++ gdev->guest_caps_host = (rc >= 0) ? caps : U32_MAX; ++ ++ return vbg_status_code_to_errno(rc); ++} ++ ++/** ++ * Acquire (get exclusive access) guest capabilities for a session. ++ * Takes the session mutex. ++ * Return: 0 or negative errno value. ++ * @gdev: The Guest extension device. ++ * @session: The session. ++ * @flags: Flags (VBGL_IOC_AGC_FLAGS_XXX). ++ * @or_mask: The capabilities to add. ++ * @not_mask: The capabilities to remove. ++ * @session_termination: Set if we're called by the session cleanup code. ++ * This tweaks the error handling so we perform ++ * proper session cleanup even if the host ++ * misbehaves. ++ */ ++static int vbg_acquire_session_capabilities(struct vbg_dev *gdev, ++ struct vbg_session *session, ++ u32 or_mask, u32 not_mask, ++ u32 flags, bool session_termination) ++{ ++ unsigned long irqflags; ++ bool wakeup = false; ++ int ret = 0; ++ ++ mutex_lock(&gdev->session_mutex); ++ ++ if (gdev->set_guest_caps_tracker.mask & or_mask) { ++ vbg_err("%s error: cannot acquire caps which are currently set\n", ++ __func__); ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ /* ++ * Mark any caps in the or_mask as now being in acquire-mode. Note ++ * once caps are in acquire_mode they always stay in this mode. ++ * This impacts event handling, so we take the event-lock. ++ */ ++ spin_lock_irqsave(&gdev->event_spinlock, irqflags); ++ gdev->acquire_mode_guest_caps |= or_mask; ++ spin_unlock_irqrestore(&gdev->event_spinlock, irqflags); ++ ++ /* If we only have to switch the caps to acquire mode, we're done. 
*/ ++ if (flags & VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE) ++ goto out; ++ ++ not_mask &= ~or_mask; /* or_mask takes priority over not_mask */ ++ not_mask &= session->acquired_guest_caps; ++ or_mask &= ~session->acquired_guest_caps; ++ ++ if (or_mask == 0 && not_mask == 0) ++ goto out; ++ ++ if (gdev->acquired_guest_caps & or_mask) { ++ ret = -EBUSY; ++ goto out; ++ } ++ ++ gdev->acquired_guest_caps |= or_mask; ++ gdev->acquired_guest_caps &= ~not_mask; ++ /* session->acquired_guest_caps impacts event handling, take the lock */ ++ spin_lock_irqsave(&gdev->event_spinlock, irqflags); ++ session->acquired_guest_caps |= or_mask; ++ session->acquired_guest_caps &= ~not_mask; ++ spin_unlock_irqrestore(&gdev->event_spinlock, irqflags); ++ ++ ret = vbg_set_host_capabilities(gdev, session, session_termination); ++ /* Roll back on failure, unless it's session termination time. */ ++ if (ret < 0 && !session_termination) { ++ gdev->acquired_guest_caps &= ~or_mask; ++ gdev->acquired_guest_caps |= not_mask; ++ spin_lock_irqsave(&gdev->event_spinlock, irqflags); ++ session->acquired_guest_caps &= ~or_mask; ++ session->acquired_guest_caps |= not_mask; ++ spin_unlock_irqrestore(&gdev->event_spinlock, irqflags); ++ } ++ ++ /* ++ * If we added a capability, check if that means some other thread in ++ * our session should be unblocked because there are events pending ++ * (the result of vbg_get_allowed_event_mask_for_session() may change). ++ * ++ * HACK ALERT! When the seamless support capability is added we generate ++ * a seamless change event so that the ring-3 client can sync with ++ * the seamless state. ++ */ ++ if (ret == 0 && or_mask != 0) { ++ spin_lock_irqsave(&gdev->event_spinlock, irqflags); ++ ++ if (or_mask & VMMDEV_GUEST_SUPPORTS_SEAMLESS) ++ gdev->pending_events |= ++ VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST; ++ ++ if (gdev->pending_events) ++ wakeup = true; ++ ++ spin_unlock_irqrestore(&gdev->event_spinlock, irqflags); ++ ++ if (wakeup) ++ wake_up(&gdev->event_wq); ++ } ++ ++out: ++ mutex_unlock(&gdev->session_mutex); ++ ++ return ret; ++} ++ ++/** ++ * Sets the guest capabilities for a session. Takes the session mutex. + * Return: 0 or negative errno value. + * @gdev: The Guest extension device. + * @session: The session. +@@ -678,62 +827,40 @@ static int vbg_set_session_capabilities(struct vbg_dev *gdev, + u32 or_mask, u32 not_mask, + bool session_termination) + { +- struct vmmdev_mask *req; + u32 changed, previous; +- int rc, ret = 0; +- +- /* +- * Allocate a request buffer before taking the spinlock, when +- * the session is being terminated the requestor is the kernel, +- * as we're cleaning up. +- */ +- req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES, +- session_termination ? VBG_KERNEL_REQUEST : +- session->requestor); +- if (!req) { +- if (!session_termination) +- return -ENOMEM; +- /* Ignore allocation failure, we must do session cleanup. */ +- } ++ int ret = 0; + + mutex_lock(&gdev->session_mutex); + ++ if (gdev->acquire_mode_guest_caps & or_mask) { ++ vbg_err("%s error: cannot set caps which are in acquire_mode\n", ++ __func__); ++ ret = -EBUSY; ++ goto out; ++ } ++ + /* Apply the changes to the session mask. */ +- previous = session->guest_caps; +- session->guest_caps |= or_mask; +- session->guest_caps &= ~not_mask; ++ previous = session->set_guest_caps; ++ session->set_guest_caps |= or_mask; ++ session->set_guest_caps &= ~not_mask; + + /* If anything actually changed, update the global usage counters. 
*/ +- changed = previous ^ session->guest_caps; ++ changed = previous ^ session->set_guest_caps; + if (!changed) + goto out; + +- vbg_track_bit_usage(&gdev->guest_caps_tracker, changed, previous); +- or_mask = gdev->guest_caps_tracker.mask; +- +- if (gdev->guest_caps_host == or_mask || !req) +- goto out; ++ vbg_track_bit_usage(&gdev->set_guest_caps_tracker, changed, previous); + +- gdev->guest_caps_host = or_mask; +- req->or_mask = or_mask; +- req->not_mask = ~or_mask; +- rc = vbg_req_perform(gdev, req); +- if (rc < 0) { +- ret = vbg_status_code_to_errno(rc); +- +- /* Failed, roll back (unless it's session termination time). */ +- gdev->guest_caps_host = U32_MAX; +- if (session_termination) +- goto out; +- +- vbg_track_bit_usage(&gdev->guest_caps_tracker, changed, +- session->guest_caps); +- session->guest_caps = previous; ++ ret = vbg_set_host_capabilities(gdev, session, session_termination); ++ /* Roll back on failure, unless it's session termination time. */ ++ if (ret < 0 && !session_termination) { ++ vbg_track_bit_usage(&gdev->set_guest_caps_tracker, changed, ++ session->set_guest_caps); ++ session->set_guest_caps = previous; + } + + out: + mutex_unlock(&gdev->session_mutex); +- vbg_req_free(req, sizeof(*req)); + + return ret; + } +@@ -949,6 +1076,7 @@ void vbg_core_close_session(struct vbg_session *session) + struct vbg_dev *gdev = session->gdev; + int i, rc; + ++ vbg_acquire_session_capabilities(gdev, session, 0, U32_MAX, 0, true); + vbg_set_session_capabilities(gdev, session, 0, U32_MAX, true); + vbg_set_session_event_filter(gdev, session, 0, U32_MAX, true); + +@@ -1006,6 +1134,25 @@ static int vbg_ioctl_driver_version_info( + return 0; + } + ++/* Must be called with the event_lock held */ ++static u32 vbg_get_allowed_event_mask_for_session(struct vbg_dev *gdev, ++ struct vbg_session *session) ++{ ++ u32 acquire_mode_caps = gdev->acquire_mode_guest_caps; ++ u32 session_acquired_caps = session->acquired_guest_caps; ++ u32 allowed_events = VMMDEV_EVENT_VALID_EVENT_MASK; ++ ++ if ((acquire_mode_caps & VMMDEV_GUEST_SUPPORTS_GRAPHICS) && ++ !(session_acquired_caps & VMMDEV_GUEST_SUPPORTS_GRAPHICS)) ++ allowed_events &= ~VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST; ++ ++ if ((acquire_mode_caps & VMMDEV_GUEST_SUPPORTS_SEAMLESS) && ++ !(session_acquired_caps & VMMDEV_GUEST_SUPPORTS_SEAMLESS)) ++ allowed_events &= ~VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST; ++ ++ return allowed_events; ++} ++ + static bool vbg_wait_event_cond(struct vbg_dev *gdev, + struct vbg_session *session, + u32 event_mask) +@@ -1017,6 +1164,7 @@ static bool vbg_wait_event_cond(struct vbg_dev *gdev, + spin_lock_irqsave(&gdev->event_spinlock, flags); + + events = gdev->pending_events & event_mask; ++ events &= vbg_get_allowed_event_mask_for_session(gdev, session); + wakeup = events || session->cancel_waiters; + + spin_unlock_irqrestore(&gdev->event_spinlock, flags); +@@ -1031,6 +1179,7 @@ static u32 vbg_consume_events_locked(struct vbg_dev *gdev, + { + u32 events = gdev->pending_events & event_mask; + ++ events &= vbg_get_allowed_event_mask_for_session(gdev, session); + gdev->pending_events &= ~events; + return events; + } +@@ -1150,7 +1299,9 @@ static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session, + case VMMDEVREQ_VIDEO_ACCEL_ENABLE: + case VMMDEVREQ_VIDEO_ACCEL_FLUSH: + case VMMDEVREQ_VIDEO_SET_VISIBLE_REGION: ++ case VMMDEVREQ_VIDEO_UPDATE_MONITOR_POSITIONS: + case VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX: ++ case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ_MULTI: + case VMMDEVREQ_GET_SEAMLESS_CHANGE_REQ: + case 
VMMDEVREQ_GET_VRDPCHANGE_REQ: + case VMMDEVREQ_LOG_STRING: +@@ -1432,6 +1583,29 @@ static int vbg_ioctl_change_filter_mask(struct vbg_dev *gdev, + false); + } + ++static int vbg_ioctl_acquire_guest_capabilities(struct vbg_dev *gdev, ++ struct vbg_session *session, ++ struct vbg_ioctl_acquire_guest_caps *caps) ++{ ++ u32 flags, or_mask, not_mask; ++ ++ if (vbg_ioctl_chk(&caps->hdr, sizeof(caps->u.in), 0)) ++ return -EINVAL; ++ ++ flags = caps->u.in.flags; ++ or_mask = caps->u.in.or_mask; ++ not_mask = caps->u.in.not_mask; ++ ++ if (flags & ~VBGL_IOC_AGC_FLAGS_VALID_MASK) ++ return -EINVAL; ++ ++ if ((or_mask | not_mask) & ~VMMDEV_GUEST_CAPABILITIES_MASK) ++ return -EINVAL; ++ ++ return vbg_acquire_session_capabilities(gdev, session, or_mask, ++ not_mask, flags, false); ++} ++ + static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev, + struct vbg_session *session, struct vbg_ioctl_set_guest_caps *caps) + { +@@ -1444,7 +1618,7 @@ static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev, + or_mask = caps->u.in.or_mask; + not_mask = caps->u.in.not_mask; + +- if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK) ++ if ((or_mask | not_mask) & ~VMMDEV_GUEST_CAPABILITIES_MASK) + return -EINVAL; + + ret = vbg_set_session_capabilities(gdev, session, or_mask, not_mask, +@@ -1452,7 +1626,7 @@ static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev, + if (ret) + return ret; + +- caps->u.out.session_caps = session->guest_caps; ++ caps->u.out.session_caps = session->set_guest_caps; + caps->u.out.global_caps = gdev->guest_caps_host; + + return 0; +@@ -1520,7 +1694,8 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data) + + /* For VMMDEV_REQUEST hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT */ + if (req_no_size == VBG_IOCTL_VMMDEV_REQUEST(0) || +- req == VBG_IOCTL_VMMDEV_REQUEST_BIG) ++ req == VBG_IOCTL_VMMDEV_REQUEST_BIG || ++ req == VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT) + return vbg_ioctl_vmmrequest(gdev, session, data); + + if (hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT) +@@ -1540,6 +1715,8 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data) + return vbg_ioctl_interrupt_all_wait_events(gdev, session, data); + case VBG_IOCTL_CHANGE_FILTER_MASK: + return vbg_ioctl_change_filter_mask(gdev, session, data); ++ case VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES: ++ return vbg_ioctl_acquire_guest_capabilities(gdev, session, data); + case VBG_IOCTL_CHANGE_GUEST_CAPABILITIES: + return vbg_ioctl_change_guest_capabilities(gdev, session, data); + case VBG_IOCTL_CHECK_BALLOON: +@@ -1558,10 +1735,11 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data) + case VBG_IOCTL_HGCM_CALL(0): + return vbg_ioctl_hgcm_call(gdev, session, f32bit, data); + case VBG_IOCTL_LOG(0): ++ case VBG_IOCTL_LOG_ALT(0): + return vbg_ioctl_log(data); + } + +- vbg_debug("VGDrvCommonIoCtl: Unknown req %#08x\n", req); ++ vbg_err_ratelimited("Userspace made an unknown ioctl req %#08x\n", req); + return -ENOTTY; + } + +diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h +index 4188c12b839f..ab4bf64e2cec 100644 +--- a/drivers/virt/vboxguest/vboxguest_core.h ++++ b/drivers/virt/vboxguest/vboxguest_core.h +@@ -15,6 +15,21 @@ + #include <linux/vboxguest.h> + #include "vmmdev.h" + ++/* ++ * The mainline kernel version (this version) of the vboxguest module ++ * contained a bug where it defined VBGL_IOCTL_VMMDEV_REQUEST_BIG and ++ * VBGL_IOCTL_LOG using _IOC(_IOC_READ | _IOC_WRITE, 'V', ...) 
instead ++ * of _IO(V, ...) as the out of tree VirtualBox upstream version does. ++ * ++ * These _ALT definitions keep compatibility with the wrong defines the ++ * mainline kernel version used for a while. ++ * Note the VirtualBox userspace bits have always been built against ++ * VirtualBox upstream's headers, so this is likely not necessary. But ++ * we must never break our ABI so we keep these around to be 100% sure. ++ */ ++#define VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT _IOC(_IOC_READ | _IOC_WRITE, 'V', 3, 0) ++#define VBG_IOCTL_LOG_ALT(s) _IOC(_IOC_READ | _IOC_WRITE, 'V', 9, s) ++ + struct vbg_session; + + /** VBox guest memory balloon. */ +@@ -103,11 +118,21 @@ struct vbg_dev { + u32 event_filter_host; + + /** +- * Usage counters for guest capabilities. Indexed by capability bit ++ * Guest capabilities which have been switched to acquire_mode. ++ */ ++ u32 acquire_mode_guest_caps; ++ /** ++ * Guest capabilities acquired by vbg_acquire_session_capabilities(). ++ * Only one session can acquire a capability at a time. ++ */ ++ u32 acquired_guest_caps; ++ /** ++ * Usage counters for guest capabilities requested through ++ * vbg_set_session_capabilities(). Indexed by capability bit + * number, one count per session using a capability. + * Protected by session_mutex. + */ +- struct vbg_bit_usage_tracker guest_caps_tracker; ++ struct vbg_bit_usage_tracker set_guest_caps_tracker; + /** + * The guest capabilities last reported to the host (or UINT32_MAX). + * Protected by session_mutex. +@@ -149,11 +174,16 @@ struct vbg_session { + */ + u32 event_filter; + /** +- * Guest capabilities for this session. ++ * Guest capabilities acquired by vbg_acquire_session_capabilities(). ++ * Only one session can acquire a capability at a time. ++ */ ++ u32 acquired_guest_caps; ++ /** ++ * Guest capabilities set through vbg_set_session_capabilities(). + * A capability claimed by any guest session will be reported to the + * host. Protected by vbg_gdev.session_mutex. + */ +- u32 guest_caps; ++ u32 set_guest_caps; + /** VMMDEV_REQUESTOR_* flags */ + u32 requestor; + /** Set on CANCEL_ALL_WAITEVENTS, protected by vbg_devevent_spinlock. */ +diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c +index 6e8c0f1c1056..32c2c52f7e84 100644 +--- a/drivers/virt/vboxguest/vboxguest_linux.c ++++ b/drivers/virt/vboxguest/vboxguest_linux.c +@@ -131,7 +131,8 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req, + * the need for a bounce-buffer and another copy later on. 
+ */ + is_vmmdev_req = (req & ~IOCSIZE_MASK) == VBG_IOCTL_VMMDEV_REQUEST(0) || +- req == VBG_IOCTL_VMMDEV_REQUEST_BIG; ++ req == VBG_IOCTL_VMMDEV_REQUEST_BIG || ++ req == VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT; + + if (is_vmmdev_req) + buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT, +diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c +index 7396187ee32a..ea05af41ec69 100644 +--- a/drivers/virt/vboxguest/vboxguest_utils.c ++++ b/drivers/virt/vboxguest/vboxguest_utils.c +@@ -59,6 +59,7 @@ EXPORT_SYMBOL(name) + VBG_LOG(vbg_info, pr_info); + VBG_LOG(vbg_warn, pr_warn); + VBG_LOG(vbg_err, pr_err); ++VBG_LOG(vbg_err_ratelimited, pr_err_ratelimited); + #if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG) + VBG_LOG(vbg_debug, pr_debug); + #endif +diff --git a/drivers/virt/vboxguest/vmmdev.h b/drivers/virt/vboxguest/vmmdev.h +index 6337b8d75d96..21f408120e3f 100644 +--- a/drivers/virt/vboxguest/vmmdev.h ++++ b/drivers/virt/vboxguest/vmmdev.h +@@ -206,6 +206,8 @@ VMMDEV_ASSERT_SIZE(vmmdev_mask, 24 + 8); + * not. + */ + #define VMMDEV_GUEST_SUPPORTS_GRAPHICS BIT(2) ++/* The mask of valid capabilities, for sanity checking. */ ++#define VMMDEV_GUEST_CAPABILITIES_MASK 0x00000007U + + /** struct vmmdev_hypervisorinfo - Hypervisor info structure. */ + struct vmmdev_hypervisorinfo { +diff --git a/include/linux/vbox_utils.h b/include/linux/vbox_utils.h +index ff56c443180c..db8a7d118093 100644 +--- a/include/linux/vbox_utils.h ++++ b/include/linux/vbox_utils.h +@@ -16,6 +16,7 @@ struct vbg_dev; + __printf(1, 2) void vbg_info(const char *fmt, ...); + __printf(1, 2) void vbg_warn(const char *fmt, ...); + __printf(1, 2) void vbg_err(const char *fmt, ...); ++__printf(1, 2) void vbg_err_ratelimited(const char *fmt, ...); + + /* Only use backdoor logging for non-dynamic debug builds */ + #if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG) +diff --git a/include/uapi/linux/vbox_vmmdev_types.h b/include/uapi/linux/vbox_vmmdev_types.h +index c27289fd619a..f8a8d6b3c521 100644 +--- a/include/uapi/linux/vbox_vmmdev_types.h ++++ b/include/uapi/linux/vbox_vmmdev_types.h +@@ -63,6 +63,7 @@ enum vmmdev_request_type { + VMMDEVREQ_SET_GUEST_CAPABILITIES = 56, + VMMDEVREQ_VIDEMODE_SUPPORTED2 = 57, /* since version 3.2.0 */ + VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX = 80, /* since version 4.2.4 */ ++ VMMDEVREQ_GET_DISPLAY_CHANGE_REQ_MULTI = 81, + VMMDEVREQ_HGCM_CONNECT = 60, + VMMDEVREQ_HGCM_DISCONNECT = 61, + VMMDEVREQ_HGCM_CALL32 = 62, +@@ -92,6 +93,8 @@ enum vmmdev_request_type { + VMMDEVREQ_WRITE_COREDUMP = 218, + VMMDEVREQ_GUEST_HEARTBEAT = 219, + VMMDEVREQ_HEARTBEAT_CONFIGURE = 220, ++ VMMDEVREQ_NT_BUG_CHECK = 221, ++ VMMDEVREQ_VIDEO_UPDATE_MONITOR_POSITIONS = 222, + /* Ensure the enum is a 32 bit data-type */ + VMMDEVREQ_SIZEHACK = 0x7fffffff + }; +diff --git a/include/uapi/linux/vboxguest.h b/include/uapi/linux/vboxguest.h +index 9cec58a6a5ea..15125f6ec60d 100644 +--- a/include/uapi/linux/vboxguest.h ++++ b/include/uapi/linux/vboxguest.h +@@ -103,7 +103,7 @@ VMMDEV_ASSERT_SIZE(vbg_ioctl_driver_version_info, 24 + 20); + + + /* IOCTL to perform a VMM Device request larger then 1KB. */ +-#define VBG_IOCTL_VMMDEV_REQUEST_BIG _IOC(_IOC_READ | _IOC_WRITE, 'V', 3, 0) ++#define VBG_IOCTL_VMMDEV_REQUEST_BIG _IO('V', 3) + + + /** VBG_IOCTL_HGCM_CONNECT data structure. */ +@@ -198,7 +198,7 @@ struct vbg_ioctl_log { + } u; + }; + +-#define VBG_IOCTL_LOG(s) _IOC(_IOC_READ | _IOC_WRITE, 'V', 9, s) ++#define VBG_IOCTL_LOG(s) _IO('V', 9) + + + /** VBG_IOCTL_WAIT_FOR_EVENTS data structure. 
*/ +@@ -257,6 +257,30 @@ VMMDEV_ASSERT_SIZE(vbg_ioctl_change_filter, 24 + 8); + _IOWR('V', 12, struct vbg_ioctl_change_filter) + + ++/** VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES data structure. */ ++struct vbg_ioctl_acquire_guest_caps { ++ /** The header. */ ++ struct vbg_ioctl_hdr hdr; ++ union { ++ struct { ++ /** Flags (VBGL_IOC_AGC_FLAGS_XXX). */ ++ __u32 flags; ++ /** Capabilities to set (VMMDEV_GUEST_SUPPORTS_XXX). */ ++ __u32 or_mask; ++ /** Capabilities to drop (VMMDEV_GUEST_SUPPORTS_XXX). */ ++ __u32 not_mask; ++ } in; ++ } u; ++}; ++VMMDEV_ASSERT_SIZE(vbg_ioctl_acquire_guest_caps, 24 + 12); ++ ++#define VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE 0x00000001 ++#define VBGL_IOC_AGC_FLAGS_VALID_MASK 0x00000001 ++ ++#define VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES \ ++ _IOWR('V', 13, struct vbg_ioctl_acquire_guest_caps) ++ ++ + /** VBG_IOCTL_CHANGE_GUEST_CAPABILITIES data structure. */ + struct vbg_ioctl_set_guest_caps { + /** The header. */ +-- +cgit v1.2.3-1-gf6bb5 + |
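
For reference, a minimal userspace sketch of the new acquire ioctl — this is not part of the patch, only an illustration of how a client might use the struct vbg_ioctl_acquire_guest_caps layout, the VBGL_IOC_AGC flags and the VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES request added above. It assumes a kernel carrying this patch, the updated <linux/vboxguest.h> UAPI header installed, and the usual /dev/vboxguest device node. VMMDEV_GUEST_SUPPORTS_SEAMLESS lives in the driver-internal vmmdev.h rather than in UAPI, so its value (bit 0) is redefined locally here.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vboxguest.h>

/* Driver-internal constant from drivers/virt/vboxguest/vmmdev.h (bit 0). */
#define GUEST_SUPPORTS_SEAMLESS 0x00000001U

int main(void)
{
	struct vbg_ioctl_acquire_guest_caps req;
	int fd = open("/dev/vboxguest", O_RDWR);

	if (fd < 0) {
		perror("open /dev/vboxguest");
		return 1;
	}

	memset(&req, 0, sizeof(req));
	req.hdr.size_in = sizeof(req);             /* header + u.in payload */
	req.hdr.size_out = sizeof(req.hdr);        /* this ioctl has no output payload */
	req.hdr.version = VBG_IOCTL_HDR_VERSION;
	req.hdr.type = VBG_IOCTL_HDR_TYPE_DEFAULT;
	req.u.in.flags = 0;                        /* acquire now, not just configure acquire-mode */
	req.u.in.or_mask = GUEST_SUPPORTS_SEAMLESS;/* capability to acquire exclusively */
	req.u.in.not_mask = 0;                     /* nothing to release */

	if (ioctl(fd, VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES, &req) < 0)
		perror("VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES");

	close(fd);
	return 0;
}

Note that only one session may hold an acquired capability at a time; a second client issuing the same request gets -EBUSY until the first session releases it via not_mask or closes /dev/vboxguest.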
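
The VBGL_IOCTL_VMMDEV_REQUEST_BIG / VBGL_IOCTL_LOG fix in the squashed series comes down to the direction bits of the ioctl number: _IO() encodes no direction, while _IOC(_IOC_READ | _IOC_WRITE, ...) sets both, so the two request numbers can never match and the old mainline defines answered VirtualBox userspace (built against upstream's headers) with -ENOTTY. A small sketch, assuming the standard <linux/ioctl.h> encoding on x86, prints both values:

#include <linux/ioctl.h>
#include <stdio.h>

int main(void)
{
	unsigned int upstream     = _IO('V', 3);                             /* no direction, size 0 */
	unsigned int old_mainline = _IOC(_IOC_READ | _IOC_WRITE, 'V', 3, 0); /* read/write direction */

	/* Prints 0x00005603 vs 0xc0005603 on x86: different numbers, hence -ENOTTY. */
	printf("_IO:  %#010x\n_IOC: %#010x\n", upstream, old_mainline);
	return 0;
}

This is also why the patch keeps the old values around as VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT and VBG_IOCTL_LOG_ALT: both encodings are accepted so the kernel ABI stays intact for any caller that used the old defines.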