xen: features and fixes for 4.21

-----BEGIN PGP SIGNATURE-----
 
 iHUEABYIAB0WIQRTLbB6QfY48x44uB6AXGG7T9hjvgUCXBvKlAAKCRCAXGG7T9hj
 vmIoAP0XpLCE+0Z1hhxcDcJ0hKah1NIniRSIGGr6Af+gxe8F4wEA0Vm55gtEZerU
 9mL5S7e2EcuTo93XCIjsxU8uPLGtegQ=
 =59wi
 -----END PGP SIGNATURE-----

Merge tag 'for-linus-4.21-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen updates from Juergen Gross:
 "Xen features and fixes:

   - a series to enable KVM guests to be booted by qemu via the Xen PVH
     boot entry for speeding up KVM guest tests

   - a series for a common driver to be used by Xen PV frontends (right
     now drm and sound)

   - two other fixes in Xen related code"

* tag 'for-linus-4.21-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  ALSA: xen-front: Use Xen common shared buffer implementation
  drm/xen-front: Use Xen common shared buffer implementation
  xen: Introduce shared buffer helpers for page directory...
  xen/pciback: Check dev_data before using it
  kprobes/x86/xen: blacklist non-attachable xen interrupt functions
  KVM: x86: Allow Qemu/KVM to use PVH entry point
  xen/pvh: Add memory map pointer to hvm_start_info struct
  xen/pvh: Move Xen code for getting mem map via hcall out of common file
  xen/pvh: Move Xen specific PVH VM initialization out of common file
  xen/pvh: Create a new file for Xen specific PVH code
  xen/pvh: Move PVH entry code out of Xen specific tree
  xen/pvh: Split CONFIG_XEN_PVH into CONFIG_PVH and CONFIG_XEN_PVH
Committed by Linus Torvalds, 2018-12-26 11:35:07 -08:00
commit 460023a5d1
31 changed files with 998 additions and 864 deletions
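
Before the per-file changes, it is worth spelling out the mechanism the PVH split below relies on: the new generic entry code under arch/x86/platform/pvh/ ships __weak stubs such as xen_pvh_init() and mem_map_via_hcall(), and arch/x86/xen/enlighten_pvh.c provides the strong definitions only when CONFIG_XEN_PVH is enabled, so a CONFIG_PVH-only kernel can still be started by QEMU/KVM through the same entry point. The following is a small, hedged userspace sketch of that weak/strong linker behaviour; only the function name is taken from the series, the rest is illustrative:

#include <stdio.h>

/* Generic PVH code provides a weak fallback that is used only when no
 * other object file defines the same symbol. */
__attribute__((weak)) void xen_pvh_init(void)
{
	printf("weak stub called: not a Xen PVH guest (the kernel would BUG() here)\n");
}

/*
 * With CONFIG_XEN_PVH=y a second object file would carry a strong
 * definition of xen_pvh_init() (setting xen_pvh, xen_start_flags and the
 * hypercall page), and the linker would pick that one instead.
 */

int main(void)
{
	xen_pvh_init();
	return 0;
}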


@@ -16590,6 +16590,7 @@ L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
T: git git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git
S: Supported
F: arch/x86/xen/
F: arch/x86/platform/pvh/
F: drivers/*/xen-*front.c
F: drivers/xen/
F: arch/x86/include/asm/xen/


@@ -7,6 +7,8 @@ obj-$(CONFIG_KVM) += kvm/
# Xen paravirtualization support
obj-$(CONFIG_XEN) += xen/
obj-$(CONFIG_PVH) += platform/pvh/
# Hyper-V paravirtualization support
obj-$(subst m,y,$(CONFIG_HYPERV)) += hyperv/


@@ -796,6 +796,12 @@ config KVM_GUEST
underlying device model, the host provides the guest with
timing infrastructure such as time of day, and system time
config PVH
bool "Support for running PVH guests"
---help---
This option enables the PVH entry point for guest virtual machines
as specified in the x86/HVM direct boot ABI.
config KVM_DEBUG_FS
bool "Enable debug information for KVM Guests in debugfs"
depends on KVM_GUEST && DEBUG_FS


@@ -386,7 +386,7 @@ NEXT_PAGE(early_dynamic_pgts)
.data
#if defined(CONFIG_XEN_PV) || defined(CONFIG_XEN_PVH)
#if defined(CONFIG_XEN_PV) || defined(CONFIG_PVH)
NEXT_PGD_PAGE(init_top_pgt)
.quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
.org init_top_pgt + L4_PAGE_OFFSET*8, 0


@@ -0,0 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
OBJECT_FILES_NON_STANDARD_head.o := y
obj-$(CONFIG_PVH) += enlighten.o
obj-$(CONFIG_PVH) += head.o


@@ -0,0 +1,137 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/acpi.h>
#include <xen/hvc-console.h>
#include <asm/io_apic.h>
#include <asm/hypervisor.h>
#include <asm/e820/api.h>
#include <asm/x86_init.h>
#include <asm/xen/interface.h>
#include <xen/xen.h>
#include <xen/interface/hvm/start_info.h>
/*
* PVH variables.
*
* pvh_bootparams and pvh_start_info need to live in the data segment since
* they are used after startup_{32|64}, which clear .bss, are invoked.
*/
struct boot_params pvh_bootparams __attribute__((section(".data")));
struct hvm_start_info pvh_start_info __attribute__((section(".data")));
unsigned int pvh_start_info_sz = sizeof(pvh_start_info);
static u64 pvh_get_root_pointer(void)
{
return pvh_start_info.rsdp_paddr;
}
/*
* Xen guests are able to obtain the memory map from the hypervisor via the
* HYPERVISOR_memory_op hypercall.
* If we are trying to boot a Xen PVH guest, it is expected that the kernel
* will have been configured to provide an override for this routine to do
* just that.
*/
void __init __weak mem_map_via_hcall(struct boot_params *ptr __maybe_unused)
{
xen_raw_printk("Error: Could not find memory map\n");
BUG();
}
static void __init init_pvh_bootparams(bool xen_guest)
{
memset(&pvh_bootparams, 0, sizeof(pvh_bootparams));
if ((pvh_start_info.version > 0) && (pvh_start_info.memmap_entries)) {
struct hvm_memmap_table_entry *ep;
int i;
ep = __va(pvh_start_info.memmap_paddr);
pvh_bootparams.e820_entries = pvh_start_info.memmap_entries;
for (i = 0; i < pvh_bootparams.e820_entries ; i++, ep++) {
pvh_bootparams.e820_table[i].addr = ep->addr;
pvh_bootparams.e820_table[i].size = ep->size;
pvh_bootparams.e820_table[i].type = ep->type;
}
} else if (xen_guest) {
mem_map_via_hcall(&pvh_bootparams);
} else {
/* Non-xen guests are not supported by version 0 */
BUG();
}
if (pvh_bootparams.e820_entries < E820_MAX_ENTRIES_ZEROPAGE - 1) {
pvh_bootparams.e820_table[pvh_bootparams.e820_entries].addr =
ISA_START_ADDRESS;
pvh_bootparams.e820_table[pvh_bootparams.e820_entries].size =
ISA_END_ADDRESS - ISA_START_ADDRESS;
pvh_bootparams.e820_table[pvh_bootparams.e820_entries].type =
E820_TYPE_RESERVED;
pvh_bootparams.e820_entries++;
} else
xen_raw_printk("Warning: Can fit ISA range into e820\n");
pvh_bootparams.hdr.cmd_line_ptr =
pvh_start_info.cmdline_paddr;
/* The first module is always ramdisk. */
if (pvh_start_info.nr_modules) {
struct hvm_modlist_entry *modaddr =
__va(pvh_start_info.modlist_paddr);
pvh_bootparams.hdr.ramdisk_image = modaddr->paddr;
pvh_bootparams.hdr.ramdisk_size = modaddr->size;
}
/*
* See Documentation/x86/boot.txt.
*
* Version 2.12 supports Xen entry point but we will use default x86/PC
* environment (i.e. hardware_subarch 0).
*/
pvh_bootparams.hdr.version = (2 << 8) | 12;
pvh_bootparams.hdr.type_of_loader = ((xen_guest ? 0x9 : 0xb) << 4) | 0; /* 0x9 = Xen, 0xb = QEMU (Documentation/x86/boot.txt) */
x86_init.acpi.get_root_pointer = pvh_get_root_pointer;
}
/*
* If we are trying to boot a Xen PVH guest, it is expected that the kernel
* will have been configured to provide the required override for this routine.
*/
void __init __weak xen_pvh_init(void)
{
xen_raw_printk("Error: Missing xen PVH initialization\n");
BUG();
}
static void hypervisor_specific_init(bool xen_guest)
{
if (xen_guest)
xen_pvh_init();
}
/*
* This routine (and those that it might call) should not use
* anything that lives in .bss since that segment will be cleared later.
*/
void __init xen_prepare_pvh(void)
{
u32 msr = xen_cpuid_base();
bool xen_guest = !!msr;
if (pvh_start_info.magic != XEN_HVM_START_MAGIC_VALUE) {
xen_raw_printk("Error: Unexpected magic value (0x%08x)\n",
pvh_start_info.magic);
BUG();
}
hypervisor_specific_init(xen_guest);
init_pvh_bootparams(xen_guest);
}


@@ -74,6 +74,7 @@ config XEN_DEBUG_FS
Enabling this option may incur a significant performance overhead.
config XEN_PVH
bool "Support for running as a PVH guest"
bool "Support for running as a Xen PVH guest"
depends on XEN && XEN_PVHVM && ACPI
select PVH
def_bool n


@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
OBJECT_FILES_NON_STANDARD_xen-asm_$(BITS).o := y
OBJECT_FILES_NON_STANDARD_xen-pvh.o := y
ifdef CONFIG_FUNCTION_TRACER
# Do not profile debug and lowlevel utilities
@@ -38,7 +37,6 @@ obj-$(CONFIG_XEN_PV) += xen-asm.o
obj-$(CONFIG_XEN_PV) += xen-asm_$(BITS).o
obj-$(CONFIG_XEN_PVH) += enlighten_pvh.o
obj-$(CONFIG_XEN_PVH) += xen-pvh.o
obj-$(CONFIG_EVENT_TRACING) += trace.o


@@ -6,103 +6,45 @@
#include <asm/io_apic.h>
#include <asm/hypervisor.h>
#include <asm/e820/api.h>
#include <asm/x86_init.h>
#include <xen/xen.h>
#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>
#include <xen/xen.h>
#include <xen/interface/memory.h>
#include <xen/interface/hvm/start_info.h>
/*
* PVH variables.
*
* xen_pvh pvh_bootparams and pvh_start_info need to live in data segment
* since they are used after startup_{32|64}, which clear .bss, are invoked.
* The variable xen_pvh needs to live in the data segment since it is used
* after startup_{32|64} is invoked, which will clear the .bss segment.
*/
bool xen_pvh __attribute__((section(".data"))) = 0;
struct boot_params pvh_bootparams __attribute__((section(".data")));
struct hvm_start_info pvh_start_info __attribute__((section(".data")));
unsigned int pvh_start_info_sz = sizeof(pvh_start_info);
static u64 pvh_get_root_pointer(void)
{
return pvh_start_info.rsdp_paddr;
}
static void __init init_pvh_bootparams(void)
{
struct xen_memory_map memmap;
int rc;
memset(&pvh_bootparams, 0, sizeof(pvh_bootparams));
memmap.nr_entries = ARRAY_SIZE(pvh_bootparams.e820_table);
set_xen_guest_handle(memmap.buffer, pvh_bootparams.e820_table);
rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
if (rc) {
xen_raw_printk("XENMEM_memory_map failed (%d)\n", rc);
BUG();
}
pvh_bootparams.e820_entries = memmap.nr_entries;
if (pvh_bootparams.e820_entries < E820_MAX_ENTRIES_ZEROPAGE - 1) {
pvh_bootparams.e820_table[pvh_bootparams.e820_entries].addr =
ISA_START_ADDRESS;
pvh_bootparams.e820_table[pvh_bootparams.e820_entries].size =
ISA_END_ADDRESS - ISA_START_ADDRESS;
pvh_bootparams.e820_table[pvh_bootparams.e820_entries].type =
E820_TYPE_RESERVED;
pvh_bootparams.e820_entries++;
} else
xen_raw_printk("Warning: Can fit ISA range into e820\n");
pvh_bootparams.hdr.cmd_line_ptr =
pvh_start_info.cmdline_paddr;
/* The first module is always ramdisk. */
if (pvh_start_info.nr_modules) {
struct hvm_modlist_entry *modaddr =
__va(pvh_start_info.modlist_paddr);
pvh_bootparams.hdr.ramdisk_image = modaddr->paddr;
pvh_bootparams.hdr.ramdisk_size = modaddr->size;
}
/*
* See Documentation/x86/boot.txt.
*
* Version 2.12 supports Xen entry point but we will use default x86/PC
* environment (i.e. hardware_subarch 0).
*/
pvh_bootparams.hdr.version = (2 << 8) | 12;
pvh_bootparams.hdr.type_of_loader = (9 << 4) | 0; /* Xen loader */
x86_init.acpi.get_root_pointer = pvh_get_root_pointer;
}
/*
* This routine (and those that it might call) should not use
* anything that lives in .bss since that segment will be cleared later.
*/
void __init xen_prepare_pvh(void)
void __init xen_pvh_init(void)
{
u32 msr;
u64 pfn;
if (pvh_start_info.magic != XEN_HVM_START_MAGIC_VALUE) {
xen_raw_printk("Error: Unexpected magic value (0x%08x)\n",
pvh_start_info.magic);
BUG();
}
xen_pvh = 1;
xen_start_flags = pvh_start_info.flags;
msr = cpuid_ebx(xen_cpuid_base() + 2);
pfn = __pa(hypercall_page);
wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
init_pvh_bootparams();
}
void __init mem_map_via_hcall(struct boot_params *boot_params_p)
{
struct xen_memory_map memmap;
int rc;
memmap.nr_entries = ARRAY_SIZE(boot_params_p->e820_table);
set_xen_guest_handle(memmap.buffer, boot_params_p->e820_table);
rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
if (rc) {
xen_raw_printk("XENMEM_memory_map failed (%d)\n", rc);
BUG();
}
boot_params_p->e820_entries = memmap.nr_entries;
}


@@ -12,6 +12,7 @@
#include <asm/segment.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/asm.h>
#include <xen/interface/xen.h>
@@ -24,6 +25,7 @@ ENTRY(xen_\name)
pop %r11
jmp \name
END(xen_\name)
_ASM_NOKPROBE(xen_\name)
.endm
xen_pv_trap divide_error


@@ -12,6 +12,7 @@ config DRM_XEN_FRONTEND
select DRM_KMS_HELPER
select VIDEOMODE_HELPERS
select XEN_XENBUS_FRONTEND
select XEN_FRONT_PGDIR_SHBUF
help
Choose this option if you want to enable a para-virtualized
frontend DRM/KMS driver for Xen guest OSes.


@@ -4,7 +4,6 @@ drm_xen_front-objs := xen_drm_front.o \
xen_drm_front_kms.o \
xen_drm_front_conn.o \
xen_drm_front_evtchnl.o \
xen_drm_front_shbuf.o \
xen_drm_front_cfg.o \
xen_drm_front_gem.o


@@ -19,6 +19,7 @@
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/xen-front-pgdir-shbuf.h>
#include <xen/interface/io/displif.h>
#include "xen_drm_front.h"
@@ -26,28 +27,20 @@
#include "xen_drm_front_evtchnl.h"
#include "xen_drm_front_gem.h"
#include "xen_drm_front_kms.h"
#include "xen_drm_front_shbuf.h"
struct xen_drm_front_dbuf {
struct list_head list;
u64 dbuf_cookie;
u64 fb_cookie;
struct xen_drm_front_shbuf *shbuf;
struct xen_front_pgdir_shbuf shbuf;
};
static int dbuf_add_to_list(struct xen_drm_front_info *front_info,
struct xen_drm_front_shbuf *shbuf, u64 dbuf_cookie)
static void dbuf_add_to_list(struct xen_drm_front_info *front_info,
struct xen_drm_front_dbuf *dbuf, u64 dbuf_cookie)
{
struct xen_drm_front_dbuf *dbuf;
dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
if (!dbuf)
return -ENOMEM;
dbuf->dbuf_cookie = dbuf_cookie;
dbuf->shbuf = shbuf;
list_add(&dbuf->list, &front_info->dbuf_list);
return 0;
}
static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list,
@@ -62,15 +55,6 @@ static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list,
return NULL;
}
static void dbuf_flush_fb(struct list_head *dbuf_list, u64 fb_cookie)
{
struct xen_drm_front_dbuf *buf, *q;
list_for_each_entry_safe(buf, q, dbuf_list, list)
if (buf->fb_cookie == fb_cookie)
xen_drm_front_shbuf_flush(buf->shbuf);
}
static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie)
{
struct xen_drm_front_dbuf *buf, *q;
@@ -78,8 +62,8 @@ static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie)
list_for_each_entry_safe(buf, q, dbuf_list, list)
if (buf->dbuf_cookie == dbuf_cookie) {
list_del(&buf->list);
xen_drm_front_shbuf_unmap(buf->shbuf);
xen_drm_front_shbuf_free(buf->shbuf);
xen_front_pgdir_shbuf_unmap(&buf->shbuf);
xen_front_pgdir_shbuf_free(&buf->shbuf);
kfree(buf);
break;
}
@@ -91,8 +75,8 @@ static void dbuf_free_all(struct list_head *dbuf_list)
list_for_each_entry_safe(buf, q, dbuf_list, list) {
list_del(&buf->list);
xen_drm_front_shbuf_unmap(buf->shbuf);
xen_drm_front_shbuf_free(buf->shbuf);
xen_front_pgdir_shbuf_unmap(&buf->shbuf);
xen_front_pgdir_shbuf_free(&buf->shbuf);
kfree(buf);
}
}
@@ -171,9 +155,9 @@ int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
u32 bpp, u64 size, struct page **pages)
{
struct xen_drm_front_evtchnl *evtchnl;
struct xen_drm_front_shbuf *shbuf;
struct xen_drm_front_dbuf *dbuf;
struct xendispl_req *req;
struct xen_drm_front_shbuf_cfg buf_cfg;
struct xen_front_pgdir_shbuf_cfg buf_cfg;
unsigned long flags;
int ret;
@@ -181,28 +165,29 @@ int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
if (unlikely(!evtchnl))
return -EIO;
dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
if (!dbuf)
return -ENOMEM;
dbuf_add_to_list(front_info, dbuf, dbuf_cookie);
memset(&buf_cfg, 0, sizeof(buf_cfg));
buf_cfg.xb_dev = front_info->xb_dev;
buf_cfg.num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
buf_cfg.pages = pages;
buf_cfg.size = size;
buf_cfg.pgdir = &dbuf->shbuf;
buf_cfg.be_alloc = front_info->cfg.be_alloc;
shbuf = xen_drm_front_shbuf_alloc(&buf_cfg);
if (IS_ERR(shbuf))
return PTR_ERR(shbuf);
ret = dbuf_add_to_list(front_info, shbuf, dbuf_cookie);
if (ret < 0) {
xen_drm_front_shbuf_free(shbuf);
return ret;
}
ret = xen_front_pgdir_shbuf_alloc(&buf_cfg);
if (ret < 0)
goto fail_shbuf_alloc;
mutex_lock(&evtchnl->u.req.req_io_lock);
spin_lock_irqsave(&front_info->io_lock, flags);
req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_CREATE);
req->op.dbuf_create.gref_directory =
xen_drm_front_shbuf_get_dir_start(shbuf);
xen_front_pgdir_shbuf_get_dir_start(&dbuf->shbuf);
req->op.dbuf_create.buffer_sz = size;
req->op.dbuf_create.dbuf_cookie = dbuf_cookie;
req->op.dbuf_create.width = width;
@@ -221,7 +206,7 @@ int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
if (ret < 0)
goto fail;
ret = xen_drm_front_shbuf_map(shbuf);
ret = xen_front_pgdir_shbuf_map(&dbuf->shbuf);
if (ret < 0)
goto fail;
@@ -230,6 +215,7 @@ int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
fail:
mutex_unlock(&evtchnl->u.req.req_io_lock);
fail_shbuf_alloc:
dbuf_free(&front_info->dbuf_list, dbuf_cookie);
return ret;
}
@@ -358,7 +344,6 @@ int xen_drm_front_page_flip(struct xen_drm_front_info *front_info,
if (unlikely(conn_idx >= front_info->num_evt_pairs))
return -EINVAL;
dbuf_flush_fb(&front_info->dbuf_list, fb_cookie);
evtchnl = &front_info->evt_pairs[conn_idx].req;
mutex_lock(&evtchnl->u.req.req_io_lock);


@@ -22,7 +22,6 @@
#include <xen/balloon.h>
#include "xen_drm_front.h"
#include "xen_drm_front_shbuf.h"
struct xen_gem_object {
struct drm_gem_object base;


@@ -1,414 +0,0 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Xen para-virtual DRM device
*
* Copyright (C) 2016-2018 EPAM Systems Inc.
*
* Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
*/
#include <drm/drmP.h>
#if defined(CONFIG_X86)
#include <drm/drm_cache.h>
#endif
#include <linux/errno.h>
#include <linux/mm.h>
#include <asm/xen/hypervisor.h>
#include <xen/balloon.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/interface/io/ring.h>
#include <xen/interface/io/displif.h>
#include "xen_drm_front.h"
#include "xen_drm_front_shbuf.h"
struct xen_drm_front_shbuf_ops {
/*
* Calculate number of grefs required to handle this buffer,
* e.g. if grefs are required for page directory only or the buffer
* pages as well.
*/
void (*calc_num_grefs)(struct xen_drm_front_shbuf *buf);
/* Fill page directory according to para-virtual display protocol. */
void (*fill_page_dir)(struct xen_drm_front_shbuf *buf);
/* Claim grant references for the pages of the buffer. */
int (*grant_refs_for_buffer)(struct xen_drm_front_shbuf *buf,
grant_ref_t *priv_gref_head, int gref_idx);
/* Map grant references of the buffer. */
int (*map)(struct xen_drm_front_shbuf *buf);
/* Unmap grant references of the buffer. */
int (*unmap)(struct xen_drm_front_shbuf *buf);
};
grant_ref_t xen_drm_front_shbuf_get_dir_start(struct xen_drm_front_shbuf *buf)
{
if (!buf->grefs)
return GRANT_INVALID_REF;
return buf->grefs[0];
}
int xen_drm_front_shbuf_map(struct xen_drm_front_shbuf *buf)
{
if (buf->ops->map)
return buf->ops->map(buf);
/* no need to map own grant references */
return 0;
}
int xen_drm_front_shbuf_unmap(struct xen_drm_front_shbuf *buf)
{
if (buf->ops->unmap)
return buf->ops->unmap(buf);
/* no need to unmap own grant references */
return 0;
}
void xen_drm_front_shbuf_flush(struct xen_drm_front_shbuf *buf)
{
#if defined(CONFIG_X86)
drm_clflush_pages(buf->pages, buf->num_pages);
#endif
}
void xen_drm_front_shbuf_free(struct xen_drm_front_shbuf *buf)
{
if (buf->grefs) {
int i;
for (i = 0; i < buf->num_grefs; i++)
if (buf->grefs[i] != GRANT_INVALID_REF)
gnttab_end_foreign_access(buf->grefs[i],
0, 0UL);
}
kfree(buf->grefs);
kfree(buf->directory);
kfree(buf);
}
/*
* number of grefs a page can hold with respect to the
* struct xendispl_page_directory header
*/
#define XEN_DRM_NUM_GREFS_PER_PAGE ((PAGE_SIZE - \
offsetof(struct xendispl_page_directory, gref)) / \
sizeof(grant_ref_t))
static int get_num_pages_dir(struct xen_drm_front_shbuf *buf)
{
/* number of pages the page directory consumes itself */
return DIV_ROUND_UP(buf->num_pages, XEN_DRM_NUM_GREFS_PER_PAGE);
}
static void backend_calc_num_grefs(struct xen_drm_front_shbuf *buf)
{
/* only for pages the page directory consumes itself */
buf->num_grefs = get_num_pages_dir(buf);
}
static void guest_calc_num_grefs(struct xen_drm_front_shbuf *buf)
{
/*
* number of pages the page directory consumes itself
* plus grefs for the buffer pages
*/
buf->num_grefs = get_num_pages_dir(buf) + buf->num_pages;
}
#define xen_page_to_vaddr(page) \
((uintptr_t)pfn_to_kaddr(page_to_xen_pfn(page)))
static int backend_unmap(struct xen_drm_front_shbuf *buf)
{
struct gnttab_unmap_grant_ref *unmap_ops;
int i, ret;
if (!buf->pages || !buf->backend_map_handles || !buf->grefs)
return 0;
unmap_ops = kcalloc(buf->num_pages, sizeof(*unmap_ops),
GFP_KERNEL);
if (!unmap_ops) {
DRM_ERROR("Failed to get memory while unmapping\n");
return -ENOMEM;
}
for (i = 0; i < buf->num_pages; i++) {
phys_addr_t addr;
addr = xen_page_to_vaddr(buf->pages[i]);
gnttab_set_unmap_op(&unmap_ops[i], addr, GNTMAP_host_map,
buf->backend_map_handles[i]);
}
ret = gnttab_unmap_refs(unmap_ops, NULL, buf->pages,
buf->num_pages);
for (i = 0; i < buf->num_pages; i++) {
if (unlikely(unmap_ops[i].status != GNTST_okay))
DRM_ERROR("Failed to unmap page %d: %d\n",
i, unmap_ops[i].status);
}
if (ret)
DRM_ERROR("Failed to unmap grant references, ret %d", ret);
kfree(unmap_ops);
kfree(buf->backend_map_handles);
buf->backend_map_handles = NULL;
return ret;
}
static int backend_map(struct xen_drm_front_shbuf *buf)
{
struct gnttab_map_grant_ref *map_ops = NULL;
unsigned char *ptr;
int ret, cur_gref, cur_dir_page, cur_page, grefs_left;
map_ops = kcalloc(buf->num_pages, sizeof(*map_ops), GFP_KERNEL);
if (!map_ops)
return -ENOMEM;
buf->backend_map_handles = kcalloc(buf->num_pages,
sizeof(*buf->backend_map_handles),
GFP_KERNEL);
if (!buf->backend_map_handles) {
kfree(map_ops);
return -ENOMEM;
}
/*
* read page directory to get grefs from the backend: for external
* buffer we only allocate buf->grefs for the page directory,
* so buf->num_grefs has number of pages in the page directory itself
*/
ptr = buf->directory;
grefs_left = buf->num_pages;
cur_page = 0;
for (cur_dir_page = 0; cur_dir_page < buf->num_grefs; cur_dir_page++) {
struct xendispl_page_directory *page_dir =
(struct xendispl_page_directory *)ptr;
int to_copy = XEN_DRM_NUM_GREFS_PER_PAGE;
if (to_copy > grefs_left)
to_copy = grefs_left;
for (cur_gref = 0; cur_gref < to_copy; cur_gref++) {
phys_addr_t addr;
addr = xen_page_to_vaddr(buf->pages[cur_page]);
gnttab_set_map_op(&map_ops[cur_page], addr,
GNTMAP_host_map,
page_dir->gref[cur_gref],
buf->xb_dev->otherend_id);
cur_page++;
}
grefs_left -= to_copy;
ptr += PAGE_SIZE;
}
ret = gnttab_map_refs(map_ops, NULL, buf->pages, buf->num_pages);
/* save handles even if error, so we can unmap */
for (cur_page = 0; cur_page < buf->num_pages; cur_page++) {
buf->backend_map_handles[cur_page] = map_ops[cur_page].handle;
if (unlikely(map_ops[cur_page].status != GNTST_okay))
DRM_ERROR("Failed to map page %d: %d\n",
cur_page, map_ops[cur_page].status);
}
if (ret) {
DRM_ERROR("Failed to map grant references, ret %d", ret);
backend_unmap(buf);
}
kfree(map_ops);
return ret;
}
static void backend_fill_page_dir(struct xen_drm_front_shbuf *buf)
{
struct xendispl_page_directory *page_dir;
unsigned char *ptr;
int i, num_pages_dir;
ptr = buf->directory;
num_pages_dir = get_num_pages_dir(buf);
/* fill only grefs for the page directory itself */
for (i = 0; i < num_pages_dir - 1; i++) {
page_dir = (struct xendispl_page_directory *)ptr;
page_dir->gref_dir_next_page = buf->grefs[i + 1];
ptr += PAGE_SIZE;
}
/* last page must say there is no more pages */
page_dir = (struct xendispl_page_directory *)ptr;
page_dir->gref_dir_next_page = GRANT_INVALID_REF;
}
static void guest_fill_page_dir(struct xen_drm_front_shbuf *buf)
{
unsigned char *ptr;
int cur_gref, grefs_left, to_copy, i, num_pages_dir;
ptr = buf->directory;
num_pages_dir = get_num_pages_dir(buf);
/*
* while copying, skip grefs at start, they are for pages
* granted for the page directory itself
*/
cur_gref = num_pages_dir;
grefs_left = buf->num_pages;
for (i = 0; i < num_pages_dir; i++) {
struct xendispl_page_directory *page_dir =
(struct xendispl_page_directory *)ptr;
if (grefs_left <= XEN_DRM_NUM_GREFS_PER_PAGE) {
to_copy = grefs_left;
page_dir->gref_dir_next_page = GRANT_INVALID_REF;
} else {
to_copy = XEN_DRM_NUM_GREFS_PER_PAGE;
page_dir->gref_dir_next_page = buf->grefs[i + 1];
}
memcpy(&page_dir->gref, &buf->grefs[cur_gref],
to_copy * sizeof(grant_ref_t));
ptr += PAGE_SIZE;
grefs_left -= to_copy;
cur_gref += to_copy;
}
}
static int guest_grant_refs_for_buffer(struct xen_drm_front_shbuf *buf,
grant_ref_t *priv_gref_head,
int gref_idx)
{
int i, cur_ref, otherend_id;
otherend_id = buf->xb_dev->otherend_id;
for (i = 0; i < buf->num_pages; i++) {
cur_ref = gnttab_claim_grant_reference(priv_gref_head);
if (cur_ref < 0)
return cur_ref;
gnttab_grant_foreign_access_ref(cur_ref, otherend_id,
xen_page_to_gfn(buf->pages[i]),
0);
buf->grefs[gref_idx++] = cur_ref;
}
return 0;
}
static int grant_references(struct xen_drm_front_shbuf *buf)
{
grant_ref_t priv_gref_head;
int ret, i, j, cur_ref;
int otherend_id, num_pages_dir;
ret = gnttab_alloc_grant_references(buf->num_grefs, &priv_gref_head);
if (ret < 0) {
DRM_ERROR("Cannot allocate grant references\n");
return ret;
}
otherend_id = buf->xb_dev->otherend_id;
j = 0;
num_pages_dir = get_num_pages_dir(buf);
for (i = 0; i < num_pages_dir; i++) {
unsigned long frame;
cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
if (cur_ref < 0)
return cur_ref;
frame = xen_page_to_gfn(virt_to_page(buf->directory +
PAGE_SIZE * i));
gnttab_grant_foreign_access_ref(cur_ref, otherend_id, frame, 0);
buf->grefs[j++] = cur_ref;
}
if (buf->ops->grant_refs_for_buffer) {
ret = buf->ops->grant_refs_for_buffer(buf, &priv_gref_head, j);
if (ret)
return ret;
}
gnttab_free_grant_references(priv_gref_head);
return 0;
}
static int alloc_storage(struct xen_drm_front_shbuf *buf)
{
buf->grefs = kcalloc(buf->num_grefs, sizeof(*buf->grefs), GFP_KERNEL);
if (!buf->grefs)
return -ENOMEM;
buf->directory = kcalloc(get_num_pages_dir(buf), PAGE_SIZE, GFP_KERNEL);
if (!buf->directory)
return -ENOMEM;
return 0;
}
/*
* For be allocated buffers we don't need grant_refs_for_buffer as those
* grant references are allocated at backend side
*/
static const struct xen_drm_front_shbuf_ops backend_ops = {
.calc_num_grefs = backend_calc_num_grefs,
.fill_page_dir = backend_fill_page_dir,
.map = backend_map,
.unmap = backend_unmap
};
/* For locally granted references we do not need to map/unmap the references */
static const struct xen_drm_front_shbuf_ops local_ops = {
.calc_num_grefs = guest_calc_num_grefs,
.fill_page_dir = guest_fill_page_dir,
.grant_refs_for_buffer = guest_grant_refs_for_buffer,
};
struct xen_drm_front_shbuf *
xen_drm_front_shbuf_alloc(struct xen_drm_front_shbuf_cfg *cfg)
{
struct xen_drm_front_shbuf *buf;
int ret;
buf = kzalloc(sizeof(*buf), GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
if (cfg->be_alloc)
buf->ops = &backend_ops;
else
buf->ops = &local_ops;
buf->xb_dev = cfg->xb_dev;
buf->num_pages = DIV_ROUND_UP(cfg->size, PAGE_SIZE);
buf->pages = cfg->pages;
buf->ops->calc_num_grefs(buf);
ret = alloc_storage(buf);
if (ret)
goto fail;
ret = grant_references(buf);
if (ret)
goto fail;
buf->ops->fill_page_dir(buf);
return buf;
fail:
xen_drm_front_shbuf_free(buf);
return ERR_PTR(ret);
}


@@ -1,64 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
* Xen para-virtual DRM device
*
* Copyright (C) 2016-2018 EPAM Systems Inc.
*
* Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
*/
#ifndef __XEN_DRM_FRONT_SHBUF_H_
#define __XEN_DRM_FRONT_SHBUF_H_
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <xen/grant_table.h>
struct xen_drm_front_shbuf {
/*
* number of references granted for the backend use:
* - for allocated/imported dma-buf's this holds number of grant
* references for the page directory and pages of the buffer
* - for the buffer provided by the backend this holds number of
* grant references for the page directory as grant references for
* the buffer will be provided by the backend
*/
int num_grefs;
grant_ref_t *grefs;
unsigned char *directory;
int num_pages;
struct page **pages;
struct xenbus_device *xb_dev;
/* these are the ops used internally depending on be_alloc mode */
const struct xen_drm_front_shbuf_ops *ops;
/* Xen map handles for the buffer allocated by the backend */
grant_handle_t *backend_map_handles;
};
struct xen_drm_front_shbuf_cfg {
struct xenbus_device *xb_dev;
size_t size;
struct page **pages;
bool be_alloc;
};
struct xen_drm_front_shbuf *
xen_drm_front_shbuf_alloc(struct xen_drm_front_shbuf_cfg *cfg);
grant_ref_t xen_drm_front_shbuf_get_dir_start(struct xen_drm_front_shbuf *buf);
int xen_drm_front_shbuf_map(struct xen_drm_front_shbuf *buf);
int xen_drm_front_shbuf_unmap(struct xen_drm_front_shbuf *buf);
void xen_drm_front_shbuf_flush(struct xen_drm_front_shbuf *buf);
void xen_drm_front_shbuf_free(struct xen_drm_front_shbuf *buf);
#endif /* __XEN_DRM_FRONT_SHBUF_H_ */


@@ -340,4 +340,7 @@ config XEN_SYMS
config XEN_HAVE_VPMU
bool
config XEN_FRONT_PGDIR_SHBUF
tristate
endmenu


@@ -44,3 +44,4 @@ xen-gntdev-y := gntdev.o
xen-gntdev-$(CONFIG_XEN_GNTDEV_DMABUF) += gntdev-dmabuf.o
xen-gntalloc-y := gntalloc.o
xen-privcmd-y := privcmd.o privcmd-buf.o
obj-$(CONFIG_XEN_FRONT_PGDIR_SHBUF) += xen-front-pgdir-shbuf.o


@@ -0,0 +1,553 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Xen frontend/backend page directory based shared buffer
* helper module.
*
* Copyright (C) 2018 EPAM Systems Inc.
*
* Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <asm/xen/hypervisor.h>
#include <xen/balloon.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/interface/io/ring.h>
#include <xen/xen-front-pgdir-shbuf.h>
#ifndef GRANT_INVALID_REF
/*
* FIXME: usage of grant reference 0 as invalid grant reference:
* grant reference 0 is valid, but never exposed to a PV driver,
* because of the fact it is already in use/reserved by the PV console.
*/
#define GRANT_INVALID_REF 0
#endif
/**
* This structure represents the structure of a shared page
* that contains grant references to the pages of the shared
* buffer. This structure is common to many Xen para-virtualized
* protocols at include/xen/interface/io/
*/
struct xen_page_directory {
grant_ref_t gref_dir_next_page;
grant_ref_t gref[1]; /* Variable length */
};
/**
* Shared buffer ops which are differently implemented
* depending on the allocation mode, e.g. if the buffer
* is allocated by the corresponding backend or frontend.
* Not all of the operations are implemented by every mode.
*/
struct xen_front_pgdir_shbuf_ops {
/*
* Calculate number of grefs required to handle this buffer,
* e.g. if grefs are required for page directory only or the buffer
* pages as well.
*/
void (*calc_num_grefs)(struct xen_front_pgdir_shbuf *buf);
/* Fill page directory according to para-virtual display protocol. */
void (*fill_page_dir)(struct xen_front_pgdir_shbuf *buf);
/* Claim grant references for the pages of the buffer. */
int (*grant_refs_for_buffer)(struct xen_front_pgdir_shbuf *buf,
grant_ref_t *priv_gref_head, int gref_idx);
/* Map grant references of the buffer. */
int (*map)(struct xen_front_pgdir_shbuf *buf);
/* Unmap grant references of the buffer. */
int (*unmap)(struct xen_front_pgdir_shbuf *buf);
};
/**
* Get granted reference to the very first page of the
* page directory. Usually this is passed to the backend,
* so it can find/fill the grant references to the buffer's
* pages.
*
* \param buf shared buffer whose page directory is of interest.
* \return granted reference to the very first page of the
* page directory.
*/
grant_ref_t
xen_front_pgdir_shbuf_get_dir_start(struct xen_front_pgdir_shbuf *buf)
{
if (!buf->grefs)
return GRANT_INVALID_REF;
return buf->grefs[0];
}
EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_get_dir_start);
/**
* Map granted references of the shared buffer.
*
* Depending on the shared buffer mode of allocation
* (be_alloc flag) this can either do nothing (for buffers
* shared by the frontend itself) or map the provided granted
* references onto the backing storage (buf->pages).
*
* \param buf shared buffer whose grants are to be mapped.
* \return zero on success or a negative number on failure.
*/
int xen_front_pgdir_shbuf_map(struct xen_front_pgdir_shbuf *buf)
{
if (buf->ops && buf->ops->map)
return buf->ops->map(buf);
/* No need to map own grant references. */
return 0;
}
EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_map);
/**
* Unmap granted references of the shared buffer.
*
* Depending on the shared buffer mode of allocation
* (be_alloc flag) this can either do nothing (for buffers
* shared by the frontend itself) or unmap the provided granted
* references.
*
* \param buf shared buffer whose grants are to be unmapped.
* \return zero on success or a negative number on failure.
*/
int xen_front_pgdir_shbuf_unmap(struct xen_front_pgdir_shbuf *buf)
{
if (buf->ops && buf->ops->unmap)
return buf->ops->unmap(buf);
/* No need to unmap own grant references. */
return 0;
}
EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_unmap);
/**
* Free all the resources of the shared buffer.
*
* \param buf shared buffer whose resources are to be freed.
*/
void xen_front_pgdir_shbuf_free(struct xen_front_pgdir_shbuf *buf)
{
if (buf->grefs) {
int i;
for (i = 0; i < buf->num_grefs; i++)
if (buf->grefs[i] != GRANT_INVALID_REF)
gnttab_end_foreign_access(buf->grefs[i],
0, 0UL);
}
kfree(buf->grefs);
kfree(buf->directory);
}
EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_free);
/*
* Number of grefs a page can hold with respect to the
* struct xen_page_directory header.
*/
#define XEN_NUM_GREFS_PER_PAGE ((PAGE_SIZE - \
offsetof(struct xen_page_directory, \
gref)) / sizeof(grant_ref_t))
/**
* Get the number of pages the page directory consumes itself.
*
* \param buf shared buffer.
*/
static int get_num_pages_dir(struct xen_front_pgdir_shbuf *buf)
{
return DIV_ROUND_UP(buf->num_pages, XEN_NUM_GREFS_PER_PAGE);
}
/**
* Calculate the number of grant references needed to share the buffer
* and its pages when backend allocates the buffer.
*
* \param buf shared buffer.
*/
static void backend_calc_num_grefs(struct xen_front_pgdir_shbuf *buf)
{
/* Only for pages the page directory consumes itself. */
buf->num_grefs = get_num_pages_dir(buf);
}
/**
* Calculate the number of grant references needed to share the buffer
* and its pages when frontend allocates the buffer.
*
* \param buf shared buffer.
*/
static void guest_calc_num_grefs(struct xen_front_pgdir_shbuf *buf)
{
/*
* Number of pages the page directory consumes itself
* plus grefs for the buffer pages.
*/
buf->num_grefs = get_num_pages_dir(buf) + buf->num_pages;
}
#define xen_page_to_vaddr(page) \
((uintptr_t)pfn_to_kaddr(page_to_xen_pfn(page)))
/**
* Unmap the buffer previously mapped with grant references
* provided by the backend.
*
* \param buf shared buffer.
* \return zero on success or a negative number on failure.
*/
static int backend_unmap(struct xen_front_pgdir_shbuf *buf)
{
struct gnttab_unmap_grant_ref *unmap_ops;
int i, ret;
if (!buf->pages || !buf->backend_map_handles || !buf->grefs)
return 0;
unmap_ops = kcalloc(buf->num_pages, sizeof(*unmap_ops),
GFP_KERNEL);
if (!unmap_ops)
return -ENOMEM;
for (i = 0; i < buf->num_pages; i++) {
phys_addr_t addr;
addr = xen_page_to_vaddr(buf->pages[i]);
gnttab_set_unmap_op(&unmap_ops[i], addr, GNTMAP_host_map,
buf->backend_map_handles[i]);
}
ret = gnttab_unmap_refs(unmap_ops, NULL, buf->pages,
buf->num_pages);
for (i = 0; i < buf->num_pages; i++) {
if (unlikely(unmap_ops[i].status != GNTST_okay))
dev_err(&buf->xb_dev->dev,
"Failed to unmap page %d: %d\n",
i, unmap_ops[i].status);
}
if (ret)
dev_err(&buf->xb_dev->dev,
"Failed to unmap grant references, ret %d", ret);
kfree(unmap_ops);
kfree(buf->backend_map_handles);
buf->backend_map_handles = NULL;
return ret;
}
/**
* Map the buffer with grant references provided by the backend.
*
* \param buf shared buffer.
* \return zero on success or a negative number on failure.
*/
static int backend_map(struct xen_front_pgdir_shbuf *buf)
{
struct gnttab_map_grant_ref *map_ops = NULL;
unsigned char *ptr;
int ret, cur_gref, cur_dir_page, cur_page, grefs_left;
map_ops = kcalloc(buf->num_pages, sizeof(*map_ops), GFP_KERNEL);
if (!map_ops)
return -ENOMEM;
buf->backend_map_handles = kcalloc(buf->num_pages,
sizeof(*buf->backend_map_handles),
GFP_KERNEL);
if (!buf->backend_map_handles) {
kfree(map_ops);
return -ENOMEM;
}
/*
* Read page directory to get grefs from the backend: for external
* buffer we only allocate buf->grefs for the page directory,
* so buf->num_grefs has number of pages in the page directory itself.
*/
ptr = buf->directory;
grefs_left = buf->num_pages;
cur_page = 0;
for (cur_dir_page = 0; cur_dir_page < buf->num_grefs; cur_dir_page++) {
struct xen_page_directory *page_dir =
(struct xen_page_directory *)ptr;
int to_copy = XEN_NUM_GREFS_PER_PAGE;
if (to_copy > grefs_left)
to_copy = grefs_left;
for (cur_gref = 0; cur_gref < to_copy; cur_gref++) {
phys_addr_t addr;
addr = xen_page_to_vaddr(buf->pages[cur_page]);
gnttab_set_map_op(&map_ops[cur_page], addr,
GNTMAP_host_map,
page_dir->gref[cur_gref],
buf->xb_dev->otherend_id);
cur_page++;
}
grefs_left -= to_copy;
ptr += PAGE_SIZE;
}
ret = gnttab_map_refs(map_ops, NULL, buf->pages, buf->num_pages);
/* Save handles even if error, so we can unmap. */
for (cur_page = 0; cur_page < buf->num_pages; cur_page++) {
buf->backend_map_handles[cur_page] = map_ops[cur_page].handle;
if (unlikely(map_ops[cur_page].status != GNTST_okay))
dev_err(&buf->xb_dev->dev,
"Failed to map page %d: %d\n",
cur_page, map_ops[cur_page].status);
}
if (ret) {
dev_err(&buf->xb_dev->dev,
"Failed to map grant references, ret %d", ret);
backend_unmap(buf);
}
kfree(map_ops);
return ret;
}
/**
* Fill page directory with grant references to the pages of the
* page directory itself.
*
* The grant references to the buffer pages are provided by the
* backend in this case.
*
* \param buf shared buffer.
*/
static void backend_fill_page_dir(struct xen_front_pgdir_shbuf *buf)
{
struct xen_page_directory *page_dir;
unsigned char *ptr;
int i, num_pages_dir;
ptr = buf->directory;
num_pages_dir = get_num_pages_dir(buf);
/* Fill only grefs for the page directory itself. */
for (i = 0; i < num_pages_dir - 1; i++) {
page_dir = (struct xen_page_directory *)ptr;
page_dir->gref_dir_next_page = buf->grefs[i + 1];
ptr += PAGE_SIZE;
}
/* Last page must say there are no more pages. */
page_dir = (struct xen_page_directory *)ptr;
page_dir->gref_dir_next_page = GRANT_INVALID_REF;
}
/**
* Fill page directory with grant references to the pages of the
* page directory and the buffer we share with the backend.
*
* \param buf shared buffer.
*/
static void guest_fill_page_dir(struct xen_front_pgdir_shbuf *buf)
{
unsigned char *ptr;
int cur_gref, grefs_left, to_copy, i, num_pages_dir;
ptr = buf->directory;
num_pages_dir = get_num_pages_dir(buf);
/*
* While copying, skip grefs at start, they are for pages
* granted for the page directory itself.
*/
cur_gref = num_pages_dir;
grefs_left = buf->num_pages;
for (i = 0; i < num_pages_dir; i++) {
struct xen_page_directory *page_dir =
(struct xen_page_directory *)ptr;
if (grefs_left <= XEN_NUM_GREFS_PER_PAGE) {
to_copy = grefs_left;
page_dir->gref_dir_next_page = GRANT_INVALID_REF;
} else {
to_copy = XEN_NUM_GREFS_PER_PAGE;
page_dir->gref_dir_next_page = buf->grefs[i + 1];
}
memcpy(&page_dir->gref, &buf->grefs[cur_gref],
to_copy * sizeof(grant_ref_t));
ptr += PAGE_SIZE;
grefs_left -= to_copy;
cur_gref += to_copy;
}
}
/**
* Grant references to the frontend's buffer pages.
*
* These will be shared with the backend, so it can
* access the buffer's data.
*
* \param buf shared buffer.
* \return zero on success or a negative number on failure.
*/
static int guest_grant_refs_for_buffer(struct xen_front_pgdir_shbuf *buf,
grant_ref_t *priv_gref_head,
int gref_idx)
{
int i, cur_ref, otherend_id;
otherend_id = buf->xb_dev->otherend_id;
for (i = 0; i < buf->num_pages; i++) {
cur_ref = gnttab_claim_grant_reference(priv_gref_head);
if (cur_ref < 0)
return cur_ref;
gnttab_grant_foreign_access_ref(cur_ref, otherend_id,
xen_page_to_gfn(buf->pages[i]),
0);
buf->grefs[gref_idx++] = cur_ref;
}
return 0;
}
/**
* Grant all the references needed to share the buffer.
*
* Grant references to the page directory pages and, if
* needed, also to the pages of the shared buffer data.
*
* \param buf shared buffer.
* \return zero on success or a negative number on failure.
*/
static int grant_references(struct xen_front_pgdir_shbuf *buf)
{
grant_ref_t priv_gref_head;
int ret, i, j, cur_ref;
int otherend_id, num_pages_dir;
ret = gnttab_alloc_grant_references(buf->num_grefs, &priv_gref_head);
if (ret < 0) {
dev_err(&buf->xb_dev->dev,
"Cannot allocate grant references\n");
return ret;
}
otherend_id = buf->xb_dev->otherend_id;
j = 0;
num_pages_dir = get_num_pages_dir(buf);
for (i = 0; i < num_pages_dir; i++) {
unsigned long frame;
cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
if (cur_ref < 0)
return cur_ref;
frame = xen_page_to_gfn(virt_to_page(buf->directory +
PAGE_SIZE * i));
gnttab_grant_foreign_access_ref(cur_ref, otherend_id, frame, 0);
buf->grefs[j++] = cur_ref;
}
if (buf->ops->grant_refs_for_buffer) {
ret = buf->ops->grant_refs_for_buffer(buf, &priv_gref_head, j);
if (ret)
return ret;
}
gnttab_free_grant_references(priv_gref_head);
return 0;
}
/**
* Allocate all required structures to manage a shared buffer.
*
* \param buf shared buffer.
* \return zero on success or a negative number on failure.
*/
static int alloc_storage(struct xen_front_pgdir_shbuf *buf)
{
buf->grefs = kcalloc(buf->num_grefs, sizeof(*buf->grefs), GFP_KERNEL);
if (!buf->grefs)
return -ENOMEM;
buf->directory = kcalloc(get_num_pages_dir(buf), PAGE_SIZE, GFP_KERNEL);
if (!buf->directory)
return -ENOMEM;
return 0;
}
/*
* For backend allocated buffers we don't need grant_refs_for_buffer
* as those grant references are allocated at backend side.
*/
static const struct xen_front_pgdir_shbuf_ops backend_ops = {
.calc_num_grefs = backend_calc_num_grefs,
.fill_page_dir = backend_fill_page_dir,
.map = backend_map,
.unmap = backend_unmap
};
/*
* For locally granted references we do not need to map/unmap
* the references.
*/
static const struct xen_front_pgdir_shbuf_ops local_ops = {
.calc_num_grefs = guest_calc_num_grefs,
.fill_page_dir = guest_fill_page_dir,
.grant_refs_for_buffer = guest_grant_refs_for_buffer,
};
/**
* Allocate a new instance of a shared buffer.
*
* \param cfg configuration to be used while allocating a new shared buffer.
* \return zero on success or a negative number on failure.
*/
int xen_front_pgdir_shbuf_alloc(struct xen_front_pgdir_shbuf_cfg *cfg)
{
struct xen_front_pgdir_shbuf *buf = cfg->pgdir;
int ret;
if (cfg->be_alloc)
buf->ops = &backend_ops;
else
buf->ops = &local_ops;
buf->xb_dev = cfg->xb_dev;
buf->num_pages = cfg->num_pages;
buf->pages = cfg->pages;
buf->ops->calc_num_grefs(buf);
ret = alloc_storage(buf);
if (ret)
goto fail;
ret = grant_references(buf);
if (ret)
goto fail;
buf->ops->fill_page_dir(buf);
return 0;
fail:
xen_front_pgdir_shbuf_free(buf);
return ret;
}
EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_alloc);
MODULE_DESCRIPTION("Xen frontend/backend page directory based "
"shared buffer handling");
MODULE_AUTHOR("Oleksandr Andrushchenko");
MODULE_LICENSE("GPL");
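
To put numbers on the directory sizing done by get_num_pages_dir() and guest_calc_num_grefs() above: assuming 4 KiB pages and 32-bit grant references (the common x86 case), XEN_NUM_GREFS_PER_PAGE works out to (4096 - 4) / 4 = 1023, so a 16 MiB frontend-allocated buffer (4096 pages) needs DIV_ROUND_UP(4096, 1023) = 5 directory pages and 4096 + 5 = 4101 grants in total. A standalone sketch of that arithmetic (the constants are assumptions, not taken from kernel headers):

#include <stdio.h>

#define PAGE_SZ		4096u
#define GREF_SZ		4u	/* sizeof(grant_ref_t) */
#define DIR_HDR		4u	/* gref_dir_next_page header of xen_page_directory */
#define GREFS_PER_PAGE	((PAGE_SZ - DIR_HDR) / GREF_SZ)	/* 1023 */

int main(void)
{
	unsigned int buf_pages = 4096;	/* 16 MiB of 4 KiB pages */
	unsigned int dir_pages = (buf_pages + GREFS_PER_PAGE - 1) / GREFS_PER_PAGE;

	printf("directory pages: %u\n", dir_pages);			/* 5 */
	printf("grants, frontend-allocated: %u\n", dir_pages + buf_pages);	/* 4101 */
	printf("grants, backend-allocated: %u\n", dir_pages);		/* 5 */
	return 0;
}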


@@ -106,7 +106,8 @@ static void pcistub_device_release(struct kref *kref)
* is called from "unbind" which takes a device_lock mutex.
*/
__pci_reset_function_locked(dev);
if (pci_load_and_free_saved_state(dev, &dev_data->pci_saved_state))
if (dev_data &&
pci_load_and_free_saved_state(dev, &dev_data->pci_saved_state))
dev_info(&dev->dev, "Could not reload PCI state\n");
else
pci_restore_state(dev);


@@ -33,7 +33,7 @@
* | magic | Contains the magic value XEN_HVM_START_MAGIC_VALUE
* | | ("xEn3" with the 0x80 bit of the "E" set).
* 4 +----------------+
* | version | Version of this structure. Current version is 0. New
* | version | Version of this structure. Current version is 1. New
* | | versions are guaranteed to be backwards-compatible.
* 8 +----------------+
* | flags | SIF_xxx flags.
@@ -48,6 +48,15 @@
* 32 +----------------+
* | rsdp_paddr | Physical address of the RSDP ACPI data structure.
* 40 +----------------+
* | memmap_paddr | Physical address of the (optional) memory map. Only
* | | present in version 1 and newer of the structure.
* 48 +----------------+
* | memmap_entries | Number of entries in the memory map table. Zero
* | | if there is no memory map being provided. Only
* | | present in version 1 and newer of the structure.
* 52 +----------------+
* | reserved | Version 1 and newer only.
* 56 +----------------+
*
* The layout of each entry in the module structure is the following:
*
@@ -62,13 +71,51 @@
* | reserved |
* 32 +----------------+
*
* The layout of each entry in the memory map table is as follows:
*
* 0 +----------------+
* | addr | Base address
* 8 +----------------+
* | size | Size of mapping in bytes
* 16 +----------------+
* | type | Type of mapping as defined between the hypervisor
* | | and guest. See XEN_HVM_MEMMAP_TYPE_* values below.
* 20 +----------------+
* | reserved |
* 24 +----------------+
*
* The address and sizes are always a 64bit little endian unsigned integer.
*
* NB: Xen on x86 will always try to place all the data below the 4GiB
* boundary.
*
* Version numbers of the hvm_start_info structure have evolved like this:
*
* Version 0: Initial implementation.
*
* Version 1: Added the memmap_paddr/memmap_entries fields (plus 4 bytes of
* padding) to the end of the hvm_start_info struct. These new
* fields can be used to pass a memory map to the guest. The
* memory map is optional and so guests that understand version 1
* of the structure must check that memmap_entries is non-zero
* before trying to read the memory map.
*/
#define XEN_HVM_START_MAGIC_VALUE 0x336ec578
/*
* The values used in the type field of the memory map table entries are
* defined below and match the Address Range Types as defined in the "System
* Address Map Interfaces" section of the ACPI Specification. Please refer to
* section 15 in version 6.2 of the ACPI spec: http://uefi.org/specifications
*/
#define XEN_HVM_MEMMAP_TYPE_RAM 1
#define XEN_HVM_MEMMAP_TYPE_RESERVED 2
#define XEN_HVM_MEMMAP_TYPE_ACPI 3
#define XEN_HVM_MEMMAP_TYPE_NVS 4
#define XEN_HVM_MEMMAP_TYPE_UNUSABLE 5
#define XEN_HVM_MEMMAP_TYPE_DISABLED 6
#define XEN_HVM_MEMMAP_TYPE_PMEM 7
/*
* C representation of the x86/HVM start info layout.
*
@@ -86,6 +133,13 @@ struct hvm_start_info {
uint64_t cmdline_paddr; /* Physical address of the command line. */
uint64_t rsdp_paddr; /* Physical address of the RSDP ACPI data */
/* structure. */
/* All following fields only present in version 1 and newer */
uint64_t memmap_paddr; /* Physical address of an array of */
/* hvm_memmap_table_entry. */
uint32_t memmap_entries; /* Number of entries in the memmap table. */
/* Value will be zero if there is no memory */
/* map being provided. */
uint32_t reserved; /* Must be zero. */
};
struct hvm_modlist_entry {
@@ -95,4 +149,11 @@ struct hvm_modlist_entry {
uint64_t reserved;
};
struct hvm_memmap_table_entry {
uint64_t addr; /* Base address of the memory region */
uint64_t size; /* Size of the memory region in bytes */
uint32_t type; /* Mapping type */
uint32_t reserved; /* Must be zero for Version 1. */
};
#endif /* __XEN_PUBLIC_ARCH_X86_HVM_START_INFO_H__ */
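
The comments above capture the version 1 contract: the memory map is optional, so a guest must check both the structure version and memmap_entries before dereferencing memmap_paddr. A minimal kernel-context sketch of that walk, modelled on init_pvh_bootparams() from the new arch/x86/platform/pvh/enlighten.c earlier in this merge (illustrative only; __va() converts the physical address handed over by the loader):

#include <xen/interface/hvm/start_info.h>

static void __init walk_pvh_memmap(const struct hvm_start_info *si)
{
	const struct hvm_memmap_table_entry *ep;
	uint32_t i;

	/* Fields past rsdp_paddr exist only for version >= 1, and even then
	 * the map may be absent, so both checks are needed. */
	if (si->version == 0 || si->memmap_entries == 0)
		return;

	ep = __va(si->memmap_paddr);
	for (i = 0; i < si->memmap_entries; i++, ep++) {
		/* ep->type carries a XEN_HVM_MEMMAP_TYPE_* value, which
		 * matches the ACPI address range types and so maps almost
		 * one-to-one onto the boot_params e820 table. */
	}
}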


@@ -0,0 +1,89 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
* Xen frontend/backend page directory based shared buffer
* helper module.
*
* Copyright (C) 2018 EPAM Systems Inc.
*
* Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
*/
#ifndef __XEN_FRONT_PGDIR_SHBUF_H_
#define __XEN_FRONT_PGDIR_SHBUF_H_
#include <linux/kernel.h>
#include <xen/grant_table.h>
struct xen_front_pgdir_shbuf_ops;
struct xen_front_pgdir_shbuf {
/*
* Number of references granted for the backend use:
*
* - for frontend allocated/imported buffers this holds the number
* of grant references for the page directory and the pages
* of the buffer
*
* - for the buffer provided by the backend this only holds the number
* of grant references for the page directory itself as grant
* references for the buffer will be provided by the backend.
*/
int num_grefs;
grant_ref_t *grefs;
/* Page directory backing storage. */
u8 *directory;
/*
* Number of pages for the shared buffer itself (excluding the page
* directory).
*/
int num_pages;
/*
* Backing storage of the shared buffer: these are the pages being
* shared.
*/
struct page **pages;
struct xenbus_device *xb_dev;
/* These are the ops used internally depending on be_alloc mode. */
const struct xen_front_pgdir_shbuf_ops *ops;
/* Xen map handles for the buffer allocated by the backend. */
grant_handle_t *backend_map_handles;
};
struct xen_front_pgdir_shbuf_cfg {
struct xenbus_device *xb_dev;
/* Number of pages of the buffer backing storage. */
int num_pages;
/* Pages of the buffer to be shared. */
struct page **pages;
/*
* This is allocated outside because there are use-cases when
* the buffer structure is allocated as a part of a bigger one.
*/
struct xen_front_pgdir_shbuf *pgdir;
/*
* Mode of grant reference sharing: if set then backend will share
* grant references to the buffer with the frontend.
*/
int be_alloc;
};
int xen_front_pgdir_shbuf_alloc(struct xen_front_pgdir_shbuf_cfg *cfg);
grant_ref_t
xen_front_pgdir_shbuf_get_dir_start(struct xen_front_pgdir_shbuf *buf);
int xen_front_pgdir_shbuf_map(struct xen_front_pgdir_shbuf *buf);
int xen_front_pgdir_shbuf_unmap(struct xen_front_pgdir_shbuf *buf);
void xen_front_pgdir_shbuf_free(struct xen_front_pgdir_shbuf *buf);
#endif /* __XEN_FRONT_PGDIR_SHBUF_H_ */
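
Putting the pieces of this header together, a typical frontend wires a buffer up roughly as follows (a sketch modelled on the DRM and sound callers elsewhere in this merge; my_buf and share_with_backend are illustrative names, and error/teardown handling is trimmed):

#include <xen/xenbus.h>
#include <xen/xen-front-pgdir-shbuf.h>

struct my_buf {
	struct xen_front_pgdir_shbuf shbuf;	/* embedded, zeroed by the caller */
};

static int share_with_backend(struct xenbus_device *xb_dev,
			      struct my_buf *buf, struct page **pages,
			      int num_pages, bool be_alloc,
			      grant_ref_t *gref_dir)
{
	struct xen_front_pgdir_shbuf_cfg cfg = {
		.xb_dev    = xb_dev,
		.num_pages = num_pages,		/* backing pages of the buffer */
		.pages     = pages,
		.pgdir     = &buf->shbuf,
		.be_alloc  = be_alloc,		/* who grants the buffer pages */
	};
	int ret;

	/* Grants the page directory (and, for frontend-allocated buffers,
	 * the buffer pages themselves) and fills the directory. */
	ret = xen_front_pgdir_shbuf_alloc(&cfg);
	if (ret < 0)
		return ret;

	/* Handed to the backend, e.g. in XENDISPL_OP_DBUF_CREATE or
	 * XENSND_OP_OPEN, so it can walk the page directory. */
	*gref_dir = xen_front_pgdir_shbuf_get_dir_start(&buf->shbuf);
	return 0;
}

Once the backend has acknowledged the request (and, in be_alloc mode, filled the directory with its own grants), the frontend calls xen_front_pgdir_shbuf_map(); teardown is xen_front_pgdir_shbuf_unmap() followed by xen_front_pgdir_shbuf_free(), mirroring dbuf_free() in the DRM driver and stream_free() in the sound driver in this series.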


@@ -29,6 +29,9 @@ extern bool xen_pvh;
extern uint32_t xen_start_flags;
#include <xen/interface/hvm/start_info.h>
extern struct hvm_start_info pvh_start_info;
#ifdef CONFIG_XEN_DOM0
#include <xen/interface/xen.h>
#include <asm/xen/hypervisor.h>


@@ -5,6 +5,7 @@ config SND_XEN_FRONTEND
depends on XEN
select SND_PCM
select XEN_XENBUS_FRONTEND
select XEN_FRONT_PGDIR_SHBUF
help
Choose this option if you want to enable a para-virtualized
frontend sound driver for Xen guest OSes.


@@ -3,7 +3,6 @@
snd_xen_front-objs := xen_snd_front.o \
xen_snd_front_cfg.o \
xen_snd_front_evtchnl.o \
xen_snd_front_shbuf.o \
xen_snd_front_alsa.o
obj-$(CONFIG_SND_XEN_FRONTEND) += snd_xen_front.o


@@ -16,12 +16,12 @@
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/xen-front-pgdir-shbuf.h>
#include <xen/interface/io/sndif.h>
#include "xen_snd_front.h"
#include "xen_snd_front_alsa.h"
#include "xen_snd_front_evtchnl.h"
#include "xen_snd_front_shbuf.h"
static struct xensnd_req *
be_stream_prepare_req(struct xen_snd_front_evtchnl *evtchnl, u8 operation)
@@ -82,7 +82,7 @@ int xen_snd_front_stream_query_hw_param(struct xen_snd_front_evtchnl *evtchnl,
}
int xen_snd_front_stream_prepare(struct xen_snd_front_evtchnl *evtchnl,
struct xen_snd_front_shbuf *sh_buf,
struct xen_front_pgdir_shbuf *shbuf,
u8 format, unsigned int channels,
unsigned int rate, u32 buffer_sz,
u32 period_sz)
@@ -99,7 +99,8 @@ int xen_snd_front_stream_prepare(struct xen_snd_front_evtchnl *evtchnl,
req->op.open.pcm_rate = rate;
req->op.open.buffer_sz = buffer_sz;
req->op.open.period_sz = period_sz;
req->op.open.gref_directory = xen_snd_front_shbuf_get_dir_start(sh_buf);
req->op.open.gref_directory =
xen_front_pgdir_shbuf_get_dir_start(shbuf);
mutex_unlock(&evtchnl->ring_io_lock);
ret = be_stream_do_io(evtchnl);


@@ -16,7 +16,7 @@
struct xen_snd_front_card_info;
struct xen_snd_front_evtchnl;
struct xen_snd_front_evtchnl_pair;
struct xen_snd_front_shbuf;
struct xen_front_pgdir_shbuf;
struct xensnd_query_hw_param;
struct xen_snd_front_info {
@@ -35,7 +35,7 @@ int xen_snd_front_stream_query_hw_param(struct xen_snd_front_evtchnl *evtchnl,
struct xensnd_query_hw_param *hw_param_resp);
int xen_snd_front_stream_prepare(struct xen_snd_front_evtchnl *evtchnl,
struct xen_snd_front_shbuf *sh_buf,
struct xen_front_pgdir_shbuf *shbuf,
u8 format, unsigned int channels,
unsigned int rate, u32 buffer_sz,
u32 period_sz);


@@ -15,17 +15,24 @@
#include <sound/pcm_params.h>
#include <xen/xenbus.h>
#include <xen/xen-front-pgdir-shbuf.h>
#include "xen_snd_front.h"
#include "xen_snd_front_alsa.h"
#include "xen_snd_front_cfg.h"
#include "xen_snd_front_evtchnl.h"
#include "xen_snd_front_shbuf.h"
struct xen_snd_front_pcm_stream_info {
struct xen_snd_front_info *front_info;
struct xen_snd_front_evtchnl_pair *evt_pair;
struct xen_snd_front_shbuf sh_buf;
/* This is the shared buffer with its backing storage. */
struct xen_front_pgdir_shbuf shbuf;
u8 *buffer;
size_t buffer_sz;
int num_pages;
struct page **pages;
int index;
bool is_open;
@@ -214,12 +221,20 @@ static void stream_clear(struct xen_snd_front_pcm_stream_info *stream)
stream->out_frames = 0;
atomic_set(&stream->hw_ptr, 0);
xen_snd_front_evtchnl_pair_clear(stream->evt_pair);
xen_snd_front_shbuf_clear(&stream->sh_buf);
memset(&stream->shbuf, 0, sizeof(stream->shbuf));
stream->buffer = NULL;
stream->buffer_sz = 0;
stream->pages = NULL;
stream->num_pages = 0;
}
static void stream_free(struct xen_snd_front_pcm_stream_info *stream)
{
xen_snd_front_shbuf_free(&stream->sh_buf);
xen_front_pgdir_shbuf_unmap(&stream->shbuf);
xen_front_pgdir_shbuf_free(&stream->shbuf);
if (stream->buffer)
free_pages_exact(stream->buffer, stream->buffer_sz);
kfree(stream->pages);
stream_clear(stream);
}
@@ -421,10 +436,34 @@ static int alsa_close(struct snd_pcm_substream *substream)
return 0;
}
static int shbuf_setup_backstore(struct xen_snd_front_pcm_stream_info *stream,
size_t buffer_sz)
{
int i;
stream->buffer_sz = buffer_sz;
stream->buffer = alloc_pages_exact(stream->buffer_sz, GFP_KERNEL);
if (!stream->buffer)
return -ENOMEM;
stream->num_pages = DIV_ROUND_UP(stream->buffer_sz, PAGE_SIZE);
stream->pages = kcalloc(stream->num_pages, sizeof(struct page *),
GFP_KERNEL);
if (!stream->pages)
return -ENOMEM;
for (i = 0; i < stream->num_pages; i++)
stream->pages[i] = virt_to_page(stream->buffer + i * PAGE_SIZE);
return 0;
}
static int alsa_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct xen_snd_front_pcm_stream_info *stream = stream_get(substream);
struct xen_snd_front_info *front_info = stream->front_info;
struct xen_front_pgdir_shbuf_cfg buf_cfg;
int ret;
/*
@@ -432,19 +471,32 @@ static int alsa_hw_params(struct snd_pcm_substream *substream,
* so free the previously allocated shared buffer if any.
*/
stream_free(stream);
ret = shbuf_setup_backstore(stream, params_buffer_bytes(params));
if (ret < 0)
goto fail;
ret = xen_snd_front_shbuf_alloc(stream->front_info->xb_dev,
&stream->sh_buf,
params_buffer_bytes(params));
if (ret < 0) {
stream_free(stream);
dev_err(&stream->front_info->xb_dev->dev,
"Failed to allocate buffers for stream with index %d\n",
stream->index);
return ret;
}
memset(&buf_cfg, 0, sizeof(buf_cfg));
buf_cfg.xb_dev = front_info->xb_dev;
buf_cfg.pgdir = &stream->shbuf;
buf_cfg.num_pages = stream->num_pages;
buf_cfg.pages = stream->pages;
ret = xen_front_pgdir_shbuf_alloc(&buf_cfg);
if (ret < 0)
goto fail;
ret = xen_front_pgdir_shbuf_map(&stream->shbuf);
if (ret < 0)
goto fail;
return 0;
fail:
stream_free(stream);
dev_err(&front_info->xb_dev->dev,
"Failed to allocate buffers for stream with index %d\n",
stream->index);
return ret;
}
static int alsa_hw_free(struct snd_pcm_substream *substream)
@@ -476,7 +528,7 @@ static int alsa_prepare(struct snd_pcm_substream *substream)
sndif_format = ret;
ret = xen_snd_front_stream_prepare(&stream->evt_pair->req,
&stream->sh_buf,
&stream->shbuf,
sndif_format,
runtime->channels,
runtime->rate,
@@ -556,10 +608,10 @@ static int alsa_pb_copy_user(struct snd_pcm_substream *substream,
{
struct xen_snd_front_pcm_stream_info *stream = stream_get(substream);
if (unlikely(pos + count > stream->sh_buf.buffer_sz))
if (unlikely(pos + count > stream->buffer_sz))
return -EINVAL;
if (copy_from_user(stream->sh_buf.buffer + pos, src, count))
if (copy_from_user(stream->buffer + pos, src, count))
return -EFAULT;
return xen_snd_front_stream_write(&stream->evt_pair->req, pos, count);
@@ -571,10 +623,10 @@ static int alsa_pb_copy_kernel(struct snd_pcm_substream *substream,
{
struct xen_snd_front_pcm_stream_info *stream = stream_get(substream);
if (unlikely(pos + count > stream->sh_buf.buffer_sz))
if (unlikely(pos + count > stream->buffer_sz))
return -EINVAL;
memcpy(stream->sh_buf.buffer + pos, src, count);
memcpy(stream->buffer + pos, src, count);
return xen_snd_front_stream_write(&stream->evt_pair->req, pos, count);
}
@@ -586,14 +638,14 @@ static int alsa_cap_copy_user(struct snd_pcm_substream *substream,
struct xen_snd_front_pcm_stream_info *stream = stream_get(substream);
int ret;
if (unlikely(pos + count > stream->sh_buf.buffer_sz))
if (unlikely(pos + count > stream->buffer_sz))
return -EINVAL;
ret = xen_snd_front_stream_read(&stream->evt_pair->req, pos, count);
if (ret < 0)
return ret;
return copy_to_user(dst, stream->sh_buf.buffer + pos, count) ?
return copy_to_user(dst, stream->buffer + pos, count) ?
-EFAULT : 0;
}
@@ -604,14 +656,14 @@ static int alsa_cap_copy_kernel(struct snd_pcm_substream *substream,
struct xen_snd_front_pcm_stream_info *stream = stream_get(substream);
int ret;
if (unlikely(pos + count > stream->sh_buf.buffer_sz))
if (unlikely(pos + count > stream->buffer_sz))
return -EINVAL;
ret = xen_snd_front_stream_read(&stream->evt_pair->req, pos, count);
if (ret < 0)
return ret;
memcpy(dst, stream->sh_buf.buffer + pos, count);
memcpy(dst, stream->buffer + pos, count);
return 0;
}
@@ -622,10 +674,10 @@ static int alsa_pb_fill_silence(struct snd_pcm_substream *substream,
{
struct xen_snd_front_pcm_stream_info *stream = stream_get(substream);
if (unlikely(pos + count > stream->sh_buf.buffer_sz))
if (unlikely(pos + count > stream->buffer_sz))
return -EINVAL;
memset(stream->sh_buf.buffer + pos, 0, count);
memset(stream->buffer + pos, 0, count);
return xen_snd_front_stream_write(&stream->evt_pair->req, pos, count);
}


@@ -1,194 +0,0 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Xen para-virtual sound device
*
* Copyright (C) 2016-2018 EPAM Systems Inc.
*
* Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
*/
#include <linux/kernel.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include "xen_snd_front_shbuf.h"
grant_ref_t xen_snd_front_shbuf_get_dir_start(struct xen_snd_front_shbuf *buf)
{
if (!buf->grefs)
return GRANT_INVALID_REF;
return buf->grefs[0];
}
void xen_snd_front_shbuf_clear(struct xen_snd_front_shbuf *buf)
{
memset(buf, 0, sizeof(*buf));
}
void xen_snd_front_shbuf_free(struct xen_snd_front_shbuf *buf)
{
int i;
if (buf->grefs) {
for (i = 0; i < buf->num_grefs; i++)
if (buf->grefs[i] != GRANT_INVALID_REF)
gnttab_end_foreign_access(buf->grefs[i],
0, 0UL);
kfree(buf->grefs);
}
kfree(buf->directory);
free_pages_exact(buf->buffer, buf->buffer_sz);
xen_snd_front_shbuf_clear(buf);
}
/*
* number of grant references a page can hold with respect to the
* xensnd_page_directory header
*/
#define XENSND_NUM_GREFS_PER_PAGE ((XEN_PAGE_SIZE - \
offsetof(struct xensnd_page_directory, gref)) / \
sizeof(grant_ref_t))
static void fill_page_dir(struct xen_snd_front_shbuf *buf,
int num_pages_dir)
{
struct xensnd_page_directory *page_dir;
unsigned char *ptr;
int i, cur_gref, grefs_left, to_copy;
ptr = buf->directory;
grefs_left = buf->num_grefs - num_pages_dir;
/*
* skip grant references at the beginning, they are for pages granted
* for the page directory itself
*/
cur_gref = num_pages_dir;
for (i = 0; i < num_pages_dir; i++) {
page_dir = (struct xensnd_page_directory *)ptr;
if (grefs_left <= XENSND_NUM_GREFS_PER_PAGE) {
to_copy = grefs_left;
page_dir->gref_dir_next_page = GRANT_INVALID_REF;
} else {
to_copy = XENSND_NUM_GREFS_PER_PAGE;
page_dir->gref_dir_next_page = buf->grefs[i + 1];
}
memcpy(&page_dir->gref, &buf->grefs[cur_gref],
to_copy * sizeof(grant_ref_t));
ptr += XEN_PAGE_SIZE;
grefs_left -= to_copy;
cur_gref += to_copy;
}
}
static int grant_references(struct xenbus_device *xb_dev,
struct xen_snd_front_shbuf *buf,
int num_pages_dir, int num_pages_buffer,
int num_grefs)
{
grant_ref_t priv_gref_head;
unsigned long frame;
int ret, i, j, cur_ref;
int otherend_id;
ret = gnttab_alloc_grant_references(num_grefs, &priv_gref_head);
if (ret)
return ret;
buf->num_grefs = num_grefs;
otherend_id = xb_dev->otherend_id;
j = 0;
for (i = 0; i < num_pages_dir; i++) {
cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
if (cur_ref < 0) {
ret = cur_ref;
goto fail;
}
frame = xen_page_to_gfn(virt_to_page(buf->directory +
XEN_PAGE_SIZE * i));
gnttab_grant_foreign_access_ref(cur_ref, otherend_id, frame, 0);
buf->grefs[j++] = cur_ref;
}
for (i = 0; i < num_pages_buffer; i++) {
cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
if (cur_ref < 0) {
ret = cur_ref;
goto fail;
}
frame = xen_page_to_gfn(virt_to_page(buf->buffer +
XEN_PAGE_SIZE * i));
gnttab_grant_foreign_access_ref(cur_ref, otherend_id, frame, 0);
buf->grefs[j++] = cur_ref;
}
gnttab_free_grant_references(priv_gref_head);
fill_page_dir(buf, num_pages_dir);
return 0;
fail:
gnttab_free_grant_references(priv_gref_head);
return ret;
}
static int alloc_int_buffers(struct xen_snd_front_shbuf *buf,
int num_pages_dir, int num_pages_buffer,
int num_grefs)
{
buf->grefs = kcalloc(num_grefs, sizeof(*buf->grefs), GFP_KERNEL);
if (!buf->grefs)
return -ENOMEM;
buf->directory = kcalloc(num_pages_dir, XEN_PAGE_SIZE, GFP_KERNEL);
if (!buf->directory)
goto fail;
buf->buffer_sz = num_pages_buffer * XEN_PAGE_SIZE;
buf->buffer = alloc_pages_exact(buf->buffer_sz, GFP_KERNEL);
if (!buf->buffer)
goto fail;
return 0;
fail:
kfree(buf->grefs);
buf->grefs = NULL;
kfree(buf->directory);
buf->directory = NULL;
return -ENOMEM;
}
int xen_snd_front_shbuf_alloc(struct xenbus_device *xb_dev,
struct xen_snd_front_shbuf *buf,
unsigned int buffer_sz)
{
int num_pages_buffer, num_pages_dir, num_grefs;
int ret;
xen_snd_front_shbuf_clear(buf);
num_pages_buffer = DIV_ROUND_UP(buffer_sz, XEN_PAGE_SIZE);
/* number of pages the page directory consumes itself */
num_pages_dir = DIV_ROUND_UP(num_pages_buffer,
XENSND_NUM_GREFS_PER_PAGE);
num_grefs = num_pages_buffer + num_pages_dir;
ret = alloc_int_buffers(buf, num_pages_dir,
num_pages_buffer, num_grefs);
if (ret < 0)
return ret;
ret = grant_references(xb_dev, buf, num_pages_dir, num_pages_buffer,
num_grefs);
if (ret < 0)
return ret;
fill_page_dir(buf, num_pages_dir);
return 0;
}


@@ -1,36 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
* Xen para-virtual sound device
*
* Copyright (C) 2016-2018 EPAM Systems Inc.
*
* Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
*/
#ifndef __XEN_SND_FRONT_SHBUF_H
#define __XEN_SND_FRONT_SHBUF_H
#include <xen/grant_table.h>
#include "xen_snd_front_evtchnl.h"
struct xen_snd_front_shbuf {
int num_grefs;
grant_ref_t *grefs;
u8 *directory;
u8 *buffer;
size_t buffer_sz;
};
grant_ref_t xen_snd_front_shbuf_get_dir_start(struct xen_snd_front_shbuf *buf);
int xen_snd_front_shbuf_alloc(struct xenbus_device *xb_dev,
struct xen_snd_front_shbuf *buf,
unsigned int buffer_sz);
void xen_snd_front_shbuf_clear(struct xen_snd_front_shbuf *buf);
void xen_snd_front_shbuf_free(struct xen_snd_front_shbuf *buf);
#endif /* __XEN_SND_FRONT_SHBUF_H */