s390/mm: force swiotlb for protected virtualization

On s390, protected virtualization guests have to use bounced I/O
buffers.  That requires some plumbing.

Let us make sure that any device that correctly uses the DMA API with
direct ops is spared the problems that a hypervisor attempting I/O to a
non-shared page would bring.
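
For context, a minimal sketch of the mechanism this enables (assuming the
dma-direct and swiotlb internals of this era; sketch_map_page() and
sketch_bounce() are hypothetical names, not kernel functions): once
swiotlb_force is set to SWIOTLB_FORCE, every streaming mapping on the direct
DMA path is copied through the swiotlb pool, which the guest has shared with
the hypervisor beforehand.

/*
 * Hedged sketch, not actual kernel code: with swiotlb forced, the
 * device never sees a guest-private page, only the shared bounce pool.
 */
static dma_addr_t sketch_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t size,
				  enum dma_data_direction dir)
{
	phys_addr_t phys = page_to_phys(page) + offset;

	if (unlikely(swiotlb_force == SWIOTLB_FORCE))
		return sketch_bounce(dev, phys, size, dir); /* hypothetical helper */

	return phys_to_dma(dev, phys);	/* direct: device sees the page itself */
}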

Signed-off-by: Halil Pasic <pasic@linux.ibm.com>
Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Reviewed-by: Michael Mueller <mimu@linux.ibm.com>
Tested-by: Michael Mueller <mimu@linux.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>

3 changed files with 68 additions and 0 deletions

arch/s390/Kconfig

@@ -1,4 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config ARCH_HAS_MEM_ENCRYPT
	def_bool y

config MMU
	def_bool y
@@ -186,6 +189,7 @@ config S390
	select VIRT_CPU_ACCOUNTING
	select ARCH_HAS_SCALED_CPUTIME
	select HAVE_NMI
	select SWIOTLB

config SCHED_OMIT_FRAME_POINTER
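
Selecting ARCH_HAS_MEM_ENCRYPT is what switches the generic
<linux/mem_encrypt.h> from its built-in no-op stubs to the new arch header
added below. Paraphrasing the generic header of this era (a sketch from
memory, details may differ):

/* Paraphrased sketch of include/linux/mem_encrypt.h of this era. */
#ifdef CONFIG_ARCH_HAS_MEM_ENCRYPT
#include <asm/mem_encrypt.h>	/* the new s390 header below */
#else
#define sme_me_mask	0ULL
static inline bool sme_active(void) { return false; }
static inline bool sev_active(void) { return false; }
#endif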

arch/s390/include/asm/mem_encrypt.h (new file)

@@ -0,0 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef S390_MEM_ENCRYPT_H__
#define S390_MEM_ENCRYPT_H__

#ifndef __ASSEMBLY__

#define sme_me_mask	0ULL

static inline bool sme_active(void) { return false; }
extern bool sev_active(void);

int set_memory_encrypted(unsigned long addr, int numpages);
int set_memory_decrypted(unsigned long addr, int numpages);

#endif /* __ASSEMBLY__ */

#endif /* S390_MEM_ENCRYPT_H__ */
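
These prototypes are consumed by generic code, not by anything s390-specific.
A simplified sketch of the main caller, swiotlb_update_mem_attributes() from
kernel/dma/swiotlb.c of this era (paraphrased; late-init and error handling
omitted):

/*
 * Simplified sketch: once the bounce pool exists, mark all of its
 * pages decrypted -- on s390 that means shared with the hypervisor
 * via uv_set_shared() -- and wipe them so no stale guest data leaks.
 */
void __init swiotlb_update_mem_attributes(void)
{
	void *vaddr = phys_to_virt(io_tlb_start);
	unsigned long bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);

	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
	memset(vaddr, 0, bytes);	/* don't leak stale data to the host */
}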

arch/s390/mm/init.c

@@ -18,6 +18,7 @@
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
@@ -29,6 +30,7 @@
#include <linux/export.h>
#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
@@ -42,6 +44,8 @@
#include <asm/sclp.h>
#include <asm/set_memory.h>
#include <asm/kasan.h>
#include <asm/dma-mapping.h>
#include <asm/uv.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);

@@ -128,6 +132,47 @@ void mark_rodata_ro(void)
	pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
}

int set_memory_encrypted(unsigned long addr, int numpages)
{
	int i;

	/* make specified pages unshared (swiotlb, dma_free) */
	for (i = 0; i < numpages; ++i) {
		uv_remove_shared(addr);
		addr += PAGE_SIZE;
	}
	return 0;
}

int set_memory_decrypted(unsigned long addr, int numpages)
{
	int i;

	/* make specified pages shared (swiotlb, dma_alloc) */
	for (i = 0; i < numpages; ++i) {
		uv_set_shared(addr);
		addr += PAGE_SIZE;
	}
	return 0;
}

/* are we a protected virtualization guest? */
bool sev_active(void)
{
	return is_prot_virt_guest();
}

/* protected virtualization */
static void pv_init(void)
{
	if (!is_prot_virt_guest())
		return;

	/* make sure bounce buffers are shared */
	swiotlb_init(1);
	swiotlb_update_mem_attributes();
	swiotlb_force = SWIOTLB_FORCE;
}
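
Note the ordering in pv_init(): swiotlb_init(1) (the argument is merely the
verbose flag) allocates the pool, swiotlb_update_mem_attributes() shares it
via set_memory_decrypted(), and only then is swiotlb_force set, so no mapping
can be handed out before its backing pages are shared. For reference, a hedged
sketch of what uv_set_shared()/uv_remove_shared() boil down to, based on the
arch/s390/include/asm/uv.h of this era (struct and field names quoted from
memory, treat them as illustrative):

/*
 * Hedged sketch of the ultravisor share call behind uv_set_shared()
 * and uv_remove_shared(); see asm/uv.h for the real thing.
 */
static int sketch_share(unsigned long addr, u16 cmd)
{
	struct uv_cb_share uvcb = {
		.header.cmd = cmd,	/* UVC_CMD_SET_SHARED_ACCESS or UVC_CMD_REMOVE_SHARED_ACCESS */
		.header.len = sizeof(uvcb),
		.paddr = addr,		/* must be page aligned */
	};

	if (!is_prot_virt_guest())
		return -EOPNOTSUPP;
	return uv_call(0, (u64)&uvcb) ? -EINVAL : 0;
}
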
void __init mem_init(void)
{
	cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
@@ -136,6 +181,8 @@ void __init mem_init(void)
	set_max_mapnr(max_low_pfn);
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	pv_init();

	/* Setup guest page hinting */
	cmma_init();