s390/mem_detect: move tprot loop to early boot phase

Move memory detection to the early boot phase. To store online memory
regions, "struct mem_detect_info" has been introduced, together with the
for_each_mem_detect_block iterator. mem_detect_info is later converted
to memblock.

Also introduce the sclp_early_get_meminfo function to get the maximum
physical memory address and the memory increment size.

Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Reviewed-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Author: Vasily Gorbik <gor@linux.ibm.com>, 2018-04-11 11:56:55 +02:00
Committed by: Martin Schwidefsky
commit 6966d604e2 (parent 17aacfbfa1)
11 changed files with 277 additions and 69 deletions
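
For illustration, the conversion to memblock boils down to a loop like
the following minimal sketch (the helper name add_detected_memory() is
hypothetical; the patch itself does this in memblock_add_mem_detect_info()
in arch/s390/kernel/setup.c, using memblock_physmem_add() rather than
plain memblock_add()):

	static void __init add_detected_memory(void)
	{
		unsigned long start, end;
		int i;

		/* walk every online range recorded during early boot */
		for_each_mem_detect_block(i, &start, &end)
			memblock_add(start, end - start);
	}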

arch/s390/boot/Makefile

@@ -27,7 +27,7 @@ endif
CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
-obj-y := head.o als.o startup.o ebcdic.o sclp_early_core.o mem.o
+obj-y := head.o als.o startup.o mem_detect.o ebcdic.o sclp_early_core.o mem.o
targets := bzImage startup.a $(obj-y)
subdir- := compressed

arch/s390/boot/boot.h

@@ -3,5 +3,6 @@
#define BOOT_BOOT_H
void startup_kernel(void);
void detect_memory(void);
#endif /* BOOT_BOOT_H */

arch/s390/boot/mem_detect.c (new file, 133 lines)

@@ -0,0 +1,133 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <asm/sclp.h>
#include <asm/sections.h>
#include <asm/mem_detect.h>
#include "compressed/decompressor.h"
#include "boot.h"

#define CHUNK_READ_WRITE 0
#define CHUNK_READ_ONLY 1

unsigned long __bootdata(max_physmem_end);
struct mem_detect_info __bootdata(mem_detect);

/* up to 256 storage elements, 1020 subincrements each */
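/*
 * 1020 / 2 bounds the worst case: if every second subincrement is
 * online, nothing can be merged and a storage element needs one
 * mem_detect_block per online subincrement.
 */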
#define ENTRIES_EXTENDED_MAX \
	(256 * (1020 / 2) * sizeof(struct mem_detect_block))

/*
 * To avoid corrupting old kernel memory during dump, find the lowest memory
 * chunk possible, either right after the kernel end (decompressed kernel) or
 * after the initrd (if it is present and there is no hole between the kernel
 * end and the initrd).
 */
static void *mem_detect_alloc_extended(void)
{
	unsigned long offset = ALIGN(mem_safe_offset(), sizeof(u64));

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
	    INITRD_START < offset + ENTRIES_EXTENDED_MAX)
		offset = ALIGN(INITRD_START + INITRD_SIZE, sizeof(u64));

	return (void *)offset;
}

static struct mem_detect_block *__get_mem_detect_block_ptr(u32 n)
{
	if (n < MEM_INLINED_ENTRIES)
		return &mem_detect.entries[n];
	if (unlikely(!mem_detect.entries_extended))
		mem_detect.entries_extended = mem_detect_alloc_extended();
	return &mem_detect.entries_extended[n - MEM_INLINED_ENTRIES];
}

/*
 * Sequential calls to add_mem_detect_block() with adjacent memory areas
 * are merged together into a single memory block.
 */
void add_mem_detect_block(u64 start, u64 end)
{
	struct mem_detect_block *block;

	if (mem_detect.count) {
		block = __get_mem_detect_block_ptr(mem_detect.count - 1);
		if (block->end == start) {
			block->end = end;
			return;
		}
	}

	block = __get_mem_detect_block_ptr(mem_detect.count);
	block->start = start;
	block->end = end;
	mem_detect.count++;
}

static unsigned long get_mem_detect_end(void)
{
	if (mem_detect.count)
		return __get_mem_detect_block_ptr(mem_detect.count - 1)->end;
	return 0;
}
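
/*
 * Probe a real address with the TPROT instruction. The condition code
 * (0 == read/write, 1 == read-only) is returned; a temporary
 * program-check handler pointing behind the probe turns an access to
 * an invalid address into a return value of -EFAULT.
 */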
static int tprot(unsigned long addr)
{
	unsigned long pgm_addr;
	int rc = -EFAULT;
	psw_t old = S390_lowcore.program_new_psw;

	S390_lowcore.program_new_psw.mask = __extract_psw();
	asm volatile(
		"	larl	%[pgm_addr],1f\n"
		"	stg	%[pgm_addr],%[psw_pgm_addr]\n"
		"	tprot	0(%[addr]),0\n"
		"	ipm	%[rc]\n"
		"	srl	%[rc],28\n"
		"1:\n"
		: [pgm_addr] "=&d"(pgm_addr),
		  [psw_pgm_addr] "=Q"(S390_lowcore.program_new_psw.addr),
		  [rc] "+&d"(rc)
		: [addr] "a"(addr)
		: "cc", "memory");
	S390_lowcore.program_new_psw = old;
	return rc;
}
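
/*
 * Probe memory in rzm-sized steps (the storage increment size, with a
 * fallback when SCLP does not report one) and record every contiguous
 * range with a consistent tprot() result as an online memory block.
 */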
static void scan_memory(unsigned long rzm)
{
	unsigned long addr, size;
	int type;

	if (!rzm)
		rzm = 1UL << 20;

	addr = 0;
	do {
		size = 0;
		/* assume lowcore is writable */
		type = addr ? tprot(addr) : CHUNK_READ_WRITE;
		do {
			size += rzm;
			if (max_physmem_end && addr + size >= max_physmem_end)
				break;
		} while (type == tprot(addr + size));
		if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
			if (max_physmem_end && (addr + size > max_physmem_end))
				size = max_physmem_end - addr;
			add_mem_detect_block(addr, addr + size);
		}
		addr += size;
	} while (addr < max_physmem_end);
}

void detect_memory(void)
{
	unsigned long rzm;

	sclp_early_get_meminfo(&max_physmem_end, &rzm);
	scan_memory(rzm);
	mem_detect.info_source = MEM_DETECT_TPROT_LOOP;
	if (!max_physmem_end)
		max_physmem_end = get_mem_detect_end();
}

arch/s390/boot/startup.c

@@ -51,6 +51,7 @@ void startup_kernel(void)
	rescue_initrd();
	sclp_early_read_info();
+	detect_memory();
	if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
		img = decompress_kernel();
		memmove((void *)vmlinux.default_lma, img, vmlinux.image_size);

arch/s390/include/asm/mem_detect.h (new file, 77 lines)

@@ -0,0 +1,77 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_S390_MEM_DETECT_H
#define _ASM_S390_MEM_DETECT_H

#include <linux/types.h>

enum mem_info_source {
	MEM_DETECT_NONE = 0,
	MEM_DETECT_TPROT_LOOP
};

struct mem_detect_block {
	u64 start;
	u64 end;
};

/*
 * Storage element id is defined as 1 byte (up to 256 storage elements).
 * In practice only storage element ids 0 and 1 are used.
 * According to the architecture one storage element could have as many as
 * 1020 subincrements. 255 mem_detect_blocks are embedded in mem_detect_info.
 * If more mem_detect_blocks are required, a block of memory from an already
 * known mem_detect_block is taken (entries_extended points to it).
 */
#define MEM_INLINED_ENTRIES 255 /* (PAGE_SIZE - 16) / 16 */
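/*
 * 255 inline entries keep sizeof(struct mem_detect_info) at one 4K
 * page: 8 bytes for count/info_source/padding, 255 blocks of 16 bytes
 * each, plus the 8-byte entries_extended pointer.
 */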

struct mem_detect_info {
	u32 count;
	u8 info_source;
	struct mem_detect_block entries[MEM_INLINED_ENTRIES];
	struct mem_detect_block *entries_extended;
};
extern struct mem_detect_info mem_detect;

static inline int __get_mem_detect_block(u32 n, unsigned long *start,
					 unsigned long *end)
{
	if (n >= mem_detect.count) {
		*start = 0;
		*end = 0;
		return -1;
	}

	if (n < MEM_INLINED_ENTRIES) {
		*start = (unsigned long)mem_detect.entries[n].start;
		*end = (unsigned long)mem_detect.entries[n].end;
	} else {
		*start = (unsigned long)mem_detect.entries_extended[n - MEM_INLINED_ENTRIES].start;
		*end = (unsigned long)mem_detect.entries_extended[n - MEM_INLINED_ENTRIES].end;
	}
	return 0;
}

/**
 * for_each_mem_detect_block - early online memory range iterator
 * @i: an integer used as loop variable
 * @p_start: ptr to unsigned long for start address of the range
 * @p_end: ptr to unsigned long for end address of the range
 *
 * Walks over detected online memory ranges.
 */
#define for_each_mem_detect_block(i, p_start, p_end)			\
	for (i = 0, __get_mem_detect_block(i, p_start, p_end);		\
	     i < mem_detect.count;					\
	     i++, __get_mem_detect_block(i, p_start, p_end))

static inline void get_mem_detect_reserved(unsigned long *start,
					   unsigned long *size)
{
	*start = (unsigned long)mem_detect.entries_extended;
	if (mem_detect.count > MEM_INLINED_ENTRIES)
		*size = (mem_detect.count - MEM_INLINED_ENTRIES) * sizeof(struct mem_detect_block);
	else
		*size = 0;
}

#endif

arch/s390/include/asm/sclp.h

@@ -113,6 +113,7 @@ void sclp_early_printk(const char *s);
void sclp_early_printk_force(const char *s);
void __sclp_early_printk(const char *s, unsigned int len, unsigned int force);
int sclp_early_get_meminfo(unsigned long *mem, unsigned long *rzm);
int _sclp_get_core_info(struct sclp_core_info *info);
int sclp_core_configure(u8 core);
int sclp_core_deconfigure(u8 core);

arch/s390/include/asm/setup.h

@@ -69,8 +69,6 @@ extern int memory_end_set;
extern unsigned long memory_end;
extern unsigned long max_physmem_end;
-extern void detect_memory_memblock(void);
#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
#define MACHINE_IS_LPAR (S390_lowcore.machine_flags & MACHINE_FLAG_LPAR)

arch/s390/kernel/setup.c

@@ -70,6 +70,7 @@
#include <asm/numa.h>
#include <asm/alternative.h>
#include <asm/nospec-branch.h>
#include <asm/mem_detect.h>
#include "entry.h"
/*
@@ -91,7 +92,8 @@ unsigned long int_hwcap = 0;
int __initdata memory_end_set;
unsigned long __initdata memory_end;
-unsigned long __initdata max_physmem_end;
+unsigned long __bootdata(max_physmem_end);
+struct mem_detect_info __bootdata(mem_detect);
unsigned long VMALLOC_START;
EXPORT_SYMBOL(VMALLOC_START);
@@ -720,6 +722,45 @@ static void __init reserve_initrd(void)
#endif
}

static void __init reserve_mem_detect_info(void)
{
	unsigned long start, size;

	get_mem_detect_reserved(&start, &size);
	if (size)
		memblock_reserve(start, size);
}
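
/* the extended entries are only needed until they are copied to memblock */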
static void __init free_mem_detect_info(void)
{
	unsigned long start, size;

	get_mem_detect_reserved(&start, &size);
	if (size)
		memblock_free(start, size);
}
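
/*
 * Add a range to both memblock.memory and the s390-specific
 * memblock.physmem list; physmem records all online memory independent
 * of later restrictions such as the mem= parameter.
 */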
static void __init memblock_physmem_add(phys_addr_t start, phys_addr_t size)
{
	memblock_dbg("memblock_physmem_add: [%#016llx-%#016llx]\n",
		     start, start + size - 1);
	memblock_add_range(&memblock.memory, start, size, 0, 0);
	memblock_add_range(&memblock.physmem, start, size, 0, 0);
}

static void __init memblock_add_mem_detect_info(void)
{
	unsigned long start, end;
	int i;

	/* keep memblock lists close to the kernel */
	memblock_set_bottom_up(true);
	for_each_mem_detect_block(i, &start, &end)
		memblock_physmem_add(start, end - start);
	memblock_set_bottom_up(false);
	memblock_dump_all();
}

/*
 * Check for initrd being in usable memory
 */
@@ -984,11 +1025,13 @@ void __init setup_arch(char **cmdline_p)
	reserve_oldmem();
	reserve_kernel();
	reserve_initrd();
+	reserve_mem_detect_info();
	memblock_allow_resize();

	/* Get information about *all* installed memory */
-	detect_memory_memblock();
+	memblock_add_mem_detect_info();
+	free_mem_detect_info();
	remove_oldmem();

	/*

arch/s390/mm/Makefile

@@ -4,8 +4,7 @@
#
obj-y := init.o fault.o extmem.o mmap.o vmem.o maccess.o
-obj-y += page-states.o gup.o pageattr.o mem_detect.o
-obj-y += pgtable.o pgalloc.o
+obj-y += page-states.o gup.o pageattr.o pgtable.o pgalloc.o
obj-$(CONFIG_CMM) += cmm.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o

arch/s390/mm/mem_detect.c (deleted)

@@ -1,62 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2008, 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <asm/ipl.h>
#include <asm/sclp.h>
#include <asm/setup.h>

#define CHUNK_READ_WRITE 0
#define CHUNK_READ_ONLY 1

static inline void memblock_physmem_add(phys_addr_t start, phys_addr_t size)
{
	memblock_dbg("memblock_physmem_add: [%#016llx-%#016llx]\n",
		     start, start + size - 1);
	memblock_add_range(&memblock.memory, start, size, 0, 0);
	memblock_add_range(&memblock.physmem, start, size, 0, 0);
}

void __init detect_memory_memblock(void)
{
	unsigned long memsize, rnmax, rzm, addr, size;
	int type;

	rzm = sclp.rzm;
	rnmax = sclp.rnmax;
	memsize = rzm * rnmax;
	if (!rzm)
		rzm = 1UL << 17;
	max_physmem_end = memsize;
	addr = 0;
	/* keep memblock lists close to the kernel */
	memblock_set_bottom_up(true);
	do {
		size = 0;
		/* assume lowcore is writable */
		type = addr ? tprot(addr) : CHUNK_READ_WRITE;
		do {
			size += rzm;
			if (max_physmem_end && addr + size >= max_physmem_end)
				break;
		} while (type == tprot(addr + size));
		if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
			if (max_physmem_end && (addr + size > max_physmem_end))
				size = max_physmem_end - addr;
			memblock_physmem_add(addr, size);
		}
		addr += size;
	} while (addr < max_physmem_end);
	memblock_set_bottom_up(false);
	if (!max_physmem_end)
		max_physmem_end = memblock_end_of_DRAM();
	memblock_dump_all();
}

drivers/s390/char/sclp_early_core.c

@@ -270,3 +270,20 @@ int __init sclp_early_get_info(struct read_info_sccb *info)
	*info = sclp_info_sccb;
	return 0;
}
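
/*
 * Report the maximum physical memory address (increment size times
 * maximum increment number) and the increment size in bytes; rnsize
 * and rnsize2 are given in megabytes, hence the shift by 20.
 */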
int __init sclp_early_get_meminfo(unsigned long *mem, unsigned long *rzm)
{
	unsigned long rnmax;
	unsigned long rnsize;
	struct read_info_sccb *sccb = &sclp_info_sccb;

	if (!sclp_info_sccb_valid)
		return -EIO;

	rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
	rnsize = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
	rnsize <<= 20;
	*mem = rnsize * rnmax;
	*rzm = rnsize;
	return 0;
}