Merge pull request #394 from zandrey/5.4-2.3.x-imx

Update 5.4-2.3.x-imx up to v5.4.133
This commit is contained in:
Otavio Salvador, 2021-07-20 13:42:15 -03:00 (committed by GitHub)
commit f13f2bbe1c
524 changed files with 3276 additions and 1712 deletions

View File

@ -42,8 +42,30 @@ Description:
modification of EVM-protected metadata and
disable all further modification of policy
Note that once a key has been loaded, it will no longer be
possible to enable metadata modification.
Echoing a value is additive, the new value is added to the
existing initialization flags.
For example, after::
echo 2 ><securityfs>/evm
another echo can be performed::
echo 1 ><securityfs>/evm
and the resulting value will be 3.
Note that once an HMAC key has been loaded, it will no longer
be possible to enable metadata modification. Signaling that an
HMAC key has been loaded will clear the corresponding flag.
For example, if the current value is 6 (2 and 4 set)::
echo 1 ><securityfs>/evm
will set the new value to 3 (4 cleared).
Loading an HMAC key is the only way to disable metadata
modification.
Until key loading has been signaled EVM can not create
or validate the 'security.evm' xattr, but returns
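
The flag arithmetic documented above is easy to mis-read, so here is a minimal userspace sketch of the described behaviour. It is illustrative only: the macro names are stand-ins chosen for this example (the real interface is the <securityfs>/evm file, not a C helper), but the bit values 1, 2 and 4 and the clearing rule come straight from the text above.

#include <stdio.h>

/* Illustrative names for the documented bits. */
#define EVM_HMAC_KEY_LOADED		0x1
#define EVM_X509_KEY_LOADED		0x2
#define EVM_ALLOW_METADATA_WRITES	0x4

/* Model of a write to <securityfs>/evm: the new value is OR-ed into the
 * existing flags, and signaling an HMAC key load clears the
 * metadata-modification bit. */
static unsigned int evm_write(unsigned int flags, unsigned int input)
{
	flags |= input;
	if (input & EVM_HMAC_KEY_LOADED)
		flags &= ~EVM_ALLOW_METADATA_WRITES;
	return flags;
}

int main(void)
{
	printf("%u\n", evm_write(evm_write(0, 2), 1));	/* echo 2 then echo 1 -> 3 */
	printf("%u\n", evm_write(6, 1));		/* 6 then echo 1 -> 3 (4 cleared) */
	return 0;
}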

View File

@ -567,6 +567,12 @@
loops can be debugged more effectively on production
systems.
clocksource.max_cswd_read_retries= [KNL]
Number of clocksource_watchdog() retries due to
external delays before the clock will be marked
unstable. Defaults to three retries, that is,
four attempts to read the clock under test.
clearcpuid=BITNUM[,BITNUM...] [X86]
Disable CPUID feature X for the kernel. See
arch/x86/include/asm/cpufeatures.h for the valid bit

View File

@ -38,6 +38,7 @@ Sysfs entries
fan[1-12]_input RO fan tachometer speed in RPM
fan[1-12]_fault RO fan experienced fault
fan[1-6]_target RW desired fan speed in RPM
pwm[1-6]_enable RW regulator mode, 0=disabled, 1=manual mode, 2=rpm mode
pwm[1-6] RW fan target duty cycle (0-255)
pwm[1-6]_enable RW regulator mode, 0=disabled (duty cycle=0%), 1=manual mode, 2=rpm mode
pwm[1-6] RW read: current pwm duty cycle,
write: target pwm duty cycle (0-255)
================== === =======================================================

View File

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 129
SUBLEVEL = 133
EXTRAVERSION =
NAME = Kleptomaniac Octopus
@ -937,7 +937,7 @@ LDFLAGS_vmlinux += $(call ld-option, -X,)
endif
ifeq ($(CONFIG_RELR),y)
LDFLAGS_vmlinux += --pack-dyn-relocs=relr
LDFLAGS_vmlinux += --pack-dyn-relocs=relr --use-android-relr-tags
endif
# make the checker run with the right architecture

View File

@ -914,7 +914,7 @@
0xffffffff 0x3ffcfe7c 0x1c010101 /* pioA */
0x7fffffff 0xfffccc3a 0x3f00cc3a /* pioB */
0xffffffff 0x3ff83fff 0xff00ffff /* pioC */
0x0003ff00 0x8002a800 0x00000000 /* pioD */
0xb003ff00 0x8002a800 0x00000000 /* pioD */
0xffffffff 0x7fffffff 0x76fff1bf /* pioE */
>;

View File

@ -773,10 +773,10 @@ static inline void armv7pmu_write_counter(struct perf_event *event, u64 value)
pr_err("CPU%u writing wrong counter %d\n",
smp_processor_id(), idx);
} else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" ((u32)value));
} else {
armv7_pmnc_select_counter(idx);
asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" ((u32)value));
}
}

View File

@ -134,7 +134,7 @@
uart0: serial@12000 {
compatible = "marvell,armada-3700-uart";
reg = <0x12000 0x200>;
reg = <0x12000 0x18>;
clocks = <&xtalclk>;
interrupts =
<GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,

View File

@ -15,10 +15,10 @@
.macro __uaccess_ttbr0_disable, tmp1
mrs \tmp1, ttbr1_el1 // swapper_pg_dir
bic \tmp1, \tmp1, #TTBR_ASID_MASK
sub \tmp1, \tmp1, #RESERVED_TTBR0_SIZE // reserved_ttbr0 just before swapper_pg_dir
sub \tmp1, \tmp1, #PAGE_SIZE // reserved_pg_dir just before swapper_pg_dir
msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1
isb
add \tmp1, \tmp1, #RESERVED_TTBR0_SIZE
add \tmp1, \tmp1, #PAGE_SIZE
msr ttbr1_el1, \tmp1 // set reserved ASID
isb
.endm

View File

@ -89,12 +89,6 @@
#define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR + TEXT_OFFSET, _end))
#define IDMAP_DIR_SIZE (IDMAP_PGTABLE_LEVELS * PAGE_SIZE)
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
#define RESERVED_TTBR0_SIZE (PAGE_SIZE)
#else
#define RESERVED_TTBR0_SIZE (0)
#endif
/* Initial memory map size */
#if ARM64_SWAPPER_USES_SECTION_MAPS
#define SWAPPER_BLOCK_SHIFT SECTION_SHIFT

View File

@ -36,11 +36,11 @@ static inline void contextidr_thread_switch(struct task_struct *next)
}
/*
* Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0.
* Set TTBR0 to reserved_pg_dir. No translations will be possible via TTBR0.
*/
static inline void cpu_set_reserved_ttbr0(void)
{
unsigned long ttbr = phys_to_ttbr(__pa_symbol(empty_zero_page));
unsigned long ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
write_sysreg(ttbr, ttbr0_el1);
isb();
@ -184,9 +184,9 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
return;
if (mm == &init_mm)
ttbr = __pa_symbol(empty_zero_page);
ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
else
ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;
ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48;
WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
}

View File

@ -471,6 +471,7 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_end[];
extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
extern pgd_t reserved_pg_dir[PTRS_PER_PGD];
extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);

View File

@ -112,8 +112,8 @@ static inline void __uaccess_ttbr0_disable(void)
local_irq_save(flags);
ttbr = read_sysreg(ttbr1_el1);
ttbr &= ~TTBR_ASID_MASK;
/* reserved_ttbr0 placed before swapper_pg_dir */
write_sysreg(ttbr - RESERVED_TTBR0_SIZE, ttbr0_el1);
/* reserved_pg_dir placed before swapper_pg_dir */
write_sysreg(ttbr - PAGE_SIZE, ttbr0_el1);
isb();
/* Set reserved ASID */
write_sysreg(ttbr, ttbr1_el1);

View File

@ -1018,9 +1018,10 @@ ENDPROC(el0_svc)
*/
.pushsection ".entry.tramp.text", "ax"
// Move from tramp_pg_dir to swapper_pg_dir
.macro tramp_map_kernel, tmp
mrs \tmp, ttbr1_el1
add \tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
add \tmp, \tmp, #(2 * PAGE_SIZE)
bic \tmp, \tmp, #USER_ASID_FLAG
msr ttbr1_el1, \tmp
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
@ -1037,9 +1038,10 @@ alternative_else_nop_endif
#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
.endm
// Move from swapper_pg_dir to tramp_pg_dir
.macro tramp_unmap_kernel, tmp
mrs \tmp, ttbr1_el1
sub \tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
sub \tmp, \tmp, #(2 * PAGE_SIZE)
orr \tmp, \tmp, #USER_ASID_FLAG
msr ttbr1_el1, \tmp
/*

View File

@ -356,7 +356,7 @@ void __init setup_arch(char **cmdline_p)
* faults in case uaccess_enable() is inadvertently called by the init
* thread.
*/
init_task.thread_info.ttbr0 = __pa_symbol(empty_zero_page);
init_task.thread_info.ttbr0 = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
#endif
#ifdef CONFIG_VT

View File

@ -157,13 +157,11 @@ SECTIONS
. += PAGE_SIZE;
#endif
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
reserved_ttbr0 = .;
. += RESERVED_TTBR0_SIZE;
#endif
reserved_pg_dir = .;
. += PAGE_SIZE;
swapper_pg_dir = .;
. += PAGE_SIZE;
swapper_pg_end = .;
. = ALIGN(SEGMENT_ALIGN);
__init_begin = .;

View File

@ -166,7 +166,7 @@ ENDPROC(cpu_do_switch_mm)
.pushsection ".idmap.text", "awx"
.macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
adrp \tmp1, empty_zero_page
adrp \tmp1, reserved_pg_dir
phys_to_ttbr \tmp2, \tmp1
offset_ttbr1 \tmp2, \tmp1
msr ttbr1_el1, \tmp2

View File

@ -343,7 +343,7 @@ init_record_index_pools(void)
/* - 2 - */
sect_min_size = sal_log_sect_min_sizes[0];
for (i = 1; i < sizeof sal_log_sect_min_sizes/sizeof(size_t); i++)
for (i = 1; i < ARRAY_SIZE(sal_log_sect_min_sizes); i++)
if (sect_min_size > sal_log_sect_min_sizes[i])
sect_min_size = sal_log_sect_min_sizes[i];
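
The ia64 hunk above swaps an open-coded element count for ARRAY_SIZE(). As a quick sketch of why that is more robust (standalone C; the macro shown is the usual sizeof form, and the table contents are made-up sample values, not the real SAL minimum sizes):

#include <stdio.h>
#include <stddef.h>

/* Element count of a true array, same idea as the kernel macro. */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

static size_t sal_log_sect_min_sizes[] = { 48, 64, 80, 96 };	/* sample data */

int main(void)
{
	size_t i, sect_min_size = sal_log_sect_min_sizes[0];

	/* sizeof(arr)/sizeof(size_t) is only correct while the element type
	 * happens to be size_t; ARRAY_SIZE() stays correct if it changes. */
	for (i = 1; i < ARRAY_SIZE(sal_log_sect_min_sizes); i++)
		if (sect_min_size > sal_log_sect_min_sizes[i])
			sect_min_size = sal_log_sect_min_sizes[i];

	printf("%zu\n", sect_min_size);
	return 0;
}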

View File

@ -23,6 +23,9 @@ config ATARI
this kernel on an Atari, say Y here and browse the material
available in <file:Documentation/m68k>; otherwise say N.
config ATARI_KBD_CORE
bool
config MAC
bool "Macintosh support"
depends on MMU

View File

@ -5,6 +5,7 @@
* Very small subset of simple string routines
*/
#include <linux/compiler_attributes.h>
#include <linux/types.h>
void *memcpy(void *dest, const void *src, size_t n)
@ -27,3 +28,19 @@ void *memset(void *s, int c, size_t n)
ss[i] = c;
return s;
}
void * __weak memmove(void *dest, const void *src, size_t n)
{
unsigned int i;
const char *s = src;
char *d = dest;
if ((uintptr_t)dest < (uintptr_t)src) {
for (i = 0; i < n; i++)
d[i] = s[i];
} else {
for (i = n; i > 0; i--)
d[i - 1] = s[i - 1];
}
return dest;
}
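
The weak memmove() added above picks the copy direction from the relative position of source and destination so that overlapping regions survive. The short standalone test below shows the case that direction handling protects (it uses the C library memmove, which follows the same rule):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[8] = "abcdef";

	/* Shift the string right by one inside the same buffer.
	 * dest > src, so the copy must run backwards; a naive forward
	 * copy would smear 'a' over the whole region. */
	memmove(buf + 1, buf, 6);
	printf("%s\n", buf);	/* prints "aabcdef" */
	return 0;
}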

View File

@ -36,7 +36,7 @@ extern pte_t *pkmap_page_table;
* easily, subsequent pte tables have to be allocated in one physical
* chunk of RAM.
*/
#ifdef CONFIG_PHYS_ADDR_T_64BIT
#if defined(CONFIG_PHYS_ADDR_T_64BIT) || defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
#define LAST_PKMAP 512
#else
#define LAST_PKMAP 1024

View File

@ -53,7 +53,13 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
flush_tlb_page(vma, addr & huge_page_mask(hstate_vma(vma)));
/*
* clear the huge pte entry firstly, so that the other smp threads will
* not get old pte entry after finishing flush_tlb_page and before
* setting new huge pte entry
*/
huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
flush_tlb_page(vma, addr);
}
#define __HAVE_ARCH_HUGE_PTE_NONE

View File

@ -2007,7 +2007,7 @@ _ASM_MACRO_0(tlbginvf, _ASM_INSN_IF_MIPS(0x4200000c)
({ int __res; \
__asm__ __volatile__( \
".set\tpush\n\t" \
".set\tmips32r2\n\t" \
".set\tmips32r5\n\t" \
_ASM_SET_VIRT \
"mfgc0\t%0, " #source ", %1\n\t" \
".set\tpop" \
@ -2020,7 +2020,7 @@ _ASM_MACRO_0(tlbginvf, _ASM_INSN_IF_MIPS(0x4200000c)
({ unsigned long long __res; \
__asm__ __volatile__( \
".set\tpush\n\t" \
".set\tmips64r2\n\t" \
".set\tmips64r5\n\t" \
_ASM_SET_VIRT \
"dmfgc0\t%0, " #source ", %1\n\t" \
".set\tpop" \
@ -2033,7 +2033,7 @@ _ASM_MACRO_0(tlbginvf, _ASM_INSN_IF_MIPS(0x4200000c)
do { \
__asm__ __volatile__( \
".set\tpush\n\t" \
".set\tmips32r2\n\t" \
".set\tmips32r5\n\t" \
_ASM_SET_VIRT \
"mtgc0\t%z0, " #register ", %1\n\t" \
".set\tpop" \
@ -2045,7 +2045,7 @@ do { \
do { \
__asm__ __volatile__( \
".set\tpush\n\t" \
".set\tmips64r2\n\t" \
".set\tmips64r5\n\t" \
_ASM_SET_VIRT \
"dmtgc0\t%z0, " #register ", %1\n\t" \
".set\tpop" \

View File

@ -62,11 +62,15 @@ do { \
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
pmd_t *pmd;
pmd_t *pmd = NULL;
struct page *pg;
pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ORDER);
if (pmd)
pg = alloc_pages(GFP_KERNEL | __GFP_ACCOUNT, PMD_ORDER);
if (pg) {
pgtable_pmd_page_ctor(pg);
pmd = (pmd_t *)page_address(pg);
pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
}
return pmd;
}

View File

@ -200,6 +200,9 @@ static void __init node_mem_init(unsigned int node)
if (node_end_pfn(0) >= (0xffffffff >> PAGE_SHIFT))
memblock_reserve((node_addrspace_offset | 0xfe000000),
32 << 20);
/* Reserve pfn range 0~node[0]->node_start_pfn */
memblock_reserve(0, PAGE_SIZE * start_pfn);
}
}

View File

@ -44,6 +44,8 @@
# define SMPWMB eieio
#endif
/* clang defines this macro for a builtin, which will not work with runtime patching */
#undef __lwsync
#define __lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
#define dma_rmb() __lwsync()
#define dma_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")

View File

@ -99,6 +99,36 @@ static inline int cpu_last_thread_sibling(int cpu)
return cpu | (threads_per_core - 1);
}
/*
* tlb_thread_siblings are siblings which share a TLB. This is not
* architected, is not something a hypervisor could emulate and a future
* CPU may change behaviour even in compat mode, so this should only be
* used on PowerNV, and only with care.
*/
static inline int cpu_first_tlb_thread_sibling(int cpu)
{
if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
return cpu & ~0x6; /* Big Core */
else
return cpu_first_thread_sibling(cpu);
}
static inline int cpu_last_tlb_thread_sibling(int cpu)
{
if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
return cpu | 0x6; /* Big Core */
else
return cpu_last_thread_sibling(cpu);
}
static inline int cpu_tlb_thread_sibling_step(void)
{
if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
return 2; /* Big Core */
else
return 1;
}
static inline u32 get_tensr(void)
{
#ifdef CONFIG_BOOKE
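
The big-core mapping used by cpu_first/last_tlb_thread_sibling() above groups threads that share a TLB by masking or setting bits 1-2 when CPU_FTR_ARCH_300 is set and threads_per_core == 8. The standalone sketch below just evaluates that arithmetic (feature checks dropped) so the grouping, and the step of 2 used by the callers, is visible:

#include <stdio.h>

/* Same arithmetic as the helpers above, assuming an 8-thread big core. */
static int first_tlb_sibling(int cpu) { return cpu & ~0x6; }
static int last_tlb_sibling(int cpu)  { return cpu | 0x6; }

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < 8; cpu++)
		printf("cpu %d: first %d, last %d\n",
		       cpu, first_tlb_sibling(cpu), last_tlb_sibling(cpu));
	/* Threads {0,2,4,6} and {1,3,5,7} each form one TLB group,
	 * which is why the callers iterate with a step of 2. */
	return 0;
}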

View File

@ -588,6 +588,8 @@ static void nmi_stop_this_cpu(struct pt_regs *regs)
/*
* IRQs are already hard disabled by the smp_handle_nmi_ipi.
*/
set_cpu_online(smp_processor_id(), false);
spin_begin();
while (1)
spin_cpu_relax();
@ -603,6 +605,15 @@ void smp_send_stop(void)
static void stop_this_cpu(void *dummy)
{
hard_irq_disable();
/*
* Offlining CPUs in stop_this_cpu can result in scheduler warnings,
* (see commit de6e5d38417e), but printk_safe_flush_on_panic() wants
* to know other CPUs are offline before it breaks locks to flush
* printk buffers, in case we panic()ed while holding the lock.
*/
set_cpu_online(smp_processor_id(), false);
spin_begin();
while (1)
spin_cpu_relax();

View File

@ -19,6 +19,7 @@
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <asm/kprobes.h>
#include <asm/paca.h>
@ -230,17 +231,31 @@ static void handle_backtrace_ipi(struct pt_regs *regs)
static void raise_backtrace_ipi(cpumask_t *mask)
{
struct paca_struct *p;
unsigned int cpu;
u64 delay_us;
for_each_cpu(cpu, mask) {
if (cpu == smp_processor_id())
if (cpu == smp_processor_id()) {
handle_backtrace_ipi(NULL);
else
smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, 5 * USEC_PER_SEC);
}
continue;
}
for_each_cpu(cpu, mask) {
struct paca_struct *p = paca_ptrs[cpu];
delay_us = 5 * USEC_PER_SEC;
if (smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, delay_us)) {
// Now wait up to 5s for the other CPU to do its backtrace
while (cpumask_test_cpu(cpu, mask) && delay_us) {
udelay(1);
delay_us--;
}
// Other CPU cleared itself from the mask
if (delay_us)
continue;
}
p = paca_ptrs[cpu];
cpumask_clear_cpu(cpu, mask);

View File

@ -2536,7 +2536,7 @@ static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
cpumask_t *cpu_in_guest;
int i;
cpu = cpu_first_thread_sibling(cpu);
cpu = cpu_first_tlb_thread_sibling(cpu);
if (nested) {
cpumask_set_cpu(cpu, &nested->need_tlb_flush);
cpu_in_guest = &nested->cpu_in_guest;
@ -2550,9 +2550,10 @@ static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
* the other side is the first smp_mb() in kvmppc_run_core().
*/
smp_mb();
for (i = 0; i < threads_per_core; ++i)
if (cpumask_test_cpu(cpu + i, cpu_in_guest))
smp_call_function_single(cpu + i, do_nothing, NULL, 1);
for (i = cpu; i <= cpu_last_tlb_thread_sibling(cpu);
i += cpu_tlb_thread_sibling_step())
if (cpumask_test_cpu(i, cpu_in_guest))
smp_call_function_single(i, do_nothing, NULL, 1);
}
static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
@ -2583,8 +2584,8 @@ static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
*/
if (prev_cpu != pcpu) {
if (prev_cpu >= 0 &&
cpu_first_thread_sibling(prev_cpu) !=
cpu_first_thread_sibling(pcpu))
cpu_first_tlb_thread_sibling(prev_cpu) !=
cpu_first_tlb_thread_sibling(pcpu))
radix_flush_cpu(kvm, prev_cpu, vcpu);
if (nested)
nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu;

View File

@ -847,7 +847,7 @@ void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
* Thus we make all 4 threads use the same bit.
*/
if (cpu_has_feature(CPU_FTR_ARCH_300))
pcpu = cpu_first_thread_sibling(pcpu);
pcpu = cpu_first_tlb_thread_sibling(pcpu);
if (nested)
need_tlb_flush = &nested->need_tlb_flush;

View File

@ -51,7 +51,8 @@ void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
hr->ppr = vcpu->arch.ppr;
}
static void byteswap_pt_regs(struct pt_regs *regs)
/* Use noinline_for_stack due to https://bugs.llvm.org/show_bug.cgi?id=49610 */
static noinline_for_stack void byteswap_pt_regs(struct pt_regs *regs)
{
unsigned long *addr = (unsigned long *) regs;

View File

@ -67,7 +67,7 @@ static int global_invalidates(struct kvm *kvm)
* so use the bit for the first thread to represent the core.
*/
if (cpu_has_feature(CPU_FTR_ARCH_300))
cpu = cpu_first_thread_sibling(cpu);
cpu = cpu_first_tlb_thread_sibling(cpu);
cpumask_clear_cpu(cpu, &kvm->arch.need_tlb_flush);
}

View File

@ -204,9 +204,7 @@ static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code,
{
int is_exec = TRAP(regs) == 0x400;
/* NX faults set DSISR_PROTFAULT on the 8xx, DSISR_NOEXEC_OR_G on others */
if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT |
DSISR_PROTFAULT))) {
if (is_exec) {
pr_crit_ratelimited("kernel tried to execute %s page (%lx) - exploit attempt? (uid: %d)\n",
address >= TASK_SIZE ? "exec-protected" : "user",
address,

View File

@ -920,7 +920,7 @@ config CMM_IUCV
config APPLDATA_BASE
def_bool n
prompt "Linux - VM Monitor Stream, base infrastructure"
depends on PROC_FS
depends on PROC_SYSCTL
help
This provides a kernel interface for creating and updating z/VM APPLDATA
monitor records. The monitor records are updated at certain time

View File

@ -79,12 +79,16 @@ struct stack_frame {
CALL_ARGS_4(arg1, arg2, arg3, arg4); \
register unsigned long r4 asm("6") = (unsigned long)(arg5)
#define CALL_FMT_0 "=&d" (r2) :
#define CALL_FMT_1 "+&d" (r2) :
#define CALL_FMT_2 CALL_FMT_1 "d" (r3),
#define CALL_FMT_3 CALL_FMT_2 "d" (r4),
#define CALL_FMT_4 CALL_FMT_3 "d" (r5),
#define CALL_FMT_5 CALL_FMT_4 "d" (r6),
/*
* To keep this simple mark register 2-6 as being changed (volatile)
* by the called function, even though register 6 is saved/nonvolatile.
*/
#define CALL_FMT_0 "=&d" (r2)
#define CALL_FMT_1 "+&d" (r2)
#define CALL_FMT_2 CALL_FMT_1, "+&d" (r3)
#define CALL_FMT_3 CALL_FMT_2, "+&d" (r4)
#define CALL_FMT_4 CALL_FMT_3, "+&d" (r5)
#define CALL_FMT_5 CALL_FMT_4, "+&d" (r6)
#define CALL_CLOBBER_5 "0", "1", "14", "cc", "memory"
#define CALL_CLOBBER_4 CALL_CLOBBER_5
@ -105,7 +109,7 @@ struct stack_frame {
" brasl 14,%[_fn]\n" \
" la 15,0(%[_prev])\n" \
: [_prev] "=&a" (prev), CALL_FMT_##nr \
[_stack] "a" (stack), \
: [_stack] "a" (stack), \
[_bc] "i" (offsetof(struct stack_frame, back_chain)), \
[_fn] "X" (fn) : CALL_CLOBBER_##nr); \
r2; \

View File

@ -318,31 +318,31 @@ static void allow_cpu_feat(unsigned long nr)
static inline int plo_test_bit(unsigned char nr)
{
register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
unsigned long function = (unsigned long)nr | 0x100;
int cc;
asm volatile(
" lgr 0,%[function]\n"
/* Parameter registers are ignored for "test bit" */
" plo 0,0,0,0(0)\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (cc)
: "d" (r0)
: "cc");
: [function] "d" (function)
: "cc", "0");
return cc == 0;
}
static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
{
register unsigned long r0 asm("0") = 0; /* query function */
register unsigned long r1 asm("1") = (unsigned long) query;
asm volatile(
/* Parameter regs are ignored */
" lghi 0,0\n"
" lgr 1,%[query]\n"
/* Parameter registers are ignored */
" .insn rrf,%[opc] << 16,2,4,6,0\n"
:
: "d" (r0), "a" (r1), [opc] "i" (opcode)
: "cc", "memory");
: [query] "d" ((unsigned long)query), [opc] "i" (opcode)
: "cc", "memory", "0", "1");
}
#define INSN_SORTL 0xb938

View File

@ -1794,9 +1794,25 @@ static void sev_asid_free(struct kvm *kvm)
__sev_asid_free(sev->asid);
}
static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
static void sev_decommission(unsigned int handle)
{
struct sev_data_decommission *decommission;
if (!handle)
return;
decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
if (!decommission)
return;
decommission->handle = handle;
sev_guest_decommission(decommission, NULL);
kfree(decommission);
}
static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
{
struct sev_data_deactivate *data;
if (!handle)
@ -1814,15 +1830,7 @@ static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
sev_guest_df_flush(NULL);
kfree(data);
decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
if (!decommission)
return;
/* decommission handle */
decommission->handle = handle;
sev_guest_decommission(decommission, NULL);
kfree(decommission);
sev_decommission(handle);
}
static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
@ -1960,6 +1968,7 @@ static void sev_vm_destroy(struct kvm *kvm)
list_for_each_safe(pos, q, head) {
__unregister_enc_region_locked(kvm,
list_entry(pos, struct enc_region, list));
cond_resched();
}
}
@ -6475,8 +6484,10 @@ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
/* Bind ASID to this guest */
ret = sev_bind_asid(kvm, start->handle, error);
if (ret)
if (ret) {
sev_decommission(start->handle);
goto e_free_session;
}
/* return handle to userspace */
params.handle = start->handle;

View File

@ -5099,7 +5099,7 @@ static int handle_vmfunc(struct kvm_vcpu *vcpu)
}
vmcs12 = get_vmcs12(vcpu);
if ((vmcs12->vm_function_control & (1 << function)) == 0)
if (!(vmcs12->vm_function_control & BIT_ULL(function)))
goto fail;
switch (function) {
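
The vmfunc change above matters because vmcs12->vm_function_control is a 64-bit field: a mask built with a plain `1 << function` is only int wide (and the shift is undefined once function reaches the width of int), so function numbers at bit 32 and above could never be tested. A small standalone sketch, with BIT_ULL open-coded the way the kernel defines it:

#include <stdio.h>
#include <stdint.h>

/* Kernel definition: BIT_ULL(nr) is (1ULL << (nr)). */
#define BIT_ULL(nr) (1ULL << (nr))

int main(void)
{
	uint64_t control = BIT_ULL(40);	/* pretend function 40 is enabled */
	unsigned int function = 40;

	/* A 64-bit mask finds the bit. An int-sized (1 << function) mask
	 * could never reach bit 40, and the shift itself would be
	 * undefined behaviour, so that variant is deliberately not shown. */
	printf("enabled: %d\n", !!(control & BIT_ULL(function)));
	return 0;
}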

View File

@ -571,10 +571,14 @@ static inline unsigned int blk_rq_get_max_segments(struct request *rq)
static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
unsigned int nr_phys_segs)
{
if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
if (blk_integrity_merge_bio(req->q, req, bio) == false)
goto no_merge;
if (blk_integrity_merge_bio(req->q, req, bio) == false)
/* discard request merge won't add new segment */
if (req_op(req) == REQ_OP_DISCARD)
return 1;
if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
goto no_merge;
/*

View File

@ -266,8 +266,8 @@ void rq_qos_wait(struct rq_wait *rqw, void *private_data,
if (!has_sleeper && acquire_inflight_cb(rqw, private_data))
return;
prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE);
has_sleeper = !wq_has_single_sleeper(&rqw->wait);
has_sleeper = !prepare_to_wait_exclusive(&rqw->wait, &data.wq,
TASK_UNINTERRUPTIBLE);
do {
/* The memory barrier in set_task_state saves us here. */
if (data.got_token)

View File

@ -7,6 +7,7 @@
#include <linux/blk_types.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/blk-mq.h>
#include "blk-mq-debugfs.h"
@ -99,8 +100,21 @@ static inline void rq_wait_init(struct rq_wait *rq_wait)
static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
{
/*
* No IO can be in-flight when adding rqos, so freeze queue, which
* is fine since we only support rq_qos for blk-mq queue.
*
* Reuse ->queue_lock for protecting against other concurrent
* rq_qos adding/deleting
*/
blk_mq_freeze_queue(q);
spin_lock_irq(&q->queue_lock);
rqos->next = q->rq_qos;
q->rq_qos = rqos;
spin_unlock_irq(&q->queue_lock);
blk_mq_unfreeze_queue(q);
if (rqos->ops->debugfs_attrs)
blk_mq_debugfs_register_rqos(rqos);
@ -110,12 +124,22 @@ static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
{
struct rq_qos **cur;
/*
* See comment in rq_qos_add() about freezing queue & using
* ->queue_lock.
*/
blk_mq_freeze_queue(q);
spin_lock_irq(&q->queue_lock);
for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
if (*cur == rqos) {
*cur = rqos->next;
break;
}
}
spin_unlock_irq(&q->queue_lock);
blk_mq_unfreeze_queue(q);
blk_mq_debugfs_unregister_rqos(rqos);
}

View File

@ -77,7 +77,8 @@ enum {
static inline bool rwb_enabled(struct rq_wb *rwb)
{
return rwb && rwb->wb_normal != 0;
return rwb && rwb->enable_state != WBT_STATE_OFF_DEFAULT &&
rwb->wb_normal != 0;
}
static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
@ -644,9 +645,13 @@ void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
void wbt_enable_default(struct request_queue *q)
{
struct rq_qos *rqos = wbt_rq_qos(q);
/* Throttling already enabled? */
if (rqos)
if (rqos) {
if (RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT)
RQWB(rqos)->enable_state = WBT_STATE_ON_DEFAULT;
return;
}
/* Queue not registered? Maybe shutting down... */
if (!blk_queue_registered(q))
@ -710,7 +715,7 @@ void wbt_disable_default(struct request_queue *q)
rwb = RQWB(rqos);
if (rwb->enable_state == WBT_STATE_ON_DEFAULT) {
blk_stat_deactivate(rwb->cb);
rwb->wb_normal = 0;
rwb->enable_state = WBT_STATE_OFF_DEFAULT;
}
}
EXPORT_SYMBOL_GPL(wbt_disable_default);

View File

@ -34,6 +34,7 @@ enum {
enum {
WBT_STATE_ON_DEFAULT = 1,
WBT_STATE_ON_MANUAL = 2,
WBT_STATE_OFF_DEFAULT
};
struct rq_wb {

View File

@ -20,12 +20,24 @@
static const struct crypto_type crypto_shash_type;
int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)
static int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)
{
return -ENOSYS;
}
EXPORT_SYMBOL_GPL(shash_no_setkey);
/*
* Check whether an shash algorithm has a setkey function.
*
* For CFI compatibility, this must not be an inline function. This is because
* when CFI is enabled, modules won't get the same address for shash_no_setkey
* (if it were exported, which inlining would require) as the core kernel will.
*/
bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
{
return alg->setkey != shash_no_setkey;
}
EXPORT_SYMBOL_GPL(crypto_shash_alg_has_setkey);
static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)

View File

@ -8,6 +8,11 @@ ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
#
# ACPI Boot-Time Table Parsing
#
ifeq ($(CONFIG_ACPI_CUSTOM_DSDT),y)
tables.o: $(src)/../../include/$(subst $\",,$(CONFIG_ACPI_CUSTOM_DSDT_FILE)) ;
endif
obj-$(CONFIG_ACPI) += tables.o
obj-$(CONFIG_X86) += blacklist.o

View File

@ -262,7 +262,7 @@ static uint32_t acpi_pad_idle_cpus_num(void)
return ps_tsk_num;
}
static ssize_t acpi_pad_rrtime_store(struct device *dev,
static ssize_t rrtime_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long num;
@ -276,16 +276,14 @@ static ssize_t acpi_pad_rrtime_store(struct device *dev,
return count;
}
static ssize_t acpi_pad_rrtime_show(struct device *dev,
static ssize_t rrtime_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", round_robin_time);
}
static DEVICE_ATTR(rrtime, S_IRUGO|S_IWUSR,
acpi_pad_rrtime_show,
acpi_pad_rrtime_store);
static DEVICE_ATTR_RW(rrtime);
static ssize_t acpi_pad_idlepct_store(struct device *dev,
static ssize_t idlepct_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long num;
@ -299,16 +297,14 @@ static ssize_t acpi_pad_idlepct_store(struct device *dev,
return count;
}
static ssize_t acpi_pad_idlepct_show(struct device *dev,
static ssize_t idlepct_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", idle_pct);
}
static DEVICE_ATTR(idlepct, S_IRUGO|S_IWUSR,
acpi_pad_idlepct_show,
acpi_pad_idlepct_store);
static DEVICE_ATTR_RW(idlepct);
static ssize_t acpi_pad_idlecpus_store(struct device *dev,
static ssize_t idlecpus_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long num;
@ -320,16 +316,14 @@ static ssize_t acpi_pad_idlecpus_store(struct device *dev,
return count;
}
static ssize_t acpi_pad_idlecpus_show(struct device *dev,
static ssize_t idlecpus_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return cpumap_print_to_pagebuf(false, buf,
to_cpumask(pad_busy_cpus_bits));
}
static DEVICE_ATTR(idlecpus, S_IRUGO|S_IWUSR,
acpi_pad_idlecpus_show,
acpi_pad_idlecpus_store);
static DEVICE_ATTR_RW(idlecpus);
static int acpi_pad_add_sysfs(struct acpi_device *device)
{
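
The renames above (acpi_pad_rrtime_show() to rrtime_show(), and so on) follow from how DEVICE_ATTR_RW()/_RO()/_WO() work: the helpers derive the callback names from the attribute name. A minimal sketch of the convention with a hypothetical "foo" attribute (kernel-style code, not tied to this driver):

#include <linux/device.h>
#include <linux/kernel.h>

static unsigned long foo_value;

/* DEVICE_ATTR_RW(foo) declares dev_attr_foo with mode 0644 and expects
 * callbacks named exactly foo_show() and foo_store() -- hence the renames. */
static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%lu\n", foo_value);
}

static ssize_t foo_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	int ret = kstrtoul(buf, 0, &foo_value);

	return ret ? ret : count;
}
static DEVICE_ATTR_RW(foo);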

View File

@ -237,7 +237,7 @@ static ssize_t time_show(struct device *dev, struct device_attribute *attr,
rt.tz, rt.daylight);
}
static DEVICE_ATTR(time, S_IRUSR | S_IWUSR, time_show, time_store);
static DEVICE_ATTR_RW(time);
static struct attribute *acpi_tad_time_attrs[] = {
&dev_attr_time.attr,
@ -446,7 +446,7 @@ static ssize_t ac_alarm_show(struct device *dev, struct device_attribute *attr,
return acpi_tad_alarm_read(dev, buf, ACPI_TAD_AC_TIMER);
}
static DEVICE_ATTR(ac_alarm, S_IRUSR | S_IWUSR, ac_alarm_show, ac_alarm_store);
static DEVICE_ATTR_RW(ac_alarm);
static ssize_t ac_policy_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@ -462,7 +462,7 @@ static ssize_t ac_policy_show(struct device *dev, struct device_attribute *attr,
return acpi_tad_policy_read(dev, buf, ACPI_TAD_AC_TIMER);
}
static DEVICE_ATTR(ac_policy, S_IRUSR | S_IWUSR, ac_policy_show, ac_policy_store);
static DEVICE_ATTR_RW(ac_policy);
static ssize_t ac_status_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@ -478,7 +478,7 @@ static ssize_t ac_status_show(struct device *dev, struct device_attribute *attr,
return acpi_tad_status_read(dev, buf, ACPI_TAD_AC_TIMER);
}
static DEVICE_ATTR(ac_status, S_IRUSR | S_IWUSR, ac_status_show, ac_status_store);
static DEVICE_ATTR_RW(ac_status);
static struct attribute *acpi_tad_attrs[] = {
&dev_attr_caps.attr,
@ -505,7 +505,7 @@ static ssize_t dc_alarm_show(struct device *dev, struct device_attribute *attr,
return acpi_tad_alarm_read(dev, buf, ACPI_TAD_DC_TIMER);
}
static DEVICE_ATTR(dc_alarm, S_IRUSR | S_IWUSR, dc_alarm_show, dc_alarm_store);
static DEVICE_ATTR_RW(dc_alarm);
static ssize_t dc_policy_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@ -521,7 +521,7 @@ static ssize_t dc_policy_show(struct device *dev, struct device_attribute *attr,
return acpi_tad_policy_read(dev, buf, ACPI_TAD_DC_TIMER);
}
static DEVICE_ATTR(dc_policy, S_IRUSR | S_IWUSR, dc_policy_show, dc_policy_store);
static DEVICE_ATTR_RW(dc_policy);
static ssize_t dc_status_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@ -537,7 +537,7 @@ static ssize_t dc_status_show(struct device *dev, struct device_attribute *attr,
return acpi_tad_status_read(dev, buf, ACPI_TAD_DC_TIMER);
}
static DEVICE_ATTR(dc_status, S_IRUSR | S_IWUSR, dc_status_show, dc_status_store);
static DEVICE_ATTR_RW(dc_status);
static struct attribute *acpi_tad_dc_attrs[] = {
&dev_attr_dc_alarm.attr,

View File

@ -375,6 +375,13 @@ acpi_ns_repair_CID(struct acpi_evaluate_info *info,
(*element_ptr)->common.reference_count =
original_ref_count;
/*
* The original_element holds a reference from the package object
* that represents _HID. Since a new element was created by _HID,
* remove the reference from the _CID package.
*/
acpi_ut_remove_reference(original_element);
}
element_ptr++;

View File

@ -15,40 +15,19 @@
static void *bgrt_image;
static struct kobject *bgrt_kobj;
static ssize_t show_version(struct device *dev,
struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.version);
}
static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
#define BGRT_SHOW(_name, _member) \
static ssize_t _name##_show(struct kobject *kobj, \
struct kobj_attribute *attr, char *buf) \
{ \
return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab._member); \
} \
struct kobj_attribute bgrt_attr_##_name = __ATTR_RO(_name)
static ssize_t show_status(struct device *dev,
struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.status);
}
static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
static ssize_t show_type(struct device *dev,
struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.image_type);
}
static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
static ssize_t show_xoffset(struct device *dev,
struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.image_offset_x);
}
static DEVICE_ATTR(xoffset, S_IRUGO, show_xoffset, NULL);
static ssize_t show_yoffset(struct device *dev,
struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n", bgrt_tab.image_offset_y);
}
static DEVICE_ATTR(yoffset, S_IRUGO, show_yoffset, NULL);
BGRT_SHOW(version, version);
BGRT_SHOW(status, status);
BGRT_SHOW(type, image_type);
BGRT_SHOW(xoffset, image_offset_x);
BGRT_SHOW(yoffset, image_offset_y);
static ssize_t image_read(struct file *file, struct kobject *kobj,
struct bin_attribute *attr, char *buf, loff_t off, size_t count)
@ -60,11 +39,11 @@ static ssize_t image_read(struct file *file, struct kobject *kobj,
static BIN_ATTR_RO(image, 0); /* size gets filled in later */
static struct attribute *bgrt_attributes[] = {
&dev_attr_version.attr,
&dev_attr_status.attr,
&dev_attr_type.attr,
&dev_attr_xoffset.attr,
&dev_attr_yoffset.attr,
&bgrt_attr_version.attr,
&bgrt_attr_status.attr,
&bgrt_attr_type.attr,
&bgrt_attr_xoffset.attr,
&bgrt_attr_yoffset.attr,
NULL,
};

View File

@ -1240,6 +1240,7 @@ static int __init acpi_init(void)
result = acpi_bus_init();
if (result) {
kobject_put(acpi_kobj);
disable_acpi();
return result;
}

View File

@ -325,11 +325,11 @@ int acpi_device_modalias(struct device *dev, char *buf, int size)
EXPORT_SYMBOL_GPL(acpi_device_modalias);
static ssize_t
acpi_device_modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return __acpi_device_modalias(to_acpi_device(dev), buf, 1024);
}
static DEVICE_ATTR(modalias, 0444, acpi_device_modalias_show, NULL);
static DEVICE_ATTR_RO(modalias);
static ssize_t real_power_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
@ -358,8 +358,8 @@ static ssize_t power_state_show(struct device *dev,
static DEVICE_ATTR_RO(power_state);
static ssize_t
acpi_eject_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
eject_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
{
struct acpi_device *acpi_device = to_acpi_device(d);
acpi_object_type not_used;
@ -387,28 +387,28 @@ acpi_eject_store(struct device *d, struct device_attribute *attr,
return status == AE_NO_MEMORY ? -ENOMEM : -EAGAIN;
}
static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store);
static DEVICE_ATTR_WO(eject);
static ssize_t
acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf)
hid_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
return sprintf(buf, "%s\n", acpi_device_hid(acpi_dev));
}
static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL);
static DEVICE_ATTR_RO(hid);
static ssize_t acpi_device_uid_show(struct device *dev,
struct device_attribute *attr, char *buf)
static ssize_t uid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
return sprintf(buf, "%s\n", acpi_dev->pnp.unique_id);
}
static DEVICE_ATTR(uid, 0444, acpi_device_uid_show, NULL);
static DEVICE_ATTR_RO(uid);
static ssize_t acpi_device_adr_show(struct device *dev,
struct device_attribute *attr, char *buf)
static ssize_t adr_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
@ -417,16 +417,16 @@ static ssize_t acpi_device_adr_show(struct device *dev,
else
return sprintf(buf, "0x%08llx\n", acpi_dev->pnp.bus_address);
}
static DEVICE_ATTR(adr, 0444, acpi_device_adr_show, NULL);
static DEVICE_ATTR_RO(adr);
static ssize_t acpi_device_path_show(struct device *dev,
struct device_attribute *attr, char *buf)
static ssize_t path_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
return acpi_object_path(acpi_dev->handle, buf);
}
static DEVICE_ATTR(path, 0444, acpi_device_path_show, NULL);
static DEVICE_ATTR_RO(path);
/* sysfs file that shows description text from the ACPI _STR method */
static ssize_t description_show(struct device *dev,
@ -446,7 +446,7 @@ static ssize_t description_show(struct device *dev,
(wchar_t *)acpi_dev->pnp.str_obj->buffer.pointer,
acpi_dev->pnp.str_obj->buffer.length,
UTF16_LITTLE_ENDIAN, buf,
PAGE_SIZE);
PAGE_SIZE - 1);
buf[result++] = '\n';
@ -455,8 +455,8 @@ static ssize_t description_show(struct device *dev,
static DEVICE_ATTR_RO(description);
static ssize_t
acpi_device_sun_show(struct device *dev, struct device_attribute *attr,
char *buf) {
sun_show(struct device *dev, struct device_attribute *attr,
char *buf) {
struct acpi_device *acpi_dev = to_acpi_device(dev);
acpi_status status;
unsigned long long sun;
@ -467,11 +467,11 @@ acpi_device_sun_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%llu\n", sun);
}
static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL);
static DEVICE_ATTR_RO(sun);
static ssize_t
acpi_device_hrv_show(struct device *dev, struct device_attribute *attr,
char *buf) {
hrv_show(struct device *dev, struct device_attribute *attr,
char *buf) {
struct acpi_device *acpi_dev = to_acpi_device(dev);
acpi_status status;
unsigned long long hrv;
@ -482,7 +482,7 @@ acpi_device_hrv_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%llu\n", hrv);
}
static DEVICE_ATTR(hrv, 0444, acpi_device_hrv_show, NULL);
static DEVICE_ATTR_RO(hrv);
static ssize_t status_show(struct device *dev, struct device_attribute *attr,
char *buf) {

View File

@ -485,7 +485,7 @@ int dock_notify(struct acpi_device *adev, u32 event)
/*
* show_docked - read method for "docked" file in sysfs
*/
static ssize_t show_docked(struct device *dev,
static ssize_t docked_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dock_station *dock_station = dev->platform_data;
@ -494,25 +494,25 @@ static ssize_t show_docked(struct device *dev,
acpi_bus_get_device(dock_station->handle, &adev);
return snprintf(buf, PAGE_SIZE, "%u\n", acpi_device_enumerated(adev));
}
static DEVICE_ATTR(docked, S_IRUGO, show_docked, NULL);
static DEVICE_ATTR_RO(docked);
/*
* show_flags - read method for flags file in sysfs
*/
static ssize_t show_flags(struct device *dev,
static ssize_t flags_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dock_station *dock_station = dev->platform_data;
return snprintf(buf, PAGE_SIZE, "%d\n", dock_station->flags);
}
static DEVICE_ATTR(flags, S_IRUGO, show_flags, NULL);
static DEVICE_ATTR_RO(flags);
/*
* write_undock - write method for "undock" file in sysfs
*/
static ssize_t write_undock(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
static ssize_t undock_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
struct dock_station *dock_station = dev->platform_data;
@ -526,13 +526,13 @@ static ssize_t write_undock(struct device *dev, struct device_attribute *attr,
acpi_scan_lock_release();
return ret ? ret: count;
}
static DEVICE_ATTR(undock, S_IWUSR, NULL, write_undock);
static DEVICE_ATTR_WO(undock);
/*
* show_dock_uid - read method for "uid" file in sysfs
*/
static ssize_t show_dock_uid(struct device *dev,
struct device_attribute *attr, char *buf)
static ssize_t uid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long long lbuf;
struct dock_station *dock_station = dev->platform_data;
@ -543,10 +543,10 @@ static ssize_t show_dock_uid(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%llx\n", lbuf);
}
static DEVICE_ATTR(uid, S_IRUGO, show_dock_uid, NULL);
static DEVICE_ATTR_RO(uid);
static ssize_t show_dock_type(struct device *dev,
struct device_attribute *attr, char *buf)
static ssize_t type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct dock_station *dock_station = dev->platform_data;
char *type;
@ -562,7 +562,7 @@ static ssize_t show_dock_type(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%s\n", type);
}
static DEVICE_ATTR(type, S_IRUGO, show_dock_type, NULL);
static DEVICE_ATTR_RO(type);
static struct attribute *dock_attributes[] = {
&dev_attr_docked.attr,

View File

@ -1826,6 +1826,22 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "GL702VMK"),}, NULL},
{
ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X505BA", {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "X505BA"),}, NULL},
{
ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X505BP", {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "X505BP"),}, NULL},
{
ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X542BA", {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "X542BA"),}, NULL},
{
ec_honor_ecdt_gpe, "ASUSTeK COMPUTER INC. X542BP", {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "X542BP"),}, NULL},
{
ec_honor_ecdt_gpe, "ASUS X550VXK", {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "X550VXK"),}, NULL},

View File

@ -888,15 +888,16 @@ static void acpi_release_power_resource(struct device *dev)
kfree(resource);
}
static ssize_t acpi_power_in_use_show(struct device *dev,
struct device_attribute *attr,
char *buf) {
static ssize_t resource_in_use_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct acpi_power_resource *resource;
resource = to_power_resource(to_acpi_device(dev));
return sprintf(buf, "%u\n", !!resource->ref_count);
}
static DEVICE_ATTR(resource_in_use, 0444, acpi_power_in_use_show, NULL);
static DEVICE_ATTR_RO(resource_in_use);
static void acpi_power_sysfs_remove(struct acpi_device *device)
{

View File

@ -16,6 +16,7 @@
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/sched.h> /* need_resched() */
#include <linux/sort.h>
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
@ -540,10 +541,37 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
return;
}
static int acpi_cst_latency_cmp(const void *a, const void *b)
{
const struct acpi_processor_cx *x = a, *y = b;
if (!(x->valid && y->valid))
return 0;
if (x->latency > y->latency)
return 1;
if (x->latency < y->latency)
return -1;
return 0;
}
static void acpi_cst_latency_swap(void *a, void *b, int n)
{
struct acpi_processor_cx *x = a, *y = b;
u32 tmp;
if (!(x->valid && y->valid))
return;
tmp = x->latency;
x->latency = y->latency;
y->latency = tmp;
}
static int acpi_processor_power_verify(struct acpi_processor *pr)
{
unsigned int i;
unsigned int working = 0;
unsigned int last_latency = 0;
unsigned int last_type = 0;
bool buggy_latency = false;
pr->power.timer_broadcast_on_state = INT_MAX;
@ -567,12 +595,24 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
}
if (!cx->valid)
continue;
if (cx->type >= last_type && cx->latency < last_latency)
buggy_latency = true;
last_latency = cx->latency;
last_type = cx->type;
lapic_timer_check_state(i, pr, cx);
tsc_check_state(cx->type);
working++;
}
if (buggy_latency) {
pr_notice("FW issue: working around C-state latencies out of order\n");
sort(&pr->power.states[1], max_cstate,
sizeof(struct acpi_processor_cx),
acpi_cst_latency_cmp,
acpi_cst_latency_swap);
}
lapic_timer_propagate_broadcast(pr);
return (working);
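
The workaround above leans on sort() from lib/sort.c, which takes an element-wise compare callback and an optional swap callback (NULL selects the generic byte swap). A hedged sketch of the same calling pattern on a hypothetical latency table:

#include <linux/kernel.h>
#include <linux/sort.h>

/* Hypothetical table standing in for the C-state latencies above. */
static u32 latencies[] = { 100, 1, 50, 200 };

static int latency_cmp(const void *a, const void *b)
{
	u32 x = *(const u32 *)a, y = *(const u32 *)b;

	return (x > y) - (x < y);
}

static void latency_swap(void *a, void *b, int size)
{
	swap(*(u32 *)a, *(u32 *)b);
}

static void sort_latencies(void)
{
	/* Same call shape as acpi_cst_latency_cmp()/_swap() above. */
	sort(latencies, ARRAY_SIZE(latencies), sizeof(latencies[0]),
	     latency_cmp, latency_swap);
}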

View File

@ -430,6 +430,13 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
}
}
static bool irq_is_legacy(struct acpi_resource_irq *irq)
{
return irq->triggering == ACPI_EDGE_SENSITIVE &&
irq->polarity == ACPI_ACTIVE_HIGH &&
irq->shareable == ACPI_EXCLUSIVE;
}
/**
* acpi_dev_resource_interrupt - Extract ACPI interrupt resource information.
* @ares: Input ACPI resource object.
@ -468,7 +475,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
}
acpi_dev_get_irqresource(res, irq->interrupts[index],
irq->triggering, irq->polarity,
irq->shareable, true);
irq->shareable, irq_is_legacy(irq));
break;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
ext_irq = &ares->data.extended_irq;

View File

@ -200,7 +200,7 @@ static void ahci_sunxi_start_engine(struct ata_port *ap)
}
static const struct ata_port_info ahci_sunxi_port_info = {
.flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ,
.flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ | ATA_FLAG_NO_DIPM,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_platform_ops,

View File

@ -928,7 +928,7 @@ static int ep93xx_pata_probe(struct platform_device *pdev)
/* INT[3] (IRQ_EP93XX_EXT3) line connected as pull down */
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
err = -ENXIO;
err = irq;
goto err_rel_gpio;
}

View File

@ -898,10 +898,11 @@ static int octeon_cf_probe(struct platform_device *pdev)
return -EINVAL;
}
irq_handler = octeon_cf_interrupt;
i = platform_get_irq(dma_dev, 0);
if (i > 0)
if (i > 0) {
irq = i;
irq_handler = octeon_cf_interrupt;
}
}
of_node_put(dma_node);
}

View File

@ -115,10 +115,12 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
if (irq < 0) {
dev_err(&pdev->dev, "no IRQ resource found\n");
return -ENOENT;
return irq;
}
if (!irq)
return -EINVAL;
gpiod = devm_gpiod_get(&pdev->dev, NULL, GPIOD_IN);
if (IS_ERR(gpiod)) {

View File

@ -469,10 +469,12 @@ static int ahci_highbank_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
if (irq < 0) {
dev_err(dev, "no irq\n");
return -EINVAL;
return irq;
}
if (!irq)
return -EINVAL;
hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
if (!hpriv) {
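
The pata_ep93xx, pata_octeon_cf, pata_rb532_cf and ahci_highbank hunks above converge on one idiom: platform_get_irq() returns a negative errno that should be propagated as-is, while a return of 0 is not a valid IRQ and is rejected separately. A hedged sketch of the pattern in a hypothetical probe():

#include <linux/errno.h>
#include <linux/platform_device.h>

/* Hypothetical probe; only the IRQ error handling is the point. */
static int example_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* propagate the errno, do not invent one */
	if (!irq)
		return -EINVAL;	/* 0 is never a valid IRQ */

	/* ... devm_request_irq(&pdev->dev, irq, ...) and the rest ... */
	return 0;
}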

View File

@ -3295,7 +3295,7 @@ static void __exit ia_module_exit(void)
{
pci_unregister_driver(&ia_driver);
del_timer(&ia_timer);
del_timer_sync(&ia_timer);
}
module_init(ia_module_init);

View File

@ -297,7 +297,7 @@ static void __exit nicstar_cleanup(void)
{
XPRINTK("nicstar: nicstar_cleanup() called.\n");
del_timer(&ns_timer);
del_timer_sync(&ns_timer);
pci_unregister_driver(&nicstar_driver);
@ -525,6 +525,15 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
/* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */
writel(0x00000000, card->membase + VPM);
card->intcnt = 0;
if (request_irq
(pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) {
pr_err("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
error = 9;
ns_init_card_error(card, error);
return error;
}
/* Initialize TSQ */
card->tsq.org = dma_alloc_coherent(&card->pcidev->dev,
NS_TSQSIZE + NS_TSQ_ALIGNMENT,
@ -751,15 +760,6 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
card->efbie = 1;
card->intcnt = 0;
if (request_irq
(pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) {
printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
error = 9;
ns_init_card_error(card, error);
return error;
}
/* Register device */
card->atmdev = atm_dev_register("nicstar", &card->pcidev->dev, &atm_ops,
-1, NULL);
@ -837,10 +837,12 @@ static void ns_init_card_error(ns_dev *card, int error)
dev_kfree_skb_any(hb);
}
if (error >= 12) {
kfree(card->rsq.org);
dma_free_coherent(&card->pcidev->dev, NS_RSQSIZE + NS_RSQ_ALIGNMENT,
card->rsq.org, card->rsq.dma);
}
if (error >= 11) {
kfree(card->tsq.org);
dma_free_coherent(&card->pcidev->dev, NS_TSQSIZE + NS_TSQ_ALIGNMENT,
card->tsq.org, card->tsq.dma);
}
if (error >= 10) {
free_irq(card->pcidev->irq, card);

View File

@ -2701,11 +2701,6 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
struct btmtk_wmt_hdr *hdr;
int err;
/* Submit control IN URB on demand to process the WMT event */
err = btusb_mtk_submit_wmt_recv_urb(hdev);
if (err < 0)
return err;
/* Send the WMT command and wait until the WMT event returns */
hlen = sizeof(*hdr) + wmt_params->dlen;
if (hlen > 255)
@ -2727,6 +2722,11 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
return err;
}
/* Submit control IN URB on demand to process the WMT event */
err = btusb_mtk_submit_wmt_recv_urb(hdev);
if (err < 0)
return err;
/* The vendor specific WMT commands are all answered by a vendor
* specific event and will have the Command Status or Command
* Complete as with usual HCI command flow control.
@ -3264,6 +3264,11 @@ static int btusb_setup_qca_download_fw(struct hci_dev *hdev,
sent += size;
count -= size;
/* ep2 need time to switch from function acl to function dfu,
* so we add 20ms delay here.
*/
msleep(20);
while (count) {
size = min_t(size_t, count, QCA_DFU_PACKET_LEN);

View File

@ -134,7 +134,7 @@ static int exynos_trng_probe(struct platform_device *pdev)
return PTR_ERR(trng->mem);
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "Could not get runtime PM.\n");
goto err_pm_get;
@ -167,7 +167,7 @@ err_register:
clk_disable_unprepare(trng->clk);
err_clock:
pm_runtime_put_sync(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
err_pm_get:
pm_runtime_disable(&pdev->dev);
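
The exynos-trng change swaps pm_runtime_get_sync() for pm_runtime_resume_and_get(): get_sync() leaves the usage count raised even when the resume fails (so the error path had to use pm_runtime_put_noidle()), while resume_and_get() drops the count itself on failure. A hedged sketch of the resulting error handling in a hypothetical probe():

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/* Hypothetical probe fragment; only the runtime-PM pattern matters. */
static int example_probe(struct platform_device *pdev)
{
	int ret;

	pm_runtime_enable(&pdev->dev);

	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0) {
		/* Usage count was already dropped on failure; with
		 * pm_runtime_get_sync() a put_noidle() would be needed. */
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	/* ... hardware setup while the device is resumed ... */

	pm_runtime_put(&pdev->dev);	/* balanced in the success path */
	return 0;
}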

View File

@ -366,16 +366,18 @@ static int __ipmi_set_timeout(struct ipmi_smi_msg *smi_msg,
data[0] = 0;
WDOG_SET_TIMER_USE(data[0], WDOG_TIMER_USE_SMS_OS);
if ((ipmi_version_major > 1)
|| ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) {
/* This is an IPMI 1.5-only feature. */
data[0] |= WDOG_DONT_STOP_ON_SET;
} else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
/*
* In ipmi 1.0, setting the timer stops the watchdog, we
* need to start it back up again.
*/
hbnow = 1;
if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
if ((ipmi_version_major > 1) ||
((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) {
/* This is an IPMI 1.5-only feature. */
data[0] |= WDOG_DONT_STOP_ON_SET;
} else {
/*
* In ipmi 1.0, setting the timer stops the watchdog, we
* need to start it back up again.
*/
hbnow = 1;
}
}
data[1] = 0;

View File

@ -544,6 +544,10 @@ static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq)
io_read_num_rec_bytes(iobase, &num_bytes_read);
if (num_bytes_read >= 4) {
DEBUGP(2, dev, "NumRecBytes = %i\n", num_bytes_read);
if (num_bytes_read > 4) {
rc = -EIO;
goto exit_setprotocol;
}
break;
}
usleep_range(10000, 11000);

View File

@ -125,8 +125,7 @@ static struct clk_factor_table sd_factor_table[] = {
{ 12, 1, 13 }, { 13, 1, 14 }, { 14, 1, 15 }, { 15, 1, 16 },
{ 16, 1, 17 }, { 17, 1, 18 }, { 18, 1, 19 }, { 19, 1, 20 },
{ 20, 1, 21 }, { 21, 1, 22 }, { 22, 1, 23 }, { 23, 1, 24 },
{ 24, 1, 25 }, { 25, 1, 26 }, { 26, 1, 27 }, { 27, 1, 28 },
{ 28, 1, 29 }, { 29, 1, 30 }, { 30, 1, 31 }, { 31, 1, 32 },
{ 24, 1, 25 },
/* bit8: /128 */
{ 256, 1, 1 * 128 }, { 257, 1, 2 * 128 }, { 258, 1, 3 * 128 }, { 259, 1, 4 * 128 },
@ -135,14 +134,20 @@ static struct clk_factor_table sd_factor_table[] = {
{ 268, 1, 13 * 128 }, { 269, 1, 14 * 128 }, { 270, 1, 15 * 128 }, { 271, 1, 16 * 128 },
{ 272, 1, 17 * 128 }, { 273, 1, 18 * 128 }, { 274, 1, 19 * 128 }, { 275, 1, 20 * 128 },
{ 276, 1, 21 * 128 }, { 277, 1, 22 * 128 }, { 278, 1, 23 * 128 }, { 279, 1, 24 * 128 },
{ 280, 1, 25 * 128 }, { 281, 1, 26 * 128 }, { 282, 1, 27 * 128 }, { 283, 1, 28 * 128 },
{ 284, 1, 29 * 128 }, { 285, 1, 30 * 128 }, { 286, 1, 31 * 128 }, { 287, 1, 32 * 128 },
{ 280, 1, 25 * 128 },
{ 0, 0, 0 },
};
static struct clk_factor_table bisp_factor_table[] = {
{ 0, 1, 1 }, { 1, 1, 2 }, { 2, 1, 3 }, { 3, 1, 4 },
{ 4, 1, 5 }, { 5, 1, 6 }, { 6, 1, 7 }, { 7, 1, 8 },
static struct clk_factor_table de_factor_table[] = {
{ 0, 1, 1 }, { 1, 2, 3 }, { 2, 1, 2 }, { 3, 2, 5 },
{ 4, 1, 3 }, { 5, 1, 4 }, { 6, 1, 6 }, { 7, 1, 8 },
{ 8, 1, 12 },
{ 0, 0, 0 },
};
static struct clk_factor_table hde_factor_table[] = {
{ 0, 1, 1 }, { 1, 2, 3 }, { 2, 1, 2 }, { 3, 2, 5 },
{ 4, 1, 3 }, { 5, 1, 4 }, { 6, 1, 6 }, { 7, 1, 8 },
{ 0, 0, 0 },
};
@ -156,6 +161,13 @@ static struct clk_div_table rmii_ref_div_table[] = {
{ 0, 0 },
};
static struct clk_div_table std12rate_div_table[] = {
{ 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 },
{ 4, 5 }, { 5, 6 }, { 6, 7 }, { 7, 8 },
{ 8, 9 }, { 9, 10 }, { 10, 11 }, { 11, 12 },
{ 0, 0 },
};
static struct clk_div_table i2s_div_table[] = {
{ 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 },
{ 4, 6 }, { 5, 8 }, { 6, 12 }, { 7, 16 },
@ -188,39 +200,39 @@ static OWL_DIVIDER(rmii_ref_clk, "rmii_ref_clk", "ethernet_pll_clk", CMU_ETHERNE
/* factor clocks */
static OWL_FACTOR(ahb_clk, "ahb_clk", "h_clk", CMU_BUSCLK1, 2, 2, ahb_factor_table, 0, 0);
static OWL_FACTOR(de1_clk, "de_clk1", "de_clk", CMU_DECLK, 0, 3, bisp_factor_table, 0, 0);
static OWL_FACTOR(de2_clk, "de_clk2", "de_clk", CMU_DECLK, 4, 3, bisp_factor_table, 0, 0);
static OWL_FACTOR(de1_clk, "de_clk1", "de_clk", CMU_DECLK, 0, 4, de_factor_table, 0, 0);
static OWL_FACTOR(de2_clk, "de_clk2", "de_clk", CMU_DECLK, 4, 4, de_factor_table, 0, 0);
/* composite clocks */
static OWL_COMP_FACTOR(vce_clk, "vce_clk", hde_clk_mux_p,
OWL_MUX_HW(CMU_VCECLK, 4, 2),
OWL_GATE_HW(CMU_DEVCLKEN0, 26, 0),
OWL_FACTOR_HW(CMU_VCECLK, 0, 3, 0, bisp_factor_table),
OWL_FACTOR_HW(CMU_VCECLK, 0, 3, 0, hde_factor_table),
0);
static OWL_COMP_FACTOR(vde_clk, "vde_clk", hde_clk_mux_p,
OWL_MUX_HW(CMU_VDECLK, 4, 2),
OWL_GATE_HW(CMU_DEVCLKEN0, 25, 0),
OWL_FACTOR_HW(CMU_VDECLK, 0, 3, 0, bisp_factor_table),
OWL_FACTOR_HW(CMU_VDECLK, 0, 3, 0, hde_factor_table),
0);
static OWL_COMP_FACTOR(bisp_clk, "bisp_clk", bisp_clk_mux_p,
static OWL_COMP_DIV(bisp_clk, "bisp_clk", bisp_clk_mux_p,
OWL_MUX_HW(CMU_BISPCLK, 4, 1),
OWL_GATE_HW(CMU_DEVCLKEN0, 14, 0),
OWL_FACTOR_HW(CMU_BISPCLK, 0, 3, 0, bisp_factor_table),
OWL_DIVIDER_HW(CMU_BISPCLK, 0, 4, 0, std12rate_div_table),
0);
static OWL_COMP_FACTOR(sensor0_clk, "sensor0_clk", sensor_clk_mux_p,
static OWL_COMP_DIV(sensor0_clk, "sensor0_clk", sensor_clk_mux_p,
OWL_MUX_HW(CMU_SENSORCLK, 4, 1),
OWL_GATE_HW(CMU_DEVCLKEN0, 14, 0),
OWL_FACTOR_HW(CMU_SENSORCLK, 0, 3, 0, bisp_factor_table),
CLK_IGNORE_UNUSED);
OWL_DIVIDER_HW(CMU_SENSORCLK, 0, 4, 0, std12rate_div_table),
0);
static OWL_COMP_FACTOR(sensor1_clk, "sensor1_clk", sensor_clk_mux_p,
static OWL_COMP_DIV(sensor1_clk, "sensor1_clk", sensor_clk_mux_p,
OWL_MUX_HW(CMU_SENSORCLK, 4, 1),
OWL_GATE_HW(CMU_DEVCLKEN0, 14, 0),
OWL_FACTOR_HW(CMU_SENSORCLK, 8, 3, 0, bisp_factor_table),
CLK_IGNORE_UNUSED);
OWL_DIVIDER_HW(CMU_SENSORCLK, 8, 4, 0, std12rate_div_table),
0);
static OWL_COMP_FACTOR(sd0_clk, "sd0_clk", sd_clk_mux_p,
OWL_MUX_HW(CMU_SD0CLK, 9, 1),
@ -300,7 +312,7 @@ static OWL_COMP_FIXED_FACTOR(i2c3_clk, "i2c3_clk", "ethernet_pll_clk",
static OWL_COMP_DIV(uart0_clk, "uart0_clk", uart_clk_mux_p,
OWL_MUX_HW(CMU_UART0CLK, 16, 1),
OWL_GATE_HW(CMU_DEVCLKEN1, 6, 0),
OWL_DIVIDER_HW(CMU_UART1CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
OWL_DIVIDER_HW(CMU_UART0CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
CLK_IGNORE_UNUSED);
static OWL_COMP_DIV(uart1_clk, "uart1_clk", uart_clk_mux_p,
@ -312,31 +324,31 @@ static OWL_COMP_DIV(uart1_clk, "uart1_clk", uart_clk_mux_p,
static OWL_COMP_DIV(uart2_clk, "uart2_clk", uart_clk_mux_p,
OWL_MUX_HW(CMU_UART2CLK, 16, 1),
OWL_GATE_HW(CMU_DEVCLKEN1, 8, 0),
OWL_DIVIDER_HW(CMU_UART1CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
OWL_DIVIDER_HW(CMU_UART2CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
CLK_IGNORE_UNUSED);
static OWL_COMP_DIV(uart3_clk, "uart3_clk", uart_clk_mux_p,
OWL_MUX_HW(CMU_UART3CLK, 16, 1),
OWL_GATE_HW(CMU_DEVCLKEN1, 19, 0),
OWL_DIVIDER_HW(CMU_UART1CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
OWL_DIVIDER_HW(CMU_UART3CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
CLK_IGNORE_UNUSED);
static OWL_COMP_DIV(uart4_clk, "uart4_clk", uart_clk_mux_p,
OWL_MUX_HW(CMU_UART4CLK, 16, 1),
OWL_GATE_HW(CMU_DEVCLKEN1, 20, 0),
OWL_DIVIDER_HW(CMU_UART1CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
OWL_DIVIDER_HW(CMU_UART4CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
CLK_IGNORE_UNUSED);
static OWL_COMP_DIV(uart5_clk, "uart5_clk", uart_clk_mux_p,
OWL_MUX_HW(CMU_UART5CLK, 16, 1),
OWL_GATE_HW(CMU_DEVCLKEN1, 21, 0),
OWL_DIVIDER_HW(CMU_UART1CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
OWL_DIVIDER_HW(CMU_UART5CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
CLK_IGNORE_UNUSED);
static OWL_COMP_DIV(uart6_clk, "uart6_clk", uart_clk_mux_p,
OWL_MUX_HW(CMU_UART6CLK, 16, 1),
OWL_GATE_HW(CMU_DEVCLKEN1, 18, 0),
OWL_DIVIDER_HW(CMU_UART1CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
OWL_DIVIDER_HW(CMU_UART6CLK, 0, 8, CLK_DIVIDER_ROUND_CLOSEST, NULL),
CLK_IGNORE_UNUSED);
static OWL_COMP_DIV(i2srx_clk, "i2srx_clk", i2s_clk_mux_p,

View File

@ -304,6 +304,8 @@ static const struct si5341_reg_default si5341_reg_defaults[] = {
{ 0x094A, 0x00 }, /* INx_TO_PFD_EN (disabled) */
{ 0x0A02, 0x00 }, /* Not in datasheet */
{ 0x0B44, 0x0F }, /* PDIV_ENB (datasheet does not mention what it is) */
{ 0x0B57, 0x10 }, /* VCO_RESET_CALCODE (not described in datasheet) */
{ 0x0B58, 0x05 }, /* VCO_RESET_CALCODE (not described in datasheet) */
};
/* Read and interpret a 44-bit followed by a 32-bit value in the regmap */
@ -482,6 +484,9 @@ static unsigned long si5341_synth_clk_recalc_rate(struct clk_hw *hw,
SI5341_SYNTH_N_NUM(synth->index), &n_num, &n_den);
if (err < 0)
return err;
/* Check for bogus/uninitialized settings */
if (!n_num || !n_den)
return 0;
/*
* n_num and n_den are shifted left as much as possible, so to prevent
@ -665,6 +670,9 @@ static long si5341_output_clk_round_rate(struct clk_hw *hw, unsigned long rate,
{
unsigned long r;
if (!rate)
return 0;
r = *parent_rate >> 1;
/* If rate is an even divisor, no changes to parent required */
@ -693,11 +701,16 @@ static int si5341_output_clk_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_si5341_output *output = to_clk_si5341_output(hw);
/* Frequency divider is (r_div + 1) * 2 */
u32 r_div = (parent_rate / rate) >> 1;
u32 r_div;
int err;
u8 r[3];
if (!rate)
return -EINVAL;
/* Frequency divider is (r_div + 1) * 2 */
r_div = (parent_rate / rate) >> 1;
if (r_div <= 1)
r_div = 0;
else if (r_div >= BIT(24))
@ -924,7 +937,7 @@ static const struct si5341_reg_default si5341_preamble[] = {
{ 0x0B25, 0x00 },
{ 0x0502, 0x01 },
{ 0x0505, 0x03 },
{ 0x0957, 0x1F },
{ 0x0957, 0x17 },
{ 0x0B4E, 0x1A },
};

View File

@ -1602,7 +1602,7 @@ static struct clk_regmap g12b_cpub_clk_trace = {
};
static const struct pll_mult_range g12a_gp0_pll_mult_range = {
.min = 55,
.min = 125,
.max = 255,
};

View File

@ -75,6 +75,7 @@ static const struct cpg_core_clk r8a77995_core_clks[] __initconst = {
DEF_RATE(".oco", CLK_OCO, 8 * 1000 * 1000),
/* Core Clock Outputs */
DEF_FIXED("za2", R8A77995_CLK_ZA2, CLK_PLL0D3, 2, 1),
DEF_FIXED("z2", R8A77995_CLK_Z2, CLK_PLL0D3, 1, 1),
DEF_FIXED("ztr", R8A77995_CLK_ZTR, CLK_PLL1, 6, 1),
DEF_FIXED("zt", R8A77995_CLK_ZT, CLK_PLL1, 4, 1),

View File

@ -1089,7 +1089,8 @@ static int clk_pllu_enable(struct clk_hw *hw)
if (pll->lock)
spin_lock_irqsave(pll->lock, flags);
_clk_pll_enable(hw);
if (!clk_pll_is_enabled(hw))
_clk_pll_enable(hw);
ret = clk_pll_wait_for_lock(pll);
if (ret < 0)
@ -1706,15 +1707,13 @@ static int clk_pllu_tegra114_enable(struct clk_hw *hw)
return -EINVAL;
}
if (clk_pll_is_enabled(hw))
return 0;
input_rate = clk_hw_get_rate(__clk_get_hw(osc));
if (pll->lock)
spin_lock_irqsave(pll->lock, flags);
_clk_pll_enable(hw);
if (!clk_pll_is_enabled(hw))
_clk_pll_enable(hw);
ret = clk_pll_wait_for_lock(pll);
if (ret < 0)

View File

@ -348,7 +348,7 @@ static u64 notrace arm64_858921_read_cntvct_el0(void)
do { \
_val = read_sysreg(reg); \
_retries--; \
} while (((_val + 1) & GENMASK(9, 0)) <= 1 && _retries); \
} while (((_val + 1) & GENMASK(8, 0)) <= 1 && _retries); \
\
WARN_ON_ONCE(!_retries); \
_val; \

View File

@ -1361,9 +1361,14 @@ static int cpufreq_online(unsigned int cpu)
goto out_free_policy;
}
/*
* The initialization has succeeded and the policy is online.
* If there is a problem with its frequency table, take it
* offline and drop it.
*/
ret = cpufreq_table_validate_and_sort(policy);
if (ret)
goto out_exit_policy;
goto out_offline_policy;
/* related_cpus should at least include policy->cpus. */
cpumask_copy(policy->related_cpus, policy->cpus);
@ -1507,6 +1512,10 @@ out_destroy_policy:
up_write(&policy->rwsem);
out_offline_policy:
if (cpufreq_driver->offline)
cpufreq_driver->offline(policy);
out_exit_policy:
if (cpufreq_driver->exit)
cpufreq_driver->exit(policy);

View File

@ -306,6 +306,10 @@ int nitrox_register_interrupts(struct nitrox_device *ndev)
* Entry 192: NPS_CORE_INT_ACTIVE
*/
nr_vecs = pci_msix_vec_count(pdev);
if (nr_vecs < 0) {
dev_err(DEV(ndev), "Error in getting vec count %d\n", nr_vecs);
return nr_vecs;
}
/* Enable MSI-X */
ret = pci_alloc_irq_vectors(pdev, nr_vecs, nr_vecs, PCI_IRQ_MSIX);

View File

@ -40,6 +40,10 @@ static int psp_probe_timeout = 5;
module_param(psp_probe_timeout, int, 0644);
MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");
MODULE_FIRMWARE("amd/amd_sev_fam17h_model0xh.sbin"); /* 1st gen EPYC */
MODULE_FIRMWARE("amd/amd_sev_fam17h_model3xh.sbin"); /* 2nd gen EPYC */
MODULE_FIRMWARE("amd/amd_sev_fam19h_model0xh.sbin"); /* 3rd gen EPYC */
static bool psp_dead;
static int psp_timeout;

View File

@ -213,7 +213,7 @@ static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret) {
dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
ret);
goto e_err;
goto free_irqs;
}
}
@ -221,10 +221,12 @@ static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = sp_init(sp);
if (ret)
goto e_err;
goto free_irqs;
return 0;
free_irqs:
sp_free_irqs(sp);
e_err:
dev_notice(dev, "initialization failed\n");
return ret;

View File

@ -329,7 +329,7 @@ static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
buf1 = buf->next;
phys1 = buf->phys_next;
dma_unmap_single(dev, buf->phys_next, buf->buf_len, buf->dir);
dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
dma_pool_free(buffer_pool, buf, phys);
buf = buf1;
phys = phys1;

View File

@ -538,13 +538,15 @@ static int nx842_OF_set_defaults(struct nx842_devdata *devdata)
* The status field indicates if the device is enabled when the status
* is 'okay'. Otherwise the device driver will be disabled.
*
* @prop - struct property point containing the maxsyncop for the update
* @devdata: struct nx842_devdata to use for dev_info
* @prop: struct property point containing the maxsyncop for the update
*
* Returns:
* 0 - Device is available
* -ENODEV - Device is not available
*/
static int nx842_OF_upd_status(struct property *prop)
static int nx842_OF_upd_status(struct nx842_devdata *devdata,
struct property *prop)
{
const char *status = (const char *)prop->value;
@ -758,7 +760,7 @@ static int nx842_OF_upd(struct property *new_prop)
goto out;
/* Perform property updates */
ret = nx842_OF_upd_status(status);
ret = nx842_OF_upd_status(new_devdata, status);
if (ret)
goto error_out;
@ -1071,6 +1073,7 @@ static const struct vio_device_id nx842_vio_driver_ids[] = {
{"ibm,compression-v1", "ibm,compression"},
{"", ""},
};
MODULE_DEVICE_TABLE(vio, nx842_vio_driver_ids);
static struct vio_driver nx842_vio_driver = {
.name = KBUILD_MODNAME,

View File

@ -364,7 +364,7 @@ static int omap_sham_hw_init(struct omap_sham_dev *dd)
{
int err;
err = pm_runtime_get_sync(dd->dev);
err = pm_runtime_resume_and_get(dd->dev);
if (err < 0) {
dev_err(dd->dev, "failed to get sync: %d\n", err);
return err;
@ -2236,7 +2236,7 @@ static int omap_sham_suspend(struct device *dev)
static int omap_sham_resume(struct device *dev)
{
int err = pm_runtime_get_sync(dev);
int err = pm_runtime_resume_and_get(dev);
if (err < 0) {
dev_err(dev, "failed to get sync: %d\n", err);
return err;

View File

@ -1256,7 +1256,11 @@ static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle,
pr_err("QAT: bad xfrAddr=0x%x\n", xfr_addr);
return -EINVAL;
}
qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval);
status = qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval);
if (status) {
pr_err("QAT: failed to read register");
return status;
}
gpr_addr = qat_hal_get_reg_addr(ICP_GPB_REL, gprnum);
data16low = 0xffff & data;
data16hi = 0xffff & (data >> 0x10);

View File

@ -385,7 +385,6 @@ static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
return 0;
}
#define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000
static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
struct icp_qat_uof_initmem *init_mem)
{

View File

@ -1009,6 +1009,7 @@ static int hash_hw_final(struct ahash_request *req)
goto out;
}
} else if (req->nbytes == 0 && ctx->keylen > 0) {
ret = -EPERM;
dev_err(device_data->dev, "%s: Empty message with keylength > 0, NOT supported\n",
__func__);
goto out;

View File

@ -249,6 +249,9 @@ static int __init i10nm_init(void)
if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
return -EBUSY;
if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
return -ENODEV;
id = x86_match_cpu(i10nm_cpuids);
if (!id)
return -ENODEV;

View File

@ -1555,6 +1555,9 @@ static int __init pnd2_init(void)
if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
return -EBUSY;
if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
return -ENODEV;
id = x86_match_cpu(pnd2_cpuids);
if (!id)
return -ENODEV;

View File

@ -3512,6 +3512,9 @@ static int __init sbridge_init(void)
if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
return -EBUSY;
if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
return -ENODEV;
id = x86_match_cpu(sbridge_cpuids);
if (!id)
return -ENODEV;

View File

@ -605,6 +605,9 @@ static int __init skx_init(void)
if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
return -EBUSY;
if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
return -ENODEV;
id = x86_match_cpu(skx_cpuids);
if (!id)
return -ENODEV;

View File

@ -197,6 +197,7 @@ static const struct of_device_id ti_edac_of_match[] = {
{ .compatible = "ti,emif-dra7xx", .data = (void *)EMIF_TYPE_DRA7 },
{},
};
MODULE_DEVICE_TABLE(of, ti_edac_of_match);
static int _emif_get_id(struct device_node *node)
{

View File

@ -197,6 +197,7 @@ static int mrfld_extcon_probe(struct platform_device *pdev)
struct intel_soc_pmic *pmic = dev_get_drvdata(dev->parent);
struct regmap *regmap = pmic->regmap;
struct mrfld_extcon_data *data;
unsigned int status;
unsigned int id;
int irq, ret;
@ -244,6 +245,14 @@ static int mrfld_extcon_probe(struct platform_device *pdev)
/* Get initial state */
mrfld_extcon_role_detect(data);
/*
* Cached status value is used for cable detection, see comments
* in mrfld_extcon_cable_detect(), we need to sync cached value
* with a real state of the hardware.
*/
regmap_read(regmap, BCOVE_SCHGRIRQ1, &status);
data->status = status;
mrfld_extcon_clear(data, BCOVE_MIRQLVL1, BCOVE_LVL1_CHGR);
mrfld_extcon_clear(data, BCOVE_MCHGRIRQ1, BCOVE_CHGRIRQ_ALL);

View File

@ -729,7 +729,7 @@ static int max8997_muic_probe(struct platform_device *pdev)
2, info->status);
if (ret) {
dev_err(info->dev, "failed to read MUIC register\n");
return ret;
goto err_irq;
}
cable_type = max8997_muic_get_cable_type(info,
MAX8997_CABLE_GROUP_ADC, &attached);
@ -784,3 +784,4 @@ module_platform_driver(max8997_muic_driver);
MODULE_DESCRIPTION("Maxim MAX8997 Extcon driver");
MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:max8997-muic");

View File

@ -88,7 +88,6 @@ static struct reg_data sm5502_reg_data[] = {
| SM5502_REG_INTM2_MHL_MASK,
.invert = true,
},
{ }
};
/* List of detectable cables */

View File

@ -296,15 +296,13 @@ static int fw_cfg_do_platform_probe(struct platform_device *pdev)
return 0;
}
static ssize_t fw_cfg_showrev(struct kobject *k, struct attribute *a, char *buf)
static ssize_t fw_cfg_showrev(struct kobject *k, struct kobj_attribute *a,
char *buf)
{
return sprintf(buf, "%u\n", fw_cfg_rev);
}
static const struct {
struct attribute attr;
ssize_t (*show)(struct kobject *k, struct attribute *a, char *buf);
} fw_cfg_rev_attr = {
static const struct kobj_attribute fw_cfg_rev_attr = {
.attr = { .name = "rev", .mode = S_IRUSR },
.show = fw_cfg_showrev,
};

View File

@ -1026,24 +1026,32 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
/* add svc client device(s) */
svc = devm_kzalloc(dev, sizeof(*svc), GFP_KERNEL);
if (!svc)
return -ENOMEM;
if (!svc) {
ret = -ENOMEM;
goto err_free_kfifo;
}
svc->stratix10_svc_rsu = platform_device_alloc(STRATIX10_RSU, 0);
if (!svc->stratix10_svc_rsu) {
dev_err(dev, "failed to allocate %s device\n", STRATIX10_RSU);
return -ENOMEM;
ret = -ENOMEM;
goto err_free_kfifo;
}
ret = platform_device_add(svc->stratix10_svc_rsu);
if (ret) {
platform_device_put(svc->stratix10_svc_rsu);
return ret;
}
if (ret)
goto err_put_device;
dev_set_drvdata(dev, svc);
pr_info("Intel Service Layer Driver Initialized\n");
return 0;
err_put_device:
platform_device_put(svc->stratix10_svc_rsu);
err_free_kfifo:
kfifo_free(&controller->svc_fifo);
return ret;
}

View File

@ -476,6 +476,7 @@ static int s10_remove(struct platform_device *pdev)
struct s10_priv *priv = mgr->priv;
fpga_mgr_unregister(mgr);
fpga_mgr_free(mgr);
stratix10_svc_free_channel(priv->chan);
return 0;

View File

@ -718,7 +718,7 @@ static ssize_t cfam_read(struct file *filep, char __user *buf, size_t count,
rc = count;
fail:
*offset = off;
return count;
return rc;
}
static ssize_t cfam_write(struct file *filep, const char __user *buf,
@ -755,7 +755,7 @@ static ssize_t cfam_write(struct file *filep, const char __user *buf,
rc = count;
fail:
*offset = off;
return count;
return rc;
}
static loff_t cfam_llseek(struct file *file, loff_t offset, int whence)

View File

@ -445,6 +445,7 @@ int fsi_occ_submit(struct device *dev, const void *request, size_t req_len,
goto done;
if (resp->return_status == OCC_RESP_CMD_IN_PRG ||
resp->return_status == OCC_RESP_CRIT_INIT ||
resp->seq_no != seq_no) {
rc = -ETIMEDOUT;

View File

@ -325,7 +325,8 @@ static int sbefifo_up_write(struct sbefifo *sbefifo, __be32 word)
static int sbefifo_request_reset(struct sbefifo *sbefifo)
{
struct device *dev = &sbefifo->fsi_dev->dev;
u32 status, timeout;
unsigned long end_time;
u32 status;
int rc;
dev_dbg(dev, "Requesting FIFO reset\n");
@ -341,7 +342,8 @@ static int sbefifo_request_reset(struct sbefifo *sbefifo)
}
/* Wait for it to complete */
for (timeout = 0; timeout < SBEFIFO_RESET_TIMEOUT; timeout++) {
end_time = jiffies + msecs_to_jiffies(SBEFIFO_RESET_TIMEOUT);
while (!time_after(jiffies, end_time)) {
rc = sbefifo_regr(sbefifo, SBEFIFO_UP | SBEFIFO_STS, &status);
if (rc) {
dev_err(dev, "Failed to read UP fifo status during reset"
@ -355,7 +357,7 @@ static int sbefifo_request_reset(struct sbefifo *sbefifo)
return 0;
}
msleep(1);
cond_resched();
}
dev_err(dev, "FIFO reset timed out\n");
@ -400,7 +402,7 @@ static int sbefifo_cleanup_hw(struct sbefifo *sbefifo)
/* The FIFO already contains a reset request from the SBE ? */
if (down_status & SBEFIFO_STS_RESET_REQ) {
dev_info(dev, "Cleanup: FIFO reset request set, resetting\n");
rc = sbefifo_regw(sbefifo, SBEFIFO_UP, SBEFIFO_PERFORM_RESET);
rc = sbefifo_regw(sbefifo, SBEFIFO_DOWN, SBEFIFO_PERFORM_RESET);
if (rc) {
sbefifo->broken = true;
dev_err(dev, "Cleanup: Reset reg write failed, rc=%d\n", rc);

View File

@ -38,9 +38,10 @@
#define SCOM_STATUS_PIB_RESP_MASK 0x00007000
#define SCOM_STATUS_PIB_RESP_SHIFT 12
#define SCOM_STATUS_ANY_ERR (SCOM_STATUS_PROTECTION | \
SCOM_STATUS_PARITY | \
SCOM_STATUS_PIB_ABORT | \
#define SCOM_STATUS_FSI2PIB_ERROR (SCOM_STATUS_PROTECTION | \
SCOM_STATUS_PARITY | \
SCOM_STATUS_PIB_ABORT)
#define SCOM_STATUS_ANY_ERR (SCOM_STATUS_FSI2PIB_ERROR | \
SCOM_STATUS_PIB_RESP_MASK)
/* SCOM address encodings */
#define XSCOM_ADDR_IND_FLAG BIT_ULL(63)
@ -240,13 +241,14 @@ static int handle_fsi2pib_status(struct scom_device *scom, uint32_t status)
{
uint32_t dummy = -1;
if (status & SCOM_STATUS_PROTECTION)
return -EPERM;
if (status & SCOM_STATUS_PARITY) {
if (status & SCOM_STATUS_FSI2PIB_ERROR)
fsi_device_write(scom->fsi_dev, SCOM_FSI2PIB_RESET_REG, &dummy,
sizeof(uint32_t));
if (status & SCOM_STATUS_PROTECTION)
return -EPERM;
if (status & SCOM_STATUS_PARITY)
return -EIO;
}
/* Return -EBUSY on PIB abort to force a retry */
if (status & SCOM_STATUS_PIB_ABORT)
return -EBUSY;

View File

@ -1252,6 +1252,7 @@ config GPIO_TPS68470
config GPIO_TQMX86
tristate "TQ-Systems QTMX86 GPIO"
depends on MFD_TQMX86 || COMPILE_TEST
depends on HAS_IOPORT_MAP
select GPIOLIB_IRQCHIP
help
This driver supports GPIO on the TQMX86 IO controller.
@ -1319,6 +1320,7 @@ menu "PCI GPIO expanders"
config GPIO_AMD8111
tristate "AMD 8111 GPIO driver"
depends on X86 || COMPILE_TEST
depends on HAS_IOPORT_MAP
help
The AMD 8111 south bridge contains 32 GPIO pins which can be used.

View File

@ -55,12 +55,6 @@ static struct {
spinlock_t mem_limit_lock;
} kfd_mem_limit;
/* Struct used for amdgpu_amdkfd_bo_validate */
struct amdgpu_vm_parser {
uint32_t domain;
bool wait;
};
static const char * const domain_bit_to_string[] = {
"CPU",
"GTT",
@ -293,11 +287,9 @@ validate_fail:
return ret;
}
static int amdgpu_amdkfd_validate(void *param, struct amdgpu_bo *bo)
static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
{
struct amdgpu_vm_parser *p = param;
return amdgpu_amdkfd_bo_validate(bo, p->domain, p->wait);
return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
}
/* vm_validate_pt_pd_bos - Validate page table and directory BOs
@ -311,20 +303,15 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
{
struct amdgpu_bo *pd = vm->root.base.bo;
struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
struct amdgpu_vm_parser param;
int ret;
param.domain = AMDGPU_GEM_DOMAIN_VRAM;
param.wait = false;
ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
&param);
ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate_vm_bo, NULL);
if (ret) {
pr_err("amdgpu: failed to validate PT BOs\n");
return ret;
}
ret = amdgpu_amdkfd_validate(&param, pd);
ret = amdgpu_amdkfd_validate_vm_bo(NULL, pd);
if (ret) {
pr_err("amdgpu: failed to validate PD\n");
return ret;

Some files were not shown because too many files have changed in this diff