This is the 5.4.147 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmFDItIACgkQONu9yGCS
 aT7qHw/+MC8nqbYT/fMu3ZsTsNB8JZelGhrpXVAFc4E80oZDVEF3aNh8koTk13o/
 sI+9LZD6vPJ7kxPBT45PPxDfwM7VlVlQTVWQ8lUjY+8a2Ml7xZSiz9I3bMX0MsvU
 ylXeAJ4BntXI4679ccSjWGwwcQa+PrHsPrpZqKLBI4+pjxps3eMGwK0yE5jcRd4Z
 WlhgYbE8QhmB4iWSA5CBr7IY5pVIlKpvovlvIi1TlU1C9LXU6O8OBPkhmbuFF4fG
 HY2ge6d+xZItcn9RFi+uH51PwPBpN9U2I+QakQ4iyMNgF1uqHzfWh30ZeINxHzw2
 2Nn/nCmNeUwoOt6YSQGxlZUieqqvq5y4VeXo2nThqBdds8GPJ4AhvvxmM/NnP0EV
 lfI6RSxJcYULfl0XQB5sj3fJ2Fo7G7Mx+kg0q0018iTuOx9kNdQF/WXdLqHvBUIi
 VIaRFR2wKeWXCV7tmb5o8928clv0FWJRi19Gcq8597zWxGC8mlSOg48jVVMkE9rU
 IaYhIiIRL/Dkf34Hal2+mZbvbD5IjtViw9uYOzME7NuF4VVHD6RavsMOM8AuUc+t
 OtSRo6mID1rH+RrvFZhZRzcT3Fg5NVbmEhiVG7tH26ZEJE2Gs/CjexQgTHf25+VL
 /H8Mnjjw7gAmvqAJyG9s3eocnhuuWeYj5YWg3vAxVNoUqUT2uO4=
 =RNKE
 -----END PGP SIGNATURE-----
gpgsig -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEdQaENiSDAlGTDEbB7G51OISzHs0FAmFDnvkACgkQ7G51OISz
 Hs17yQ/8CoCEE3h3RZQ07Y4RSyePZU96IRGqd5Dhyli0BUgWahmaaoBrXYQPL2c1
 IzBW2/e0vxCwMflVHxRMS6tXoTyj3dXuNG30SU+pkCDnsWyrWxtNALFgDt0a/wHa
 pszFKfnXKxrYOKKU2gSPxFu1+0Tm0zV6puwgvwcuKzUFx/wjIZxH+8+fLhTWYu2X
 7OBuwCvDjbsQgnruKbyysI14DBu4rfYm/7hXLgRLCtYRdUYeuVrR1W5N8QEqLYr4
 daZHCl2T4VF6qiiE5FdZc6yfW0H6KSK8A1B9PYZnSJyeYwLS1edXQFbZq4VBSjPh
 wIefx3xB0ZKuyPUHc1EYjxvxfYJCbWjFaOT8aT+qDxkpYgMRltur1fmE1fS0P9hp
 3g2JrSl5EzzNqklZUuC51X4Gq2rGrO31FCXcOh/rAfbJ4sHeUlWU2pnTN7ilJt/W
 yW51Rl+mYPNTY964iCkMzVmNyAH9oIgJyR7a9P0gkIUwCqq780POtN2bMzkS/Om0
 lauKR5NmuNVw//jfeQddrrdVILfErZ5tHplGNsk3VFMi2/ITNEbmm1CJGb2090F/
 EK1lcWOj6vuETN9Lqc/dv2hX0UgDG8I1SVb1SXwRLCCZREiuoS+Z3EmMfLAkmV46
 tXz+x34BTvMl3CEZMvy69ovTrJlh6537mFx2T7+FNBqeDLHT2Z4=
 =yJQM
 -----END PGP SIGNATURE-----

Merge tag 'v5.4.147' into 5.4-2.3.x-imx

This is the 5.4.147 stable release

Signed-off-by: Andrey Zhizhikin <andrey.zhizhikin@leica-geosystems.com>
Commit: 33478cc104
Author: Andrey Zhizhikin
Date: 2021-09-16 19:45:58 +00:00
157 changed files with 1078 additions and 714 deletions


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 145
SUBLEVEL = 147
EXTRAVERSION =
NAME = Kleptomaniac Octopus


@ -208,12 +208,12 @@
};
pinctrl_hvi3c3_default: hvi3c3_default {
function = "HVI3C3";
function = "I3C3";
groups = "HVI3C3";
};
pinctrl_hvi3c4_default: hvi3c4_default {
function = "HVI3C4";
function = "I3C4";
groups = "HVI3C4";
};


@ -241,8 +241,13 @@
"pp2", "ppmmu2", "pp4", "ppmmu4",
"pp5", "ppmmu5", "pp6", "ppmmu6";
resets = <&reset RESET_MALI>;
clocks = <&clkc CLKID_CLK81>, <&clkc CLKID_MALI>;
clock-names = "bus", "core";
assigned-clocks = <&clkc CLKID_MALI>;
assigned-clock-rates = <318750000>;
operating-points-v2 = <&gpu_opp_table>;
};
};


@ -148,7 +148,7 @@
regulator-min-microvolt = <860000>;
regulator-max-microvolt = <1140000>;
vin-supply = <&vcc_5v>;
pwm-supply = <&vcc_5v>;
pwms = <&pwm_cd 0 1148 0>;
pwm-dutycycle-range = <100 0>;
@ -232,7 +232,7 @@
regulator-min-microvolt = <860000>;
regulator-max-microvolt = <1140000>;
vin-supply = <&vcc_5v>;
pwm-supply = <&vcc_5v>;
pwms = <&pwm_cd 1 1148 0>;
pwm-dutycycle-range = <100 0>;


@ -39,6 +39,8 @@
regulator-min-microvolt = <860000>;
regulator-max-microvolt = <1140000>;
pwm-supply = <&vcc_5v>;
pwms = <&pwm_cd 0 1148 0>;
pwm-dutycycle-range = <100 0>;
@ -84,7 +86,7 @@
regulator-min-microvolt = <860000>;
regulator-max-microvolt = <1140000>;
vin-supply = <&vcc_5v>;
pwm-supply = <&vcc_5v>;
pwms = <&pwm_cd 1 1148 0>;
pwm-dutycycle-range = <100 0>;


@ -130,7 +130,7 @@
regulator-min-microvolt = <860000>;
regulator-max-microvolt = <1140000>;
vin-supply = <&p5v0>;
pwm-supply = <&p5v0>;
pwms = <&pwm_cd 0 12218 0>;
pwm-dutycycle-range = <91 0>;
@ -162,7 +162,7 @@
regulator-min-microvolt = <860000>;
regulator-max-microvolt = <1140000>;
vin-supply = <&p5v0>;
pwm-supply = <&p5v0>;
pwms = <&pwm_cd 1 12218 0>;
pwm-dutycycle-range = <91 0>;


@ -1602,6 +1602,9 @@ exit:
rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code));
break;
/* speculation barrier */
case BPF_ST | BPF_NOSPEC:
break;
/* ST: *(size *)(dst + off) = imm */
case BPF_ST | BPF_MEM | BPF_W:
case BPF_ST | BPF_MEM | BPF_H:


@ -113,7 +113,7 @@
#address-cells = <0>;
interrupt-controller;
reg = <0x11001000 0x1000>,
<0x11002000 0x1000>,
<0x11002000 0x2000>,
<0x11004000 0x2000>,
<0x11006000 0x2000>;
};


@ -277,10 +277,6 @@
interrupt-parent = <&gpio1>;
interrupts = <28 IRQ_TYPE_LEVEL_LOW>;
/* Depends on LVDS */
max-clock = <135000000>;
min-vrefresh = <50>;
adi,input-depth = <8>;
adi,input-colorspace = "rgb";
adi,input-clock = "1x";


@ -701,6 +701,19 @@ emit_cond_jmp:
}
break;
/* speculation barrier */
case BPF_ST | BPF_NOSPEC:
/*
* Nothing required here.
*
* In case of arm64, we rely on the firmware mitigation of
* Speculative Store Bypass as controlled via the ssbd kernel
* parameter. Whenever the mitigation is enabled, it works
* for all of the kernel code with no need to provide any
* additional instructions.
*/
break;
/* ST: *(size *)(dst + off) = imm */
case BPF_ST | BPF_MEM | BPF_W:
case BPF_ST | BPF_MEM | BPF_H:


@ -254,8 +254,8 @@ static void __exit nfeth_cleanup(void)
for (i = 0; i < MAX_UNIT; i++) {
if (nfeth_dev[i]) {
unregister_netdev(nfeth_dev[0]);
free_netdev(nfeth_dev[0]);
unregister_netdev(nfeth_dev[i]);
free_netdev(nfeth_dev[i]);
}
}
free_irq(nfEtherIRQ, nfeth_interrupt);


@ -1355,6 +1355,9 @@ jeq_common:
}
break;
case BPF_ST | BPF_NOSPEC: /* speculation barrier */
break;
case BPF_ST | BPF_B | BPF_MEM:
case BPF_ST | BPF_H | BPF_MEM:
case BPF_ST | BPF_W | BPF_MEM:


@ -644,6 +644,12 @@ emit_clear:
}
break;
/*
* BPF_ST NOSPEC (speculation barrier)
*/
case BPF_ST | BPF_NOSPEC:
break;
/*
* BPF_ST(X)
*/


@ -1313,6 +1313,10 @@ out_be:
emit(rv_ld(rd, 0, RV_REG_T1), ctx);
break;
/* speculation barrier */
case BPF_ST | BPF_NOSPEC:
break;
/* ST: *(size *)(dst + off) = imm */
case BPF_ST | BPF_MEM | BPF_B:
emit_imm(RV_REG_T1, imm, ctx);


@ -873,6 +873,7 @@ struct kvm_arch{
atomic64_t cmma_dirty_pages;
/* subset of available cpu features enabled by user space */
DECLARE_BITMAP(cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* indexed by vcpu_idx */
DECLARE_BITMAP(idle_mask, KVM_MAX_VCPUS);
struct kvm_s390_gisa_interrupt gisa_int;
};


@ -327,24 +327,6 @@ static debug_info_t *debug_info_create(const char *name, int pages_per_area,
goto out;
rc->mode = mode & ~S_IFMT;
/* create root directory */
rc->debugfs_root_entry = debugfs_create_dir(rc->name,
debug_debugfs_root_entry);
/* append new element to linked list */
if (!debug_area_first) {
/* first element in list */
debug_area_first = rc;
rc->prev = NULL;
} else {
/* append element to end of list */
debug_area_last->next = rc;
rc->prev = debug_area_last;
}
debug_area_last = rc;
rc->next = NULL;
refcount_set(&rc->ref_count, 1);
out:
return rc;
@ -404,27 +386,10 @@ static void debug_info_get(debug_info_t *db_info)
*/
static void debug_info_put(debug_info_t *db_info)
{
int i;
if (!db_info)
return;
if (refcount_dec_and_test(&db_info->ref_count)) {
for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
if (!db_info->views[i])
continue;
debugfs_remove(db_info->debugfs_entries[i]);
}
debugfs_remove(db_info->debugfs_root_entry);
if (db_info == debug_area_first)
debug_area_first = db_info->next;
if (db_info == debug_area_last)
debug_area_last = db_info->prev;
if (db_info->prev)
db_info->prev->next = db_info->next;
if (db_info->next)
db_info->next->prev = db_info->prev;
if (refcount_dec_and_test(&db_info->ref_count))
debug_info_free(db_info);
}
}
/*
@ -648,6 +613,31 @@ static int debug_close(struct inode *inode, struct file *file)
return 0; /* success */
}
/* Create debugfs entries and add to internal list. */
static void _debug_register(debug_info_t *id)
{
/* create root directory */
id->debugfs_root_entry = debugfs_create_dir(id->name,
debug_debugfs_root_entry);
/* append new element to linked list */
if (!debug_area_first) {
/* first element in list */
debug_area_first = id;
id->prev = NULL;
} else {
/* append element to end of list */
debug_area_last->next = id;
id->prev = debug_area_last;
}
debug_area_last = id;
id->next = NULL;
debug_register_view(id, &debug_level_view);
debug_register_view(id, &debug_flush_view);
debug_register_view(id, &debug_pages_view);
}
/**
* debug_register_mode() - creates and initializes debug area.
*
@ -677,19 +667,16 @@ debug_info_t *debug_register_mode(const char *name, int pages_per_area,
if ((uid != 0) || (gid != 0))
pr_warn("Root becomes the owner of all s390dbf files in sysfs\n");
BUG_ON(!initialized);
mutex_lock(&debug_mutex);
/* create new debug_info */
rc = debug_info_create(name, pages_per_area, nr_areas, buf_size, mode);
if (!rc)
goto out;
debug_register_view(rc, &debug_level_view);
debug_register_view(rc, &debug_flush_view);
debug_register_view(rc, &debug_pages_view);
out:
if (!rc)
if (rc) {
mutex_lock(&debug_mutex);
_debug_register(rc);
mutex_unlock(&debug_mutex);
} else {
pr_err("Registering debug feature %s failed\n", name);
mutex_unlock(&debug_mutex);
}
return rc;
}
EXPORT_SYMBOL(debug_register_mode);
@ -718,6 +705,27 @@ debug_info_t *debug_register(const char *name, int pages_per_area,
}
EXPORT_SYMBOL(debug_register);
/* Remove debugfs entries and remove from internal list. */
static void _debug_unregister(debug_info_t *id)
{
int i;
for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
if (!id->views[i])
continue;
debugfs_remove(id->debugfs_entries[i]);
}
debugfs_remove(id->debugfs_root_entry);
if (id == debug_area_first)
debug_area_first = id->next;
if (id == debug_area_last)
debug_area_last = id->prev;
if (id->prev)
id->prev->next = id->next;
if (id->next)
id->next->prev = id->prev;
}
/**
* debug_unregister() - give back debug area.
*
@ -731,8 +739,10 @@ void debug_unregister(debug_info_t *id)
if (!id)
return;
mutex_lock(&debug_mutex);
debug_info_put(id);
_debug_unregister(id);
mutex_unlock(&debug_mutex);
debug_info_put(id);
}
EXPORT_SYMBOL(debug_unregister);


@ -408,13 +408,13 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
set_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
set_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
}
static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
}
static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
@ -2984,18 +2984,18 @@ int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
static void __airqs_kick_single_vcpu(struct kvm *kvm, u8 deliverable_mask)
{
int vcpu_id, online_vcpus = atomic_read(&kvm->online_vcpus);
int vcpu_idx, online_vcpus = atomic_read(&kvm->online_vcpus);
struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
struct kvm_vcpu *vcpu;
for_each_set_bit(vcpu_id, kvm->arch.idle_mask, online_vcpus) {
vcpu = kvm_get_vcpu(kvm, vcpu_id);
for_each_set_bit(vcpu_idx, kvm->arch.idle_mask, online_vcpus) {
vcpu = kvm_get_vcpu(kvm, vcpu_idx);
if (psw_ioint_disabled(vcpu))
continue;
deliverable_mask &= (u8)(vcpu->arch.sie_block->gcr[6] >> 24);
if (deliverable_mask) {
/* lately kicked but not yet running */
if (test_and_set_bit(vcpu_id, gi->kicked_mask))
if (test_and_set_bit(vcpu_idx, gi->kicked_mask))
return;
kvm_s390_vcpu_wakeup(vcpu);
return;


@ -3726,7 +3726,7 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
kvm_s390_patch_guest_per_regs(vcpu);
}
clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.gisa_int.kicked_mask);
clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.gisa_int.kicked_mask);
vcpu->arch.sie_block->icptcode = 0;
cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);


@ -67,7 +67,7 @@ static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
{
return test_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
return test_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
}
static inline int kvm_is_ucontrol(struct kvm *kvm)


@ -101,6 +101,9 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
pgt_prot = pgprot_val(PAGE_KERNEL_EXEC);
sgt_prot = pgprot_val(SEGMENT_KERNEL_EXEC);
/*
* The first 1MB of 1:1 mapping is mapped with 4KB pages
*/
while (address < end) {
pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir)) {
@ -146,30 +149,26 @@ static void __init kasan_early_vmemmap_populate(unsigned long address,
pm_dir = pmd_offset(pu_dir, address);
if (pmd_none(*pm_dir)) {
if (mode == POPULATE_ZERO_SHADOW &&
IS_ALIGNED(address, PMD_SIZE) &&
if (IS_ALIGNED(address, PMD_SIZE) &&
end - address >= PMD_SIZE) {
pmd_populate(&init_mm, pm_dir,
kasan_early_shadow_pte);
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
/* the first megabyte of 1:1 is mapped with 4k pages */
if (has_edat && address && end - address >= PMD_SIZE &&
mode != POPULATE_ZERO_SHADOW) {
void *page;
if (mode == POPULATE_ZERO_SHADOW) {
pmd_populate(&init_mm, pm_dir, kasan_early_shadow_pte);
address = (address + PMD_SIZE) & PMD_MASK;
continue;
} else if (has_edat && address) {
void *page;
if (mode == POPULATE_ONE2ONE) {
page = (void *)address;
} else {
page = kasan_early_alloc_segment();
memset(page, 0, _SEGMENT_SIZE);
if (mode == POPULATE_ONE2ONE) {
page = (void *)address;
} else {
page = kasan_early_alloc_segment();
memset(page, 0, _SEGMENT_SIZE);
}
pmd_val(*pm_dir) = __pa(page) | sgt_prot;
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
pmd_val(*pm_dir) = __pa(page) | sgt_prot;
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
pt_dir = kasan_early_pte_alloc();
pmd_populate(&init_mm, pm_dir, pt_dir);
} else if (pmd_large(*pm_dir)) {


@ -913,6 +913,11 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
break;
}
break;
/*
* BPF_NOSPEC (speculation barrier)
*/
case BPF_ST | BPF_NOSPEC:
break;
/*
* BPF_ST(X)
*/


@ -1287,6 +1287,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
return 1;
break;
}
/* speculation barrier */
case BPF_ST | BPF_NOSPEC:
break;
/* ST: *(size *)(dst + off) = imm */
case BPF_ST | BPF_MEM | BPF_W:
case BPF_ST | BPF_MEM | BPF_H:


@ -562,6 +562,7 @@ static struct perf_ibs perf_ibs_op = {
.start = perf_ibs_start,
.stop = perf_ibs_stop,
.read = perf_ibs_read,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
},
.msr = MSR_AMD64_IBSOPCTL,
.config_mask = IBS_OP_CONFIG_MASK,


@ -242,6 +242,12 @@ static u64 __mon_event_count(u32 rmid, struct rmid_read *rr)
case QOS_L3_MBM_LOCAL_EVENT_ID:
m = &rr->d->mbm_local[rmid];
break;
default:
/*
* Code would never reach here because an invalid
* event id would fail the __rmid_read.
*/
return RMID_VAL_ERROR;
}
if (rr->first) {


@ -2057,12 +2057,11 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
~PIN_BASED_VMX_PREEMPTION_TIMER);
/* Posted interrupts setting is only taken from vmcs12. */
if (nested_cpu_has_posted_intr(vmcs12)) {
vmx->nested.pi_pending = false;
if (nested_cpu_has_posted_intr(vmcs12))
vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
vmx->nested.pi_pending = false;
} else {
else
exec_control &= ~PIN_BASED_POSTED_INTR;
}
pin_controls_set(vmx, exec_control);
/*


@ -2764,6 +2764,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!msr_info->host_initiated) {
s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
adjust_tsc_offset_guest(vcpu, adj);
/* Before back to guest, tsc_timestamp must be adjusted
* as well, otherwise guest's percpu pvclock time could jump.
*/
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
}
vcpu->arch.ia32_tsc_adjust_msr = data;
}


@ -728,6 +728,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
}
break;
/* speculation barrier */
case BPF_ST | BPF_NOSPEC:
if (boot_cpu_has(X86_FEATURE_XMM2))
/* Emit 'lfence' */
EMIT3(0x0F, 0xAE, 0xE8);
break;
/* ST: *(u8*)(dst_reg + off) = imm */
case BPF_ST | BPF_MEM | BPF_B:
if (is_ereg(dst_reg))


@ -1705,6 +1705,12 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
i++;
break;
}
/* speculation barrier */
case BPF_ST | BPF_NOSPEC:
if (boot_cpu_has(X86_FEATURE_XMM2))
/* Emit 'lfence' */
EMIT3(0x0F, 0xAE, 0xE8);
break;
/* ST: *(u8*)(dst_reg + off) = imm */
case BPF_ST | BPF_MEM | BPF_H:
case BPF_ST | BPF_MEM | BPF_B:


@ -46,11 +46,19 @@ endif
redirect_openssl = 2>&1
quiet_redirect_openssl = 2>&1
silent_redirect_openssl = 2>/dev/null
openssl_available = $(shell openssl help 2>/dev/null && echo yes)
# We do it this way rather than having a boolean option for enabling an
# external private key, because 'make randconfig' might enable such a
# boolean option and we unfortunately can't make it depend on !RANDCONFIG.
ifeq ($(CONFIG_MODULE_SIG_KEY),"certs/signing_key.pem")
ifeq ($(openssl_available),yes)
X509TEXT=$(shell openssl x509 -in "certs/signing_key.pem" -text 2>/dev/null)
$(if $(findstring rsaEncryption,$(X509TEXT)),,$(shell rm -f "certs/signing_key.pem"))
endif
$(obj)/signing_key.pem: $(obj)/x509.genkey
@$(kecho) "###"
@$(kecho) "### Now generating an X.509 key pair to be used for signing modules."


@ -6394,7 +6394,7 @@ int ata_host_start(struct ata_host *host)
have_stop = 1;
}
if (host->ops->host_stop)
if (host->ops && host->ops->host_stop)
have_stop = 1;
if (have_stop) {


@ -1503,7 +1503,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
if (ret) {
dev_err(map->dev,
"Error in caching of register: %x ret: %d\n",
reg + i, ret);
reg + regmap_get_offset(map, i), ret);
return ret;
}
}


@ -236,6 +236,7 @@ EXPORT_SYMBOL(bcma_core_irq);
void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
{
device_initialize(&core->dev);
core->dev.release = bcma_release_core_dev;
core->dev.bus = &bcma_bus_type;
dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index);
@ -277,11 +278,10 @@ static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core)
{
int err;
err = device_register(&core->dev);
err = device_add(&core->dev);
if (err) {
bcma_err(bus, "Could not register dev for core 0x%03X\n",
core->id.id);
put_device(&core->dev);
return;
}
core->dev_registered = true;
@ -372,7 +372,7 @@ void bcma_unregister_cores(struct bcma_bus *bus)
/* Now noone uses internally-handled cores, we can free them */
list_for_each_entry_safe(core, tmp, &bus->cores, list) {
list_del(&core->list);
kfree(core);
put_device(&core->dev);
}
}


@ -265,6 +265,7 @@ static const char *powersave_parents[] = {
static const struct clk_muxing_soc_desc kirkwood_mux_desc[] __initconst = {
{ "powersave", powersave_parents, ARRAY_SIZE(powersave_parents),
11, 1, 0 },
{ }
};
static struct clk *clk_muxing_get_src(


@ -568,7 +568,8 @@ static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag)
ch->flags |= flag;
/* setup timeout if no clockevent */
if ((flag == FLAG_CLOCKSOURCE) && (!(ch->flags & FLAG_CLOCKEVENT)))
if (ch->cmt->num_channels == 1 &&
flag == FLAG_CLOCKSOURCE && (!(ch->flags & FLAG_CLOCKEVENT)))
__sh_cmt_set_next(ch, ch->max_match_value);
out:
raw_spin_unlock_irqrestore(&ch->lock, flags);
@ -604,20 +605,25 @@ static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
static u64 sh_cmt_clocksource_read(struct clocksource *cs)
{
struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
unsigned long flags;
u32 has_wrapped;
u64 value;
u32 raw;
raw_spin_lock_irqsave(&ch->lock, flags);
value = ch->total_cycles;
raw = sh_cmt_get_counter(ch, &has_wrapped);
if (ch->cmt->num_channels == 1) {
unsigned long flags;
u64 value;
u32 raw;
if (unlikely(has_wrapped))
raw += ch->match_value + 1;
raw_spin_unlock_irqrestore(&ch->lock, flags);
raw_spin_lock_irqsave(&ch->lock, flags);
value = ch->total_cycles;
raw = sh_cmt_get_counter(ch, &has_wrapped);
return value + raw;
if (unlikely(has_wrapped))
raw += ch->match_value + 1;
raw_spin_unlock_irqrestore(&ch->lock, flags);
return value + raw;
}
return sh_cmt_get_counter(ch, &has_wrapped);
}
static int sh_cmt_clocksource_enable(struct clocksource *cs)
@ -680,7 +686,7 @@ static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
cs->disable = sh_cmt_clocksource_disable;
cs->suspend = sh_cmt_clocksource_suspend;
cs->resume = sh_cmt_clocksource_resume;
cs->mask = CLOCKSOURCE_MASK(sizeof(u64) * 8);
cs->mask = CLOCKSOURCE_MASK(ch->cmt->info->width);
cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n",

View File

@ -1230,12 +1230,13 @@ static ssize_t quad8_count_ceiling_write(struct counter_device *counter,
case 1:
case 3:
quad8_preset_register_set(priv, count->id, ceiling);
break;
mutex_unlock(&priv->lock);
return len;
}
mutex_unlock(&priv->lock);
return len;
return -EINVAL;
}
static ssize_t quad8_count_preset_enable_read(struct counter_device *counter,


@ -175,15 +175,19 @@ static int dcp_vmi_irq_bak, dcp_irq_bak;
static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
{
int dma_err;
struct dcp *sdcp = global_sdcp;
const int chan = actx->chan;
uint32_t stat;
unsigned long ret;
struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
DMA_TO_DEVICE);
dma_err = dma_mapping_error(sdcp->dev, desc_phys);
if (dma_err)
return dma_err;
reinit_completion(&sdcp->completion[chan]);
/* Clear status register. */
@ -221,18 +225,29 @@ static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
struct ablkcipher_request *req, int init)
{
dma_addr_t key_phys, src_phys, dst_phys;
struct dcp *sdcp = global_sdcp;
struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
int ret;
dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
2 * AES_KEYSIZE_128,
DMA_TO_DEVICE);
dma_addr_t src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
DCP_BUF_SZ, DMA_TO_DEVICE);
dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
DCP_BUF_SZ, DMA_FROM_DEVICE);
key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
ret = dma_mapping_error(sdcp->dev, key_phys);
if (ret)
return ret;
src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
DCP_BUF_SZ, DMA_TO_DEVICE);
ret = dma_mapping_error(sdcp->dev, src_phys);
if (ret)
goto err_src;
dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
DCP_BUF_SZ, DMA_FROM_DEVICE);
ret = dma_mapping_error(sdcp->dev, dst_phys);
if (ret)
goto err_dst;
if (actx->fill % AES_BLOCK_SIZE) {
dev_err(sdcp->dev, "Invalid block size!\n");
@ -270,10 +285,12 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
ret = mxs_dcp_start_dma(actx);
aes_done_run:
dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
err_dst:
dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
err_src:
dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
DMA_TO_DEVICE);
dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
return ret;
}
@ -585,6 +602,10 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
DCP_BUF_SZ, DMA_TO_DEVICE);
ret = dma_mapping_error(sdcp->dev, buf_phys);
if (ret)
return ret;
/* Fill in the DMA descriptor. */
desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
MXS_DCP_CONTROL0_INTERRUPT |
@ -617,6 +638,10 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
if (rctx->fini) {
digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
ret = dma_mapping_error(sdcp->dev, digest_phys);
if (ret)
goto done_run;
desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
desc->payload = digest_phys;
}


@ -1734,7 +1734,7 @@ static void omap_sham_done_task(unsigned long data)
if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
goto finish;
} else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
if (test_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
omap_sham_update_dma_stop(dd);
if (dd->err) {
err = dd->err;


@ -123,10 +123,10 @@ void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
hw_data->enable_error_correction = adf_vf_void_noop;
hw_data->init_admin_comms = adf_vf_int_noop;
hw_data->exit_admin_comms = adf_vf_void_noop;
hw_data->send_admin_init = adf_vf2pf_init;
hw_data->send_admin_init = adf_vf2pf_notify_init;
hw_data->init_arb = adf_vf_int_noop;
hw_data->exit_arb = adf_vf_void_noop;
hw_data->disable_iov = adf_vf2pf_shutdown;
hw_data->disable_iov = adf_vf2pf_notify_shutdown;
hw_data->get_accel_mask = get_accel_mask;
hw_data->get_ae_mask = get_ae_mask;
hw_data->get_num_accels = get_num_accels;


@ -123,10 +123,10 @@ void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
hw_data->enable_error_correction = adf_vf_void_noop;
hw_data->init_admin_comms = adf_vf_int_noop;
hw_data->exit_admin_comms = adf_vf_void_noop;
hw_data->send_admin_init = adf_vf2pf_init;
hw_data->send_admin_init = adf_vf2pf_notify_init;
hw_data->init_arb = adf_vf_int_noop;
hw_data->exit_arb = adf_vf_void_noop;
hw_data->disable_iov = adf_vf2pf_shutdown;
hw_data->disable_iov = adf_vf2pf_notify_shutdown;
hw_data->get_accel_mask = get_accel_mask;
hw_data->get_ae_mask = get_ae_mask;
hw_data->get_num_accels = get_num_accels;


@ -239,8 +239,8 @@ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
int adf_vf2pf_init(struct adf_accel_dev *accel_dev);
void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev);
int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev);
void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev);
int adf_init_pf_wq(void);
void adf_exit_pf_wq(void);
int adf_init_vf_wq(void);
@ -263,12 +263,12 @@ static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
{
}
static inline int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
static inline int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
{
return 0;
}
static inline void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
static inline void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
{
}


@ -105,6 +105,7 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
struct service_hndl *service;
struct list_head *list_itr;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
int ret;
if (!hw_data) {
dev_err(&GET_DEV(accel_dev),
@ -171,9 +172,9 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
}
hw_data->enable_error_correction(accel_dev);
hw_data->enable_vf2pf_comms(accel_dev);
ret = hw_data->enable_vf2pf_comms(accel_dev);
return 0;
return ret;
}
EXPORT_SYMBOL_GPL(adf_dev_init);


@ -59,6 +59,8 @@
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
#define ADF_MAX_NUM_VFS 32
static int adf_enable_msix(struct adf_accel_dev *accel_dev)
{
struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
@ -111,7 +113,7 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
struct adf_bar *pmisc =
&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
void __iomem *pmisc_bar_addr = pmisc->virt_addr;
u32 vf_mask;
unsigned long vf_mask;
/* Get the interrupt sources triggered by VFs */
vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU5) &
@ -132,8 +134,7 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
* unless the VF is malicious and is attempting to
* flood the host OS with VF2PF interrupts.
*/
for_each_set_bit(i, (const unsigned long *)&vf_mask,
(sizeof(vf_mask) * BITS_PER_BYTE)) {
for_each_set_bit(i, &vf_mask, ADF_MAX_NUM_VFS) {
vf_info = accel_dev->pf.vf_info + i;
if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {


@ -231,7 +231,6 @@ int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
return ret;
}
EXPORT_SYMBOL_GPL(adf_iov_putmsg);
void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
{
@ -361,6 +360,8 @@ static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
msg |= ADF_PFVF_COMPATIBILITY_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
BUILD_BUG_ON(ADF_PFVF_COMPATIBILITY_VERSION > 255);
reinit_completion(&accel_dev->vf.iov_msg_completion);
/* Send request from VF to PF */
ret = adf_iov_putmsg(accel_dev, msg, 0);
if (ret) {


@ -49,14 +49,14 @@
#include "adf_pf2vf_msg.h"
/**
* adf_vf2pf_init() - send init msg to PF
* adf_vf2pf_notify_init() - send init msg to PF
* @accel_dev: Pointer to acceleration VF device.
*
* Function sends an init messge from the VF to a PF
*
* Return: 0 on success, error code otherwise.
*/
int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
{
u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
(ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
@ -69,17 +69,17 @@ int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
return 0;
}
EXPORT_SYMBOL_GPL(adf_vf2pf_init);
EXPORT_SYMBOL_GPL(adf_vf2pf_notify_init);
/**
* adf_vf2pf_shutdown() - send shutdown msg to PF
* adf_vf2pf_notify_shutdown() - send shutdown msg to PF
* @accel_dev: Pointer to acceleration VF device.
*
* Function sends a shutdown messge from the VF to a PF
*
* Return: void
*/
void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
{
u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
(ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
@ -89,4 +89,4 @@ void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
dev_err(&GET_DEV(accel_dev),
"Failed to send Shutdown event to PF\n");
}
EXPORT_SYMBOL_GPL(adf_vf2pf_shutdown);
EXPORT_SYMBOL_GPL(adf_vf2pf_notify_shutdown);


@ -203,6 +203,7 @@ static irqreturn_t adf_isr(int irq, void *privdata)
struct adf_bar *pmisc =
&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
void __iomem *pmisc_bar_addr = pmisc->virt_addr;
bool handled = false;
u32 v_int;
/* Read VF INT source CSR to determine the source of VF interrupt */
@ -215,7 +216,7 @@ static irqreturn_t adf_isr(int irq, void *privdata)
/* Schedule tasklet to handle interrupt BH */
tasklet_hi_schedule(&accel_dev->vf.pf2vf_bh_tasklet);
return IRQ_HANDLED;
handled = true;
}
/* Check bundle interrupt */
@ -227,10 +228,10 @@ static irqreturn_t adf_isr(int irq, void *privdata)
WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number,
0);
tasklet_hi_schedule(&bank->resp_handler);
return IRQ_HANDLED;
handled = true;
}
return IRQ_NONE;
return handled ? IRQ_HANDLED : IRQ_NONE;
}
static int adf_request_msi_irq(struct adf_accel_dev *accel_dev)


@ -123,10 +123,10 @@ void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
hw_data->enable_error_correction = adf_vf_void_noop;
hw_data->init_admin_comms = adf_vf_int_noop;
hw_data->exit_admin_comms = adf_vf_void_noop;
hw_data->send_admin_init = adf_vf2pf_init;
hw_data->send_admin_init = adf_vf2pf_notify_init;
hw_data->init_arb = adf_vf_int_noop;
hw_data->exit_arb = adf_vf_void_noop;
hw_data->disable_iov = adf_vf2pf_shutdown;
hw_data->disable_iov = adf_vf2pf_notify_shutdown;
hw_data->get_accel_mask = get_accel_mask;
hw_data->get_ae_mask = get_ae_mask;
hw_data->get_num_accels = get_num_accels;


@ -26,8 +26,8 @@
pci_read_config_dword((d)->uracu, 0xd8 + (i) * 4, &(reg))
#define I10NM_GET_DIMMMTR(m, i, j) \
readl((m)->mbase + 0x2080c + (i) * 0x4000 + (j) * 4)
#define I10NM_GET_MCDDRTCFG(m, i, j) \
readl((m)->mbase + 0x20970 + (i) * 0x4000 + (j) * 4)
#define I10NM_GET_MCDDRTCFG(m, i) \
readl((m)->mbase + 0x20970 + (i) * 0x4000)
#define I10NM_GET_MCMTR(m, i) \
readl((m)->mbase + 0x20ef8 + (i) * 0x4000)
@ -156,11 +156,11 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci)
continue;
ndimms = 0;
mcddrtcfg = I10NM_GET_MCDDRTCFG(imc, i);
for (j = 0; j < I10NM_NUM_DIMMS; j++) {
dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
mci->n_layers, i, j, 0);
mtr = I10NM_GET_DIMMMTR(imc, i, j);
mcddrtcfg = I10NM_GET_MCDDRTCFG(imc, i, j);
edac_dbg(1, "dimmmtr 0x%x mcddrtcfg 0x%x (mc%d ch%d dimm%d)\n",
mtr, mcddrtcfg, imc->mc, i, j);


@ -163,17 +163,28 @@ static int acp_poweron(struct generic_pm_domain *genpd)
return 0;
}
static struct device *get_mfd_cell_dev(const char *device_name, int r)
static int acp_genpd_add_device(struct device *dev, void *data)
{
char auto_dev_name[25];
struct device *dev;
struct generic_pm_domain *gpd = data;
int ret;
snprintf(auto_dev_name, sizeof(auto_dev_name),
"%s.%d.auto", device_name, r);
dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name);
dev_info(dev, "device %s added to pm domain\n", auto_dev_name);
ret = pm_genpd_add_device(gpd, dev);
if (ret)
dev_err(dev, "Failed to add dev to genpd %d\n", ret);
return dev;
return ret;
}
static int acp_genpd_remove_device(struct device *dev, void *data)
{
int ret;
ret = pm_genpd_remove_device(dev);
if (ret)
dev_err(dev, "Failed to remove dev from genpd %d\n", ret);
/* Continue to remove */
return 0;
}
/**
@ -184,11 +195,10 @@ static struct device *get_mfd_cell_dev(const char *device_name, int r)
*/
static int acp_hw_init(void *handle)
{
int r, i;
int r;
uint64_t acp_base;
u32 val = 0;
u32 count = 0;
struct device *dev;
struct i2s_platform_data *i2s_pdata = NULL;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@ -344,15 +354,10 @@ static int acp_hw_init(void *handle)
if (r)
goto failure;
for (i = 0; i < ACP_DEVS ; i++) {
dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
if (r) {
dev_err(dev, "Failed to add dev to genpd\n");
goto failure;
}
}
r = device_for_each_child(adev->acp.parent, &adev->acp.acp_genpd->gpd,
acp_genpd_add_device);
if (r)
goto failure;
/* Assert Soft reset of ACP */
val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
@ -413,10 +418,8 @@ failure:
*/
static int acp_hw_fini(void *handle)
{
int i, ret;
u32 val = 0;
u32 count = 0;
struct device *dev;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* return early if no ACP */
@ -461,13 +464,8 @@ static int acp_hw_fini(void *handle)
udelay(100);
}
for (i = 0; i < ACP_DEVS ; i++) {
dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
ret = pm_genpd_remove_device(dev);
/* If removal fails, dont giveup and try rest */
if (ret)
dev_err(dev, "remove dev from genpd failed\n");
}
device_for_each_child(adev->acp.parent, NULL,
acp_genpd_remove_device);
mfd_remove_devices(adev->acp.parent);
kfree(adev->acp.acp_res);


@ -281,10 +281,12 @@ static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
int i;
for (i = 0; i < ctx->mixer_count; i++) {
DPU_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0);
DPU_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
DPU_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0);
DPU_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0);
enum dpu_lm mixer_id = ctx->mixer_hw_caps[i].id;
DPU_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
DPU_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
DPU_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
DPU_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
}
}


@ -26,8 +26,10 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
}
phy_pdev = of_find_device_by_node(phy_node);
if (phy_pdev)
if (phy_pdev) {
msm_dsi->phy = platform_get_drvdata(phy_pdev);
msm_dsi->phy_dev = &phy_pdev->dev;
}
of_node_put(phy_node);
@ -36,8 +38,6 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
return -EPROBE_DEFER;
}
msm_dsi->phy_dev = get_device(&phy_pdev->dev);
return 0;
}


@ -59,7 +59,8 @@ static int panfrost_clk_init(struct panfrost_device *pfdev)
if (IS_ERR(pfdev->bus_clock)) {
dev_err(pfdev->dev, "get bus_clock failed %ld\n",
PTR_ERR(pfdev->bus_clock));
return PTR_ERR(pfdev->bus_clock);
err = PTR_ERR(pfdev->bus_clock);
goto disable_clock;
}
if (pfdev->bus_clock) {


@ -379,7 +379,7 @@ static int highlander_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
dev->irq = platform_get_irq(pdev, 0);
if (iic_force_poll)
if (dev->irq < 0 || iic_force_poll)
dev->irq = 0;
if (dev->irq) {


@ -467,16 +467,14 @@ iop3xx_i2c_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = -ENXIO;
ret = irq;
goto unmap;
}
ret = request_irq(irq, iop3xx_i2c_irq_handler, 0,
pdev->name, adapter_data);
if (ret) {
ret = -EIO;
if (ret)
goto unmap;
}
memcpy(new_adapter->name, pdev->name, strlen(pdev->name));
new_adapter->owner = THIS_MODULE;


@ -932,7 +932,7 @@ static int mtk_i2c_probe(struct platform_device *pdev)
return PTR_ERR(i2c->pdmabase);
irq = platform_get_irq(pdev, 0);
if (irq <= 0)
if (irq < 0)
return irq;
init_completion(&i2c->msg_complete);


@ -1141,7 +1141,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
*/
if (!(i2c->quirks & QUIRK_POLL)) {
i2c->irq = ret = platform_get_irq(pdev, 0);
if (ret <= 0) {
if (ret < 0) {
dev_err(&pdev->dev, "cannot find IRQ\n");
clk_unprepare(i2c->clk);
return ret;


@ -103,10 +103,9 @@ static int lt3593_led_probe(struct platform_device *pdev)
init_data.default_label = ":";
ret = devm_led_classdev_register_ext(dev, &led_data->cdev, &init_data);
if (ret < 0) {
fwnode_handle_put(child);
fwnode_handle_put(child);
if (ret < 0)
return ret;
}
led_data->cdev.dev->of_node = dev->of_node;
platform_set_drvdata(pdev, led_data);


@ -6,10 +6,33 @@
#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/module.h>
#include "../leds.h"
static struct led_trigger *ledtrig_audio[NUM_AUDIO_LEDS];
static enum led_brightness audio_state[NUM_AUDIO_LEDS];
static int ledtrig_audio_mute_activate(struct led_classdev *led_cdev)
{
led_set_brightness_nosleep(led_cdev, audio_state[LED_AUDIO_MUTE]);
return 0;
}
static int ledtrig_audio_micmute_activate(struct led_classdev *led_cdev)
{
led_set_brightness_nosleep(led_cdev, audio_state[LED_AUDIO_MICMUTE]);
return 0;
}
static struct led_trigger ledtrig_audio[NUM_AUDIO_LEDS] = {
[LED_AUDIO_MUTE] = {
.name = "audio-mute",
.activate = ledtrig_audio_mute_activate,
},
[LED_AUDIO_MICMUTE] = {
.name = "audio-micmute",
.activate = ledtrig_audio_micmute_activate,
},
};
enum led_brightness ledtrig_audio_get(enum led_audio type)
{
return audio_state[type];
@ -19,24 +42,22 @@ EXPORT_SYMBOL_GPL(ledtrig_audio_get);
void ledtrig_audio_set(enum led_audio type, enum led_brightness state)
{
audio_state[type] = state;
led_trigger_event(ledtrig_audio[type], state);
led_trigger_event(&ledtrig_audio[type], state);
}
EXPORT_SYMBOL_GPL(ledtrig_audio_set);
static int __init ledtrig_audio_init(void)
{
led_trigger_register_simple("audio-mute",
&ledtrig_audio[LED_AUDIO_MUTE]);
led_trigger_register_simple("audio-micmute",
&ledtrig_audio[LED_AUDIO_MICMUTE]);
led_trigger_register(&ledtrig_audio[LED_AUDIO_MUTE]);
led_trigger_register(&ledtrig_audio[LED_AUDIO_MICMUTE]);
return 0;
}
module_init(ledtrig_audio_init);
static void __exit ledtrig_audio_exit(void)
{
led_trigger_unregister_simple(ledtrig_audio[LED_AUDIO_MUTE]);
led_trigger_unregister_simple(ledtrig_audio[LED_AUDIO_MICMUTE]);
led_trigger_unregister(&ledtrig_audio[LED_AUDIO_MUTE]);
led_trigger_unregister(&ledtrig_audio[LED_AUDIO_MICMUTE]);
}
module_exit(ledtrig_audio_exit);


@ -839,20 +839,20 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL);
if (!d->full_dirty_stripes)
return -ENOMEM;
goto out_free_stripe_sectors_dirty;
idx = ida_simple_get(&bcache_device_idx, 0,
BCACHE_DEVICE_IDX_MAX, GFP_KERNEL);
if (idx < 0)
return idx;
goto out_free_full_dirty_stripes;
if (bioset_init(&d->bio_split, 4, offsetof(struct bbio, bio),
BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
goto err;
goto out_ida_remove;
d->disk = alloc_disk(BCACHE_MINORS);
if (!d->disk)
goto err;
goto out_bioset_exit;
set_capacity(d->disk, sectors);
snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx);
@ -887,8 +887,14 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
return 0;
err:
out_bioset_exit:
bioset_exit(&d->bio_split);
out_ida_remove:
ida_simple_remove(&bcache_device_idx, idx);
out_free_full_dirty_stripes:
kvfree(d->full_dirty_stripes);
out_free_stripe_sectors_dirty:
kvfree(d->stripe_sectors_dirty);
return -ENOMEM;
}


@ -2233,6 +2233,7 @@ static int tda1997x_core_init(struct v4l2_subdev *sd)
/* get initial HDMI status */
state->hdmi_status = io_read(sd, REG_HDMI_FLAGS);
io_write(sd, REG_EDID_ENABLE, EDID_ENABLE_A_EN | EDID_ENABLE_B_EN);
return 0;
}


@ -2023,17 +2023,25 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
u32 src_fourcc, dst_fourcc;
int ret;
if (!ctx->initialized) {
ret = __coda_decoder_seq_init(ctx);
if (ret < 0)
return ret;
}
q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
src_fourcc = q_data_src->fourcc;
dst_fourcc = q_data_dst->fourcc;
if (!ctx->initialized) {
ret = __coda_decoder_seq_init(ctx);
if (ret < 0)
return ret;
} else {
ctx->frame_mem_ctrl &= ~(CODA_FRAME_CHROMA_INTERLEAVE | (0x3 << 9) |
CODA9_FRAME_TILED2LINEAR);
if (dst_fourcc == V4L2_PIX_FMT_NV12 || dst_fourcc == V4L2_PIX_FMT_YUYV)
ctx->frame_mem_ctrl |= CODA_FRAME_CHROMA_INTERLEAVE;
if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP)
ctx->frame_mem_ctrl |= (0x3 << 9) |
((ctx->use_vdoa) ? 0 : CODA9_FRAME_TILED2LINEAR);
}
coda_write(dev, ctx->parabuf.paddr, CODA_REG_BIT_PARA_BUF_ADDR);
ret = coda_alloc_framebuffers(ctx, q_data_dst, src_fourcc);


@ -308,6 +308,8 @@ venc_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f)
else
return NULL;
fmt = find_format(inst, pixmp->pixelformat, f->type);
if (!fmt)
return NULL;
}
pixmp->width = clamp(pixmp->width, frame_width_min(inst),


@ -524,13 +524,13 @@ cxd2880_spi_probe(struct spi_device *spi)
if (IS_ERR(dvb_spi->vcc_supply)) {
if (PTR_ERR(dvb_spi->vcc_supply) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto fail_adapter;
goto fail_regulator;
}
dvb_spi->vcc_supply = NULL;
} else {
ret = regulator_enable(dvb_spi->vcc_supply);
if (ret)
goto fail_adapter;
goto fail_regulator;
}
dvb_spi->spi = spi;
@ -618,6 +618,9 @@ fail_frontend:
fail_attach:
dvb_unregister_adapter(&dvb_spi->adapter);
fail_adapter:
if (!dvb_spi->vcc_supply)
regulator_disable(dvb_spi->vcc_supply);
fail_regulator:
kfree(dvb_spi);
return ret;
}


@ -17,7 +17,8 @@ int dvb_usb_i2c_init(struct dvb_usb_device *d)
if (d->props.i2c_algo == NULL) {
err("no i2c algorithm specified");
return -EINVAL;
ret = -EINVAL;
goto err;
}
strscpy(d->i2c_adap.name, d->desc->name, sizeof(d->i2c_adap.name));
@ -27,11 +28,15 @@ int dvb_usb_i2c_init(struct dvb_usb_device *d)
i2c_set_adapdata(&d->i2c_adap, d);
if ((ret = i2c_add_adapter(&d->i2c_adap)) < 0)
ret = i2c_add_adapter(&d->i2c_adap);
if (ret < 0) {
err("could not add i2c adapter");
goto err;
}
d->state |= DVB_USB_STATE_I2C;
err:
return ret;
}


@ -194,8 +194,8 @@ static int dvb_usb_init(struct dvb_usb_device *d, short *adapter_nums)
err_adapter_init:
dvb_usb_adapter_exit(d);
err_i2c_init:
dvb_usb_i2c_exit(d);
err_i2c_init:
if (d->priv && d->props.priv_destroy)
d->props.priv_destroy(d);
err_priv_init:


@ -130,7 +130,7 @@ ret:
static int nova_t_read_mac_address (struct dvb_usb_device *d, u8 mac[6])
{
int i;
int i, ret;
u8 b;
mac[0] = 0x00;
@ -139,7 +139,9 @@ static int nova_t_read_mac_address (struct dvb_usb_device *d, u8 mac[6])
/* this is a complete guess, but works for my box */
for (i = 136; i < 139; i++) {
dibusb_read_eeprom_byte(d,i, &b);
ret = dibusb_read_eeprom_byte(d, i, &b);
if (ret)
return ret;
mac[5 - (i - 136)] = b;
}


@ -291,16 +291,22 @@ static int vp702x_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
static int vp702x_read_mac_addr(struct dvb_usb_device *d,u8 mac[6])
{
u8 i, *buf;
int ret;
struct vp702x_device_state *st = d->priv;
mutex_lock(&st->buf_mutex);
buf = st->buf;
for (i = 6; i < 12; i++)
vp702x_usb_in_op(d, READ_EEPROM_REQ, i, 1, &buf[i - 6], 1);
for (i = 6; i < 12; i++) {
ret = vp702x_usb_in_op(d, READ_EEPROM_REQ, i, 1,
&buf[i - 6], 1);
if (ret < 0)
goto err;
}
memcpy(mac, buf, 6);
err:
mutex_unlock(&st->buf_mutex);
return 0;
return ret;
}
static int vp702x_frontend_attach(struct dvb_usb_adapter *adap)


@ -842,7 +842,6 @@ error:
kfree(ir);
ref_put:
em28xx_shutdown_buttons(dev);
kref_put(&dev->ref, em28xx_free_device);
return err;
}


@ -691,49 +691,23 @@ struct go7007 *go7007_alloc(const struct go7007_board_info *board,
struct device *dev)
{
struct go7007 *go;
int i;
go = kzalloc(sizeof(struct go7007), GFP_KERNEL);
if (go == NULL)
return NULL;
go->dev = dev;
go->board_info = board;
go->board_id = 0;
go->tuner_type = -1;
go->channel_number = 0;
go->name[0] = 0;
mutex_init(&go->hw_lock);
init_waitqueue_head(&go->frame_waitq);
spin_lock_init(&go->spinlock);
go->status = STATUS_INIT;
memset(&go->i2c_adapter, 0, sizeof(go->i2c_adapter));
go->i2c_adapter_online = 0;
go->interrupt_available = 0;
init_waitqueue_head(&go->interrupt_waitq);
go->input = 0;
go7007_update_board(go);
go->encoder_h_halve = 0;
go->encoder_v_halve = 0;
go->encoder_subsample = 0;
go->format = V4L2_PIX_FMT_MJPEG;
go->bitrate = 1500000;
go->fps_scale = 1;
go->pali = 0;
go->aspect_ratio = GO7007_RATIO_1_1;
go->gop_size = 0;
go->ipb = 0;
go->closed_gop = 0;
go->repeat_seqhead = 0;
go->seq_header_enable = 0;
go->gop_header_enable = 0;
go->dvd_mode = 0;
go->interlace_coding = 0;
for (i = 0; i < 4; ++i)
go->modet[i].enable = 0;
for (i = 0; i < 1624; ++i)
go->modet_map[i] = 0;
go->audio_deliver = NULL;
go->audio_enabled = 0;
return go;
}


@ -782,6 +782,7 @@ static int dw_mci_edmac_start_dma(struct dw_mci *host,
int ret = 0;
/* Set external dma config: burst size, burst width */
memset(&cfg, 0, sizeof(cfg));
cfg.dst_addr = host->phy_regs + fifo_offset;
cfg.src_addr = cfg.dst_addr;
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;


@ -631,6 +631,7 @@ static int moxart_probe(struct platform_device *pdev)
host->dma_chan_tx, host->dma_chan_rx);
host->have_dma = true;
memset(&cfg, 0, sizeof(cfg));
cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;


@ -2013,10 +2013,11 @@ int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu,
index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry);
if (index >= mcam->bmap_entries)
break;
entry = index + 1;
if (mcam->entry2cntr_map[index] != req->cntr)
continue;
entry = index + 1;
npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
index, req->cntr);
}


@ -234,18 +234,12 @@ struct ttc_params {
void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv, struct ttc_params *ttc_params);
void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params);
void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params);
int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
struct mlx5e_ttc_table *ttc);
void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
struct mlx5e_ttc_table *ttc);
int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
struct mlx5e_ttc_table *ttc);
void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
struct mlx5e_ttc_table *ttc);
void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv);


@ -1123,7 +1123,7 @@ void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv,
ttc_params->inner_ttc = &priv->fs.inner_ttc;
}
void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params)
static void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params)
{
struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
@ -1142,8 +1142,8 @@ void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params)
ft_attr->prio = MLX5E_NIC_PRIO;
}
int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
struct mlx5e_ttc_table *ttc)
static int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
struct mlx5e_ttc_table *ttc)
{
struct mlx5e_flow_table *ft = &ttc->ft;
int err;
@ -1173,8 +1173,8 @@ err:
return err;
}
void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
struct mlx5e_ttc_table *ttc)
static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
struct mlx5e_ttc_table *ttc)
{
if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
return;


@ -319,17 +319,6 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
}
mlx5e_set_ttc_basic_params(priv, &ttc_params);
mlx5e_set_inner_ttc_ft_params(&ttc_params);
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;
err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
if (err) {
netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
err);
goto err_destroy_arfs_tables;
}
mlx5e_set_ttc_ft_params(&ttc_params);
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
@ -338,13 +327,11 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
if (err) {
netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
err);
goto err_destroy_inner_ttc_table;
goto err_destroy_arfs_tables;
}
return 0;
err_destroy_inner_ttc_table:
mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
err_destroy_arfs_tables:
mlx5e_arfs_destroy_tables(priv);
@ -354,7 +341,6 @@ err_destroy_arfs_tables:
static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
{
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
mlx5e_arfs_destroy_tables(priv);
}
@ -379,7 +365,7 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
if (err)
goto err_destroy_indirect_rqts;
err = mlx5e_create_indirect_tirs(priv, true);
err = mlx5e_create_indirect_tirs(priv, false);
if (err)
goto err_destroy_direct_rqts;


@ -434,7 +434,7 @@ qcaspi_receive(struct qcaspi *qca)
skb_put(qca->rx_skb, retcode);
qca->rx_skb->protocol = eth_type_trans(
qca->rx_skb, qca->rx_skb->dev);
qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_checksum_none_assert(qca->rx_skb);
netif_rx_ni(qca->rx_skb);
qca->rx_skb = netdev_alloc_skb_ip_align(net_dev,
net_dev->mtu + VLAN_ETH_HLEN);


@ -107,7 +107,7 @@ qca_tty_receive(struct serdev_device *serdev, const unsigned char *data,
skb_put(qca->rx_skb, retcode);
qca->rx_skb->protocol = eth_type_trans(
qca->rx_skb, qca->rx_skb->dev);
qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_checksum_none_assert(qca->rx_skb);
netif_rx_ni(qca->rx_skb);
qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
netdev->mtu +


@ -2510,8 +2510,10 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx)
goto free_data_skb;
for (index = 0; index < num_pri_streams; index++) {
if (WARN_ON(!data_sync_bufs[index].skb))
if (WARN_ON(!data_sync_bufs[index].skb)) {
ret = -ENOMEM;
goto free_data_skb;
}
ep_id = ath6kl_ac2_endpoint_id(wmi->parent_dev,
data_sync_bufs[index].


@ -2103,7 +2103,7 @@ cleanup:
err = brcmf_pcie_probe(pdev, NULL);
if (err)
brcmf_err(bus, "probe after resume failed, err=%d\n", err);
__brcmf_err(NULL, __func__, "probe after resume failed, err=%d\n", err);
return err;
}


@ -1038,8 +1038,10 @@ static int rsi_load_9116_firmware(struct rsi_hw *adapter)
}
ta_firmware = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
if (!ta_firmware)
if (!ta_firmware) {
status = -ENOMEM;
goto fail_release_fw;
}
fw_p = ta_firmware;
instructions_sz = fw_entry->size;
rsi_dbg(INFO_ZONE, "FW Length = %d bytes\n", instructions_sz);


@ -806,6 +806,7 @@ static int rsi_probe(struct usb_interface *pfunction,
} else {
rsi_dbg(ERR_ZONE, "%s: Unsupported RSI device id 0x%x\n",
__func__, id->idProduct);
status = -ENODEV;
goto err1;
}


@ -665,13 +665,13 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
if (ret)
return ret;
ctrl->ctrl.queue_count = nr_io_queues + 1;
if (ctrl->ctrl.queue_count < 2) {
if (nr_io_queues == 0) {
dev_err(ctrl->ctrl.device,
"unable to set any I/O queues\n");
return -ENOMEM;
}
ctrl->ctrl.queue_count = nr_io_queues + 1;
dev_info(ctrl->ctrl.device,
"creating %d I/O queues.\n", nr_io_queues);


@ -1649,13 +1649,13 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
if (ret)
return ret;
ctrl->queue_count = nr_io_queues + 1;
if (ctrl->queue_count < 2) {
if (nr_io_queues == 0) {
dev_err(ctrl->device,
"unable to set any I/O queues\n");
return -ENOMEM;
}
ctrl->queue_count = nr_io_queues + 1;
dev_info(ctrl->device,
"creating %d I/O queues.\n", nr_io_queues);


@ -116,6 +116,7 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
if (!sqsize) {
pr_warn("queue size zero!\n");
req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);
ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
goto err;
}
@ -250,11 +251,11 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
}
status = nvmet_install_queue(ctrl, req);
if (status) {
/* pass back cntlid that had the issue of installing queue */
req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
if (status)
goto out_ctrl_put;
}
/* pass back cntlid for successful completion */
req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);


@ -2253,7 +2253,14 @@ static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable
if (enable) {
int error;
if (pci_pme_capable(dev, state))
/*
* Enable PME signaling if the device can signal PME from
* D3cold regardless of whether or not it can signal PME from
* the current target state, because that will allow it to
* signal PME when the hierarchy above it goes into D3cold and
* the device itself ends up in D3cold as a result of that.
*/
if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold))
pci_pme_active(dev, true);
else
ret = 1;
@ -2357,16 +2364,20 @@ static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
if (dev->current_state == PCI_D3cold)
target_state = PCI_D3cold;
if (wakeup) {
if (wakeup && dev->pme_support) {
pci_power_t state = target_state;
/*
* Find the deepest state from which the device can generate
* PME#.
*/
if (dev->pme_support) {
while (target_state
&& !(dev->pme_support & (1 << target_state)))
target_state--;
}
while (state && !(dev->pme_support & (1 << state)))
state--;
if (state)
return state;
else if (dev->pme_support & 1)
return PCI_D0;
}
return target_state;


@ -149,7 +149,7 @@ static int fuel_gauge_reg_readb(struct axp288_fg_info *info, int reg)
}
if (ret < 0) {
dev_err(&info->pdev->dev, "axp288 reg read err:%d\n", ret);
dev_err(&info->pdev->dev, "Error reading reg 0x%02x err: %d\n", reg, ret);
return ret;
}
@ -163,7 +163,7 @@ static int fuel_gauge_reg_writeb(struct axp288_fg_info *info, int reg, u8 val)
ret = regmap_write(info->regmap, reg, (unsigned int)val);
if (ret < 0)
dev_err(&info->pdev->dev, "axp288 reg write err:%d\n", ret);
dev_err(&info->pdev->dev, "Error writing reg 0x%02x err: %d\n", reg, ret);
return ret;
}


@ -726,7 +726,7 @@ static inline void max17042_override_por_values(struct max17042_chip *chip)
struct max17042_config_data *config = chip->pdata->config_data;
max17042_override_por(map, MAX17042_TGAIN, config->tgain);
max17042_override_por(map, MAx17042_TOFF, config->toff);
max17042_override_por(map, MAX17042_TOFF, config->toff);
max17042_override_por(map, MAX17042_CGAIN, config->cgain);
max17042_override_por(map, MAX17042_COFF, config->coff);


@ -37,7 +37,6 @@ struct vctrl_voltage_table {
struct vctrl_data {
struct regulator_dev *rdev;
struct regulator_desc desc;
struct regulator *ctrl_reg;
bool enabled;
unsigned int min_slew_down_rate;
unsigned int ovp_threshold;
@ -82,7 +81,12 @@ static int vctrl_calc_output_voltage(struct vctrl_data *vctrl, int ctrl_uV)
static int vctrl_get_voltage(struct regulator_dev *rdev)
{
struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
int ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev);
int ctrl_uV;
if (!rdev->supply)
return -EPROBE_DEFER;
ctrl_uV = regulator_get_voltage_rdev(rdev->supply->rdev);
return vctrl_calc_output_voltage(vctrl, ctrl_uV);
}
@ -92,14 +96,19 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
unsigned int *selector)
{
struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
struct regulator *ctrl_reg = vctrl->ctrl_reg;
int orig_ctrl_uV = regulator_get_voltage_rdev(ctrl_reg->rdev);
int uV = vctrl_calc_output_voltage(vctrl, orig_ctrl_uV);
int orig_ctrl_uV;
int uV;
int ret;
if (!rdev->supply)
return -EPROBE_DEFER;
orig_ctrl_uV = regulator_get_voltage_rdev(rdev->supply->rdev);
uV = vctrl_calc_output_voltage(vctrl, orig_ctrl_uV);
if (req_min_uV >= uV || !vctrl->ovp_threshold)
/* voltage rising or no OVP */
return regulator_set_voltage_rdev(ctrl_reg->rdev,
return regulator_set_voltage_rdev(rdev->supply->rdev,
vctrl_calc_ctrl_voltage(vctrl, req_min_uV),
vctrl_calc_ctrl_voltage(vctrl, req_max_uV),
PM_SUSPEND_ON);
@ -117,7 +126,7 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
next_uV = max_t(int, req_min_uV, uV - max_drop_uV);
next_ctrl_uV = vctrl_calc_ctrl_voltage(vctrl, next_uV);
ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
ret = regulator_set_voltage_rdev(rdev->supply->rdev,
next_ctrl_uV,
next_ctrl_uV,
PM_SUSPEND_ON);
@ -134,7 +143,7 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
err:
/* Try to go back to original voltage */
regulator_set_voltage_rdev(ctrl_reg->rdev, orig_ctrl_uV, orig_ctrl_uV,
regulator_set_voltage_rdev(rdev->supply->rdev, orig_ctrl_uV, orig_ctrl_uV,
PM_SUSPEND_ON);
return ret;
@ -151,16 +160,18 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
unsigned int selector)
{
struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
struct regulator *ctrl_reg = vctrl->ctrl_reg;
unsigned int orig_sel = vctrl->sel;
int ret;
if (!rdev->supply)
return -EPROBE_DEFER;
if (selector >= rdev->desc->n_voltages)
return -EINVAL;
if (selector >= vctrl->sel || !vctrl->ovp_threshold) {
/* voltage rising or no OVP */
ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
ret = regulator_set_voltage_rdev(rdev->supply->rdev,
vctrl->vtable[selector].ctrl,
vctrl->vtable[selector].ctrl,
PM_SUSPEND_ON);
@ -179,7 +190,7 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
else
next_sel = vctrl->vtable[vctrl->sel].ovp_min_sel;
ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
ret = regulator_set_voltage_rdev(rdev->supply->rdev,
vctrl->vtable[next_sel].ctrl,
vctrl->vtable[next_sel].ctrl,
PM_SUSPEND_ON);
@ -202,7 +213,7 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
err:
if (vctrl->sel != orig_sel) {
/* Try to go back to original voltage */
if (!regulator_set_voltage_rdev(ctrl_reg->rdev,
if (!regulator_set_voltage_rdev(rdev->supply->rdev,
vctrl->vtable[orig_sel].ctrl,
vctrl->vtable[orig_sel].ctrl,
PM_SUSPEND_ON))
@ -234,10 +245,6 @@ static int vctrl_parse_dt(struct platform_device *pdev,
u32 pval;
u32 vrange_ctrl[2];
vctrl->ctrl_reg = devm_regulator_get(&pdev->dev, "ctrl");
if (IS_ERR(vctrl->ctrl_reg))
return PTR_ERR(vctrl->ctrl_reg);
ret = of_property_read_u32(np, "ovp-threshold-percent", &pval);
if (!ret) {
vctrl->ovp_threshold = pval;
@ -315,11 +322,11 @@ static int vctrl_cmp_ctrl_uV(const void *a, const void *b)
return at->ctrl - bt->ctrl;
}
static int vctrl_init_vtable(struct platform_device *pdev)
static int vctrl_init_vtable(struct platform_device *pdev,
struct regulator *ctrl_reg)
{
struct vctrl_data *vctrl = platform_get_drvdata(pdev);
struct regulator_desc *rdesc = &vctrl->desc;
struct regulator *ctrl_reg = vctrl->ctrl_reg;
struct vctrl_voltage_range *vrange_ctrl = &vctrl->vrange.ctrl;
int n_voltages;
int ctrl_uV;
@ -395,23 +402,19 @@ static int vctrl_init_vtable(struct platform_device *pdev)
static int vctrl_enable(struct regulator_dev *rdev)
{
struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
int ret = regulator_enable(vctrl->ctrl_reg);
if (!ret)
vctrl->enabled = true;
vctrl->enabled = true;
return ret;
return 0;
}
static int vctrl_disable(struct regulator_dev *rdev)
{
struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
int ret = regulator_disable(vctrl->ctrl_reg);
if (!ret)
vctrl->enabled = false;
vctrl->enabled = false;
return ret;
return 0;
}
static int vctrl_is_enabled(struct regulator_dev *rdev)
@ -447,6 +450,7 @@ static int vctrl_probe(struct platform_device *pdev)
struct regulator_desc *rdesc;
struct regulator_config cfg = { };
struct vctrl_voltage_range *vrange_ctrl;
struct regulator *ctrl_reg;
int ctrl_uV;
int ret;
@ -461,15 +465,20 @@ static int vctrl_probe(struct platform_device *pdev)
if (ret)
return ret;
ctrl_reg = devm_regulator_get(&pdev->dev, "ctrl");
if (IS_ERR(ctrl_reg))
return PTR_ERR(ctrl_reg);
vrange_ctrl = &vctrl->vrange.ctrl;
rdesc = &vctrl->desc;
rdesc->name = "vctrl";
rdesc->type = REGULATOR_VOLTAGE;
rdesc->owner = THIS_MODULE;
rdesc->supply_name = "ctrl";
if ((regulator_get_linear_step(vctrl->ctrl_reg) == 1) ||
(regulator_count_voltages(vctrl->ctrl_reg) == -EINVAL)) {
if ((regulator_get_linear_step(ctrl_reg) == 1) ||
(regulator_count_voltages(ctrl_reg) == -EINVAL)) {
rdesc->continuous_voltage_range = true;
rdesc->ops = &vctrl_ops_cont;
} else {
@ -486,11 +495,12 @@ static int vctrl_probe(struct platform_device *pdev)
cfg.init_data = init_data;
if (!rdesc->continuous_voltage_range) {
ret = vctrl_init_vtable(pdev);
ret = vctrl_init_vtable(pdev, ctrl_reg);
if (ret)
return ret;
ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev);
/* Use locked consumer API when not in regulator framework */
ctrl_uV = regulator_get_voltage(ctrl_reg);
if (ctrl_uV < 0) {
dev_err(&pdev->dev, "failed to get control voltage\n");
return ctrl_uV;
@ -513,6 +523,9 @@ static int vctrl_probe(struct platform_device *pdev)
}
}
/* Drop ctrl-supply here in favor of regulator core managed supply */
devm_regulator_put(ctrl_reg);
vctrl->rdev = devm_regulator_register(&pdev->dev, rdesc, &cfg);
if (IS_ERR(vctrl->rdev)) {
ret = PTR_ERR(vctrl->rdev);
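
For context, a minimal sketch (generic field values, assumed rather than copied from the patch) of the wiring that makes the control regulator a core-managed supply: naming it in the descriptor lets the regulator core resolve the "ctrl"-supply and populate rdev->supply, which is why the callbacks above can return -EPROBE_DEFER until it appears and why the probe path can drop its own reference.

/* Sketch only: with .supply_name set, the core resolves the "ctrl"-supply
 * (possibly later, hence the -EPROBE_DEFER checks above) and stores the
 * reference in rdev->supply for the driver's ops to use.
 */
static const struct regulator_desc example_desc = {
	.name        = "vctrl",
	.type        = REGULATOR_VOLTAGE,
	.owner       = THIS_MODULE,
	.supply_name = "ctrl",
	.ops         = &vctrl_ops_cont,
};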

View File

@ -426,9 +426,26 @@ static ssize_t pimpampom_show(struct device *dev,
}
static DEVICE_ATTR_RO(pimpampom);
static ssize_t dev_busid_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct subchannel *sch = to_subchannel(dev);
struct pmcw *pmcw = &sch->schib.pmcw;
if ((pmcw->st == SUBCHANNEL_TYPE_IO ||
pmcw->st == SUBCHANNEL_TYPE_MSG) && pmcw->dnv)
return sysfs_emit(buf, "0.%x.%04x\n", sch->schid.ssid,
pmcw->dev);
else
return sysfs_emit(buf, "none\n");
}
static DEVICE_ATTR_RO(dev_busid);
static struct attribute *io_subchannel_type_attrs[] = {
&dev_attr_chpids.attr,
&dev_attr_pimpampom.attr,
&dev_attr_dev_busid.attr,
NULL,
};
ATTRIBUTE_GROUPS(io_subchannel_type);
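
Illustrative output of the new attribute (the values and the sysfs path are assumptions for the example, not taken from the patch):

/* With ssid 0 and a valid device number 0x1234 the attribute reads
 * "0.0.1234"; when the device-number-valid (dnv) bit is clear, or the
 * subchannel is neither I/O nor message type, it reads "none".
 *
 *   $ cat /sys/bus/css/devices/0.0.0008/dev_busid   (hypothetical path)
 *   0.0.1234
 */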

View File

@ -235,12 +235,11 @@ static int rpmhpd_power_on(struct generic_pm_domain *domain)
static int rpmhpd_power_off(struct generic_pm_domain *domain)
{
struct rpmhpd *pd = domain_to_rpmhpd(domain);
int ret = 0;
int ret;
mutex_lock(&rpmhpd_lock);
ret = rpmhpd_aggregate_corner(pd, pd->level[0]);
ret = rpmhpd_aggregate_corner(pd, 0);
if (!ret)
pd->enabled = false;

View File

@ -109,7 +109,7 @@ struct smsm_entry {
DECLARE_BITMAP(irq_enabled, 32);
DECLARE_BITMAP(irq_rising, 32);
DECLARE_BITMAP(irq_falling, 32);
u32 last_value;
unsigned long last_value;
u32 *remote_state;
u32 *subscription;
@ -204,8 +204,7 @@ static irqreturn_t smsm_intr(int irq, void *data)
u32 val;
val = readl(entry->remote_state);
changed = val ^ entry->last_value;
entry->last_value = val;
changed = val ^ xchg(&entry->last_value, val);
for_each_set_bit(i, entry->irq_enabled, 32) {
if (!(changed & BIT(i)))
@ -266,6 +265,12 @@ static void smsm_unmask_irq(struct irq_data *irqd)
struct qcom_smsm *smsm = entry->smsm;
u32 val;
/* Make sure our last cached state is up-to-date */
if (readl(entry->remote_state) & BIT(irq))
set_bit(irq, &entry->last_value);
else
clear_bit(irq, &entry->last_value);
set_bit(irq, entry->irq_enabled);
if (entry->subscription) {
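
A minimal sketch of the atomic-update pattern used above (hypothetical helper name; the real code open-codes it): switching last_value to unsigned long lets xchg() in the interrupt handler and set_bit()/clear_bit() in the unmask path operate on the same word without one plain store overwriting the other's update.

/* Hypothetical helper: atomically publish the freshly read hardware word
 * and return only the bits that changed since the last interrupt, so a
 * concurrent set_bit()/clear_bit() on last_value is never lost to a plain
 * store.
 */
static u32 smsm_latch_changed(unsigned long *last_value, u32 hw_val)
{
	return hw_val ^ (u32)xchg(last_value, (unsigned long)hw_val);
}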

View File

@ -6,8 +6,8 @@ if ARCH_ROCKCHIP || COMPILE_TEST
#
config ROCKCHIP_GRF
bool
default y
bool "Rockchip General Register Files support" if COMPILE_TEST
default y if ARCH_ROCKCHIP
help
The General Register Files are a central component providing
special additional settings registers for a lot of soc-components.

View File

@ -423,6 +423,7 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
goto err_rx_dma_buf;
}
memset(&cfg, 0, sizeof(cfg));
cfg.src_addr = phy_addr + SPI_POPR;
cfg.dst_addr = phy_addr + SPI_PUSHR;
cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

View File

@ -361,6 +361,7 @@ static int pic32_spi_dma_config(struct pic32_spi *pic32s, u32 dma_width)
struct dma_slave_config cfg;
int ret;
memset(&cfg, 0, sizeof(cfg));
cfg.device_fc = true;
cfg.src_addr = pic32s->dma_base + buf_offset;
cfg.dst_addr = pic32s->dma_base + buf_offset;
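
Both hunks above apply the same fix; a minimal self-contained sketch of the pattern (generic names, assumed FIFO address):

/* Zero the whole dma_slave_config first so any member the driver does not
 * set explicitly reaches dmaengine_slave_config() as 0 instead of stale
 * stack data.
 */
static int example_dma_setup(struct dma_chan *chan, dma_addr_t fifo)
{
	struct dma_slave_config cfg;

	memset(&cfg, 0, sizeof(cfg));
	cfg.src_addr = fifo;
	cfg.dst_addr = fifo;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	return dmaengine_slave_config(chan, &cfg);
}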

View File

@ -102,7 +102,7 @@
#define HWRST_STATUS_WATCHDOG 0xf0
/* Use default timeout 50 ms that converts to watchdog values */
#define WDG_LOAD_VAL ((50 * 1000) / 32768)
#define WDG_LOAD_VAL ((50 * 32768) / 1000)
#define WDG_LOAD_MASK GENMASK(15, 0)
#define WDG_UNLOCK_KEY 0xe551

View File

@ -533,7 +533,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
ZYNQ_QSPI_IXR_RXTX_MASK);
if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
if (!wait_for_completion_timeout(&xqspi->data_completion,
msecs_to_jiffies(1000)))
err = -ETIMEDOUT;
}
@ -551,7 +551,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
ZYNQ_QSPI_IXR_RXTX_MASK);
if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
if (!wait_for_completion_timeout(&xqspi->data_completion,
msecs_to_jiffies(1000)))
err = -ETIMEDOUT;
}
@ -567,7 +567,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
ZYNQ_QSPI_IXR_RXTX_MASK);
if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
if (!wait_for_completion_timeout(&xqspi->data_completion,
msecs_to_jiffies(1000)))
err = -ETIMEDOUT;
@ -591,7 +591,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
ZYNQ_QSPI_IXR_RXTX_MASK);
if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
if (!wait_for_completion_timeout(&xqspi->data_completion,
msecs_to_jiffies(1000)))
err = -ETIMEDOUT;
}
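
The same substitution is made in all four transfer phases. A minimal sketch of why the non-interruptible wait is used (generic helper, not from the patch): the interruptible variant returns early with -ERESTARTSYS when the task receives a signal, and the surrounding "!ret" check would then let the code continue as if the transfer had finished; wait_for_completion_timeout() only ends on completion or a genuine timeout.

/* Illustrative helper: returns 0 once the controller signals completion,
 * -ETIMEDOUT after one second, and is never cut short by signals.
 */
static int example_wait_done(struct completion *done)
{
	if (!wait_for_completion_timeout(done, msecs_to_jiffies(1000)))
		return -ETIMEDOUT;

	return 0;
}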

View File

@ -2176,8 +2176,6 @@ static int tty_fasync(int fd, struct file *filp, int on)
* Locking:
* Called functions take tty_ldiscs_lock
* current->signal->tty check is safe without locks
*
* FIXME: may race normal receive processing
*/
static int tiocsti(struct tty_struct *tty, char __user *p)
@ -2193,8 +2191,10 @@ static int tiocsti(struct tty_struct *tty, char __user *p)
ld = tty_ldisc_ref_wait(tty);
if (!ld)
return -EIO;
tty_buffer_lock_exclusive(tty->port);
if (ld->ops->receive_buf)
ld->ops->receive_buf(tty, &ch, &mbz, 1);
tty_buffer_unlock_exclusive(tty->port);
tty_ldisc_deref(ld);
return 0;
}

View File

@ -1878,7 +1878,9 @@ static int at91udc_probe(struct platform_device *pdev)
clk_disable(udc->iclk);
/* request UDC and maybe VBUS irqs */
udc->udp_irq = platform_get_irq(pdev, 0);
udc->udp_irq = retval = platform_get_irq(pdev, 0);
if (retval < 0)
goto err_unprepare_iclk;
retval = devm_request_irq(dev, udc->udp_irq, at91_udc_irq, 0,
driver_name, udc);
if (retval) {
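
A minimal sketch of the error-handling pattern introduced above (generic names): platform_get_irq() may return a negative errno (including -EPROBE_DEFER), which must be caught before the value is handed to devm_request_irq().

/* Check the IRQ lookup before using its result as an interrupt number. */
static int example_request_irq(struct platform_device *pdev,
			       irq_handler_t handler, void *priv)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	return devm_request_irq(&pdev->dev, irq, handler, 0,
				dev_name(&pdev->dev), priv);
}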

View File

@ -565,7 +565,8 @@ static int bdc_probe(struct platform_device *pdev)
if (ret) {
dev_err(dev,
"No suitable DMA config available, abort\n");
return -ENOTSUPP;
ret = -ENOTSUPP;
goto phycleanup;
}
dev_dbg(dev, "Using 32-bit address\n");
}

View File

@ -1922,14 +1922,6 @@ static int mv_u3d_probe(struct platform_device *dev)
goto err_get_irq;
}
u3d->irq = r->start;
if (request_irq(u3d->irq, mv_u3d_irq,
IRQF_SHARED, driver_name, u3d)) {
u3d->irq = 0;
dev_err(&dev->dev, "Request irq %d for u3d failed\n",
u3d->irq);
retval = -ENODEV;
goto err_request_irq;
}
/* initialize gadget structure */
u3d->gadget.ops = &mv_u3d_ops; /* usb_gadget_ops */
@ -1942,6 +1934,15 @@ static int mv_u3d_probe(struct platform_device *dev)
mv_u3d_eps_init(u3d);
if (request_irq(u3d->irq, mv_u3d_irq,
IRQF_SHARED, driver_name, u3d)) {
u3d->irq = 0;
dev_err(&dev->dev, "Request irq %d for u3d failed\n",
u3d->irq);
retval = -ENODEV;
goto err_request_irq;
}
/* external vbus detection */
if (u3d->vbus) {
u3d->clock_gating = 1;
@ -1965,8 +1966,8 @@ static int mv_u3d_probe(struct platform_device *dev)
err_unregister:
free_irq(u3d->irq, u3d);
err_request_irq:
err_get_irq:
err_request_irq:
kfree(u3d->status_req);
err_alloc_status_req:
kfree(u3d->eps);
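
The hunks above move the request_irq() call so it follows mv_u3d_eps_init() and swap the error labels to match. A condensed fragment of the ordering rule, reusing the names from the hunks (sketch only, error handling abbreviated): a shared interrupt may fire as soon as the handler is registered, so everything the handler dereferences has to be initialized first.

/* Initialize the state the ISR walks, then register the handler. */
mv_u3d_eps_init(u3d);

if (request_irq(u3d->irq, mv_u3d_irq, IRQF_SHARED, driver_name, u3d)) {
	u3d->irq = 0;
	retval = -ENODEV;
	goto err_request_irq;
}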

View File

@ -2692,10 +2692,15 @@ static const struct renesas_usb3_priv renesas_usb3_priv_r8a77990 = {
static const struct of_device_id usb3_of_match[] = {
{
.compatible = "renesas,r8a774c0-usb3-peri",
.data = &renesas_usb3_priv_r8a77990,
}, {
.compatible = "renesas,r8a7795-usb3-peri",
.data = &renesas_usb3_priv_gen3,
},
{
}, {
.compatible = "renesas,r8a77990-usb3-peri",
.data = &renesas_usb3_priv_r8a77990,
}, {
.compatible = "renesas,rcar-gen3-usb3-peri",
.data = &renesas_usb3_priv_gen3,
},
@ -2704,18 +2709,10 @@ static const struct of_device_id usb3_of_match[] = {
MODULE_DEVICE_TABLE(of, usb3_of_match);
static const struct soc_device_attribute renesas_usb3_quirks_match[] = {
{
.soc_id = "r8a774c0",
.data = &renesas_usb3_priv_r8a77990,
},
{
.soc_id = "r8a7795", .revision = "ES1.*",
.data = &renesas_usb3_priv_r8a7795_es1,
},
{
.soc_id = "r8a77990",
.data = &renesas_usb3_priv_r8a77990,
},
{ /* sentinel */ },
};

Some files were not shown because too many files have changed in this diff.