Merge pull request #334 from zandrey/5.4-2.3.x-imx

Update 5.4-2.3.x-imx up to v5.4.118
Otavio Salvador 2021-05-11 13:48:34 -03:00 committed by GitHub
commit d1bb99cd3c
196 changed files with 1545 additions and 1105 deletions

View File

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 117
SUBLEVEL = 118
EXTRAVERSION =
NAME = Kleptomaniac Octopus
@ -765,16 +765,16 @@ KBUILD_CFLAGS += -Wno-tautological-compare
KBUILD_CFLAGS += -mno-global-merge
else
# These warnings generated too much noise in a regular build.
# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
KBUILD_CFLAGS += -Wno-unused-but-set-variable
# Warn about unmarked fall-throughs in switch statement.
# Disabled for clang while comment to attribute conversion happens and
# https://github.com/ClangBuiltLinux/linux/issues/636 is discussed.
KBUILD_CFLAGS += $(call cc-option,-Wimplicit-fallthrough,)
endif
# These warnings generated too much noise in a regular build.
# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
ifdef CONFIG_FRAME_POINTER
KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls

View File

@ -122,8 +122,8 @@ asflags-y := -DZIMAGE
# Supply kernel BSS size to the decompressor via a linker symbol.
KBSS_SZ = $(shell echo $$(($$($(NM) $(obj)/../../../../vmlinux | \
sed -n -e 's/^\([^ ]*\) [AB] __bss_start$$/-0x\1/p' \
-e 's/^\([^ ]*\) [AB] __bss_stop$$/+0x\1/p') )) )
sed -n -e 's/^\([^ ]*\) [ABD] __bss_start$$/-0x\1/p' \
-e 's/^\([^ ]*\) [ABD] __bss_stop$$/+0x\1/p') )) )
LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ)
# Supply ZRELADDR to the decompressor via a linker symbol.
ifneq ($(CONFIG_AUTO_ZRELADDR),y)

View File

@ -156,7 +156,8 @@
};
nb_periph_clk: nb-periph-clk@13000 {
compatible = "marvell,armada-3700-periph-clock-nb";
compatible = "marvell,armada-3700-periph-clock-nb",
"syscon";
reg = <0x13000 0x100>;
clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>,
<&tbg 3>, <&xtalclk>;

View File

@ -1137,7 +1137,7 @@
<&mmsys CLK_MM_DSI1_DIGITAL>,
<&mipi_tx1>;
clock-names = "engine", "digital", "hs";
phy = <&mipi_tx1>;
phys = <&mipi_tx1>;
phy-names = "dphy";
status = "disabled";
};

View File

@ -28,6 +28,13 @@ SECTIONS
.gnu.version_d : { *(.gnu.version_d) }
.gnu.version_r : { *(.gnu.version_r) }
/*
* Discard .note.gnu.property sections which are unused and have
* different alignment requirement from vDSO note sections.
*/
/DISCARD/ : {
*(.note.GNU-stack .note.gnu.property)
}
.note : { *(.note.*) } :text :note
. = ALIGN(16);
@ -48,7 +55,6 @@ SECTIONS
PROVIDE(end = .);
/DISCARD/ : {
*(.note.GNU-stack)
*(.data .data.* .gnu.linkonce.d.* .sdata*)
*(.bss .sbss .dynbss .dynsbss)
}

View File

@ -2,6 +2,7 @@
#ifndef _ASM_POWERPC_ERRNO_H
#define _ASM_POWERPC_ERRNO_H
#undef EDEADLOCK
#include <asm-generic/errno.h>
#undef EDEADLOCK

View File

@ -368,14 +368,11 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
pa = pte_pfn(*ptep);
/* On radix we can do hugepage mappings for io, so handle that */
if (hugepage_shift) {
pa <<= hugepage_shift;
pa |= token & ((1ul << hugepage_shift) - 1);
} else {
pa <<= PAGE_SHIFT;
pa |= token & (PAGE_SIZE - 1);
}
if (!hugepage_shift)
hugepage_shift = PAGE_SHIFT;
pa <<= PAGE_SHIFT;
pa |= token & ((1ul << hugepage_shift) - 1);
return pa;
}

View File

@ -53,6 +53,10 @@ static DECLARE_DELAYED_WORK(arch_rng_work, arch_rng_refill_buffer);
bool s390_arch_random_generate(u8 *buf, unsigned int nbytes)
{
/* max hunk is ARCH_RNG_BUF_SIZE */
if (nbytes > ARCH_RNG_BUF_SIZE)
return false;
/* lock rng buffer */
if (!spin_trylock(&arch_rng_lock))
return false;

View File

@ -557,7 +557,7 @@ void show_code(struct pt_regs *regs)
void print_fn_code(unsigned char *code, unsigned long len)
{
char buffer[64], *ptr;
char buffer[128], *ptr;
int opsize, i;
while (len) {

View File

@ -40,6 +40,7 @@ REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -ffreestanding
REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -fno-stack-protector)
REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member)
REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
REALMODE_CFLAGS += $(CLANG_FLAGS)
export REALMODE_CFLAGS
# BITS is used as extension for files which are available in a 32 bit

View File

@ -1826,7 +1826,7 @@ static void setup_getcpu(int cpu)
unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu));
struct desc_struct d = { };
if (boot_cpu_has(X86_FEATURE_RDTSCP))
if (boot_cpu_has(X86_FEATURE_RDTSCP) || boot_cpu_has(X86_FEATURE_RDPID))
write_rdtscp_aux(cpudata);
/* Store CPU and node number in limit. */

View File

@ -568,7 +568,7 @@ void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm)
{
struct crypto_alg *alg;
if (unlikely(!mem))
if (IS_ERR_OR_NULL(mem))
return;
alg = tfm->__crt_alg;
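
The switch from a plain NULL check to IS_ERR_OR_NULL() matters because kernel APIs often hand back ERR_PTR() values: non-NULL pointers that encode a small negative errno in the top 4095 addresses. A minimal standalone sketch of that encoding, re-implemented here purely for illustration (this is not the kernel's err.h):

#include <stdio.h>

#define MAX_ERRNO 4095

/* illustrative re-implementations of ERR_PTR()/IS_ERR()-style helpers */
static void *err_ptr(long error)            { return (void *)error; }
static int   is_err(const void *p)          { return (unsigned long)p >= (unsigned long)-MAX_ERRNO; }
static int   is_err_or_null(const void *p)  { return !p || is_err(p); }

int main(void)
{
	int valid = 0;

	printf("NULL      -> %d\n", is_err_or_null(NULL));          /* 1 */
	printf("ERR_PTR   -> %d\n", is_err_or_null(err_ptr(-12)));  /* 1, i.e. -ENOMEM */
	printf("valid ptr -> %d\n", is_err_or_null(&valid));        /* 0 */
	return 0;
}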

View File

@ -34,22 +34,18 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
u8 *buf = NULL;
int err;
crypto_stats_get(alg);
if (!seed && slen) {
buf = kmalloc(slen, GFP_KERNEL);
if (!buf) {
crypto_alg_put(alg);
if (!buf)
return -ENOMEM;
}
err = get_random_bytes_wait(buf, slen);
if (err) {
crypto_alg_put(alg);
if (err)
goto out;
}
seed = buf;
}
crypto_stats_get(alg);
err = crypto_rng_alg(tfm)->seed(tfm, seed, slen);
crypto_stats_rng_seed(alg, err);
out:

View File

@ -329,7 +329,7 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
int index)
{
struct platform_device *pdev;
int irq = map_gt_gsi(wd->timer_interrupt, wd->timer_flags);
int irq;
/*
* According to SBSA specification the size of refresh and control
@ -338,7 +338,7 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
struct resource res[] = {
DEFINE_RES_MEM(wd->control_frame_address, SZ_4K),
DEFINE_RES_MEM(wd->refresh_frame_address, SZ_4K),
DEFINE_RES_IRQ(irq),
{},
};
int nr_res = ARRAY_SIZE(res);
@ -348,10 +348,11 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
if (!(wd->refresh_frame_address && wd->control_frame_address)) {
pr_err(FW_BUG "failed to get the Watchdog base address.\n");
acpi_unregister_gsi(wd->timer_interrupt);
return -EINVAL;
}
irq = map_gt_gsi(wd->timer_interrupt, wd->timer_flags);
res[2] = (struct resource)DEFINE_RES_IRQ(irq);
if (irq <= 0) {
pr_warn("failed to map the Watchdog interrupt.\n");
nr_res--;
@ -364,7 +365,8 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
*/
pdev = platform_device_register_simple("sbsa-gwdt", index, res, nr_res);
if (IS_ERR(pdev)) {
acpi_unregister_gsi(wd->timer_interrupt);
if (irq > 0)
acpi_unregister_gsi(wd->timer_interrupt);
return PTR_ERR(pdev);
}

View File

@ -44,6 +44,8 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
sizeof(struct acpi_table_header)))
return -EFAULT;
uncopied_bytes = max_size = table.length;
/* make sure the buf is not allocated */
kfree(buf);
buf = kzalloc(max_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@ -57,6 +59,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
(*ppos + count < count) ||
(count > uncopied_bytes)) {
kfree(buf);
buf = NULL;
return -EINVAL;
}
@ -78,7 +81,6 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
}
kfree(buf);
return count;
}

View File

@ -1728,6 +1728,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
#ifdef CONFIG_ARM64
if (pdev->vendor == PCI_VENDOR_ID_HUAWEI &&
pdev->device == 0xa235 &&
pdev->revision < 0x30)
hpriv->flags |= AHCI_HFLAG_NO_SXS;
if (pdev->vendor == 0x177d && pdev->device == 0xa01c)
hpriv->irq_handler = ahci_thunderx_irq_handler;
#endif

View File

@ -242,6 +242,7 @@ enum {
suspend/resume */
AHCI_HFLAG_IGN_NOTSUPP_POWER_ON = (1 << 27), /* ignore -EOPNOTSUPP
from phy_power_on() */
AHCI_HFLAG_NO_SXS = (1 << 28), /* SXS not supported */
/* ap->flags bits */

View File

@ -493,6 +493,11 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
cap |= HOST_CAP_ALPM;
}
if ((cap & HOST_CAP_SXS) && (hpriv->flags & AHCI_HFLAG_NO_SXS)) {
dev_info(dev, "controller does not support SXS, disabling CAP_SXS\n");
cap &= ~HOST_CAP_SXS;
}
if (hpriv->force_port_map && port_map != hpriv->force_port_map) {
dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
port_map, hpriv->force_port_map);

View File

@ -602,6 +602,51 @@ static int sysc_parse_and_check_child_range(struct sysc *ddata)
return 0;
}
/* Interconnect instances to probe before l4_per instances */
static struct resource early_bus_ranges[] = {
/* am3/4 l4_wkup */
{ .start = 0x44c00000, .end = 0x44c00000 + 0x300000, },
/* omap4/5 and dra7 l4_cfg */
{ .start = 0x4a000000, .end = 0x4a000000 + 0x300000, },
/* omap4 l4_wkup */
{ .start = 0x4a300000, .end = 0x4a300000 + 0x30000, },
/* omap5 and dra7 l4_wkup without dra7 dcan segment */
{ .start = 0x4ae00000, .end = 0x4ae00000 + 0x30000, },
};
static atomic_t sysc_defer = ATOMIC_INIT(10);
/**
* sysc_defer_non_critical - defer non_critical interconnect probing
* @ddata: device driver data
*
* We want to probe l4_cfg and l4_wkup interconnect instances before any
* l4_per instances as l4_per instances depend on resources on l4_cfg and
* l4_wkup interconnects.
*/
static int sysc_defer_non_critical(struct sysc *ddata)
{
struct resource *res;
int i;
if (!atomic_read(&sysc_defer))
return 0;
for (i = 0; i < ARRAY_SIZE(early_bus_ranges); i++) {
res = &early_bus_ranges[i];
if (ddata->module_pa >= res->start &&
ddata->module_pa <= res->end) {
atomic_set(&sysc_defer, 0);
return 0;
}
}
atomic_dec_if_positive(&sysc_defer);
return -EPROBE_DEFER;
}
static struct device_node *stdout_path;
static void sysc_init_stdout_path(struct sysc *ddata)
@ -826,6 +871,10 @@ static int sysc_map_and_check_registers(struct sysc *ddata)
if (error)
return error;
error = sysc_defer_non_critical(ddata);
if (error)
return error;
sysc_check_children(ddata);
error = sysc_parse_registers(ddata);

View File

@ -107,6 +107,9 @@ void tpm_bios_log_setup(struct tpm_chip *chip)
int log_version;
int rc = 0;
if (chip->flags & TPM_CHIP_FLAG_VIRTUAL)
return;
rc = tpm_read_log(chip);
if (rc < 0)
return;

View File

@ -17,6 +17,7 @@ int tpm_read_log_efi(struct tpm_chip *chip)
{
struct efi_tcg2_final_events_table *final_tbl = NULL;
int final_events_log_size = efi_tpm_final_log_size;
struct linux_efi_tpm_eventlog *log_tbl;
struct tpm_bios_log *log;
u32 log_size;
@ -66,12 +67,12 @@ int tpm_read_log_efi(struct tpm_chip *chip)
ret = tpm_log_version;
if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR ||
efi_tpm_final_log_size == 0 ||
final_events_log_size == 0 ||
tpm_log_version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
goto out;
final_tbl = memremap(efi.tpm_final_log,
sizeof(*final_tbl) + efi_tpm_final_log_size,
sizeof(*final_tbl) + final_events_log_size,
MEMREMAP_WB);
if (!final_tbl) {
pr_err("Could not map UEFI TPM final log\n");
@ -80,10 +81,18 @@ int tpm_read_log_efi(struct tpm_chip *chip)
goto out;
}
efi_tpm_final_log_size -= log_tbl->final_events_preboot_size;
/*
* The 'final events log' size excludes the 'final events preboot log'
* at its beginning.
*/
final_events_log_size -= log_tbl->final_events_preboot_size;
/*
* Allocate memory for the 'combined log' where we will append the
* 'final events log' to.
*/
tmp = krealloc(log->bios_event_log,
log_size + efi_tpm_final_log_size,
log_size + final_events_log_size,
GFP_KERNEL);
if (!tmp) {
kfree(log->bios_event_log);
@ -94,15 +103,19 @@ int tpm_read_log_efi(struct tpm_chip *chip)
log->bios_event_log = tmp;
/*
* Copy any of the final events log that didn't also end up in the
* main log. Events can be logged in both if events are generated
* Append any of the 'final events log' that didn't also end up in the
* 'main log'. Events can be logged in both if events are generated
* between GetEventLog() and ExitBootServices().
*/
memcpy((void *)log->bios_event_log + log_size,
final_tbl->events + log_tbl->final_events_preboot_size,
efi_tpm_final_log_size);
final_events_log_size);
/*
* The size of the 'combined log' is the size of the 'main log' plus
* the size of the 'final events log'.
*/
log->bios_event_log_end = log->bios_event_log +
log_size + efi_tpm_final_log_size;
log_size + final_events_log_size;
out:
memunmap(final_tbl);

View File

@ -146,6 +146,7 @@ static void __init __socfpga_gate_init(struct device_node *node,
if (IS_ERR(socfpga_clk->sys_mgr_base_addr)) {
pr_err("%s: failed to find altr,sys-mgr regmap!\n",
__func__);
kfree(socfpga_clk);
return;
}
}

View File

@ -103,7 +103,7 @@ static int omap_aes_hw_init(struct omap_aes_dev *dd)
dd->err = 0;
}
err = pm_runtime_get_sync(dd->dev);
err = pm_runtime_resume_and_get(dd->dev);
if (err < 0) {
dev_err(dd->dev, "failed to get sync: %d\n", err);
return err;
@ -1153,7 +1153,7 @@ static int omap_aes_probe(struct platform_device *pdev)
pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
pm_runtime_enable(dev);
err = pm_runtime_get_sync(dev);
err = pm_runtime_resume_and_get(dev);
if (err < 0) {
dev_err(dev, "%s: failed to get_sync(%d)\n",
__func__, err);
@ -1318,7 +1318,7 @@ static int omap_aes_suspend(struct device *dev)
static int omap_aes_resume(struct device *dev)
{
pm_runtime_get_sync(dev);
pm_runtime_resume_and_get(dev);
return 0;
}
#endif
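
The pm_runtime_get_sync() -> pm_runtime_resume_and_get() conversions repeated below share one motivation: get_sync() raises the device usage count even when the resume fails, so every caller had to remember to drop it on the error path, whereas resume_and_get() does that internally. A hedged kernel-context sketch of the two caller patterns (generic driver code, not taken from this tree):

#include <linux/pm_runtime.h>

/* old pattern: a failed get_sync() still leaves the usage count raised,
 * so strictly the error path should also call pm_runtime_put_noidle() */
static int old_style(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}
	return 0;
}

/* new pattern: the helper drops the count on failure before returning */
static int new_style(struct device *dev)
{
	int ret = pm_runtime_resume_and_get(dev);

	if (ret < 0)
		return ret;
	/* ... do the work, then pm_runtime_put_autosuspend(dev) or similar ... */
	return 0;
}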

View File

@ -715,7 +715,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
struct qat_alg_buf_list *bufl;
struct qat_alg_buf_list *buflout = NULL;
dma_addr_t blp;
dma_addr_t bloutp = 0;
dma_addr_t bloutp;
struct scatterlist *sg;
size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
@ -727,6 +727,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
if (unlikely(!bufl))
return -ENOMEM;
for_each_sg(sgl, sg, n, i)
bufl->bufers[i].addr = DMA_MAPPING_ERROR;
blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, blp)))
goto err_in;
@ -760,10 +763,14 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
dev_to_node(&GET_DEV(inst->accel_dev)));
if (unlikely(!buflout))
goto err_in;
bufers = buflout->bufers;
for_each_sg(sglout, sg, n, i)
bufers[i].addr = DMA_MAPPING_ERROR;
bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, bloutp)))
goto err_out;
bufers = buflout->bufers;
for_each_sg(sglout, sg, n, i) {
int y = sg_nctr;

View File

@ -537,7 +537,7 @@ static int stm32_cryp_hw_init(struct stm32_cryp *cryp)
int ret;
u32 cfg, hw_mode;
pm_runtime_get_sync(cryp->dev);
pm_runtime_resume_and_get(cryp->dev);
/* Disable interrupt */
stm32_cryp_write(cryp, CRYP_IMSCR, 0);
@ -2054,7 +2054,7 @@ static int stm32_cryp_remove(struct platform_device *pdev)
if (!cryp)
return -ENODEV;
ret = pm_runtime_get_sync(cryp->dev);
ret = pm_runtime_resume_and_get(cryp->dev);
if (ret < 0)
return ret;

View File

@ -810,7 +810,7 @@ static void stm32_hash_finish_req(struct ahash_request *req, int err)
static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
struct stm32_hash_request_ctx *rctx)
{
pm_runtime_get_sync(hdev->dev);
pm_runtime_resume_and_get(hdev->dev);
if (!(HASH_FLAGS_INIT & hdev->flags)) {
stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
@ -959,7 +959,7 @@ static int stm32_hash_export(struct ahash_request *req, void *out)
u32 *preg;
unsigned int i;
pm_runtime_get_sync(hdev->dev);
pm_runtime_resume_and_get(hdev->dev);
while ((stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY))
cpu_relax();
@ -997,7 +997,7 @@ static int stm32_hash_import(struct ahash_request *req, const void *in)
preg = rctx->hw_context;
pm_runtime_get_sync(hdev->dev);
pm_runtime_resume_and_get(hdev->dev);
stm32_hash_write(hdev, HASH_IMR, *preg++);
stm32_hash_write(hdev, HASH_STR, *preg++);
@ -1553,7 +1553,7 @@ static int stm32_hash_remove(struct platform_device *pdev)
if (!hdev)
return -ENODEV;
ret = pm_runtime_get_sync(hdev->dev);
ret = pm_runtime_resume_and_get(hdev->dev);
if (ret < 0)
return ret;

View File

@ -597,7 +597,7 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
struct arizona *arizona = info->arizona;
int id_gpio = arizona->pdata.hpdet_id_gpio;
unsigned int report = EXTCON_JACK_HEADPHONE;
int ret, reading;
int ret, reading, state;
bool mic = false;
mutex_lock(&info->lock);
@ -610,12 +610,11 @@ static irqreturn_t arizona_hpdet_irq(int irq, void *data)
}
/* If the cable was removed while measuring ignore the result */
ret = extcon_get_state(info->edev, EXTCON_MECHANICAL);
if (ret < 0) {
dev_err(arizona->dev, "Failed to check cable state: %d\n",
ret);
state = extcon_get_state(info->edev, EXTCON_MECHANICAL);
if (state < 0) {
dev_err(arizona->dev, "Failed to check cable state: %d\n", state);
goto out;
} else if (!ret) {
} else if (!state) {
dev_dbg(arizona->dev, "Ignoring HPDET for removed cable\n");
goto done;
}
@ -668,7 +667,7 @@ done:
ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
/* If we have a mic then reenable MICDET */
if (mic || info->mic)
if (state && (mic || info->mic))
arizona_start_mic(info);
if (info->hpdet_active) {
@ -676,7 +675,9 @@ done:
info->hpdet_active = false;
}
info->hpdet_done = true;
/* Do not set hp_det done when the cable has been unplugged */
if (state)
info->hpdet_done = true;
out:
mutex_unlock(&info->lock);
@ -1724,25 +1725,6 @@ static int arizona_extcon_remove(struct platform_device *pdev)
bool change;
int ret;
ret = regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
ARIZONA_MICD_ENA, 0,
&change);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to disable micd on remove: %d\n",
ret);
} else if (change) {
regulator_disable(info->micvdd);
pm_runtime_put(info->dev);
}
gpiod_put(info->micd_pol_gpio);
pm_runtime_disable(&pdev->dev);
regmap_update_bits(arizona->regmap,
ARIZONA_MICD_CLAMP_CONTROL,
ARIZONA_MICD_CLAMP_MODE_MASK, 0);
if (info->micd_clamp) {
jack_irq_rise = ARIZONA_IRQ_MICD_CLAMP_RISE;
jack_irq_fall = ARIZONA_IRQ_MICD_CLAMP_FALL;
@ -1758,10 +1740,31 @@ static int arizona_extcon_remove(struct platform_device *pdev)
arizona_free_irq(arizona, jack_irq_rise, info);
arizona_free_irq(arizona, jack_irq_fall, info);
cancel_delayed_work_sync(&info->hpdet_work);
cancel_delayed_work_sync(&info->micd_detect_work);
cancel_delayed_work_sync(&info->micd_timeout_work);
ret = regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
ARIZONA_MICD_ENA, 0,
&change);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to disable micd on remove: %d\n",
ret);
} else if (change) {
regulator_disable(info->micvdd);
pm_runtime_put(info->dev);
}
regmap_update_bits(arizona->regmap,
ARIZONA_MICD_CLAMP_CONTROL,
ARIZONA_MICD_CLAMP_MODE_MASK, 0);
regmap_update_bits(arizona->regmap, ARIZONA_JACK_DETECT_ANALOGUE,
ARIZONA_JD1_ENA, 0);
arizona_clk32k_disable(arizona);
gpiod_put(info->micd_pol_gpio);
pm_runtime_disable(&pdev->dev);
return 0;
}

View File

@ -469,7 +469,7 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
if (!src)
if (!src || !src->funcs || !src->funcs->set)
continue;
for (k = 0; k < src->num_types; k++)
amdgpu_irq_update(adev, src, k);

View File

@ -984,7 +984,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
/* double check that we don't free the table twice */
if (!ttm->sg->sgl)
if (!ttm->sg || !ttm->sg->sgl)
return;
/* unmap the pages mapped to the device */

View File

@ -231,7 +231,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
if ((adev->asic_type == CHIP_POLARIS10 ||
adev->asic_type == CHIP_POLARIS11) &&
(adev->uvd.fw_version < FW_1_66_16))
DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
DRM_ERROR("POLARIS10/11 UVD firmware version %u.%u is too old.\n",
version_major, version_minor);
} else {
unsigned int enc_major, enc_minor, dec_minor;

View File

@ -311,15 +311,22 @@ int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_dev
}
/*
* NOTE psp_xgmi_node_info.num_hops layout is as follows:
* num_hops[7:6] = link type (0 = xGMI2, 1 = xGMI3, 2/3 = reserved)
* num_hops[5:3] = reserved
* num_hops[2:0] = number of hops
*/
int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev)
{
struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
uint8_t num_hops_mask = 0x7;
int i;
for (i = 0 ; i < top->num_nodes; ++i)
if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
return top->nodes[i].num_hops;
return top->nodes[i].num_hops & num_hops_mask;
return -EINVAL;
}
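
The new comment documents num_hops as a packed byte, which is why the return value is now masked with 0x7 instead of handed back raw. A standalone sketch of pulling the two documented fields out of such a byte (hypothetical value, illustration only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t num_hops = 0x42;                     /* hypothetical raw value */
	unsigned hops      = num_hops & 0x7;         /* bits [2:0]: hop count */
	unsigned link_type = (num_hops >> 6) & 0x3;  /* bits [7:6]: 0 = xGMI2, 1 = xGMI3 */

	printf("hops=%u link_type=%u\n", hops, link_type);  /* hops=2 link_type=1 */
	return 0;
}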

View File

@ -33,6 +33,11 @@ static int kfd_debugfs_open(struct inode *inode, struct file *file)
return single_open(file, show, NULL);
}
static int kfd_debugfs_hang_hws_read(struct seq_file *m, void *data)
{
seq_printf(m, "echo gpu_id > hang_hws\n");
return 0;
}
static ssize_t kfd_debugfs_hang_hws_write(struct file *file,
const char __user *user_buf, size_t size, loff_t *ppos)
@ -94,7 +99,7 @@ void kfd_debugfs_init(void)
debugfs_create_file("rls", S_IFREG | 0444, debugfs_root,
kfd_debugfs_rls_by_device, &kfd_debugfs_fops);
debugfs_create_file("hang_hws", S_IFREG | 0200, debugfs_root,
NULL, &kfd_debugfs_hang_hws_fops);
kfd_debugfs_hang_hws_read, &kfd_debugfs_hang_hws_fops);
}
void kfd_debugfs_fini(void)

View File

@ -1011,6 +1011,9 @@ static int set_sched_resources(struct device_queue_manager *dqm)
static int initialize_cpsch(struct device_queue_manager *dqm)
{
uint64_t num_sdma_queues;
uint64_t num_xgmi_sdma_queues;
pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
mutex_init(&dqm->lock_hidden);
@ -1019,8 +1022,18 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
dqm->sdma_queue_count = 0;
dqm->xgmi_sdma_queue_count = 0;
dqm->active_runlist = false;
dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
num_sdma_queues = get_num_sdma_queues(dqm);
if (num_sdma_queues >= BITS_PER_TYPE(dqm->sdma_bitmap))
dqm->sdma_bitmap = ULLONG_MAX;
else
dqm->sdma_bitmap = (BIT_ULL(num_sdma_queues) - 1);
num_xgmi_sdma_queues = get_num_xgmi_sdma_queues(dqm);
if (num_xgmi_sdma_queues >= BITS_PER_TYPE(dqm->xgmi_sdma_bitmap))
dqm->xgmi_sdma_bitmap = ULLONG_MAX;
else
dqm->xgmi_sdma_bitmap = (BIT_ULL(num_xgmi_sdma_queues) - 1);
INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
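
The rewritten bitmap setup avoids "~0ULL >> (64 - n)", which is undefined behaviour when n is 0 (a shift by the full 64-bit width), and also clamps n at 64. A standalone sketch of the same guard:

#include <stdint.h>
#include <stdio.h>

/* mask with the low n bits set; safe for n == 0 and n >= 64,
 * unlike the plain "~0ULL >> (64 - n)" expression */
static uint64_t low_bits(unsigned n)
{
	if (n >= 64)
		return UINT64_MAX;
	return ((uint64_t)1 << n) - 1;
}

int main(void)
{
	printf("%016llx\n", (unsigned long long)low_bits(0));   /* 0000000000000000 */
	printf("%016llx\n", (unsigned long long)low_bits(8));   /* 00000000000000ff */
	printf("%016llx\n", (unsigned long long)low_bits(64));  /* ffffffffffffffff */
	return 0;
}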

View File

@ -5372,10 +5372,6 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
int x, y;
int xorigin = 0, yorigin = 0;
position->enable = false;
position->x = 0;
position->y = 0;
if (!crtc || !plane->state->fb)
return 0;
@ -5427,7 +5423,7 @@ static void handle_cursor_update(struct drm_plane *plane,
struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
uint64_t address = afb ? afb->address : 0;
struct dc_cursor_position position;
struct dc_cursor_position position = {0};
struct dc_cursor_attributes attributes;
int ret;
@ -7330,7 +7326,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (adev->asic_type >= CHIP_NAVI10) {
if (dc_resource_is_dsc_encoding_supported(dc)) {
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
ret = add_affected_mst_dsc_crtcs(state, crtc);

View File

@ -1961,7 +1961,8 @@ static void commit_planes_do_stream_update(struct dc *dc,
if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
dc->hwss.optimize_bandwidth(dc, dc->current_state);
dc->optimized_required = true;
} else {
if (!dc->optimize_seamless_boot)
dc->hwss.prepare_bandwidth(dc, dc->current_state);

View File

@ -3435,6 +3435,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.DCCEnabledInAnyPlane = true;
}
}
mode_lib->vba.UrgentLatency = mode_lib->vba.UrgentLatencyPixelDataOnly;
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
locals->FabricAndDRAMBandwidthPerState[i] = dml_min(
mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels

View File

@ -3467,6 +3467,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.DCCEnabledInAnyPlane = true;
}
}
mode_lib->vba.UrgentLatency = mode_lib->vba.UrgentLatencyPixelDataOnly;
for (i = 0; i <= mode_lib->vba.soc.num_states; i++) {
locals->FabricAndDRAMBandwidthPerState[i] = dml_min(
mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels

View File

@ -84,6 +84,13 @@ static const struct drm_dmi_panel_orientation_data itworks_tw891 = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
static const struct drm_dmi_panel_orientation_data onegx1_pro = {
.width = 1200,
.height = 1920,
.bios_dates = (const char * const []){ "12/17/2020", NULL },
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
static const struct drm_dmi_panel_orientation_data lcd720x1280_rightside_up = {
.width = 720,
.height = 1280,
@ -211,6 +218,13 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* OneGX1 Pro */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SYSTEM_MANUFACTURER"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SYSTEM_PRODUCT_NAME"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Default string"),
},
.driver_data = (void *)&onegx1_pro,
}, { /* VIOS LTH17 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"),

View File

@ -2966,7 +2966,7 @@ int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
const char *name,
const u16 wm[8])
const u16 wm[])
{
int level, max_level = ilk_wm_max_level(dev_priv);

View File

@ -41,7 +41,7 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
{
struct mdp5_kms *mdp5_kms = get_kms(encoder);
struct device *dev = encoder->dev->dev;
u32 total_lines_x100, vclks_line, cfg;
u32 total_lines, vclks_line, cfg;
long vsync_clk_speed;
struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
int pp_id = mixer->pp;
@ -51,8 +51,8 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
return -EINVAL;
}
total_lines_x100 = mode->vtotal * drm_mode_vrefresh(mode);
if (!total_lines_x100) {
total_lines = mode->vtotal * drm_mode_vrefresh(mode);
if (!total_lines) {
DRM_DEV_ERROR(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
__func__, mode->vtotal, drm_mode_vrefresh(mode));
return -EINVAL;
@ -64,15 +64,23 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
vsync_clk_speed);
return -EINVAL;
}
vclks_line = vsync_clk_speed * 100 / total_lines_x100;
vclks_line = vsync_clk_speed / total_lines;
cfg = MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN
| MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN;
cfg |= MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(vclks_line);
/*
* Tearcheck emits a blanking signal every vclks_line * vtotal * 2 ticks on
* the vsync_clk equating to roughly half the desired panel refresh rate.
* This is only necessary as stability fallback if interrupts from the
* panel arrive too late or not at all, but is currently used by default
* because these panel interrupts are not wired up yet.
*/
mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_CONFIG_VSYNC(pp_id), cfg);
mdp5_write(mdp5_kms,
REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), 0xfff0);
REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), (2 * mode->vtotal));
mdp5_write(mdp5_kms,
REG_MDP5_PP_VSYNC_INIT_VAL(pp_id), mode->vdisplay);
mdp5_write(mdp5_kms, REG_MDP5_PP_RD_PTR_IRQ(pp_id), mode->vdisplay + 1);
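
With the simplified math, vclks_line is simply the number of vsync-clock ticks per display line, and programming SYNC_CONFIG_HEIGHT to 2 * vtotal makes the fallback counter fire roughly every two frames, as the new comment explains. A standalone sketch of the arithmetic with hypothetical clock and mode values (not taken from the diff):

#include <stdio.h>

int main(void)
{
	unsigned long vsync_clk_hz = 19200000;  /* hypothetical 19.2 MHz vsync clock */
	unsigned int  vtotal       = 1680;      /* hypothetical total lines per frame */
	unsigned int  vrefresh     = 60;        /* hypothetical refresh rate, Hz */

	unsigned long total_lines = (unsigned long)vtotal * vrefresh;  /* lines per second */
	unsigned long vclks_line  = vsync_clk_hz / total_lines;        /* clock ticks per line */

	printf("lines per second: %lu\n", total_lines);  /* 100800 */
	printf("vclks per line:   %lu\n", vclks_line);   /* 190 */
	printf("fallback period:  %u lines (about 2 frames)\n", 2 * vtotal);
	return 0;
}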

View File

@ -1237,6 +1237,10 @@ int qxl_modeset_init(struct qxl_device *qdev)
void qxl_modeset_fini(struct qxl_device *qdev)
{
if (qdev->dumb_shadow_bo) {
drm_gem_object_put(&qdev->dumb_shadow_bo->tbo.base);
qdev->dumb_shadow_bo = NULL;
}
qxl_destroy_monitors_object(qdev);
drm_mode_config_cleanup(&qdev->ddev);
}

View File

@ -20,7 +20,8 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
output->period_ns);
WARN_ON(ret_overrun != 1);
if (ret_overrun != 1)
pr_warn("%s: vblank timer overrun\n", __func__);
ret = drm_crtc_handle_vblank(crtc);
if (!ret)

View File

@ -543,7 +543,7 @@ static void intel_th_gth_disable(struct intel_th_device *thdev,
output->active = false;
for_each_set_bit(master, gth->output[output->port].master,
TH_CONFIGURABLE_MASTERS) {
TH_CONFIGURABLE_MASTERS + 1) {
gth_master_set(gth, master, -1);
}
spin_unlock(&gth->gth_lock);
@ -697,7 +697,7 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
othdev->output.port = -1;
othdev->output.active = false;
gth->output[port].output = NULL;
for (master = 0; master <= TH_CONFIGURABLE_MASTERS; master++)
for (master = 0; master < TH_CONFIGURABLE_MASTERS + 1; master++)
if (gth->master[master] == port)
gth->master[master] = -1;
spin_unlock(&gth->gth_lock);
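
Both hunks fix the same off-by-one: master IDs run from 0 up to and including TH_CONFIGURABLE_MASTERS, so the bit scan and the array walk need a size of TH_CONFIGURABLE_MASTERS + 1. A trivial standalone illustration (the 255 here is a hypothetical stand-in for the real constant):

#include <stdio.h>

#define CONFIGURABLE_MASTERS 255  /* hypothetical: valid IDs are 0..CONFIGURABLE_MASTERS */

int main(void)
{
	int old_count = 0, new_count = 0, m;

	for (m = 0; m < CONFIGURABLE_MASTERS; m++)      /* old bound: skips the last ID */
		old_count++;
	for (m = 0; m < CONFIGURABLE_MASTERS + 1; m++)  /* fixed bound: covers it */
		new_count++;

	printf("%d vs %d masters visited\n", old_count, new_count);  /* 255 vs 256 */
	return 0;
}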

View File

@ -268,6 +268,16 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1bcc),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
/* Alder Lake-M */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x54a6),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
/* Rocket Lake CPU */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4c19),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{ 0 },
};

View File

@ -109,7 +109,7 @@ static bool ili210x_touchdata_to_coords(struct ili210x *priv, u8 *touchdata,
if (finger >= ILI210X_TOUCHES)
return false;
if (touchdata[0] & BIT(finger))
if (!(touchdata[0] & BIT(finger)))
return false;
*x = get_unaligned_be16(touchdata + 1 + (finger * 4) + 0);

View File

@ -621,6 +621,10 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
irqnr = gic_read_iar();
/* Check for special IDs first */
if ((irqnr >= 1020 && irqnr <= 1023))
return;
if (gic_supports_nmi() &&
unlikely(gic_read_rpr() == GICD_INT_NMI_PRI)) {
gic_handle_nmi(irqnr, regs);
@ -632,10 +636,6 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
gic_arch_enable_irqs();
}
/* Check for special IDs first */
if ((irqnr >= 1020 && irqnr <= 1023))
return;
/* Treat anything but SGIs in a uniform way */
if (likely(irqnr > 15)) {
int err;

View File

@ -3762,6 +3762,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
r = -EINVAL;
ti->error = "Invalid bitmap_flush_interval argument";
goto bad;
}
ic->bitmap_flush_interval = msecs_to_jiffies(val);
} else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {

View File

@ -1892,6 +1892,14 @@ static bool rs_takeover_requested(struct raid_set *rs)
return rs->md.new_level != rs->md.level;
}
/* True if layout is set to reshape. */
static bool rs_is_layout_change(struct raid_set *rs, bool use_mddev)
{
return (use_mddev ? rs->md.delta_disks : rs->delta_disks) ||
rs->md.new_layout != rs->md.layout ||
rs->md.new_chunk_sectors != rs->md.chunk_sectors;
}
/* True if @rs is requested to reshape by ctr */
static bool rs_reshape_requested(struct raid_set *rs)
{
@ -1904,9 +1912,7 @@ static bool rs_reshape_requested(struct raid_set *rs)
if (rs_is_raid0(rs))
return false;
change = mddev->new_layout != mddev->layout ||
mddev->new_chunk_sectors != mddev->chunk_sectors ||
rs->delta_disks;
change = rs_is_layout_change(rs, false);
/* Historical case to support raid1 reshape without delta disks */
if (rs_is_raid1(rs)) {
@ -2843,7 +2849,7 @@ static sector_t _get_reshape_sectors(struct raid_set *rs)
}
/*
*
* Reshape:
* - change raid layout
* - change chunk size
* - add disks
@ -2952,6 +2958,20 @@ static int rs_setup_reshape(struct raid_set *rs)
return r;
}
/*
* If the md resync thread has updated superblock with max reshape position
* at the end of a reshape but not (yet) reset the layout configuration
* changes -> reset the latter.
*/
static void rs_reset_inconclusive_reshape(struct raid_set *rs)
{
if (!rs_is_reshaping(rs) && rs_is_layout_change(rs, true)) {
rs_set_cur(rs);
rs->md.delta_disks = 0;
rs->md.reshape_backwards = 0;
}
}
/*
* Enable/disable discard support on RAID set depending on
* RAID level and discard properties of underlying RAID members.
@ -3216,11 +3236,14 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (r)
goto bad;
/* Catch any inconclusive reshape superblock content. */
rs_reset_inconclusive_reshape(rs);
/* Start raid set read-only and assumed clean to change in raid_resume() */
rs->md.ro = 1;
rs->md.in_sync = 1;
/* Keep array frozen */
/* Keep array frozen until resume. */
set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
/* Has to be held on running the array */
@ -3234,7 +3257,6 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
r = md_start(&rs->md);
if (r) {
ti->error = "Failed to start raid array";
mddev_unlock(&rs->md);

View File

@ -572,6 +572,7 @@ out_tag_set:
blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
kfree(md->tag_set);
md->tag_set = NULL;
return err;
}
@ -581,6 +582,7 @@ void dm_mq_cleanup_mapped_device(struct mapped_device *md)
if (md->tag_set) {
blk_mq_free_tag_set(md->tag_set);
kfree(md->tag_set);
md->tag_set = NULL;
}
}

View File

@ -34,12 +34,12 @@ struct node_header {
__le32 max_entries;
__le32 value_size;
__le32 padding;
} __packed;
} __attribute__((packed, aligned(8)));
struct btree_node {
struct node_header header;
__le64 keys[0];
} __packed;
} __attribute__((packed, aligned(8)));
/*

View File

@ -339,6 +339,8 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
*/
begin = do_div(index_begin, ll->entries_per_block);
end = do_div(end, ll->entries_per_block);
if (end == 0)
end = ll->entries_per_block;
for (i = index_begin; i < index_end; i++, begin = 0) {
struct dm_block *blk;
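
do_div() divides in place and returns the remainder, so a remainder of 0 for end means the search range finishes exactly on a block boundary; without the new clamp the last block would be scanned up to entry 0, i.e. not at all. A standalone sketch of that boundary case using plain C division (the kernel's do_div() macro differs in form, not in this arithmetic):

#include <stdio.h>

int main(void)
{
	unsigned long entries_per_block = 126;  /* hypothetical */
	unsigned long end_entry         = 252;  /* lands exactly on a block boundary */

	unsigned long index_end = end_entry / entries_per_block;  /* 2 */
	unsigned long end       = end_entry % entries_per_block;  /* 0 */

	if (end == 0)                 /* the fix: treat "0 remaining" as "a full block" */
		end = entries_per_block;

	printf("index_end=%lu end=%lu\n", index_end, end);  /* index_end=2 end=126 */
	return 0;
}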

View File

@ -33,7 +33,7 @@ struct disk_index_entry {
__le64 blocknr;
__le32 nr_free;
__le32 none_free_before;
} __packed;
} __attribute__ ((packed, aligned(8)));
#define MAX_METADATA_BITMAPS 255
@ -43,7 +43,7 @@ struct disk_metadata_index {
__le64 blocknr;
struct disk_index_entry index[MAX_METADATA_BITMAPS];
} __packed;
} __attribute__ ((packed, aligned(8)));
struct ll_disk;
@ -86,7 +86,7 @@ struct disk_sm_root {
__le64 nr_allocated;
__le64 bitmap_root;
__le64 ref_count_root;
} __packed;
} __attribute__ ((packed, aligned(8)));
#define ENTRIES_PER_BYTE 4
@ -94,7 +94,7 @@ struct disk_bitmap_header {
__le32 csum;
__le32 not_used;
__le64 blocknr;
} __packed;
} __attribute__ ((packed, aligned(8)));
enum allocation_event {
SM_NONE,
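
Marking an on-disk struct packed alone also forces its alignment down to 1, which changes how the compiler may access the fields and, for layouts whose natural size is not a multiple of 8, changes sizeof and array strides as well; packed plus aligned(8) pins the alignment back. A standalone illustration with a made-up struct (deliberately not one of the dm metadata structures):

#include <stdint.h>
#include <stdio.h>

struct hdr_packed {
	uint32_t csum;
	uint32_t flags;
	uint64_t blocknr;
	uint32_t nr_free;
} __attribute__((packed));              /* sizeof == 20, alignment == 1 */

struct hdr_packed_aligned {
	uint32_t csum;
	uint32_t flags;
	uint64_t blocknr;
	uint32_t nr_free;
} __attribute__((packed, aligned(8)));  /* sizeof == 24, alignment == 8 */

int main(void)
{
	printf("packed:          size %zu, align %zu\n",
	       sizeof(struct hdr_packed), _Alignof(struct hdr_packed));
	printf("packed+aligned8: size %zu, align %zu\n",
	       sizeof(struct hdr_packed_aligned), _Alignof(struct hdr_packed_aligned));
	return 0;
}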

View File

@ -458,6 +458,8 @@ static void raid1_end_write_request(struct bio *bio)
if (!test_bit(Faulty, &rdev->flags))
set_bit(R1BIO_WriteError, &r1_bio->state);
else {
/* Fail the request */
set_bit(R1BIO_Degraded, &r1_bio->state);
/* Finished with this branch */
r1_bio->bios[mirror] = NULL;
to_put = bio;

View File

@ -241,6 +241,7 @@ static void dvb_media_device_free(struct dvb_device *dvbdev)
if (dvbdev->adapter->conn) {
media_device_unregister_entity(dvbdev->adapter->conn);
kfree(dvbdev->adapter->conn);
dvbdev->adapter->conn = NULL;
kfree(dvbdev->adapter->conn_pads);
dvbdev->adapter->conn_pads = NULL;

View File

@ -1964,7 +1964,7 @@ static int adv7511_remove(struct i2c_client *client)
adv7511_set_isr(sd, false);
adv7511_init_setup(sd);
cancel_delayed_work(&state->edid_handler);
cancel_delayed_work_sync(&state->edid_handler);
i2c_unregister_device(state->i2c_edid);
i2c_unregister_device(state->i2c_cec);
i2c_unregister_device(state->i2c_pktmem);

View File

@ -3606,7 +3606,7 @@ static int adv76xx_remove(struct i2c_client *client)
io_write(sd, 0x6e, 0);
io_write(sd, 0x73, 0);
cancel_delayed_work(&state->delayed_work_enable_hotplug);
cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
adv76xx_unregister_clients(to_state(sd));

View File

@ -3586,7 +3586,7 @@ static int adv7842_remove(struct i2c_client *client)
struct adv7842_state *state = to_state(sd);
adv7842_irq_enable(sd, false);
cancel_delayed_work(&state->delayed_work_enable_hotplug);
cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
v4l2_device_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
adv7842_unregister_clients(sd);

View File

@ -2192,7 +2192,7 @@ static int tc358743_remove(struct i2c_client *client)
del_timer_sync(&state->timer);
flush_work(&state->work_i2c_poll);
}
cancel_delayed_work(&state->delayed_work_enable_hotplug);
cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
cec_unregister_adapter(state->cec_adap);
v4l2_async_unregister_subdev(sd);
v4l2_device_unregister_subdev(sd);

View File

@ -2804,7 +2804,7 @@ static int tda1997x_remove(struct i2c_client *client)
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(&state->hdl);
regulator_bulk_disable(TDA1997X_NUM_SUPPLIES, state->supplies);
cancel_delayed_work(&state->delayed_work_enable_hpd);
cancel_delayed_work_sync(&state->delayed_work_enable_hpd);
mutex_destroy(&state->page_lock);
mutex_destroy(&state->lock);

View File

@ -1008,7 +1008,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
printk(KERN_ERR "%s() failed (errno = %d), NO PCI configuration\n",
__func__, result);
result = -ENOMEM;
goto failed;
goto fail_pci;
}
/* Establish encoder defaults here */
@ -1062,7 +1062,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
100000, ENCODER_DEF_BITRATE);
if (hdl->error) {
result = hdl->error;
goto failed;
goto fail_hdl;
}
port->std = V4L2_STD_NTSC_M;
@ -1080,7 +1080,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
printk(KERN_INFO "%s: can't allocate mpeg device\n",
dev->name);
result = -ENOMEM;
goto failed;
goto fail_hdl;
}
port->v4l_device->ctrl_handler = hdl;
@ -1091,10 +1091,7 @@ int saa7164_encoder_register(struct saa7164_port *port)
if (result < 0) {
printk(KERN_INFO "%s: can't register mpeg device\n",
dev->name);
/* TODO: We're going to leak here if we don't dealloc
The buffers above. The unreg function can't deal wit it.
*/
goto failed;
goto fail_reg;
}
printk(KERN_INFO "%s: registered device video%d [mpeg]\n",
@ -1116,9 +1113,14 @@ int saa7164_encoder_register(struct saa7164_port *port)
saa7164_api_set_encoder(port);
saa7164_api_get_encoder(port);
return 0;
result = 0;
failed:
fail_reg:
video_device_release(port->v4l_device);
port->v4l_device = NULL;
fail_hdl:
v4l2_ctrl_handler_free(hdl);
fail_pci:
return result;
}

View File

@ -2,6 +2,7 @@
config STA2X11_VIP
tristate "STA2X11 VIP Video For Linux"
depends on STA2X11 || COMPILE_TEST
select GPIOLIB if MEDIA_SUBDRV_AUTOSELECT
select VIDEO_ADV7180 if MEDIA_SUBDRV_AUTOSELECT
select VIDEOBUF2_DMA_CONTIG
depends on PCI && VIDEO_V4L2 && VIRT_TO_BUS

View File

@ -480,7 +480,7 @@ static int regs_show(struct seq_file *s, void *data)
int ret;
unsigned int i;
ret = pm_runtime_get_sync(bdisp->dev);
ret = pm_runtime_resume_and_get(bdisp->dev);
if (ret < 0) {
seq_puts(s, "Cannot wake up IP\n");
return 0;

View File

@ -174,13 +174,13 @@ static const u8 vivid_hdmi_edid[256] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x7b,
0x02, 0x03, 0x3f, 0xf0, 0x51, 0x61, 0x60, 0x5f,
0x02, 0x03, 0x3f, 0xf1, 0x51, 0x61, 0x60, 0x5f,
0x5e, 0x5d, 0x10, 0x1f, 0x04, 0x13, 0x22, 0x21,
0x20, 0x05, 0x14, 0x02, 0x11, 0x01, 0x23, 0x09,
0x07, 0x07, 0x83, 0x01, 0x00, 0x00, 0x6d, 0x03,
0x0c, 0x00, 0x10, 0x00, 0x00, 0x3c, 0x21, 0x00,
0x60, 0x01, 0x02, 0x03, 0x67, 0xd8, 0x5d, 0xc4,
0x01, 0x78, 0x00, 0x00, 0xe2, 0x00, 0xea, 0xe3,
0x01, 0x78, 0x00, 0x00, 0xe2, 0x00, 0xca, 0xe3,
0x05, 0x00, 0x00, 0xe3, 0x06, 0x01, 0x00, 0x4d,
0xd0, 0x00, 0xa0, 0xf0, 0x70, 0x3e, 0x80, 0x30,
0x20, 0x35, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00,
@ -189,7 +189,7 @@ static const u8 vivid_hdmi_edid[256] = {
0x00, 0x00, 0x1a, 0x1a, 0x1d, 0x00, 0x80, 0x51,
0xd0, 0x1c, 0x20, 0x40, 0x80, 0x35, 0x00, 0xc0,
0x1c, 0x32, 0x00, 0x00, 0x1c, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x82,
};
static int vidioc_querycap(struct file *file, void *priv,
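
The three byte changes above hang together: every 128-byte EDID block must sum to 0 modulo 256, and the two payload edits (0xf0 -> 0xf1, 0xea -> 0xca) change the sum by -0x1f, so the trailing checksum moves from 0x63 to 0x82 to compensate. A standalone sketch of recomputing that final byte (stand-in data, not the vivid EDID):

#include <stdint.h>
#include <stdio.h>

/* checksum byte that makes a 128-byte EDID block sum to 0 mod 256 */
static uint8_t edid_checksum(const uint8_t block[128])
{
	unsigned int sum = 0;
	int i;

	for (i = 0; i < 127; i++)  /* every byte except the checksum itself */
		sum += block[i];
	return (uint8_t)(0x100 - (sum & 0xff));
}

int main(void)
{
	uint8_t block[128] = { 0x02, 0x03 };  /* stand-in extension block */

	block[127] = edid_checksum(block);
	printf("checksum byte: 0x%02x\n", block[127]);  /* 0xfb for this stand-in */
	return 0;
}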

View File

@ -276,8 +276,14 @@ static irqreturn_t ite_cir_isr(int irq, void *data)
/* read the interrupt flags */
iflags = dev->params.get_irq_causes(dev);
/* Check for RX overflow */
if (iflags & ITE_IRQ_RX_FIFO_OVERRUN) {
dev_warn(&dev->rdev->dev, "receive overflow\n");
ir_raw_event_reset(dev->rdev);
}
/* check for the receive interrupt */
if (iflags & (ITE_IRQ_RX_FIFO | ITE_IRQ_RX_FIFO_OVERRUN)) {
if (iflags & ITE_IRQ_RX_FIFO) {
/* read the FIFO bytes */
rx_bytes =
dev->params.get_rx_bytes(dev, rx_buf,

View File

@ -79,11 +79,17 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs)
}
}
if ((ret = dvb_usb_adapter_stream_init(adap)) ||
(ret = dvb_usb_adapter_dvb_init(adap, adapter_nrs)) ||
(ret = dvb_usb_adapter_frontend_init(adap))) {
ret = dvb_usb_adapter_stream_init(adap);
if (ret)
return ret;
}
ret = dvb_usb_adapter_dvb_init(adap, adapter_nrs);
if (ret)
goto dvb_init_err;
ret = dvb_usb_adapter_frontend_init(adap);
if (ret)
goto frontend_init_err;
/* use exclusive FE lock if there is multiple shared FEs */
if (adap->fe_adap[1].fe)
@ -103,6 +109,12 @@ static int dvb_usb_adapter_init(struct dvb_usb_device *d, short *adapter_nrs)
}
return 0;
frontend_init_err:
dvb_usb_adapter_dvb_exit(adap);
dvb_init_err:
dvb_usb_adapter_stream_exit(adap);
return ret;
}
static int dvb_usb_adapter_exit(struct dvb_usb_device *d)
@ -158,22 +170,20 @@ static int dvb_usb_init(struct dvb_usb_device *d, short *adapter_nums)
if (d->props.priv_init != NULL) {
ret = d->props.priv_init(d);
if (ret != 0) {
kfree(d->priv);
d->priv = NULL;
return ret;
}
if (ret != 0)
goto err_priv_init;
}
}
/* check the capabilities and set appropriate variables */
dvb_usb_device_power_ctrl(d, 1);
if ((ret = dvb_usb_i2c_init(d)) ||
(ret = dvb_usb_adapter_init(d, adapter_nums))) {
dvb_usb_exit(d);
return ret;
}
ret = dvb_usb_i2c_init(d);
if (ret)
goto err_i2c_init;
ret = dvb_usb_adapter_init(d, adapter_nums);
if (ret)
goto err_adapter_init;
if ((ret = dvb_usb_remote_init(d)))
err("could not initialize remote control.");
@ -181,6 +191,17 @@ static int dvb_usb_init(struct dvb_usb_device *d, short *adapter_nums)
dvb_usb_device_power_ctrl(d, 0);
return 0;
err_adapter_init:
dvb_usb_adapter_exit(d);
err_i2c_init:
dvb_usb_i2c_exit(d);
if (d->priv && d->props.priv_destroy)
d->props.priv_destroy(d);
err_priv_init:
kfree(d->priv);
d->priv = NULL;
return ret;
}
/* determine the name and the state of the just found USB device */
@ -255,41 +276,50 @@ int dvb_usb_device_init(struct usb_interface *intf,
if (du != NULL)
*du = NULL;
if ((desc = dvb_usb_find_device(udev, props, &cold)) == NULL) {
d = kzalloc(sizeof(*d), GFP_KERNEL);
if (!d) {
err("no memory for 'struct dvb_usb_device'");
return -ENOMEM;
}
memcpy(&d->props, props, sizeof(struct dvb_usb_device_properties));
desc = dvb_usb_find_device(udev, &d->props, &cold);
if (!desc) {
deb_err("something went very wrong, device was not found in current device list - let's see what comes next.\n");
return -ENODEV;
ret = -ENODEV;
goto error;
}
if (cold) {
info("found a '%s' in cold state, will try to load a firmware", desc->name);
ret = dvb_usb_download_firmware(udev, props);
if (!props->no_reconnect || ret != 0)
return ret;
goto error;
}
info("found a '%s' in warm state.", desc->name);
d = kzalloc(sizeof(struct dvb_usb_device), GFP_KERNEL);
if (d == NULL) {
err("no memory for 'struct dvb_usb_device'");
return -ENOMEM;
}
d->udev = udev;
memcpy(&d->props, props, sizeof(struct dvb_usb_device_properties));
d->desc = desc;
d->owner = owner;
usb_set_intfdata(intf, d);
if (du != NULL)
ret = dvb_usb_init(d, adapter_nums);
if (ret) {
info("%s error while loading driver (%d)", desc->name, ret);
goto error;
}
if (du)
*du = d;
ret = dvb_usb_init(d, adapter_nums);
info("%s successfully initialized and connected.", desc->name);
return 0;
if (ret == 0)
info("%s successfully initialized and connected.", desc->name);
else
info("%s error while loading driver (%d)", desc->name, ret);
error:
usb_set_intfdata(intf, NULL);
kfree(d);
return ret;
}
EXPORT_SYMBOL(dvb_usb_device_init);

View File

@ -485,7 +485,7 @@ extern int __must_check
dvb_usb_generic_write(struct dvb_usb_device *, u8 *, u16);
/* commonly used remote control parsing */
extern int dvb_usb_nec_rc_key_to_event(struct dvb_usb_device *, u8[], u32 *, int *);
extern int dvb_usb_nec_rc_key_to_event(struct dvb_usb_device *, u8[5], u32 *, int *);
/* commonly used firmware download types and function */
struct hexline {

View File

@ -1924,6 +1924,7 @@ ret:
return result;
out_free:
em28xx_uninit_usb_xfer(dev, EM28XX_DIGITAL_MODE);
kfree(dvb);
dev->dvb = NULL;
goto ret;

View File

@ -1576,6 +1576,8 @@ out:
#endif
v4l2_ctrl_handler_free(gspca_dev->vdev.ctrl_handler);
v4l2_device_unregister(&gspca_dev->v4l2_dev);
if (sd_desc->probe_error)
sd_desc->probe_error(gspca_dev);
kfree(gspca_dev->usb_buf);
kfree(gspca_dev);
return ret;

View File

@ -105,6 +105,7 @@ struct sd_desc {
cam_cf_op config; /* called on probe */
cam_op init; /* called on probe and resume */
cam_op init_controls; /* called on probe */
cam_v_op probe_error; /* called if probe failed, do cleanup here */
cam_op start; /* called on stream on after URBs creation */
cam_pkt_op pkt_scan;
/* optional operations */

View File

@ -158,7 +158,7 @@ static int
sq905_read_data(struct gspca_dev *gspca_dev, u8 *data, int size, int need_lock)
{
int ret;
int act_len;
int act_len = 0;
gspca_dev->usb_buf[0] = '\0';
if (need_lock)

View File

@ -529,12 +529,21 @@ static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
static int stv06xx_config(struct gspca_dev *gspca_dev,
const struct usb_device_id *id);
static void stv06xx_probe_error(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *)gspca_dev;
kfree(sd->sensor_priv);
sd->sensor_priv = NULL;
}
/* sub-driver description */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
.config = stv06xx_config,
.init = stv06xx_init,
.init_controls = stv06xx_init_controls,
.probe_error = stv06xx_probe_error,
.start = stv06xx_start,
.stopN = stv06xx_stopN,
.pkt_scan = stv06xx_pkt_scan,

View File

@ -100,7 +100,7 @@ static irqreturn_t arizona_irq_thread(int irq, void *data)
unsigned int val;
int ret;
ret = pm_runtime_get_sync(arizona->dev);
ret = pm_runtime_resume_and_get(arizona->dev);
if (ret < 0) {
dev_err(arizona->dev, "Failed to resume device: %d\n", ret);
return IRQ_NONE;

View File

@ -621,6 +621,18 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
main_md->part_curr = value & EXT_CSD_PART_CONFIG_ACC_MASK;
}
/*
* Make sure to update CACHE_CTRL in case it was changed. The cache
* will get turned back on if the card is re-initialized, e.g.
* suspend/resume or hw reset in recovery.
*/
if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_CACHE_CTRL) &&
(cmd.opcode == MMC_SWITCH)) {
u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg) & 1;
card->ext_csd.cache_ctrl = value;
}
/*
* According to the SD specs, some commands require a delay after
* issuing the command.
@ -2215,6 +2227,10 @@ enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
case MMC_ISSUE_ASYNC:
switch (req_op(req)) {
case REQ_OP_FLUSH:
if (!mmc_cache_enabled(host)) {
blk_mq_end_request(req, BLK_STS_OK);
return MMC_REQ_FINISHED;
}
ret = mmc_blk_cqe_issue_flush(mq, req);
break;
case REQ_OP_READ:
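
The new CACHE_CTRL tracking works because the MMC SWITCH command packs the EXT_CSD byte index and the value to write into fixed bit fields of its 32-bit argument, which is what the MMC_EXTRACT_*_FROM_ARG() macros unpack. A standalone sketch of that unpacking, assuming the usual index-in-bits-[23:16] / value-in-bits-[15:8] layout:

#include <stdint.h>
#include <stdio.h>

static unsigned int switch_arg_index(uint32_t arg) { return (arg >> 16) & 0xff; }
static unsigned int switch_arg_value(uint32_t arg) { return (arg >> 8) & 0xff; }

int main(void)
{
	/* hypothetical SWITCH argument: write value 0x01 to EXT_CSD byte 33 (CACHE_CTRL) */
	uint32_t arg = (0x03u << 24) | (33u << 16) | (0x01u << 8);

	printf("index=%u value=%u\n", switch_arg_index(arg), switch_arg_value(arg));
	/* index=33 value=1 */
	return 0;
}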

View File

@ -52,8 +52,6 @@
static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
static int __mmc_max_reserved_idx = -1;
/*
* Enabling software CRCs on the data blocks can be a significant (30%)
* performance cost, and for other reasons may not always be desired.
@ -1223,7 +1221,7 @@ int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr)
err = mmc_wait_for_cmd(host, &cmd, 0);
if (err)
return err;
goto power_cycle;
if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
return -EIO;
@ -2394,120 +2392,10 @@ void mmc_stop_host(struct mmc_host *host)
mmc_release_host(host);
}
#ifdef CONFIG_PM_SLEEP
/* Do the card removal on suspend if card is assumed removeable
* Do that in pm notifier while userspace isn't yet frozen, so we will be able
to sync the card.
*/
static int mmc_pm_notify(struct notifier_block *notify_block,
unsigned long mode, void *unused)
{
struct mmc_host *host = container_of(
notify_block, struct mmc_host, pm_notify);
unsigned long flags;
int err = 0;
switch (mode) {
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
case PM_RESTORE_PREPARE:
spin_lock_irqsave(&host->lock, flags);
host->rescan_disable = 1;
spin_unlock_irqrestore(&host->lock, flags);
cancel_delayed_work_sync(&host->detect);
if (!host->bus_ops)
break;
/* Validate prerequisites for suspend */
if (host->bus_ops->pre_suspend)
err = host->bus_ops->pre_suspend(host);
if (!err)
break;
if (!mmc_card_is_removable(host)) {
dev_warn(mmc_dev(host),
"pre_suspend failed for non-removable host: "
"%d\n", err);
/* Avoid removing non-removable hosts */
break;
}
/* Calling bus_ops->remove() with a claimed host can deadlock */
host->bus_ops->remove(host);
mmc_claim_host(host);
mmc_detach_bus(host);
mmc_power_off(host);
mmc_release_host(host);
host->pm_flags = 0;
break;
case PM_POST_SUSPEND:
case PM_POST_HIBERNATION:
case PM_POST_RESTORE:
spin_lock_irqsave(&host->lock, flags);
host->rescan_disable = 0;
spin_unlock_irqrestore(&host->lock, flags);
_mmc_detect_change(host, 0, false);
}
return 0;
}
void mmc_register_pm_notifier(struct mmc_host *host)
{
host->pm_notify.notifier_call = mmc_pm_notify;
register_pm_notifier(&host->pm_notify);
}
void mmc_unregister_pm_notifier(struct mmc_host *host)
{
unregister_pm_notifier(&host->pm_notify);
}
#endif
/*
* mmc_first_nonreserved_index() - get the first index that
* is not reserved
*/
int mmc_first_nonreserved_index(void)
{
return __mmc_max_reserved_idx + 1;
}
EXPORT_SYMBOL(mmc_first_nonreserved_index);
/*
* mmc_get_reserved_index() - get the index reserved for this host
* Return: The index reserved for this host or negative error value
* if no index is reserved for this host
*/
int mmc_get_reserved_index(struct mmc_host *host)
{
return of_alias_get_id(host->parent->of_node, "mmc");
}
EXPORT_SYMBOL(mmc_get_reserved_index);
static void mmc_of_reserve_idx(void)
{
int max;
max = of_alias_max_index("mmc");
if (max < 0)
return;
__mmc_max_reserved_idx = max;
pr_debug("MMC: reserving %d slots for of aliases\n",
__mmc_max_reserved_idx + 1);
}
static int __init mmc_init(void)
{
int ret;
mmc_of_reserve_idx();
ret = mmc_register_bus();
if (ret)
return ret;

View File

@ -29,6 +29,7 @@ struct mmc_bus_ops {
int (*shutdown)(struct mmc_host *);
int (*hw_reset)(struct mmc_host *);
int (*sw_reset)(struct mmc_host *);
bool (*cache_enabled)(struct mmc_host *);
};
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
@ -94,14 +95,6 @@ int mmc_execute_tuning(struct mmc_card *card);
int mmc_hs200_to_hs400(struct mmc_card *card);
int mmc_hs400_to_hs200(struct mmc_card *card);
#ifdef CONFIG_PM_SLEEP
void mmc_register_pm_notifier(struct mmc_host *host);
void mmc_unregister_pm_notifier(struct mmc_host *host);
#else
static inline void mmc_register_pm_notifier(struct mmc_host *host) { }
static inline void mmc_unregister_pm_notifier(struct mmc_host *host) { }
#endif
void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq);
bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq);
@ -172,4 +165,12 @@ static inline void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
host->ops->post_req(host, mrq, err);
}
static inline bool mmc_cache_enabled(struct mmc_host *host)
{
if (host->bus_ops->cache_enabled)
return host->bus_ops->cache_enabled(host);
return false;
}
#endif
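
The new cache_enabled hook follows the common optional-callback pattern: a small wrapper supplies a safe default (false) whenever a bus type does not implement the op, so callers never poke at the ops table directly. A minimal standalone sketch of the pattern with generic names (not the mmc structures):

#include <stdbool.h>
#include <stdio.h>

struct widget;

struct widget_ops {
	bool (*cache_enabled)(struct widget *w);  /* optional */
};

struct widget {
	const struct widget_ops *ops;
};

/* wrapper with a safe default when the hook is absent */
static bool widget_cache_enabled(struct widget *w)
{
	if (w->ops && w->ops->cache_enabled)
		return w->ops->cache_enabled(w);
	return false;
}

static bool always_on(struct widget *w) { (void)w; return true; }

static const struct widget_ops with_hook    = { .cache_enabled = always_on };
static const struct widget_ops without_hook = { .cache_enabled = NULL };

int main(void)
{
	struct widget a = { .ops = &with_hook };
	struct widget b = { .ops = &without_hook };

	printf("a=%d b=%d\n", widget_cache_enabled(&a), widget_cache_enabled(&b));  /* a=1 b=0 */
	return 0;
}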

View File

@ -33,6 +33,42 @@
static DEFINE_IDA(mmc_host_ida);
#ifdef CONFIG_PM_SLEEP
static int mmc_host_class_prepare(struct device *dev)
{
struct mmc_host *host = cls_dev_to_mmc_host(dev);
/*
* It's safe to access the bus_ops pointer, as both userspace and the
* workqueue for detecting cards are frozen at this point.
*/
if (!host->bus_ops)
return 0;
/* Validate conditions for system suspend. */
if (host->bus_ops->pre_suspend)
return host->bus_ops->pre_suspend(host);
return 0;
}
static void mmc_host_class_complete(struct device *dev)
{
struct mmc_host *host = cls_dev_to_mmc_host(dev);
_mmc_detect_change(host, 0, false);
}
static const struct dev_pm_ops mmc_host_class_dev_pm_ops = {
.prepare = mmc_host_class_prepare,
.complete = mmc_host_class_complete,
};
#define MMC_HOST_CLASS_DEV_PM_OPS (&mmc_host_class_dev_pm_ops)
#else
#define MMC_HOST_CLASS_DEV_PM_OPS NULL
#endif
static void mmc_host_classdev_release(struct device *dev)
{
struct mmc_host *host = cls_dev_to_mmc_host(dev);
@ -43,6 +79,7 @@ static void mmc_host_classdev_release(struct device *dev)
static struct class mmc_host_class = {
.name = "mmc_host",
.dev_release = mmc_host_classdev_release,
.pm = MMC_HOST_CLASS_DEV_PM_OPS,
};
int mmc_register_host_class(void)
@ -294,8 +331,6 @@ int mmc_of_parse(struct mmc_host *host)
if (device_property_read_bool(dev, "wakeup-source") ||
device_property_read_bool(dev, "enable-sdio-wakeup")) /* legacy */
host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
if (device_property_read_bool(dev, "pm-ignore-notify"))
host->pm_caps |= MMC_PM_IGNORE_PM_NOTIFY;
if (device_property_read_bool(dev, "mmc-ddr-3_3v"))
host->caps |= MMC_CAP_3_3V_DDR;
if (device_property_read_bool(dev, "mmc-ddr-1_8v"))
@ -398,7 +433,6 @@ EXPORT_SYMBOL(mmc_of_parse_voltage);
struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
{
int err;
int alias_id;
struct mmc_host *host;
host = kzalloc(sizeof(struct mmc_host) + extra, GFP_KERNEL);
@ -407,16 +441,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
/* scanning will be enabled when we're ready */
host->rescan_disable = 1;
host->parent = dev;
alias_id = mmc_get_reserved_index(host);
if (alias_id >= 0)
err = ida_simple_get(&mmc_host_ida, alias_id,
alias_id + 1, GFP_KERNEL);
else
err = ida_simple_get(&mmc_host_ida,
mmc_first_nonreserved_index(),
0, GFP_KERNEL);
err = ida_simple_get(&mmc_host_ida, 0, 0, GFP_KERNEL);
if (err < 0) {
kfree(host);
return NULL;
@ -426,6 +452,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
dev_set_name(&host->class_dev, "mmc%d", host->index);
host->parent = dev;
host->class_dev.parent = dev;
host->class_dev.class = &mmc_host_class;
device_initialize(&host->class_dev);
@ -487,9 +514,6 @@ int mmc_add_host(struct mmc_host *host)
#endif
mmc_start_host(host);
if (!(host->pm_caps & MMC_PM_IGNORE_PM_NOTIFY))
mmc_register_pm_notifier(host);
return 0;
}
@ -505,8 +529,6 @@ EXPORT_SYMBOL(mmc_add_host);
*/
void mmc_remove_host(struct mmc_host *host)
{
if (!(host->pm_caps & MMC_PM_IGNORE_PM_NOTIFY))
mmc_unregister_pm_notifier(host);
mmc_stop_host(host);
#ifdef CONFIG_DEBUG_FS
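
The hunk above swaps the old PM notifier for class-level dev_pm_ops: .prepare runs on suspend entry once userspace (and the card-detect workqueue) is frozen and can veto the suspend by returning an error, while .complete runs late in resume and re-triggers detection. A hedged kernel-context sketch of wiring such callbacks onto a class, with generic names (the real hook-up is in the diff):

#include <linux/device.h>
#include <linux/pm.h>

static int example_class_prepare(struct device *dev)
{
	/* called during suspend entry; a negative return aborts the suspend */
	return 0;
}

static void example_class_complete(struct device *dev)
{
	/* called late in resume; a good place to kick off re-detection */
}

static const struct dev_pm_ops example_class_pm_ops = {
	.prepare  = example_class_prepare,
	.complete = example_class_complete,
};

static struct class example_class = {
	.name = "example",
	.pm   = &example_class_pm_ops,
};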

View File

@ -2019,6 +2019,12 @@ static void mmc_detect(struct mmc_host *host)
}
}
static bool _mmc_cache_enabled(struct mmc_host *host)
{
return host->card->ext_csd.cache_size > 0 &&
host->card->ext_csd.cache_ctrl & 1;
}
static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
{
int err = 0;
@ -2197,6 +2203,7 @@ static const struct mmc_bus_ops mmc_ops = {
.alive = mmc_alive,
.shutdown = mmc_shutdown,
.hw_reset = _mmc_hw_reset,
.cache_enabled = _mmc_cache_enabled,
};
/*

View File

@ -965,9 +965,7 @@ int mmc_flush_cache(struct mmc_card *card)
{
int err = 0;
if (mmc_card_mmc(card) &&
(card->ext_csd.cache_size > 0) &&
(card->ext_csd.cache_ctrl & 1)) {
if (mmc_cache_enabled(card->host)) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_FLUSH_CACHE, 1, 0);
if (err)

View File

@ -135,6 +135,9 @@ static int mmc_decode_csd(struct mmc_card *card)
csd->erase_size = UNSTUFF_BITS(resp, 39, 7) + 1;
csd->erase_size <<= csd->write_blkbits - 9;
}
if (UNSTUFF_BITS(resp, 13, 1))
mmc_card_set_readonly(card);
break;
case 1:
/*
@ -169,6 +172,9 @@ static int mmc_decode_csd(struct mmc_card *card)
csd->write_blkbits = 9;
csd->write_partial = 0;
csd->erase_size = 1;
if (UNSTUFF_BITS(resp, 13, 1))
mmc_card_set_readonly(card);
break;
default:
pr_err("%s: unrecognised CSD structure version %d\n",

View File

@ -925,21 +925,37 @@ out:
*/
static int mmc_sdio_pre_suspend(struct mmc_host *host)
{
int i, err = 0;
int i;
for (i = 0; i < host->card->sdio_funcs; i++) {
struct sdio_func *func = host->card->sdio_func[i];
if (func && sdio_func_present(func) && func->dev.driver) {
const struct dev_pm_ops *pmops = func->dev.driver->pm;
if (!pmops || !pmops->suspend || !pmops->resume) {
if (!pmops || !pmops->suspend || !pmops->resume)
/* force removal of entire card in that case */
err = -ENOSYS;
break;
}
goto remove;
}
}
return err;
return 0;
remove:
if (!mmc_card_is_removable(host)) {
dev_warn(mmc_dev(host),
"missing suspend/resume ops for non-removable SDIO card\n");
/* Don't remove a non-removable card - we can't re-detect it. */
return 0;
}
/* Remove the SDIO card and let it be re-detected later on. */
mmc_sdio_remove(host);
mmc_claim_host(host);
mmc_detach_bus(host);
mmc_power_off(host);
mmc_release_host(host);
host->pm_flags = 0;
return 0;
}
/*

View File

@ -520,6 +520,7 @@ struct intel_host {
int drv_strength;
bool d3_retune;
bool rpm_retune_ok;
bool needs_pwr_off;
u32 glk_rx_ctrl1;
u32 glk_tun_val;
u32 active_ltr;
@ -647,9 +648,25 @@ out:
static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode,
unsigned short vdd)
{
struct sdhci_pci_slot *slot = sdhci_priv(host);
struct intel_host *intel_host = sdhci_pci_priv(slot);
int cntr;
u8 reg;
/*
* Bus power may control card power, but a full reset still may not
* reset the power, whereas a direct write to SDHCI_POWER_CONTROL can.
* That might be needed to initialize correctly, if the card was left
* powered on previously.
*/
if (intel_host->needs_pwr_off) {
intel_host->needs_pwr_off = false;
if (mode != MMC_POWER_OFF) {
sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
usleep_range(10000, 12500);
}
}
sdhci_set_power(host, mode, vdd);
if (mode == MMC_POWER_OFF)
@ -1139,6 +1156,14 @@ static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
return 0;
}
static void byt_needs_pwr_off(struct sdhci_pci_slot *slot)
{
struct intel_host *intel_host = sdhci_pci_priv(slot);
u8 reg = sdhci_readb(slot->host, SDHCI_POWER_CONTROL);
intel_host->needs_pwr_off = reg & SDHCI_POWER_ON;
}
static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
{
byt_probe_slot(slot);
@ -1156,6 +1181,8 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
slot->chip->pdev->subsystem_device == PCI_SUBDEVICE_ID_NI_78E3)
slot->host->mmc->caps2 |= MMC_CAP2_AVOID_3_3V;
byt_needs_pwr_off(slot);
return 0;
}
@ -1907,6 +1934,8 @@ static const struct pci_device_id pci_ids[] = {
SDHCI_PCI_DEVICE(INTEL, CMLH_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(INTEL, JSL_EMMC, intel_glk_emmc),
SDHCI_PCI_DEVICE(INTEL, JSL_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(INTEL, LKF_EMMC, intel_glk_emmc),
SDHCI_PCI_DEVICE(INTEL, LKF_SD, intel_byt_sd),
SDHCI_PCI_DEVICE(O2, 8120, o2),
SDHCI_PCI_DEVICE(O2, 8220, o2),
SDHCI_PCI_DEVICE(O2, 8221, o2),
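
The Intel hunks above latch, at slot probe, whether SDHCI_POWER_ON was already set (for example because firmware left the card powered) and, if so, force one explicit power-off write plus a roughly 10 ms settle before the first power-up. A small userspace model of that one-shot latch; the register access is mocked and all names here are invented, not the driver's.

#include <stdbool.h>
#include <stdio.h>

#define POWER_ON_BIT 0x01u

/* Mock power-control register, as if left powered on by firmware. */
static unsigned char power_reg = POWER_ON_BIT;

struct slot {
	bool needs_pwr_off;   /* one-shot latch, set at probe */
};

static void probe(struct slot *s)
{
	/* Remember whether the card rail was already on. */
	s->needs_pwr_off = power_reg & POWER_ON_BIT;
}

static void set_power(struct slot *s, bool on)
{
	if (s->needs_pwr_off) {
		s->needs_pwr_off = false;    /* consume the latch */
		if (on) {
			power_reg = 0;       /* explicit power-off first */
			printf("forced power off, settle ~10ms before powering up\n");
		}
	}
	power_reg = on ? POWER_ON_BIT : 0;
	printf("power %s\n", on ? "on" : "off");
}

int main(void)
{
	struct slot s;

	probe(&s);
	set_power(&s, true);   /* first power-up goes through the off/settle path */
	set_power(&s, true);   /* later calls do not */
	return 0;
}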

View File

@ -57,6 +57,8 @@
#define PCI_DEVICE_ID_INTEL_CMLH_SD 0x06f5
#define PCI_DEVICE_ID_INTEL_JSL_EMMC 0x4dc4
#define PCI_DEVICE_ID_INTEL_JSL_SD 0x4df8
#define PCI_DEVICE_ID_INTEL_LKF_EMMC 0x98c4
#define PCI_DEVICE_ID_INTEL_LKF_SD 0x98f8
#define PCI_DEVICE_ID_SYSKONNECT_8000 0x8000
#define PCI_DEVICE_ID_VIA_95D0 0x95d0

View File

@ -2667,6 +2667,37 @@ static bool sdhci_request_done(struct sdhci_host *host)
return true;
}
/*
* The controller needs a reset of internal state machines
* upon error conditions.
*/
if (sdhci_needs_reset(host, mrq)) {
/*
* Do not finish until command and data lines are available for
* reset. Note there can only be one other mrq, so it cannot
* also be in mrqs_done, otherwise host->cmd and host->data_cmd
* would both be null.
*/
if (host->cmd || host->data_cmd) {
spin_unlock_irqrestore(&host->lock, flags);
return true;
}
/* Some controllers need this kick or reset won't work here */
if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
/* This is to force an update */
host->ops->set_clock(host, host->clock);
/*
* Spec says we should do both at the same time, but Ricoh
* controllers do not like that.
*/
sdhci_do_reset(host, SDHCI_RESET_CMD);
sdhci_do_reset(host, SDHCI_RESET_DATA);
host->pending_reset = false;
}
/*
* Always unmap the data buffers if they were mapped by
* sdhci_prepare_data() whenever we finish with a request.
@ -2719,35 +2750,6 @@ static bool sdhci_request_done(struct sdhci_host *host)
}
}
/*
* The controller needs a reset of internal state machines
* upon error conditions.
*/
if (sdhci_needs_reset(host, mrq)) {
/*
* Do not finish until command and data lines are available for
* reset. Note there can only be one other mrq, so it cannot
* also be in mrqs_done, otherwise host->cmd and host->data_cmd
* would both be null.
*/
if (host->cmd || host->data_cmd) {
spin_unlock_irqrestore(&host->lock, flags);
return true;
}
/* Some controllers need this kick or reset won't work here */
if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
/* This is to force an update */
host->ops->set_clock(host, host->clock);
/* Spec says we should do both at the same time, but Ricoh
controllers do not like that. */
sdhci_do_reset(host, SDHCI_RESET_CMD);
sdhci_do_reset(host, SDHCI_RESET_DATA);
host->pending_reset = false;
}
host->mrqs_done[i] = NULL;
spin_unlock_irqrestore(&host->lock, flags);

View File

@ -639,7 +639,7 @@ static int uniphier_sd_probe(struct platform_device *pdev)
ret = tmio_mmc_host_probe(host);
if (ret)
goto free_host;
goto disable_clk;
ret = devm_request_irq(dev, irq, tmio_mmc_irq, IRQF_SHARED,
dev_name(dev), host);
@ -650,6 +650,8 @@ static int uniphier_sd_probe(struct platform_device *pdev)
remove_host:
tmio_mmc_host_remove(host);
disable_clk:
uniphier_sd_clk_disable(host);
free_host:
tmio_mmc_host_free(host);
@ -662,6 +664,7 @@ static int uniphier_sd_remove(struct platform_device *pdev)
tmio_mmc_host_remove(host);
uniphier_sd_clk_disable(host);
tmio_mmc_host_free(host);
return 0;
}
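
The uniphier fix is the usual goto-cleanup idiom: a failure in tmio_mmc_host_probe() must now branch to disable_clk so the already-enabled clock is released, and the labels fall through in reverse order of acquisition. A self-contained sketch of the idiom with invented resources (not the driver's code):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for resource acquire/release steps. */
static bool host_alloc(void)       { puts("host allocated"); return true; }
static void host_free(void)        { puts("host freed"); }
static bool clk_enable(void)       { puts("clk enabled"); return true; }
static void clk_disable(void)      { puts("clk disabled"); }
static bool host_probe_fails(void) { puts("host probe -> failure"); return false; }

static int probe(void)
{
	if (!host_alloc())
		return -1;

	if (!clk_enable())
		goto free_host;

	/* A failure here must release the clock too, not jump past it. */
	if (!host_probe_fails())
		goto disable_clk;

	return 0;

disable_clk:
	clk_disable();
free_host:
	host_free();
	return -1;
}

int main(void)
{
	return probe() ? 1 : 0;
}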

View File

@ -813,10 +813,12 @@ static int atmel_nand_pmecc_correct_data(struct nand_chip *chip, void *buf,
NULL, 0,
chip->ecc.strength);
if (ret >= 0)
if (ret >= 0) {
mtd->ecc_stats.corrected += ret;
max_bitflips = max(ret, max_bitflips);
else
} else {
mtd->ecc_stats.failed++;
}
databuf += chip->ecc.size;
eccbuf += chip->ecc.bytes;

View File

@ -1133,12 +1133,14 @@ static const struct spi_device_id spinand_ids[] = {
{ .name = "spi-nand" },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(spi, spinand_ids);
#ifdef CONFIG_OF
static const struct of_device_id spinand_of_ids[] = {
{ .compatible = "spi-nand" },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, spinand_of_ids);
#endif
static struct spi_mem_driver spinand_drv = {
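
MODULE_DEVICE_TABLE() exports an ID table as module alias information, which is what lets userspace (modprobe, driven by the device's MODALIAS uevent) autoload the module when a matching SPI or OF device appears; without it the tables above only serve in-kernel matching. A hypothetical, kernel-style skeleton showing where such a table plugs into a driver (all names made up):

#include <linux/module.h>
#include <linux/of.h>
#include <linux/spi/spi.h>

static const struct of_device_id demo_of_ids[] = {
	{ .compatible = "vendor,demo-nand" },
	{ /* sentinel */ },
};
/* Emits alias info so modprobe can autoload the module on a device match. */
MODULE_DEVICE_TABLE(of, demo_of_ids);

static int demo_probe(struct spi_device *spi)
{
	return 0;
}

static struct spi_driver demo_driver = {
	.driver = {
		.name = "demo-nand",
		.of_match_table = demo_of_ids,
	},
	.probe = demo_probe,
};
module_spi_driver(demo_driver);

MODULE_LICENSE("GPL");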

View File

@ -524,6 +524,16 @@ mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
u16 erif_index = 0;
int err;
/* Add the eRIF */
if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
err = mr->mr_ops->route_erif_add(mlxsw_sp,
rve->mr_route->route_priv,
erif_index);
if (err)
return err;
}
/* Update the route action, as the new eVIF can be a tunnel or a pimreg
* device which will require updating the action.
*/
@ -533,17 +543,7 @@ mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
rve->mr_route->route_priv,
route_action);
if (err)
return err;
}
/* Add the eRIF */
if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
err = mr->mr_ops->route_erif_add(mlxsw_sp,
rve->mr_route->route_priv,
erif_index);
if (err)
goto err_route_erif_add;
goto err_route_action_update;
}
/* Update the minimum MTU */
@ -561,14 +561,14 @@ mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
return 0;
err_route_min_mtu_update:
if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv,
erif_index);
err_route_erif_add:
if (route_action != rve->mr_route->route_action)
mr->mr_ops->route_action_update(mlxsw_sp,
rve->mr_route->route_priv,
rve->mr_route->route_action);
err_route_action_update:
if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv,
erif_index);
return err;
}
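
The mlxsw hunk reorders the two setup steps (the eRIF is now added before the route action is updated) and swaps the error-unwind arms to match, so each label still undoes exactly the steps that completed before the failure. A short self-contained sketch of keeping teardown mirrored when the setup order changes (step names invented):

#include <stdbool.h>
#include <stdio.h>

static bool add_erif(void)          { puts("erif added"); return true; }
static void del_erif(void)          { puts("erif deleted"); }
static bool update_action_fails(void) { puts("action update -> failure"); return false; }
static void restore_action(void)    { puts("action restored"); }
static bool update_mtu(void)        { puts("mtu updated"); return true; }

static int resolve(void)
{
	if (!add_erif())
		return -1;

	if (!update_action_fails())
		goto err_action_update;

	if (!update_mtu())
		goto err_mtu_update;

	return 0;

err_mtu_update:
	restore_action();
err_action_update:
	/* Undo only what succeeded before the failure. */
	del_erif();
	return -1;
}

int main(void)
{
	return resolve() ? 1 : 0;
}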

View File

@ -86,7 +86,7 @@ int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev,
if (cmd == NULL)
goto error_alloc;
cmd->hdr.type = cpu_to_le16(I2400M_MT_CMD_RF_CONTROL);
cmd->hdr.length = sizeof(cmd->sw_rf);
cmd->hdr.length = cpu_to_le16(sizeof(cmd->sw_rf));
cmd->hdr.version = cpu_to_le16(I2400M_L3L4_VERSION);
cmd->sw_rf.hdr.type = cpu_to_le16(I2400M_TLV_RF_OPERATION);
cmd->sw_rf.hdr.length = cpu_to_le16(sizeof(cmd->sw_rf.status));
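
The i2400m fix stores the header length with cpu_to_le16(); the neighbouring fields are already converted, so this is evidently a little-endian wire field, and a raw host-order store would put the bytes in the wrong order on big-endian machines. A userspace illustration using glibc's <endian.h> htole16(), the counterpart of the kernel helper:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct msg_hdr {
	uint16_t type;    /* little-endian on the wire */
	uint16_t length;  /* little-endian on the wire */
};

int main(void)
{
	struct msg_hdr hdr;
	const unsigned char *raw = (const unsigned char *)&hdr;

	/* Correct: convert host byte order to the wire's little-endian. */
	hdr.type = htole16(0x0005);
	hdr.length = htole16(sizeof(uint32_t));

	/* On any host, byte 0 of 'length' is the low byte (04), byte 1 is 00. */
	printf("length bytes on the wire: %02x %02x\n", raw[2], raw[3]);
	return 0;
}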

View File

@ -1511,7 +1511,7 @@ static int rsi_restore(struct device *dev)
}
static const struct dev_pm_ops rsi_pm_ops = {
.suspend = rsi_suspend,
.resume = rsi_resume,
.resume_noirq = rsi_resume,
.freeze = rsi_freeze,
.thaw = rsi_thaw,
.restore = rsi_restore,

View File

@ -1666,20 +1666,10 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
int err;
int i, bars = 0;
/*
* Power state could be unknown at this point, either due to a fresh
* boot or a device removal call. So get the current power state
* so that things like MSI message writing will behave as expected
* (e.g. if the device really is in D0 at enable time).
*/
if (dev->pm_cap) {
u16 pmcsr;
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
}
if (atomic_inc_return(&dev->enable_cnt) > 1)
if (atomic_inc_return(&dev->enable_cnt) > 1) {
pci_update_current_state(dev, dev->current_state);
return 0; /* already enabled */
}
bridge = pci_upstream_bridge(dev);
if (bridge)

View File

@ -236,7 +236,7 @@ int arm_pmu_device_probe(struct platform_device *pdev,
ret = armpmu_register(pmu);
if (ret)
goto out_free;
goto out_free_irqs;
return 0;

View File

@ -779,7 +779,7 @@ static int twl4030_usb_remove(struct platform_device *pdev)
usb_remove_phy(&twl->phy);
pm_runtime_get_sync(twl->dev);
cancel_delayed_work(&twl->id_workaround_work);
cancel_delayed_work_sync(&twl->id_workaround_work);
device_remove_file(twl->dev, &dev_attr_vbus);
/* set transceiver mode to power on defaults */
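
cancel_delayed_work() only removes a not-yet-running item; the _sync variant used here also waits for a callback that is already executing, which is what a remove path needs before the data the work touches goes away. A hypothetical kernel-style remove path showing the same call (not the twl4030 code; the struct and field names are invented):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_priv {
	struct delayed_work poll_work;
	/* ... */
};

static void my_remove(struct my_priv *priv)
{
	/*
	 * The _sync variant waits for a currently running callback as well,
	 * so the work cannot still be touching 'priv' after this returns.
	 */
	cancel_delayed_work_sync(&priv->poll_work);
	kfree(priv);
}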

View File

@ -831,9 +831,15 @@ static const struct pci_device_id pmc_pci_ids[] = {
* the platform BIOS enforces the 24MHz crystal to shut down

* before PMC can assert SLP_S0#.
*/
static bool xtal_ignore;
static int quirk_xtal_ignore(const struct dmi_system_id *id)
{
struct pmc_dev *pmcdev = &pmc;
xtal_ignore = true;
return 0;
}
static void pmc_core_xtal_ignore(struct pmc_dev *pmcdev)
{
u32 value;
value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_vric1_offset);
@ -842,7 +848,6 @@ static int quirk_xtal_ignore(const struct dmi_system_id *id)
/* Low Voltage Mode Enable */
value &= ~SPT_PMC_VRIC1_SLPS0LVEN;
pmc_core_reg_write(pmcdev, pmcdev->map->pm_vric1_offset, value);
return 0;
}
static const struct dmi_system_id pmc_core_dmi_table[] = {
@ -857,6 +862,14 @@ static const struct dmi_system_id pmc_core_dmi_table[] = {
{}
};
static void pmc_core_do_dmi_quirks(struct pmc_dev *pmcdev)
{
dmi_check_system(pmc_core_dmi_table);
if (xtal_ignore)
pmc_core_xtal_ignore(pmcdev);
}
static int pmc_core_probe(struct platform_device *pdev)
{
static bool device_initialized;
@ -898,7 +911,7 @@ static int pmc_core_probe(struct platform_device *pdev)
mutex_init(&pmcdev->lock);
platform_set_drvdata(pdev, pmcdev);
pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit();
dmi_check_system(pmc_core_dmi_table);
pmc_core_do_dmi_quirks(pmcdev);
pmc_core_dbgfs_register(pmcdev);
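
The pmc_core change splits quirk detection from application: the DMI match callback now only records that the quirk is wanted, and pmc_core_do_dmi_quirks() performs the register write later against the probed pmcdev instead of a global. A tiny userspace sketch of that record-now/apply-later pattern; the names and the bit value are invented:

#include <stdbool.h>
#include <stdio.h>

struct dev_state { unsigned int vric1; };

static bool quirk_wanted;

/* Match callback: may run before the device is ready, so only set a flag. */
static int match_cb(void)
{
	quirk_wanted = true;
	return 0;
}

/* Called later, once 'dev' is valid, to actually apply the quirk. */
static void apply_quirks(struct dev_state *dev)
{
	if (quirk_wanted)
		dev->vric1 &= ~0x2000u;   /* e.g. clear an enable bit */
}

int main(void)
{
	struct dev_state dev = { .vric1 = 0xffff };

	match_cb();          /* stands in for the DMI table firing its callback */
	apply_quirks(&dev);  /* stands in for pmc_core_do_dmi_quirks() */
	printf("vric1 = 0x%04x\n", dev.vric1);
	return 0;
}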

View File

@ -1499,27 +1499,6 @@ static int bq27xxx_battery_read_time(struct bq27xxx_device_info *di, u8 reg)
return tval * 60;
}
/*
* Read an average power register.
* Return < 0 if something fails.
*/
static int bq27xxx_battery_read_pwr_avg(struct bq27xxx_device_info *di)
{
int tval;
tval = bq27xxx_read(di, BQ27XXX_REG_AP, false);
if (tval < 0) {
dev_err(di->dev, "error reading average power register %02x: %d\n",
BQ27XXX_REG_AP, tval);
return tval;
}
if (di->opts & BQ27XXX_O_ZERO)
return (tval * BQ27XXX_POWER_CONSTANT) / BQ27XXX_RS;
else
return tval;
}
/*
* Returns true if a battery over temperature condition is detected
*/
@ -1604,8 +1583,6 @@ void bq27xxx_battery_update(struct bq27xxx_device_info *di)
}
if (di->regs[BQ27XXX_REG_CYCT] != INVALID_REG_ADDR)
cache.cycle_count = bq27xxx_battery_read_cyct(di);
if (di->regs[BQ27XXX_REG_AP] != INVALID_REG_ADDR)
cache.power_avg = bq27xxx_battery_read_pwr_avg(di);
/* We only have to read charge design full once */
if (di->charge_design_full <= 0)
@ -1668,6 +1645,32 @@ static int bq27xxx_battery_current(struct bq27xxx_device_info *di,
return 0;
}
/*
* Get the average power in µW
* Return < 0 if something fails.
*/
static int bq27xxx_battery_pwr_avg(struct bq27xxx_device_info *di,
union power_supply_propval *val)
{
int power;
power = bq27xxx_read(di, BQ27XXX_REG_AP, false);
if (power < 0) {
dev_err(di->dev,
"error reading average power register %02x: %d\n",
BQ27XXX_REG_AP, power);
return power;
}
if (di->opts & BQ27XXX_O_ZERO)
val->intval = (power * BQ27XXX_POWER_CONSTANT) / BQ27XXX_RS;
else
/* Other gauges return a signed value in units of 10mW */
val->intval = (int)((s16)power) * 10000;
return 0;
}
static int bq27xxx_battery_status(struct bq27xxx_device_info *di,
union power_supply_propval *val)
{
@ -1835,7 +1838,7 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
ret = bq27xxx_simple_value(di->cache.energy, val);
break;
case POWER_SUPPLY_PROP_POWER_AVG:
ret = bq27xxx_simple_value(di->cache.power_avg, val);
ret = bq27xxx_battery_pwr_avg(di, val);
break;
case POWER_SUPPLY_PROP_HEALTH:
ret = bq27xxx_simple_value(di->cache.health, val);
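
The new bq27xxx_battery_pwr_avg() reads the average-power register on demand and scales it: the older "O_ZERO" gauges keep their constant/sense-resistor formula, while the others report a signed 16-bit value in units of 10 mW, hence the sign cast and the multiplication by 10000 to reach microwatts. A self-contained sketch of just that conversion; the two constants in the first branch are placeholders, not necessarily the driver's BQ27XXX_POWER_CONSTANT / BQ27XXX_RS values.

#include <stdint.h>
#include <stdio.h>

/* Placeholder scale factors for the older ("O_ZERO") gauges. */
#define POWER_CONSTANT 29200
#define RS             20

/* Convert a raw 16-bit average-power register value to microwatts. */
static int raw_power_to_uw(uint16_t raw, int is_zero_variant)
{
	if (is_zero_variant)
		return ((int)raw * POWER_CONSTANT) / RS;

	/* Newer gauges: signed value in units of 10 mW -> scale to uW. */
	return (int)(int16_t)raw * 10000;
}

int main(void)
{
	/* 0xFF9C is -100 as a signed 16-bit value: -100 * 10 mW = -1 W. */
	printf("%d uW\n", raw_power_to_uw(0xFF9C, 0));   /* -1000000 */
	printf("%d uW\n", raw_power_to_uw(250, 0));      /*  2500000 */
	return 0;
}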

View File

@ -382,7 +382,7 @@ static int gab_remove(struct platform_device *pdev)
}
kfree(adc_bat->psy_desc.properties);
cancel_delayed_work(&adc_bat->bat_work);
cancel_delayed_work_sync(&adc_bat->bat_work);
return 0;
}

View File

@ -501,7 +501,7 @@ static int lp8788_set_irqs(struct platform_device *pdev,
ret = request_threaded_irq(virq, NULL,
lp8788_charger_irq_thread,
0, name, pchg);
IRQF_ONESHOT, name, pchg);
if (ret)
break;
}
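
This and the later charger hunks (pm2xxx, tps65090, tps65217) add IRQF_ONESHOT to threaded IRQ requests that pass a NULL primary handler; without the flag the IRQ core rejects such a request (typically with -EINVAL), since nothing would keep the line masked until the thread runs. A hypothetical kernel-style request showing the required combination (not one of the drivers touched here):

#include <linux/device.h>
#include <linux/interrupt.h>

static irqreturn_t my_charger_irq_thread(int irq, void *data)
{
	/* Slow, sleepable work happens here; the line stays masked until we return. */
	return IRQ_HANDLED;
}

static int my_charger_request_irq(struct device *dev, int irq, void *priv)
{
	/*
	 * No primary (hard-IRQ) handler, so IRQF_ONESHOT is mandatory for
	 * request_threaded_irq()/devm_request_threaded_irq() to accept it.
	 */
	return devm_request_threaded_irq(dev, irq, NULL, my_charger_irq_thread,
					 IRQF_ONESHOT, "my-charger", priv);
}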

View File

@ -1095,7 +1095,7 @@ static int pm2xxx_wall_charger_probe(struct i2c_client *i2c_client,
ret = request_threaded_irq(gpio_to_irq(pm2->pdata->gpio_irq_number),
NULL,
pm2xxx_charger_irq[0].isr,
pm2->pdata->irq_type,
pm2->pdata->irq_type | IRQF_ONESHOT,
pm2xxx_charger_irq[0].name, pm2);
if (ret != 0) {

View File

@ -394,7 +394,7 @@ static int s3c_adc_bat_remove(struct platform_device *pdev)
gpio_free(pdata->gpio_charge_finished);
}
cancel_delayed_work(&bat_work);
cancel_delayed_work_sync(&bat_work);
if (pdata->exit)
pdata->exit();

View File

@ -301,7 +301,7 @@ static int tps65090_charger_probe(struct platform_device *pdev)
if (irq != -ENXIO) {
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
tps65090_charger_isr, 0, "tps65090-charger", cdata);
tps65090_charger_isr, IRQF_ONESHOT, "tps65090-charger", cdata);
if (ret) {
dev_err(cdata->dev,
"Unable to register irq %d err %d\n", irq,

View File

@ -238,7 +238,7 @@ static int tps65217_charger_probe(struct platform_device *pdev)
for (i = 0; i < NUM_CHARGER_IRQS; i++) {
ret = devm_request_threaded_irq(&pdev->dev, irq[i], NULL,
tps65217_charger_irq,
0, "tps65217-charger",
IRQF_ONESHOT, "tps65217-charger",
charger);
if (ret) {
dev_err(charger->dev,

Some files were not shown because too many files have changed in this diff.