This is the 4.9.128 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAluitdUACgkQONu9yGCS
 aT6k7g//fxvMjnxYwVZ6LQjFkBakhP4gU/unxKppIFb0Iti+LBpx3Fg5JbtpRM8y
 oCAAMb9F5Dxl38691BqBpCdihmrGurh9EDn9ZZDsoUtipQ7UQozmGDbbc4fV0cQW
 /2Fs7GjFp1XopOAvFp7z/Xn5Xi8fSxgWg6l6aQ5VzsUtK69FM4TmNPJ2x1adow+N
 UsewbUkLkD9dMIrrgIajeCsErRYnJHy6FuY/pWSXVuY1gyTlCW/lsxKU2OoS5l5Z
 u/ZxprWl7z6FjSfzHc4AYrBR6t2zCAV2DNZhX5lDU/EHzTAttq0sSaeEfWzqYrJM
 q/bStS8eEWj+MkqfrppWRc1I/ZwUrjMB1P8OJEW8UUFTjAcRvlBz/Z1poOCRkBRp
 flsohLfao2/dbPxT0oLQMidBRX9t90TQLxwGY2SJ2/dL6uU/sapQDo8/iwyCZvC6
 u57DLf82P9tUvZgnUOoNuCPSqiZQG/tGPGFRGU0EgHVtoE7S285FvRJZoNlRH+oT
 EyAHtavzbgsoJb0oP12eOBfq0dwvmXt6i7ypzPoy/Fh+zQTb5Fk1fH+4GF+gFqWY
 //CpKO+zIdeFzWZ/r8eAkS+uLgvgywNbZV6N8EJYqviajKfU5A/RX5GsQ4oaFfk6
 ++Anyn58UYYSkw7ov5ynLNmNr+tlMjKUuRXw0u9rtAhcTQcnHYc=
 =YQcR
 -----END PGP SIGNATURE-----

Merge tag 'v4.9.128' into 4.9-2.3.x-imx

This is the 4.9.128 stable release
commit cc9333d7aa
Author: Gary Bisson
Date:   2018-09-24 14:01:55 +02:00

 81 files changed, 357 insertions(+), 195 deletions(-)


@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
SUBLEVEL = 127
SUBLEVEL = 128
EXTRAVERSION =
NAME = Roaring Lionus


@ -1,5 +1,4 @@
CONFIG_DEFAULT_HOSTNAME="ARCLinux"
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
# CONFIG_CROSS_MEMORY_ATTACH is not set


@ -1,5 +1,4 @@
CONFIG_DEFAULT_HOSTNAME="ARCLinux"
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
# CONFIG_CROSS_MEMORY_ATTACH is not set


@ -1,5 +1,4 @@
CONFIG_DEFAULT_HOSTNAME="ARCLinux"
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
# CONFIG_CROSS_MEMORY_ATTACH is not set


@ -366,6 +366,7 @@ static int __init octeon_ehci_device_init(void)
return 0;
pd = of_find_device_by_node(ehci_node);
of_node_put(ehci_node);
if (!pd)
return 0;
@ -428,6 +429,7 @@ static int __init octeon_ohci_device_init(void)
return 0;
pd = of_find_device_by_node(ohci_node);
of_node_put(ohci_node);
if (!pd)
return 0;
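
Both octeon hunks apply the same device-tree refcount rule: of_find_device_by_node() takes its own reference on the platform device it returns, so the node reference from the earlier lookup can be dropped before the NULL check instead of leaking on the not-found path. A toy userspace sketch of the get/put pairing (struct node, find_node() and node_put() are illustrative stand-ins, not kernel API):

    #include <stdio.h>

    struct node { int refcount; };

    /* stand-in for of_find_compatible_node(): the lookup takes a reference */
    static struct node *find_node(struct node *n) { n->refcount++; return n; }
    /* stand-in for of_node_put(): drop it again */
    static void node_put(struct node *n) { n->refcount--; }

    int main(void)
    {
        struct node dt = { .refcount = 1 };
        struct node *n = find_node(&dt);

        /* ... resolve the platform device from n ... */
        node_put(n);    /* drop the lookup reference on every path, found or not */
        printf("refcount back to %d\n", dt.refcount);   /* 1 */
        return 0;
    }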


@ -159,6 +159,7 @@ void __init arch_init_irq(void)
"mti,cpu-interrupt-controller");
if (!cpu_has_veic && !intc_node)
mips_cpu_irq_init();
of_node_put(intc_node);
irqchip_init();
}


@ -141,14 +141,14 @@ static inline void * phys_to_virt(unsigned long address)
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
*/
static inline unsigned long isa_virt_to_bus(volatile void * address)
static inline unsigned long isa_virt_to_bus(volatile void *address)
{
return (unsigned long)address - PAGE_OFFSET;
return virt_to_phys(address);
}
static inline void * isa_bus_to_virt(unsigned long address)
static inline void *isa_bus_to_virt(unsigned long address)
{
return (void *)(address + PAGE_OFFSET);
return phys_to_virt(address);
}
#define isa_page_to_bus page_to_phys


@ -118,7 +118,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
struct thread_info *ti = task_thread_info(p);
struct pt_regs *childregs, *regs = current_pt_regs();
unsigned long childksp;
p->set_child_tid = p->clear_child_tid = NULL;
childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;


@ -835,7 +835,8 @@ static void r4k_flush_icache_user_range(unsigned long start, unsigned long end)
static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
/* Catch bad driver code */
BUG_ON(size == 0);
if (WARN_ON(size == 0))
return;
preempt_disable();
if (cpu_has_inclusive_pcaches) {
@ -871,7 +872,8 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
/* Catch bad driver code */
BUG_ON(size == 0);
if (WARN_ON(size == 0))
return;
preempt_disable();
if (cpu_has_inclusive_pcaches) {
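
Both r4k hunks downgrade a fatal BUG_ON() to a warning plus early return, so a buggy driver passing a zero-length region logs a backtrace instead of halting the machine. A minimal portable sketch, with warn_on() as a userspace stand-in for the kernel's WARN_ON() (which likewise evaluates to the tested condition):

    #include <stdio.h>

    /* stand-in for WARN_ON(): report, then return the condition */
    static int warn_on(int cond, const char *what)
    {
        if (cond)
            fprintf(stderr, "WARN: %s\n", what);
        return cond;
    }

    static void dma_cache_wback_inv(unsigned long addr, unsigned long size)
    {
        (void)addr;
        if (warn_on(size == 0, "size == 0"))
            return;     /* bail out instead of panicking */
        /* ... perform the cache maintenance on [addr, addr + size) ... */
    }

    int main(void)
    {
        dma_cache_wback_inv(0x1000, 0); /* warns and returns */
        return 0;
    }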


@ -152,8 +152,6 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
top_of_kernel_stack = sp;
p->set_child_tid = p->clear_child_tid = NULL;
/* Locate userspace context on stack... */
sp -= STACK_FRAME_OVERHEAD; /* redzone */
sp -= sizeof(struct pt_regs);


@ -156,7 +156,8 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
return set_validity_icpt(scb_s, 0x0039U);
/* copy only the wrapping keys */
if (read_guest_real(vcpu, crycb_addr + 72, &vsie_page->crycb, 56))
if (read_guest_real(vcpu, crycb_addr + 72,
vsie_page->crycb.dea_wrapping_key_mask, 56))
return set_validity_icpt(scb_s, 0x0035U);
scb_s->ecb3 |= ecb3_flags;


@ -330,8 +330,6 @@ static noinline int vmalloc_fault(unsigned long address)
if (!(address >= VMALLOC_START && address < VMALLOC_END))
return -1;
WARN_ON_ONCE(in_nmi());
/*
* Synchronize this task's top level page-table
* with the 'reference' page table.


@ -185,7 +185,8 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
}
wb_congested = wb_congested_get_create(&q->backing_dev_info,
blkcg->css.id, GFP_NOWAIT);
blkcg->css.id,
GFP_NOWAIT | __GFP_NOWARN);
if (!wb_congested) {
ret = -ENOMEM;
goto err_put_css;
@ -193,7 +194,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
/* allocate */
if (!new_blkg) {
new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT);
new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
if (unlikely(!new_blkg)) {
ret = -ENOMEM;
goto err_put_congested;
@ -1022,7 +1023,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
}
spin_lock_init(&blkcg->lock);
INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT);
INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
INIT_LIST_HEAD(&blkcg->cgwb_list);
@ -1238,7 +1239,7 @@ pd_prealloc:
if (blkg->pd[pol->plid])
continue;
pd = pol->pd_alloc_fn(GFP_NOWAIT, q->node);
pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q->node);
if (!pd)
swap(pd, pd_prealloc);
if (!pd) {


@ -2950,7 +2950,8 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
* for devices that support queuing, otherwise we still have a problem
* with sync vs async workloads.
*/
if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag &&
!cfqd->cfq_group_idle)
return;
WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
@ -3866,7 +3867,8 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
goto out;
}
cfqq = kmem_cache_alloc_node(cfq_pool, GFP_NOWAIT | __GFP_ZERO,
cfqq = kmem_cache_alloc_node(cfq_pool,
GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
cfqd->queue->node);
if (!cfqq) {
cfqq = &cfqd->oom_cfqq;


@ -177,7 +177,7 @@ int aix_partition(struct parsed_partitions *state)
u32 vgda_sector = 0;
u32 vgda_len = 0;
int numlvs = 0;
struct pvd *pvd;
struct pvd *pvd = NULL;
struct lv_info {
unsigned short pps_per_lv;
unsigned short pps_found;
@ -231,10 +231,11 @@ int aix_partition(struct parsed_partitions *state)
if (lvip[i].pps_per_lv)
foundlvs += 1;
}
/* pvd loops depend on n[].name and lvip[].pps_per_lv */
pvd = alloc_pvd(state, vgda_sector + 17);
}
put_dev_sector(sect);
}
pvd = alloc_pvd(state, vgda_sector + 17);
if (pvd) {
int numpps = be16_to_cpu(pvd->pp_count);
int psn_part1 = be32_to_cpu(pvd->psn_part1);
@ -281,10 +282,14 @@ int aix_partition(struct parsed_partitions *state)
next_lp_ix += 1;
}
for (i = 0; i < state->limit; i += 1)
if (lvip[i].pps_found && !lvip[i].lv_is_contiguous)
if (lvip[i].pps_found && !lvip[i].lv_is_contiguous) {
char tmp[sizeof(n[i].name) + 1]; // null char
snprintf(tmp, sizeof(tmp), "%s", n[i].name);
pr_warn("partition %s (%u pp's found) is "
"not contiguous\n",
n[i].name, lvip[i].pps_found);
tmp, lvip[i].pps_found);
}
kfree(pvd);
}
kfree(n);
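
The temporary exists because n[i].name is a fixed-width field with no guaranteed NUL terminator, so handing it straight to a %s format could read past the field. A self-contained illustration; it uses an explicit %.*s precision, the portable equivalent of the bounded snprintf() in the hunk:

    #include <stdio.h>

    int main(void)
    {
        char name[4] = { 'L', 'V', '0', '1' };  /* fixed width, no NUL */
        char tmp[sizeof(name) + 1];             /* + 1 for the terminator */

        /* with a precision, the source need not be NUL-terminated */
        snprintf(tmp, sizeof(tmp), "%.*s", (int)sizeof(name), name);
        printf("partition %s\n", tmp);          /* safe: prints "LV01" */
        return 0;
    }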


@ -2132,6 +2132,8 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
deto = 20;
}
/* Make dito, mdat, deto bits to 0s */
devslp &= ~GENMASK_ULL(24, 2);
devslp |= ((dito << PORT_DEVSLP_DITO_OFFSET) |
(mdat << PORT_DEVSLP_MDAT_OFFSET) |
(deto << PORT_DEVSLP_DETO_OFFSET) |
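
The added line zeroes bits 24:2 (the dito/mdat/deto fields) before OR-ing in the fresh values, so stale field contents cannot linger in the register image. A standalone sketch with a local copy of the GENMASK_ULL(h, l) shape (bits l through h set); the starting register value is hypothetical:

    #include <stdio.h>

    /* same shape as the kernel macro: bits l..h set, all others clear */
    #define GENMASK_ULL(h, l) ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

    int main(void)
    {
        unsigned long long devslp = 0xffffffffULL;  /* hypothetical value */

        devslp &= ~GENMASK_ULL(24, 2);  /* clear the dito/mdat/deto fields */
        printf("0x%llx\n", devslp);     /* 0xfe000003 */
        return 0;
    }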


@ -125,6 +125,7 @@ config BT_HCIUART_LL
config BT_HCIUART_3WIRE
bool "Three-wire UART (H5) protocol support"
depends on BT_HCIUART
depends on BT_HCIUART_SERDEV
help
The HCI Three-wire UART Transport Layer makes it possible to
use the Bluetooth HCI over a serial port interface. The HCI


@ -115,7 +115,7 @@ static int iic_tpm_read(u8 addr, u8 *buffer, size_t len)
/* Lock the adapter for the duration of the whole sequence. */
if (!tpm_dev.client->adapter->algo->master_xfer)
return -EOPNOTSUPP;
i2c_lock_adapter(tpm_dev.client->adapter);
i2c_lock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
if (tpm_dev.chip_type == SLB9645) {
/* use a combined read for newer chips
@ -156,7 +156,7 @@ static int iic_tpm_read(u8 addr, u8 *buffer, size_t len)
}
out:
i2c_unlock_adapter(tpm_dev.client->adapter);
i2c_unlock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
/* take care of 'guard time' */
usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
@ -188,7 +188,7 @@ static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len,
if (!tpm_dev.client->adapter->algo->master_xfer)
return -EOPNOTSUPP;
i2c_lock_adapter(tpm_dev.client->adapter);
i2c_lock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
/* prepend the 'register address' to the buffer */
tpm_dev.buf[0] = addr;
@ -207,7 +207,7 @@ static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len,
usleep_range(sleep_low, sleep_hi);
}
i2c_unlock_adapter(tpm_dev.client->adapter);
i2c_unlock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
/* take care of 'guard time' */
usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);


@ -189,6 +189,7 @@ static const struct tpm_tis_phy_ops tpm_spi_phy_ops = {
static int tpm_tis_spi_probe(struct spi_device *dev)
{
struct tpm_tis_spi_phy *phy;
int irq;
phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_spi_phy),
GFP_KERNEL);
@ -201,7 +202,13 @@ static int tpm_tis_spi_probe(struct spi_device *dev)
if (!phy->iobuf)
return -ENOMEM;
return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_spi_phy_ops,
/* If the SPI device has an IRQ then use that */
if (dev->irq > 0)
irq = dev->irq;
else
irq = -1;
return tpm_tis_core_init(&dev->dev, &phy->priv, irq, &tpm_spi_phy_ops,
NULL);
}


@ -495,9 +495,10 @@ err_irq_alloc_descs:
chip = chip_save;
err_gpiochip_add:
chip = chip_save;
while (--i >= 0) {
chip--;
gpiochip_remove(&chip->gpio);
chip++;
}
kfree(chip_save);


@ -723,4 +723,4 @@ static int __init tegra_gpio_init(void)
{
return platform_driver_register(&tegra_gpio_driver);
}
postcore_initcall(tegra_gpio_init);
subsys_initcall(tegra_gpio_init);


@ -135,6 +135,7 @@
#define SBREG_BAR 0x10
#define SBREG_SMBCTRL 0xc6000c
#define SBREG_SMBCTRL_DNV 0xcf000c
/* Host status bits for SMBPCISTS */
#define SMBPCISTS_INTS 0x08
@ -1387,7 +1388,11 @@ static void i801_add_tco(struct i801_priv *priv)
spin_unlock(&p2sb_spinlock);
res = &tco_res[ICH_RES_MEM_OFF];
res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
if (pci_dev->device == PCI_DEVICE_ID_INTEL_DNV_SMBUS)
res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL_DNV;
else
res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
res->end = res->start + 3;
res->flags = IORESOURCE_MEM;


@ -538,6 +538,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
{
u8 rx_watermark;
struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg;
unsigned long flags;
/* Clear and enable Rx full interrupt. */
xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK);
@ -553,6 +554,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
rx_watermark = IIC_RX_FIFO_DEPTH;
xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1);
local_irq_save(flags);
if (!(msg->flags & I2C_M_NOSTART))
/* write the address */
xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
@ -563,6 +565,8 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0));
local_irq_restore(flags);
if (i2c->nmsgs == 1)
/* very last, enable bus not busy as well */
xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK);


@ -1409,9 +1409,16 @@ static bool cma_match_net_dev(const struct rdma_cm_id *id,
(addr->src_addr.ss_family == AF_IB ||
cma_protocol_roce_dev_port(id->device, port_num));
return !addr->dev_addr.bound_dev_if ||
(net_eq(dev_net(net_dev), addr->dev_addr.net) &&
addr->dev_addr.bound_dev_if == net_dev->ifindex);
/*
* Net namespaces must match, and if the listener is listening
* on a specific netdevice then the netdevice must match as well.
*/
if (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
(!!addr->dev_addr.bound_dev_if ==
(addr->dev_addr.bound_dev_if == net_dev->ifindex)))
return true;
else
return false;
}
static struct rdma_id_private *cma_find_listener(


@ -978,7 +978,9 @@ static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
free_rd_atomic_resource(qp, res);
rxe_advance_resp_resource(qp);
memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(skb->cb));
memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(ack_pkt));
memset((unsigned char *)SKB_TO_PKT(skb) + sizeof(ack_pkt), 0,
sizeof(skb->cb) - sizeof(ack_pkt));
res->type = RXE_ATOMIC_MASK;
res->atomic.skb = skb;
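
The old memcpy() copied sizeof(skb->cb) bytes out of the smaller on-stack ack_pkt, reading past its end; the fix copies only sizeof(ack_pkt) and zeroes the remainder of the control block. A userspace sketch of the copy-then-zero-the-tail pattern (pkt_info and the sizes are illustrative):

    #include <stdio.h>
    #include <string.h>

    struct pkt_info { int opcode, psn; };   /* illustrative stand-in */

    int main(void)
    {
        char cb[48];                        /* like skb->cb: fixed scratch space */
        struct pkt_info ack = { 17, 42 };

        memcpy(cb, &ack, sizeof(ack));      /* only what ack actually covers */
        memset(cb + sizeof(ack), 0,
               sizeof(cb) - sizeof(ack));   /* zero the tail, no stack leak */

        printf("tail byte: %d\n", cb[sizeof(ack)]); /* 0 */
        return 0;
    }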


@ -1671,10 +1671,11 @@ static int mxt_get_object_table(struct mxt_data *data)
break;
case MXT_TOUCH_MULTI_T9:
data->multitouch = MXT_TOUCH_MULTI_T9;
/* Only handle messages from first T9 instance */
data->T9_reportid_min = min_id;
data->T9_reportid_max = max_id;
data->num_touchids = object->num_report_ids
* mxt_obj_instances(object);
data->T9_reportid_max = min_id +
object->num_report_ids - 1;
data->num_touchids = object->num_report_ids;
break;
case MXT_SPT_MESSAGECOUNT_T44:
data->T44_address = object->start_address;


@ -44,7 +44,7 @@ struct ipmmu_vmsa_domain {
struct io_pgtable_ops *iop;
unsigned int context_id;
spinlock_t lock; /* Protects mappings */
struct mutex mutex; /* Protects mappings */
};
struct ipmmu_vmsa_archdata {
@ -464,7 +464,7 @@ static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
if (!domain)
return NULL;
spin_lock_init(&domain->lock);
mutex_init(&domain->mutex);
return &domain->io_domain;
}
@ -488,7 +488,6 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
struct ipmmu_vmsa_device *mmu = archdata->mmu;
struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
unsigned long flags;
unsigned int i;
int ret = 0;
@ -497,7 +496,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
return -ENXIO;
}
spin_lock_irqsave(&domain->lock, flags);
mutex_lock(&domain->mutex);
if (!domain->mmu) {
/* The domain hasn't been used yet, initialize it. */
@ -513,7 +512,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
ret = -EINVAL;
}
spin_unlock_irqrestore(&domain->lock, flags);
mutex_unlock(&domain->mutex);
if (ret < 0)
return ret;


@ -531,8 +531,9 @@ init_pmu(void)
int timeout;
struct adb_request req;
out_8(&via[B], via[B] | TREQ); /* negate TREQ */
out_8(&via[DIRB], (via[DIRB] | TREQ) & ~TACK); /* TACK in, TREQ out */
/* Negate TREQ. Set TACK to input and TREQ to output. */
out_8(&via[B], in_8(&via[B]) | TREQ);
out_8(&via[DIRB], (in_8(&via[DIRB]) | TREQ) & ~TACK);
pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask);
timeout = 100000;
@ -1454,8 +1455,8 @@ pmu_sr_intr(void)
struct adb_request *req;
int bite = 0;
if (via[B] & TREQ) {
printk(KERN_ERR "PMU: spurious SR intr (%x)\n", via[B]);
if (in_8(&via[B]) & TREQ) {
printk(KERN_ERR "PMU: spurious SR intr (%x)\n", in_8(&via[B]));
out_8(&via[IFR], SR_INT);
return NULL;
}


@ -4207,6 +4207,12 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
s->failed++;
if (rdev && !test_bit(Faulty, &rdev->flags))
do_recovery = 1;
else if (!rdev) {
rdev = rcu_dereference(
conf->disks[i].replacement);
if (rdev && !test_bit(Faulty, &rdev->flags))
do_recovery = 1;
}
}
}
if (test_bit(STRIPE_SYNCING, &sh->state)) {


@ -898,7 +898,10 @@ static int helene_x_pon(struct helene_priv *priv)
helene_write_regs(priv, 0x99, cdata, sizeof(cdata));
/* 0x81 - 0x94 */
data[0] = 0x18; /* xtal 24 MHz */
if (priv->xtal == SONY_HELENE_XTAL_16000)
data[0] = 0x10; /* xtal 16 MHz */
else
data[0] = 0x18; /* xtal 24 MHz */
data[1] = (uint8_t)(0x80 | (0x04 & 0x1F)); /* 4 x 25 = 100uA */
data[2] = (uint8_t)(0x80 | (0x26 & 0x7F)); /* 38 x 0.25 = 9.5pF */
data[3] = 0x80; /* REFOUT signal output 500mVpp */


@ -249,24 +249,24 @@ static void s5p_mfc_handle_frame_all_extracted(struct s5p_mfc_ctx *ctx)
static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx)
{
struct s5p_mfc_dev *dev = ctx->dev;
struct s5p_mfc_buf *dst_buf, *src_buf;
size_t dec_y_addr;
struct s5p_mfc_buf *dst_buf, *src_buf;
u32 dec_y_addr;
unsigned int frame_type;
/* Make sure we actually have a new frame before continuing. */
frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev);
if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED)
return;
dec_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dec_y_adr, dev);
dec_y_addr = (u32)s5p_mfc_hw_call(dev->mfc_ops, get_dec_y_adr, dev);
/* Copy timestamp / timecode from decoded src to dst and set
appropriate flags. */
src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
if (vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0)
== dec_y_addr) {
dst_buf->b->timecode =
src_buf->b->timecode;
u32 addr = (u32)vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0);
if (addr == dec_y_addr) {
dst_buf->b->timecode = src_buf->b->timecode;
dst_buf->b->vb2_buf.timestamp =
src_buf->b->vb2_buf.timestamp;
dst_buf->b->flags &=
@ -302,10 +302,10 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
{
struct s5p_mfc_dev *dev = ctx->dev;
struct s5p_mfc_buf *dst_buf;
size_t dspl_y_addr;
u32 dspl_y_addr;
unsigned int frame_type;
dspl_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
dspl_y_addr = (u32)s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
if (IS_MFCV6_PLUS(dev))
frame_type = s5p_mfc_hw_call(dev->mfc_ops,
get_disp_frame_type, ctx);
@ -324,9 +324,10 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
/* The MFC returns address of the buffer, now we have to
* check which videobuf does it correspond to */
list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
u32 addr = (u32)vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0);
/* Check if this is the buffer we're looking for */
if (vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0)
== dspl_y_addr) {
if (addr == dspl_y_addr) {
list_del(&dst_buf->list);
ctx->dst_queue_cnt--;
dst_buf->b->sequence = ctx->sequence;


@ -209,14 +209,13 @@ static int ti_tscadc_probe(struct platform_device *pdev)
* The TSC_ADC_SS controller design assumes the OCP clock is
* at least 6x faster than the ADC clock.
*/
clk = clk_get(&pdev->dev, "adc_tsc_fck");
clk = devm_clk_get(&pdev->dev, "adc_tsc_fck");
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "failed to get TSC fck\n");
err = PTR_ERR(clk);
goto err_disable_clk;
}
clock_rate = clk_get_rate(clk);
clk_put(clk);
tscadc->clk_div = clock_rate / ADC_CLK;
/* TSCADC_CLKDIV needs to be configured to the value minus 1 */


@ -370,11 +370,10 @@ int scif_bind(scif_epd_t epd, u16 pn)
goto scif_bind_exit;
}
} else {
pn = scif_get_new_port();
if (!pn) {
ret = -ENOSPC;
ret = scif_get_new_port();
if (ret < 0)
goto scif_bind_exit;
}
pn = ret;
}
ep->state = SCIFEP_BOUND;
@ -648,13 +647,12 @@ int __scif_connect(scif_epd_t epd, struct scif_port_id *dst, bool non_block)
err = -EISCONN;
break;
case SCIFEP_UNBOUND:
ep->port.port = scif_get_new_port();
if (!ep->port.port) {
err = -ENOSPC;
} else {
ep->port.node = scif_info.nodeid;
ep->conn_async_state = ASYNC_CONN_IDLE;
}
err = scif_get_new_port();
if (err < 0)
break;
ep->port.port = err;
ep->port.node = scif_info.nodeid;
ep->conn_async_state = ASYNC_CONN_IDLE;
/* Fall through */
case SCIFEP_BOUND:
/*


@ -756,14 +756,14 @@ static int kim_probe(struct platform_device *pdev)
err = gpio_request(kim_gdata->nshutdown, "kim");
if (unlikely(err)) {
pr_err(" gpio %d request failed ", kim_gdata->nshutdown);
return err;
goto err_sysfs_group;
}
/* Configure nShutdown GPIO as output=0 */
err = gpio_direction_output(kim_gdata->nshutdown, 0);
if (unlikely(err)) {
pr_err(" unable to configure gpio %d", kim_gdata->nshutdown);
return err;
goto err_sysfs_group;
}
/* get reference of pdev for request_firmware
*/


@ -1615,8 +1615,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
cond_resched();
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
if (!e) {
err = -ENOMEM;
goto out_free;
}
e->pnum = aeb->pnum;
e->ec = aeb->ec;
@ -1635,8 +1637,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
cond_resched();
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
if (!e) {
err = -ENOMEM;
goto out_free;
}
e->pnum = aeb->pnum;
e->ec = aeb->ec;


@ -3117,7 +3117,6 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
on_each_cpu(mvneta_percpu_enable, pp, true);
mvneta_start_dev(pp);
mvneta_port_up(pp);
netdev_update_features(dev);


@ -22,7 +22,7 @@
#include <linux/mdio-mux.h>
#include <linux/delay.h>
#define MDIO_PARAM_OFFSET 0x00
#define MDIO_PARAM_OFFSET 0x23c
#define MDIO_PARAM_MIIM_CYCLE 29
#define MDIO_PARAM_INTERNAL_SEL 25
#define MDIO_PARAM_BUS_ID 22
@ -30,20 +30,22 @@
#define MDIO_PARAM_PHY_ID 16
#define MDIO_PARAM_PHY_DATA 0
#define MDIO_READ_OFFSET 0x04
#define MDIO_READ_OFFSET 0x240
#define MDIO_READ_DATA_MASK 0xffff
#define MDIO_ADDR_OFFSET 0x08
#define MDIO_ADDR_OFFSET 0x244
#define MDIO_CTRL_OFFSET 0x0C
#define MDIO_CTRL_OFFSET 0x248
#define MDIO_CTRL_WRITE_OP 0x1
#define MDIO_CTRL_READ_OP 0x2
#define MDIO_STAT_OFFSET 0x10
#define MDIO_STAT_OFFSET 0x24c
#define MDIO_STAT_DONE 1
#define BUS_MAX_ADDR 32
#define EXT_BUS_START_ADDR 16
#define MDIO_REG_ADDR_SPACE_SIZE 0x250
struct iproc_mdiomux_desc {
void *mux_handle;
void __iomem *base;
@ -169,6 +171,14 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev)
md->dev = &pdev->dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res->start & 0xfff) {
/* For backward compatibility in case the
* base address is specified with an offset.
*/
dev_info(&pdev->dev, "fix base address in dt-blob\n");
res->start &= ~0xfff;
res->end = res->start + MDIO_REG_ADDR_SPACE_SIZE - 1;
}
md->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(md->base)) {
dev_err(&pdev->dev, "failed to ioremap register\n");


@ -3003,6 +3003,13 @@ static int ath10k_update_channel_list(struct ath10k *ar)
passive = channel->flags & IEEE80211_CHAN_NO_IR;
ch->passive = passive;
/* the firmware is ignoring the "radar" flag of the
* channel and is scanning actively using Probe Requests
* on "Radar detection"/DFS channels which are not
* marked as "available"
*/
ch->passive |= ch->chan_radar;
ch->freq = channel->center_freq;
ch->band_center_freq1 = channel->center_freq;
ch->min_power = 0;


@ -1452,6 +1452,11 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
cfg->keep_alive_pattern_size = __cpu_to_le32(0);
cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1);
cfg->wmi_send_separate = __cpu_to_le32(0);
cfg->num_ocb_vdevs = __cpu_to_le32(0);
cfg->num_ocb_channels = __cpu_to_le32(0);
cfg->num_ocb_schedules = __cpu_to_le32(0);
cfg->host_capab = __cpu_to_le32(0);
ath10k_wmi_put_host_mem_chunks(ar, chunks);


@ -1227,6 +1227,11 @@ struct wmi_tlv_resource_config {
__le32 keep_alive_pattern_size;
__le32 max_tdls_concurrent_sleep_sta;
__le32 max_tdls_concurrent_buffer_sta;
__le32 wmi_send_separate;
__le32 num_ocb_vdevs;
__le32 num_ocb_channels;
__le32 num_ocb_schedules;
__le32 host_capab;
} __packed;
struct wmi_tlv_init_cmd {


@ -2915,16 +2915,19 @@ void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan,
struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
struct ieee80211_channel *channel;
int chan_pwr, new_pwr;
u16 ctl = NO_CTL;
if (!chan)
return;
if (!test)
ctl = ath9k_regd_get_ctl(reg, chan);
channel = chan->chan;
chan_pwr = min_t(int, channel->max_power * 2, MAX_RATE_POWER);
new_pwr = min_t(int, chan_pwr, reg->power_limit);
ah->eep_ops->set_txpower(ah, chan,
ath9k_regd_get_ctl(reg, chan),
ah->eep_ops->set_txpower(ah, chan, ctl,
get_antenna_gain(ah, chan), new_pwr, test);
}


@ -84,7 +84,8 @@ static void ath_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_sta *sta = info->status.status_driver_data[0];
if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
if (info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
IEEE80211_TX_STATUS_EOSP)) {
ieee80211_tx_status(hw, skb);
return;
}


@ -59,7 +59,7 @@ static u32 wlcore_rx_get_align_buf_size(struct wl1271 *wl, u32 pkt_len)
static void wl1271_rx_status(struct wl1271 *wl,
struct wl1271_rx_descriptor *desc,
struct ieee80211_rx_status *status,
u8 beacon)
u8 beacon, u8 probe_rsp)
{
memset(status, 0, sizeof(struct ieee80211_rx_status));
@ -106,6 +106,9 @@ static void wl1271_rx_status(struct wl1271 *wl,
}
}
if (beacon || probe_rsp)
status->boottime_ns = ktime_get_boot_ns();
if (beacon)
wlcore_set_pending_regdomain_ch(wl, (u16)desc->channel,
status->band);
@ -194,7 +197,8 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
if (ieee80211_is_data_present(hdr->frame_control))
is_data = 1;
wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon,
ieee80211_is_probe_resp(hdr->frame_control));
wlcore_hw_set_rx_csum(wl, desc, skb);
seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;


@ -2045,6 +2045,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
if (twa_initialize_device_extension(tw_dev)) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
retval = -ENOMEM;
goto out_free_device_extension;
}
@ -2067,6 +2068,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
tw_dev->base_addr = ioremap(mem_addr, mem_len);
if (!tw_dev->base_addr) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
retval = -ENOMEM;
goto out_release_mem_region;
}
@ -2074,8 +2076,10 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
TW_DISABLE_INTERRUPTS(tw_dev);
/* Initialize the card */
if (twa_reset_sequence(tw_dev, 0))
if (twa_reset_sequence(tw_dev, 0)) {
retval = -ENOMEM;
goto out_iounmap;
}
/* Set host specific parameters */
if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||


@ -1600,6 +1600,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
if (twl_initialize_device_extension(tw_dev)) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Failed to initialize device extension");
retval = -ENOMEM;
goto out_free_device_extension;
}
@ -1614,6 +1615,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
tw_dev->base_addr = pci_iomap(pdev, 1, 0);
if (!tw_dev->base_addr) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to ioremap");
retval = -ENOMEM;
goto out_release_mem_region;
}
@ -1623,6 +1625,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
/* Initialize the card */
if (twl_reset_sequence(tw_dev, 0)) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Controller reset failed during probe");
retval = -ENOMEM;
goto out_iounmap;
}


@ -2281,6 +2281,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
if (tw_initialize_device_extension(tw_dev)) {
printk(KERN_WARNING "3w-xxxx: Failed to initialize device extension.");
retval = -ENOMEM;
goto out_free_device_extension;
}
@ -2295,6 +2296,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
tw_dev->base_addr = pci_resource_start(pdev, 0);
if (!tw_dev->base_addr) {
printk(KERN_WARNING "3w-xxxx: Failed to get io address.");
retval = -ENOMEM;
goto out_release_mem_region;
}


@ -536,7 +536,7 @@ static int inquiry(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (sendbytes > 8) {
memcpy(buf, inquiry_buf, 8);
memcpy(buf + 8, inquiry_string, sendbytes - 8);
strncpy(buf + 8, inquiry_string, sendbytes - 8);
if (pro_formatter_flag) {
/* Additional Length */
buf[4] = 0x33;


@ -1247,7 +1247,7 @@ static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk,
reg = 0;
rtsx_read_register(chip, XD_CTL, &reg);
if (reg & (XD_ECC1_ERROR | XD_ECC2_ERROR)) {
wait_timeout(100);
mdelay(100);
if (detect_card_cd(chip,
XD_CARD) != STATUS_SUCCESS) {


@ -316,6 +316,7 @@ void __transport_register_session(
{
const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
unsigned char buf[PR_REG_ISID_LEN];
unsigned long flags;
se_sess->se_tpg = se_tpg;
se_sess->fabric_sess_ptr = fabric_sess_ptr;
@ -352,7 +353,7 @@ void __transport_register_session(
se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
}
spin_lock_irq(&se_nacl->nacl_sess_lock);
spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
/*
* The se_nacl->nacl_sess pointer will be set to the
* last active I_T Nexus for each struct se_node_acl.
@ -361,7 +362,7 @@ void __transport_register_session(
list_add_tail(&se_sess->sess_acl_list,
&se_nacl->acl_sess_list);
spin_unlock_irq(&se_nacl->nacl_sess_lock);
spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
}
list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);


@ -1913,7 +1913,7 @@ static __init int register_PCI(int i, struct pci_dev *dev)
ByteIO_t UPCIRingInd = 0;
if (!dev || !pci_match_id(rocket_pci_ids, dev) ||
pci_enable_device(dev))
pci_enable_device(dev) || i >= NUM_BOARDS)
return 0;
rcktpt_io_addr[i] = pci_resource_start(dev, 0);
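
The added i >= NUM_BOARDS test stops the probe from indexing past the per-board arrays when the system reports more boards than the driver supports. A standalone sketch of the bounds-check-before-write pattern (NUM_BOARDS and register_board() are illustrative):

    #include <stdio.h>

    #define NUM_BOARDS 8
    static unsigned long io_addr[NUM_BOARDS];

    static int register_board(int i, unsigned long addr)
    {
        if (i >= NUM_BOARDS)
            return 0;       /* refuse, rather than write past the array */
        io_addr[i] = addr;
        return 1;
    }

    int main(void)
    {
        printf("%d\n", register_board(7, 0x1000)); /* 1: last valid slot */
        printf("%d\n", register_board(8, 0x1000)); /* 0: would overflow */
        return 0;
    }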


@ -841,8 +841,6 @@ int __uio_register_device(struct module *owner,
if (ret)
goto err_uio_dev_add_attributes;
info->uio_dev = idev;
if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
/*
* Note that we deliberately don't use devm_request_irq
@ -858,6 +856,7 @@ int __uio_register_device(struct module *owner,
goto err_request_irq;
}
info->uio_dev = idev;
return 0;
err_request_irq:


@ -3681,6 +3681,9 @@ int xhci_disable_slot(struct xhci_hcd *xhci, struct xhci_command *command,
return -ENOMEM;
spin_lock_irqsave(&xhci->lock, flags);
virt_dev->udev = NULL;
/* Don't disable the slot if the host controller is dead. */
state = readl(&xhci->op_regs->status);
if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||


@ -14,6 +14,7 @@
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/magic.h>
/* This is the range of ioctl() numbers we claim as ours */
#define AUTOFS_IOC_FIRST AUTOFS_IOC_READY
@ -123,7 +124,8 @@ struct autofs_sb_info {
static inline struct autofs_sb_info *autofs4_sbi(struct super_block *sb)
{
return (struct autofs_sb_info *)(sb->s_fs_info);
return sb->s_magic != AUTOFS_SUPER_MAGIC ?
NULL : (struct autofs_sb_info *)(sb->s_fs_info);
}
static inline struct autofs_info *autofs4_dentry_ino(struct dentry *dentry)


@ -14,7 +14,6 @@
#include <linux/pagemap.h>
#include <linux/parser.h>
#include <linux/bitops.h>
#include <linux/magic.h>
#include "autofs_i.h"
#include <linux/module.h>


@ -1665,7 +1665,7 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct super_block *sb = sbi->sb;
__u32 in;
int ret;
int ret = 0;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;


@ -877,7 +877,13 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
goto next;
sum = page_address(sum_page);
f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));
if (type != GET_SUM_TYPE((&sum->footer))) {
f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent segment (%u) "
"type [%d, %d] in SSA and SIT",
segno, type, GET_SUM_TYPE((&sum->footer)));
set_sbi_flag(sbi, SBI_NEED_FSCK);
goto next;
}
/*
* this is to avoid deadlock:


@ -124,6 +124,16 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
if (err)
return err;
if (unlikely(dn->data_blkaddr != NEW_ADDR)) {
f2fs_put_dnode(dn);
set_sbi_flag(fio.sbi, SBI_NEED_FSCK);
f2fs_msg(fio.sbi->sb, KERN_WARNING,
"%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
"run fsck to fix.",
__func__, dn->inode->i_ino, dn->data_blkaddr);
return -EINVAL;
}
f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));
read_inline_data(page, dn->inode_page);
@ -351,6 +361,17 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
if (err)
goto out;
if (unlikely(dn.data_blkaddr != NEW_ADDR)) {
f2fs_put_dnode(&dn);
set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK);
f2fs_msg(F2FS_P_SB(page)->sb, KERN_WARNING,
"%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
"run fsck to fix.",
__func__, dir->i_ino, dn.data_blkaddr);
err = -EINVAL;
goto out;
}
f2fs_wait_on_page_writeback(page, DATA, true);
zero_user_segment(page, MAX_INLINE_DATA, PAGE_SIZE);


@ -1463,7 +1463,9 @@ next_step:
!is_cold_node(page)))
continue;
lock_node:
if (!trylock_page(page))
if (wbc->sync_mode == WB_SYNC_ALL)
lock_page(page);
else if (!trylock_page(page))
continue;
if (unlikely(page->mapping != NODE_MAPPING(sbi))) {


@ -386,6 +386,8 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
if (test_and_clear_bit(segno, free_i->free_segmap)) {
free_i->free_segments++;
if (IS_CURSEC(sbi, secno))
goto skip_free;
next = find_next_bit(free_i->free_segmap,
start_segno + sbi->segs_per_sec, start_segno);
if (next >= start_segno + sbi->segs_per_sec) {
@ -393,6 +395,7 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
free_i->free_sections++;
}
}
skip_free:
spin_unlock(&free_i->segmap_lock);
}


@ -1425,12 +1425,17 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
unsigned int main_segs, blocks_per_seg;
unsigned int sit_segs, nat_segs;
unsigned int sit_bitmap_size, nat_bitmap_size;
unsigned int log_blocks_per_seg;
int i;
total = le32_to_cpu(raw_super->segment_count);
fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
fsmeta += le32_to_cpu(raw_super->segment_count_sit);
fsmeta += le32_to_cpu(raw_super->segment_count_nat);
sit_segs = le32_to_cpu(raw_super->segment_count_sit);
fsmeta += sit_segs;
nat_segs = le32_to_cpu(raw_super->segment_count_nat);
fsmeta += nat_segs;
fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
fsmeta += le32_to_cpu(raw_super->segment_count_ssa);
@ -1451,6 +1456,18 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi)
return 1;
}
sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
f2fs_msg(sbi->sb, KERN_ERR,
"Wrong bitmap size: sit: %u, nat:%u",
sit_bitmap_size, nat_bitmap_size);
return 1;
}
if (unlikely(f2fs_cp_error(sbi))) {
f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
return 1;
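
The new check recomputes the expected bitmap sizes from the superblock geometry: the formula takes half of the SIT/NAT segments (one of the two on-disk copies), shifts by log_blocks_per_seg to get a block count, and divides by 8 for one bitmap bit per block. A quick worked example with made-up values:

    #include <stdio.h>

    int main(void)
    {
        /* illustrative values, not taken from a real image */
        unsigned int sit_segs = 6, nat_segs = 4, log_blocks_per_seg = 9;

        unsigned int sit_expect = ((sit_segs / 2) << log_blocks_per_seg) / 8;
        unsigned int nat_expect = ((nat_segs / 2) << log_blocks_per_seg) / 8;

        /* prints: sit: 192 bytes, nat: 128 bytes */
        printf("sit: %u bytes, nat: %u bytes\n", sit_expect, nat_expect);
        return 0;
    }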


@ -175,9 +175,9 @@ static u32 pnfs_check_callback_stateid(struct pnfs_layout_hdr *lo,
{
u32 oldseq, newseq;
/* Is the stateid still not initialised? */
/* Is the stateid not initialised? */
if (!pnfs_layout_is_valid(lo))
return NFS4ERR_DELAY;
return NFS4ERR_NOMATCHING_LAYOUT;
/* Mismatched stateid? */
if (!nfs4_stateid_match_other(&lo->plh_stateid, new))


@ -968,16 +968,21 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
if (hdr_arg.minorversion == 0) {
cps.clp = nfs4_find_client_ident(SVC_NET(rqstp), hdr_arg.cb_ident);
if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp))
if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp)) {
if (cps.clp)
nfs_put_client(cps.clp);
goto out_invalidcred;
}
}
cps.minorversion = hdr_arg.minorversion;
hdr_res.taglen = hdr_arg.taglen;
hdr_res.tag = hdr_arg.tag;
if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0)
if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0) {
if (cps.clp)
nfs_put_client(cps.clp);
return rpc_system_err;
}
while (status == 0 && nops != hdr_arg.nops) {
status = process_op(nops, rqstp, &xdr_in,
argp, &xdr_out, resp, &cps);


@ -396,7 +396,7 @@ struct kioctx_table;
struct mm_struct {
struct vm_area_struct *mmap; /* list of VMAs */
struct rb_root mm_rb;
u32 vmacache_seqnum; /* per-thread vmacache */
u64 vmacache_seqnum; /* per-thread vmacache */
#ifdef CONFIG_MMU
unsigned long (*get_unmapped_area) (struct file *filp,
unsigned long addr, unsigned long len,
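
This widening (continued in the task_struct and vmacache hunks below) removes the u32 overflow case that vmacache_flush_all() existed to handle: a wrapped 32-bit sequence number can make a stale per-thread cache entry look current again. A small demonstration of the wrap:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t seq32 = UINT32_MAX;
        uint64_t seq64 = UINT32_MAX;

        seq32++;    /* wraps to 0: a stale entry tagged 0 matches again */
        seq64++;    /* 4294967296: no wrap in any realistic lifetime */

        printf("u32: %u, u64: %llu\n", seq32, (unsigned long long)seq64);
        return 0;
    }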


@ -1559,7 +1559,7 @@ struct task_struct {
struct mm_struct *mm, *active_mm;
/* per-thread vma caching */
u32 vmacache_seqnum;
u64 vmacache_seqnum;
struct vm_area_struct *vmacache[VMACACHE_SIZE];
#if defined(SPLIT_RSS_COUNTING)
struct task_rss_stat rss_stat;


@ -97,7 +97,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
#ifdef CONFIG_DEBUG_VM_VMACACHE
VMACACHE_FIND_CALLS,
VMACACHE_FIND_HITS,
VMACACHE_FULL_FLUSHES,
#endif
NR_VM_EVENT_ITEMS
};


@ -15,7 +15,6 @@ static inline void vmacache_flush(struct task_struct *tsk)
memset(tsk->vmacache, 0, sizeof(tsk->vmacache));
}
extern void vmacache_flush_all(struct mm_struct *mm);
extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
unsigned long addr);
@ -29,10 +28,6 @@ extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
static inline void vmacache_invalidate(struct mm_struct *mm)
{
mm->vmacache_seqnum++;
/* deal with overflows */
if (unlikely(mm->vmacache_seqnum == 0))
vmacache_flush_all(mm);
}
#endif /* __LINUX_VMACACHE_H */


@ -882,13 +882,13 @@ struct ethtool_rx_flow_spec {
static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie)
{
return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie;
};
}
static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie)
{
return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >>
ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
};
}
/**
* struct ethtool_rxnfc - command to get or set RX flow classification rules


@ -1532,6 +1532,18 @@ static __latent_entropy struct task_struct *copy_process(
if (!p)
goto fork_out;
/*
* This _must_ happen before we call free_task(), i.e. before we jump
* to any of the bad_fork_* labels. This is to avoid freeing
* p->set_child_tid which is (ab)used as a kthread's data pointer for
* kernel threads (PF_KTHREAD).
*/
p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
/*
* Clear TID on mm_release()?
*/
p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
ftrace_graph_init_task(p);
rt_mutex_init_task(p);
@ -1693,11 +1705,6 @@ static __latent_entropy struct task_struct *copy_process(
}
}
p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
/*
* Clear TID on mm_release()?
*/
p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
#ifdef CONFIG_BLOCK
p->plug = NULL;
#endif


@ -104,6 +104,19 @@ bool osq_lock(struct optimistic_spin_queue *lock)
prev = decode_cpu(old);
node->prev = prev;
/*
* osq_lock() unqueue
*
* node->prev = prev osq_wait_next()
* WMB MB
* prev->next = node next->prev = prev // unqueue-C
*
* Here 'node->prev' and 'next->prev' are the same variable and we need
* to ensure these stores happen in-order to avoid corrupting the list.
*/
smp_wmb();
WRITE_ONCE(prev->next, node);
/*


@ -573,6 +573,33 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
unsigned long flags;
WAKE_Q(wake_q);
/*
* __rwsem_down_write_failed_common(sem)
* rwsem_optimistic_spin(sem)
* osq_unlock(sem->osq)
* ...
* atomic_long_add_return(&sem->count)
*
* - VS -
*
* __up_write()
* if (atomic_long_sub_return_release(&sem->count) < 0)
* rwsem_wake(sem)
* osq_is_locked(&sem->osq)
*
* And __up_write() must observe !osq_is_locked() when it observes the
* atomic_long_add_return() in order to not miss a wakeup.
*
* This boils down to:
*
* [S.rel] X = 1 [RmW] r0 = (Y += 0)
* MB RMB
* [RmW] Y += 1 [L] r1 = X
*
* exists (r0=1 /\ r1=0)
*/
smp_rmb();
/*
* If a spinner is present, it is not necessary to do the wakeup.
* Try to do wakeup only if the trylock succeeds to minimize


@ -1649,6 +1649,22 @@ static inline void __run_timers(struct timer_base *base)
spin_lock_irq(&base->lock);
/*
* timer_base::must_forward_clk must be cleared before running
* timers so that any timer functions that call mod_timer() will
* not try to forward the base. Idle tracking / clock forwarding
* logic is only used with BASE_STD timers.
*
* The must_forward_clk flag is cleared unconditionally also for
* the deferrable base. The deferrable base is not affected by idle
* tracking and never forwarded, so clearing the flag is a NOOP.
*
* The fact that the deferrable base is never forwarded can cause
* large variations in granularity for deferrable timers, but they
* can be deferred for long periods due to idle anyway.
*/
base->must_forward_clk = false;
while (time_after_eq(jiffies, base->clk)) {
levels = collect_expired_timers(base, heads);
@ -1668,19 +1684,6 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
{
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
/*
* must_forward_clk must be cleared before running timers so that any
* timer functions that call mod_timer will not try to forward the
* base. idle trcking / clock forwarding logic is only used with
* BASE_STD timers.
*
* The deferrable base does not do idle tracking at all, so we do
* not forward it. This can result in very large variations in
* granularity for deferrable timers, but they can be deferred for
* long periods due to idle.
*/
base->must_forward_clk = false;
__run_timers(base);
if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));


@ -95,7 +95,7 @@ EXPORT_SYMBOL(dump_vma);
void dump_mm(const struct mm_struct *mm)
{
pr_emerg("mm %p mmap %p seqnum %d task_size %lu\n"
pr_emerg("mm %p mmap %p seqnum %llu task_size %lu\n"
#ifdef CONFIG_MMU
"get_unmapped_area %p\n"
#endif
@ -125,7 +125,7 @@ void dump_mm(const struct mm_struct *mm)
#endif
"def_flags: %#lx(%pGv)\n",
mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
#ifdef CONFIG_MMU
mm->get_unmapped_area,
#endif


@ -5,44 +5,6 @@
#include <linux/mm.h>
#include <linux/vmacache.h>
/*
* Flush vma caches for threads that share a given mm.
*
* The operation is safe because the caller holds the mmap_sem
* exclusively and other threads accessing the vma cache will
* have mmap_sem held at least for read, so no extra locking
* is required to maintain the vma cache.
*/
void vmacache_flush_all(struct mm_struct *mm)
{
struct task_struct *g, *p;
count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);
/*
* Single threaded tasks need not iterate the entire
* list of process. We can avoid the flushing as well
* since the mm's seqnum was increased and don't have
* to worry about other threads' seqnum. Current's
* flush will occur upon the next lookup.
*/
if (atomic_read(&mm->mm_users) == 1)
return;
rcu_read_lock();
for_each_process_thread(g, p) {
/*
* Only flush the vmacache pointers as the
* mm seqnum is already set and curr's will
* be set upon invalidation when the next
* lookup is done.
*/
if (mm == p->mm)
vmacache_flush(p);
}
rcu_read_unlock();
}
/*
* This task may be accessing a foreign mm via (for example)
* get_user_pages()->find_vma(). The vmacache is task-local and this


@ -3123,6 +3123,7 @@ static bool zone_balanced(struct zone *zone, int order, int classzone_idx)
*/
clear_bit(PGDAT_CONGESTED, &zone->zone_pgdat->flags);
clear_bit(PGDAT_DIRTY, &zone->zone_pgdat->flags);
clear_bit(PGDAT_WRITEBACK, &zone->zone_pgdat->flags);
return true;
}
@ -3300,7 +3301,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
* If we're getting trouble reclaiming, start doing writepage
* even in laptop mode.
*/
if (sc.priority < DEF_PRIORITY - 2 || !pgdat_reclaimable(pgdat))
if (sc.priority < DEF_PRIORITY - 2)
sc.may_writepage = 1;
/* Call soft limit reclaim before calling shrink_node. */


@ -774,7 +774,7 @@ static int hidp_setup_hid(struct hidp_session *session,
hid->version = req->version;
hid->country = req->country;
strncpy(hid->name, req->name, sizeof(req->name) - 1);
strncpy(hid->name, req->name, sizeof(hid->name));
snprintf(hid->phys, sizeof(hid->phys), "%pMR",
&l2cap_pi(session->ctrl_sock->sk)->chan->src);
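
The copy is now bounded by the destination, sizeof(hid->name), rather than sizeof(req->name) - 1; bounding a strncpy() by the source size overflows the destination whenever the source field is larger. A standalone illustration (the field sizes are made up):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char src[16] = "0123456789abcde";   /* source field */
        char dst[8];

        /* bound by the destination; sizeof(src) - 1 would overflow dst */
        strncpy(dst, src, sizeof(dst) - 1);
        dst[sizeof(dst) - 1] = '\0';        /* strncpy may not terminate */
        printf("%s\n", dst);                /* "0123456" */
        return 0;
    }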


@ -1764,7 +1764,7 @@ static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app,
if (itr->app.selector == app->selector &&
itr->app.protocol == app->protocol &&
itr->ifindex == ifindex &&
(!prio || itr->app.priority == prio))
((prio == -1) || itr->app.priority == prio))
return itr;
}
@ -1799,7 +1799,8 @@ u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
u8 prio = 0;
spin_lock_bh(&dcb_lock);
if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
itr = dcb_app_lookup(app, dev->ifindex, -1);
if (itr)
prio = itr->app.priority;
spin_unlock_bh(&dcb_lock);
@ -1827,7 +1828,8 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new)
spin_lock_bh(&dcb_lock);
/* Search for existing match and replace */
if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) {
itr = dcb_app_lookup(new, dev->ifindex, -1);
if (itr) {
if (new->priority)
itr->app.priority = new->priority;
else {
@ -1860,7 +1862,8 @@ u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
u8 prio = 0;
spin_lock_bh(&dcb_lock);
if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
itr = dcb_app_lookup(app, dev->ifindex, -1);
if (itr)
prio |= 1 << itr->app.priority;
spin_unlock_bh(&dcb_lock);
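
Priority 0 is a valid DCB app priority, so using 0 as the wildcard made entries with priority 0 impossible to look up deliberately; the patch moves the wildcard to -1, a value no real priority can take. A compact illustration of the sentinel choice (prio_matches() is an illustrative stand-in for the lookup test):

    #include <stdio.h>

    /* -1 means "any priority"; 0 is a real, matchable priority */
    static int prio_matches(int entry_prio, int wanted)
    {
        return wanted == -1 || entry_prio == wanted;
    }

    int main(void)
    {
        printf("%d\n", prio_matches(0, -1)); /* 1: wildcard still works */
        printf("%d\n", prio_matches(0, 0));  /* 1: priority 0 now matchable */
        printf("%d\n", prio_matches(3, 0));  /* 0: 0 no longer a wildcard */
        return 0;
    }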


@ -877,7 +877,7 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
return ERR_PTR(-EFAULT);
strlcpy(info->name, compat_tmp.name, sizeof(info->name));
memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1);
info->num_counters = compat_tmp.num_counters;
user += sizeof(compat_tmp);
} else
@ -890,9 +890,9 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
if (copy_from_user(info, user, sizeof(*info)) != 0)
return ERR_PTR(-EFAULT);
info->name[sizeof(info->name) - 1] = '\0';
user += sizeof(*info);
}
info->name[sizeof(info->name) - 1] = '\0';
size = sizeof(struct xt_counters);
size *= info->num_counters;


@ -9481,6 +9481,9 @@ static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info)
if (err)
return err;
if (!setup.chandef.chan)
return -EINVAL;
err = validate_beacon_tx_rate(rdev, setup.chandef.chan->band,
&setup.beacon_rate);
if (err)


@ -348,27 +348,26 @@ static struct avc_xperms_decision_node
struct avc_xperms_decision_node *xpd_node;
struct extended_perms_decision *xpd;
xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep,
GFP_ATOMIC | __GFP_NOMEMALLOC);
xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep, GFP_NOWAIT);
if (!xpd_node)
return NULL;
xpd = &xpd_node->xpd;
if (which & XPERMS_ALLOWED) {
xpd->allowed = kmem_cache_zalloc(avc_xperms_data_cachep,
GFP_ATOMIC | __GFP_NOMEMALLOC);
GFP_NOWAIT);
if (!xpd->allowed)
goto error;
}
if (which & XPERMS_AUDITALLOW) {
xpd->auditallow = kmem_cache_zalloc(avc_xperms_data_cachep,
GFP_ATOMIC | __GFP_NOMEMALLOC);
GFP_NOWAIT);
if (!xpd->auditallow)
goto error;
}
if (which & XPERMS_DONTAUDIT) {
xpd->dontaudit = kmem_cache_zalloc(avc_xperms_data_cachep,
GFP_ATOMIC | __GFP_NOMEMALLOC);
GFP_NOWAIT);
if (!xpd->dontaudit)
goto error;
}
@ -396,8 +395,7 @@ static struct avc_xperms_node *avc_xperms_alloc(void)
{
struct avc_xperms_node *xp_node;
xp_node = kmem_cache_zalloc(avc_xperms_cachep,
GFP_ATOMIC|__GFP_NOMEMALLOC);
xp_node = kmem_cache_zalloc(avc_xperms_cachep, GFP_NOWAIT);
if (!xp_node)
return xp_node;
INIT_LIST_HEAD(&xp_node->xpd_head);
@ -550,7 +548,7 @@ static struct avc_node *avc_alloc_node(void)
{
struct avc_node *node;
node = kmem_cache_zalloc(avc_node_cachep, GFP_ATOMIC|__GFP_NOMEMALLOC);
node = kmem_cache_zalloc(avc_node_cachep, GFP_NOWAIT);
if (!node)
goto out;


@ -4025,7 +4025,8 @@ void snd_hda_bus_reset_codecs(struct hda_bus *bus)
list_for_each_codec(codec, bus) {
/* FIXME: maybe a better way needed for forced reset */
cancel_delayed_work_sync(&codec->jackpoll_work);
if (current_work() != &codec->jackpoll_work.work)
cancel_delayed_work_sync(&codec->jackpoll_work);
#ifdef CONFIG_PM
if (hda_codec_is_power_on(codec)) {
hda_call_codec_suspend(codec);


@ -22,7 +22,9 @@ static inline unsigned long long rdclock(void)
return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}
#ifndef MAX_NR_CPUS
#define MAX_NR_CPUS 1024
#endif
extern const char *input_name;
extern bool perf_host, perf_guest;