Merge branch 'core' into next

* core: (8 commits)
  Revert "jffs2: Fix possible null-pointer dereferences in jffs2_add_frag_to_fragtree()"
  of: of_reserved_mem: Ensure cma reserved region not cross the low/high memory
  mm: Re-export ioremap_page_range
  nand: raw: workaround for EDO high speed mode
  cgroup/bfq: revert bfq.weight symlink change
  ...
commit 8c64fabeb0
Author: Dong Aisheng
Date:   2019-12-02 18:02:05 +08:00

8 changed files with 112 additions and 9 deletions


@@ -0,0 +1,13 @@
+What:		/sys/devices/.../consumers
+Date:		October 2018
+Contact:	Ioana Ciornei <ioana.ciornei@nxp.com>
+Description:
+		Read-only attribute that lists the current "consumers" of
+		a specific device.
+
+What:		/sys/devices/.../suppliers
+Date:		October 2018
+Contact:	Ioana Ciornei <ioana.ciornei@nxp.com>
+Description:
+		Read-only attribute that lists the current "suppliers" of
+		a specific device.
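
For context, a minimal userspace sketch of how these attributes might be read; the device path below is hypothetical and depends on the bus and device names of a given system.

#include <stdio.h>

int main(void)
{
	char line[128];
	/* Hypothetical path; substitute a real device under /sys/devices. */
	FILE *f = fopen("/sys/devices/platform/example.supplier/consumers", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		printf("consumer: %s", line);
	fclose(f);
	return 0;
}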


@@ -1320,6 +1320,34 @@ static ssize_t online_store(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR_RW(online);
 
+static ssize_t suppliers_show(struct device *dev, struct device_attribute *attr,
+			      char *buf)
+{
+	struct device_link *link;
+	size_t count = 0;
+
+	list_for_each_entry(link, &dev->links.suppliers, c_node)
+		count += scnprintf(buf + count, PAGE_SIZE - count, "%s\n",
+				   dev_name(link->supplier));
+
+	return count;
+}
+static DEVICE_ATTR_RO(suppliers);
+
+static ssize_t consumers_show(struct device *dev, struct device_attribute *attr,
+			      char *buf)
+{
+	struct device_link *link;
+	size_t count = 0;
+
+	list_for_each_entry(link, &dev->links.consumers, s_node)
+		count += scnprintf(buf + count, PAGE_SIZE - count, "%s\n",
+				   dev_name(link->consumer));
+
+	return count;
+}
+static DEVICE_ATTR_RO(consumers);
+
 int device_add_groups(struct device *dev, const struct attribute_group **groups)
 {
 	return sysfs_create_groups(&dev->kobj, groups);
@@ -1491,8 +1519,20 @@ static int device_add_attrs(struct device *dev)
 			goto err_remove_dev_groups;
 	}
 
+	error = device_create_file(dev, &dev_attr_suppliers);
+	if (error)
+		goto err_remove_online;
+
+	error = device_create_file(dev, &dev_attr_consumers);
+	if (error)
+		goto err_remove_suppliers;
+
 	return 0;
 
+ err_remove_suppliers:
+	device_remove_file(dev, &dev_attr_suppliers);
+ err_remove_online:
+	device_remove_file(dev, &dev_attr_online);
 err_remove_dev_groups:
	device_remove_groups(dev, dev->groups);
 err_remove_type_groups:
@@ -1510,6 +1550,8 @@ static void device_remove_attrs(struct device *dev)
 	struct class *class = dev->class;
 	const struct device_type *type = dev->type;
 
+	device_remove_file(dev, &dev_attr_consumers);
+	device_remove_file(dev, &dev_attr_suppliers);
 	device_remove_file(dev, &dev_attr_online);
 	device_remove_groups(dev, dev->groups);
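
These attributes are populated by device links. A minimal sketch, assuming a hypothetical consumer driver whose probe path wants its supplier listed in the new files; device_link_add() is the existing in-kernel API that creates the link, and the names below are illustrative only:

#include <linux/device.h>

/* Hypothetical probe helper: 'dev' is the consumer, 'supplier_dev' was
 * looked up earlier (e.g. via a phandle). */
static int example_link_supplier(struct device *dev, struct device *supplier_dev)
{
	struct device_link *link;

	/* Once created, the link shows up in <consumer>/suppliers and
	 * <supplier>/consumers via the attributes added above. */
	link = device_link_add(dev, supplier_dev, DL_FLAG_AUTOREMOVE_CONSUMER);
	if (!link)
		return -EINVAL;
	return 0;
}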


@@ -930,7 +930,8 @@ static int nand_init_data_interface(struct nand_chip *chip)
 		modes = GENMASK(chip->onfi_timing_mode_default, 0);
 	}
 
-	for (mode = fls(modes) - 1; mode >= 0; mode--) {
+	/* for (mode = fls(modes) - 1; mode >= 0; mode--) { */
+	for (mode = 1; mode >= 0; mode--) {
 		ret = onfi_fill_data_interface(chip, NAND_SDR_IFACE, mode);
 		if (ret)
 			continue;
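
For clarity, a small standalone sketch (with hypothetical values) of the selection arithmetic the workaround bypasses: GENMASK(n, 0) builds a mask of every mode up to the chip's default, and fls() picks the highest set bit, so the stock loop probes the fastest advertised mode first.

#include <linux/bits.h>
#include <linux/bitops.h>

static int fastest_advertised_mode(void)
{
	/* Suppose the chip advertises ONFI timing modes 0..4. */
	unsigned int modes = GENMASK(4, 0);	/* 0b11111 */

	/* fls(0b11111) == 5, so the stock loop would start probing at
	 * mode 4, the high-speed EDO territory; the workaround above
	 * starts at mode 1 instead. */
	return fls(modes) - 1;			/* == 4 */
}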


@@ -26,11 +26,12 @@
 static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
 static int reserved_mem_count;
 
-static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
-	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
-	phys_addr_t *res_base)
+static int __init early_init_dt_alloc_reserved_memory_arch(unsigned long node,
+	phys_addr_t size, phys_addr_t align, phys_addr_t start,
+	phys_addr_t end, bool nomap, phys_addr_t *res_base)
 {
 	phys_addr_t base;
+	phys_addr_t highmem_start = __pa(high_memory - 1) + 1;
 
 	end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
 	align = !align ? SMP_CACHE_BYTES : align;
@@ -38,6 +39,24 @@ static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
 	if (!base)
 		return -ENOMEM;
 
+	/*
+	 * Sanity check for the cma reserved region: if the reserved region
+	 * crosses the low/high memory boundary, try to fix it up and then
+	 * fall back to allocating the cma region from the low memory space.
+	 */
+	if (IS_ENABLED(CONFIG_CMA)
+	    && of_flat_dt_is_compatible(node, "shared-dma-pool")
+	    && of_get_flat_dt_prop(node, "reusable", NULL) && !nomap) {
+		if (base < highmem_start && (base + size) > highmem_start) {
+			base = memblock_find_in_range(start, highmem_start,
+						      size, align);
+			if (!base)
+				return -ENOMEM;
+		}
+	}
+
 	*res_base = base;
 	if (nomap)
 		return memblock_remove(base, size);
@@ -131,8 +150,8 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
 			end = start + dt_mem_next_cell(dt_root_size_cells,
 						       &prop);
 
-			ret = early_init_dt_alloc_reserved_memory_arch(size,
-					align, start, end, nomap, &base);
+			ret = early_init_dt_alloc_reserved_memory_arch(node,
+					size, align, start, end, nomap, &base);
 			if (ret == 0) {
 				pr_debug("allocated memory for '%s' node: base %pa, size %ld MiB\n",
 					 uname, &base,
@@ -143,8 +162,8 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
 		}
 	} else {
-		ret = early_init_dt_alloc_reserved_memory_arch(size, align,
-					0, 0, nomap, &base);
+		ret = early_init_dt_alloc_reserved_memory_arch(node,
+					size, align, 0, 0, nomap, &base);
 
 		if (ret == 0)
 			pr_debug("allocated memory for '%s' node: base %pa, size %ld MiB\n",
 				 uname, &base, (unsigned long)size / SZ_1M);
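
The new check only re-places regions that straddle the boundary; regions wholly in low or wholly in high memory are left alone. A small illustration of the predicate with hypothetical addresses:

#include <linux/types.h>

/* Illustration only: true when [base, base + size) straddles the
 * low/high memory boundary, the case the code above fixes up. */
static bool crosses_highmem(phys_addr_t base, phys_addr_t size,
			    phys_addr_t highmem_start)
{
	return base < highmem_start && (base + size) > highmem_start;
}

/* e.g. with highmem_start = 0x30000000, a 512 MiB CMA region at
 * 0x28000000 ends at 0x48000000 and crosses, so it gets re-placed
 * below 0x30000000; the same region at 0x08000000 does not cross. */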


@@ -226,7 +226,7 @@ static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *r
 		lastend = this->ofs + this->size;
 	} else {
 		dbg_fragtree2("lookup gave no frag\n");
-		return -EINVAL;
+		lastend = 0;
 	}
 
 	/* See if we ran off the end of the fragtree */


@@ -1022,6 +1022,7 @@ void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt);
 void skb_tx_error(struct sk_buff *skb);
 void consume_skb(struct sk_buff *skb);
 void __consume_stateless_skb(struct sk_buff *skb);
+void skb_recycle(struct sk_buff *skb);
 void __kfree_skb(struct sk_buff *skb);
 extern struct kmem_cache *skbuff_head_cache;


@@ -231,3 +231,4 @@ int ioremap_page_range(unsigned long addr,
 
 	return err;
 }
+EXPORT_SYMBOL_GPL(ioremap_page_range);
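
Re-adding the export lets modules populate MMIO mappings into a vmalloc-area range themselves. A minimal sketch, assuming a hypothetical 4 KiB register window at EXAMPLE_PHYS; get_vm_area() and free_vm_area() are the existing vmalloc helpers:

#include <linux/vmalloc.h>
#include <linux/io.h>

#define EXAMPLE_PHYS	0x30a40000UL	/* hypothetical register block */

static void __iomem *example_map_regs(void)
{
	struct vm_struct *area;
	unsigned long addr;

	area = get_vm_area(PAGE_SIZE, VM_IOREMAP);
	if (!area)
		return NULL;

	addr = (unsigned long)area->addr;
	/* Populate the page tables for the reserved range; this is the
	 * call that needs the export when built as a module. */
	if (ioremap_page_range(addr, addr + PAGE_SIZE, EXAMPLE_PHYS,
			       pgprot_noncached(PAGE_KERNEL))) {
		free_vm_area(area);
		return NULL;
	}
	return (void __iomem *)addr;
}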


@@ -919,6 +919,32 @@ void napi_consume_skb(struct sk_buff *skb, int budget)
 }
 EXPORT_SYMBOL(napi_consume_skb);
 
+/**
+ * skb_recycle - clean up an skb for reuse
+ * @skb: buffer
+ *
+ * Recycles the skb to be reused as a receive buffer. This
+ * function does any necessary reference count dropping, and
+ * cleans up the skbuff as if it just came from __alloc_skb().
+ */
+void skb_recycle(struct sk_buff *skb)
+{
+	struct skb_shared_info *shinfo;
+	u8 head_frag = skb->head_frag;
+
+	skb_release_head_state(skb);
+
+	shinfo = skb_shinfo(skb);
+	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+	atomic_set(&shinfo->dataref, 1);
+
+	memset(skb, 0, offsetof(struct sk_buff, tail));
+	skb->data = skb->head + NET_SKB_PAD;
+	skb->head_frag = head_frag;
+	skb_reset_tail_pointer(skb);
+}
+EXPORT_SYMBOL(skb_recycle);
+
 /* Make sure a field is enclosed inside headers_start/headers_end section */
 #define CHECK_SKB_FIELD(field)					\
 	BUILD_BUG_ON(offsetof(struct sk_buff, field) <		\
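
A usage sketch for the re-added helper, assuming a hypothetical driver with private state struct my_priv and a refill helper my_refill_rx_ring(); the skb_shared()/skb_cloned() checks ensure the buffer is exclusively owned before it is reset:

#include <linux/skbuff.h>
#include <linux/netdevice.h>

struct my_priv {			/* hypothetical driver state */
	unsigned int rx_buf_size;
};

static void my_refill_rx_ring(struct my_priv *priv, struct sk_buff *skb);
					/* hypothetical refill helper */

static void my_tx_complete(struct my_priv *priv, struct sk_buff *skb)
{
	/* Recycle only buffers we own outright and that are still big
	 * enough to serve as receive buffers. */
	if (!skb_shared(skb) && !skb_cloned(skb) &&
	    skb_end_offset(skb) >= priv->rx_buf_size) {
		skb_recycle(skb);
		my_refill_rx_ring(priv, skb);
	} else {
		dev_kfree_skb_any(skb);
	}
}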