slub: make ->cpu_partial unsigned int

commit e5d9998f3e upstream.

	/*
	 * cpu_partial determines the maximum number of objects
	 * kept in the per cpu partial lists of a processor.
	 */

Can't be negative.

Link: http://lkml.kernel.org/r/20180305200730.15812-15-adobriyan@gmail.com
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: zhong jiang <zhongjiang@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
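The field being converted here is also what SLUB exposes per cache through sysfs, and the cpu_partial_store() hunk below is the handler that parses writes to it. A rough userspace sketch of tuning that knob, assuming root privileges and using the illustrative cache name "kmalloc-64" and value 30 (neither comes from this patch):

	#include <stdio.h>
	#include <stdlib.h>

	/*
	 * Write a new per-cpu partial object limit for one SLUB cache.
	 * After this patch the kernel parses the string with kstrtouint(),
	 * so only non-negative decimal values are meaningful.
	 */
	int main(void)
	{
		const char *path = "/sys/kernel/slab/kmalloc-64/cpu_partial";
		unsigned int objects = 30;	/* illustrative value */
		FILE *f = fopen(path, "w");

		if (!f) {
			perror("fopen");
			return EXIT_FAILURE;
		}
		fprintf(f, "%u\n", objects);
		if (fclose(f) != 0) {		/* sysfs reports write errors on flush/close */
			perror("fclose");
			return EXIT_FAILURE;
		}
		return EXIT_SUCCESS;
	}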
---
 include/linux/slub_def.h | 3 ++-
 mm/slub.c                | 6 +++---
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -67,7 +67,8 @@ struct kmem_cache {
 	int size;		/* The size of an object including meta data */
 	int object_size;	/* The size of an object without meta data */
 	int offset;		/* Free pointer offset. */
-	int cpu_partial;	/* Number of per cpu partial objects to keep around */
+	/* Number of per cpu partial objects to keep around */
+	unsigned int cpu_partial;
 	struct kmem_cache_order_objects oo;
 
 	/* Allocation and freeing of slabs */

diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1661,7 +1661,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 {
 	struct page *page, *page2;
 	void *object = NULL;
-	int available = 0;
+	unsigned int available = 0;
 	int objects;
 
 	/*
@@ -4674,10 +4674,10 @@ static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
 				 size_t length)
 {
-	unsigned long objects;
+	unsigned int objects;
 	int err;
 
-	err = kstrtoul(buf, 10, &objects);
+	err = kstrtouint(buf, 10, &objects);
 	if (err)
 		return err;
 	if (objects && !kmem_cache_has_cpu_partial(s))
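
Converting the local available counter in get_partial_node() together with the struct field keeps both sides of the bookkeeping in one signedness. That matters because of C's usual arithmetic conversions: in a comparison between int and unsigned int, the signed operand is converted, so a negative value silently compares as a huge number. A small standalone illustration of that pitfall, with made-up values rather than anything taken from the kernel:

	#include <stdio.h>

	/*
	 * Illustration of why mixing signed and unsigned counters is risky:
	 * in a signed/unsigned comparison the signed operand is converted
	 * to unsigned, so -1 becomes UINT_MAX and compares as "greater".
	 */
	int main(void)
	{
		int signed_available = -1;		/* made-up value */
		unsigned int unsigned_available = 0;	/* made-up value */
		unsigned int cpu_partial = 30;		/* made-up limit */

		if (signed_available > cpu_partial / 2)
			printf("mixed types: -1 compares greater than %u\n",
			       cpu_partial / 2);

		if (!(unsigned_available > cpu_partial / 2))
			printf("matching unsigned types: 0 is not greater than %u\n",
			       cpu_partial / 2);

		return 0;
	}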