slub: Fallback to minimal order during slab page allocation

If a higher order allocation fails, fall back to the smallest order that
still holds at least one object. This allows every allocation to fall back
to order 0 pages. The fallback wastes more memory (objects no longer pack
neatly) and fallback slabs are less efficient than larger slabs because
they contain fewer objects.
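
To make that trade-off concrete, here is a small, self-contained illustration (not part of the patch; the 4 KiB page size and 1500-byte object size are made-up numbers) of how many objects fit and how much space is wasted at order 1 versus an order 0 fallback:

#include <stdio.h>

/* Hypothetical numbers, purely to illustrate the packing argument. */
#define PAGE_SIZE 4096UL
#define OBJ_SIZE  1500UL

int main(void)
{
        unsigned long order;

        for (order = 0; order <= 1; order++) {
                unsigned long slab_bytes = PAGE_SIZE << order;
                unsigned long objects = slab_bytes / OBJ_SIZE;
                unsigned long waste = slab_bytes - objects * OBJ_SIZE;

                printf("order %lu: %lu objects, %lu bytes wasted\n",
                       order, objects, waste);
        }
        return 0;
}

The order 0 slab holds 2 objects and wastes 1096 bytes (about 548 per object), while the order 1 slab holds 5 and wastes 692 (about 138 per object), which is why the larger order stays the first choice and the smaller one is only a fallback.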

Note that SLAB also depends on order 1 allocations for some slabs that
would waste too much memory if forced into PAGE_SIZE'd pages. SLUB can now
cope with a failing order 1 allocation, which SLAB cannot do.

Add a new field, min, that holds the order and object count for the
smallest possible order of a slab cache.
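
The new field has the same type as oo and max: a kmem_cache_order_objects value packing the page order and the object count into one word. The helpers below are only a sketch of how that packing works, recalled from the companion patches in this series rather than taken from this diff, so treat the 16-bit split as an assumption:

/* Sketch of the packed order/objects pair used by s->oo, s->min and s->max;
 * the 16-bit split is assumed from the companion patches, not this diff. */
#ifndef PAGE_SIZE
#define PAGE_SIZE 4096UL        /* assumption, for a standalone build */
#endif

struct kmem_cache_order_objects {
        unsigned long x;
};

static inline struct kmem_cache_order_objects oo_make(int order,
                                                unsigned long size)
{
        struct kmem_cache_order_objects x = {
                (order << 16) + (PAGE_SIZE << order) / size
        };
        return x;
}

static inline int oo_order(struct kmem_cache_order_objects x)
{
        return x.x >> 16;
}

static inline int oo_objects(struct kmem_cache_order_objects x)
{
        return x.x & ((1 << 16) - 1);
}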

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Commit 65c3376aac (parent 205ab99dd1)
Christoph Lameter, 2008-04-14 19:11:40 +03:00; committed by Pekka Enberg
2 changed files with 30 additions and 11 deletions

include/linux/slub_def.h

@@ -29,6 +29,7 @@ enum stat_item {
         DEACTIVATE_TO_HEAD,     /* Cpu slab was moved to the head of partials */
         DEACTIVATE_TO_TAIL,     /* Cpu slab was moved to the tail of partials */
         DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
+        ORDER_FALLBACK,         /* Number of times fallback was necessary */
         NR_SLUB_STAT_ITEMS };
 
 struct kmem_cache_cpu {
@@ -81,6 +82,7 @@ struct kmem_cache {
         /* Allocation and freeing of slabs */
         struct kmem_cache_order_objects max;
+        struct kmem_cache_order_objects min;
         gfp_t allocflags;       /* gfp flags to use on each alloc */
         int refcount;           /* Refcount for slab cache destroy */
         void (*ctor)(struct kmem_cache *, void *);

mm/slub.c

@@ -1113,28 +1113,43 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
 /*
  * Slab allocation and freeing
  */
+static inline struct page *alloc_slab_page(gfp_t flags, int node,
+                                        struct kmem_cache_order_objects oo)
+{
+        int order = oo_order(oo);
+
+        if (node == -1)
+                return alloc_pages(flags, order);
+        else
+                return alloc_pages_node(node, flags, order);
+}
+
 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
         struct page *page;
         struct kmem_cache_order_objects oo = s->oo;
-        int order = oo_order(oo);
-        int pages = 1 << order;
 
         flags |= s->allocflags;
 
-        if (node == -1)
-                page = alloc_pages(flags, order);
-        else
-                page = alloc_pages_node(node, flags, order);
-
-        if (!page)
-                return NULL;
+        page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY, node,
+                                                                        oo);
+        if (unlikely(!page)) {
+                oo = s->min;
+                /*
+                 * Allocation may have failed due to fragmentation.
+                 * Try a lower order alloc if possible
+                 */
+                page = alloc_slab_page(flags, node, oo);
+                if (!page)
+                        return NULL;
+
+                stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK);
+        }
         page->objects = oo_objects(oo);
         mod_zone_page_state(page_zone(page),
                 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
                 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-                pages);
+                1 << oo_order(oo));
 
         return page;
 }
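
The hunk above is the whole technique: try the preferred order with __GFP_NOWARN | __GFP_NORETRY so the page allocator fails fast and quietly, then retry at s->min with the caller's original flags so a genuine out-of-memory condition still behaves as before. A userspace analogue of that pattern (try_alloc_order() and its sizes are invented for illustration; this is not kernel code):

#include <stdio.h>
#include <stdlib.h>

/* Invented stand-in for a page allocator that can fail at high orders. */
static void *try_alloc_order(unsigned int order)
{
        return malloc(4096UL << order);
}

/* Preferred order first; fall back to the minimal order that still
 * holds at least one object, mirroring allocate_slab() above. */
static void *alloc_with_fallback(unsigned int preferred, unsigned int minimal,
                                 unsigned int *used_order)
{
        void *p = try_alloc_order(preferred);

        if (!p) {
                p = try_alloc_order(minimal);
                if (!p)
                        return NULL;
                *used_order = minimal;  /* count this as an ORDER_FALLBACK */
                return p;
        }
        *used_order = preferred;
        return p;
}

int main(void)
{
        unsigned int order;
        void *slab = alloc_with_fallback(3, 0, &order);

        if (slab) {
                printf("got a slab at order %u\n", order);
                free(slab);
        }
        return 0;
}
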
@@ -2347,6 +2362,7 @@ static int calculate_sizes(struct kmem_cache *s)
          * Determine the number of objects per slab
          */
         s->oo = oo_make(order, size);
+        s->min = oo_make(get_order(size), size);
 
         if (oo_objects(s->oo) > oo_objects(s->max))
                 s->max = s->oo;
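
Note that the minimal order is not necessarily zero: get_order(size) returns the smallest order whose span covers one object, so a cache whose objects are larger than a page still gets a min above order 0. A small sketch of the arithmetic, with a simplified stand-in for get_order() and a made-up 9000-byte object size:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Simplified stand-in for the kernel's get_order(): the smallest order
 * whose 2^order pages cover size bytes. */
static int min_order(unsigned long size)
{
        int order = 0;

        while ((PAGE_SIZE << order) < size)
                order++;
        return order;
}

int main(void)
{
        unsigned long size = 9000;      /* hypothetical object size */
        int order = min_order(size);

        printf("size %lu: min order %d, %lu object(s) per fallback slab\n",
               size, order, (PAGE_SIZE << order) / size);
        return 0;
}
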
@@ -4163,7 +4179,7 @@ STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
+STAT_ATTR(ORDER_FALLBACK, order_fallback);
 #endif
 
 static struct attribute *slab_attrs[] = {
@@ -4216,6 +4232,7 @@ static struct attribute *slab_attrs[] = {
         &deactivate_to_head_attr.attr,
         &deactivate_to_tail_attr.attr,
         &deactivate_remote_frees_attr.attr,
+        &order_fallback_attr.attr,
 #endif
         NULL
 };
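
With CONFIG_SLUB_STATS enabled, the new counter shows up as a per-cache sysfs file. A minimal userspace reader (the kmalloc-4096 cache name is only an example; any directory under /sys/kernel/slab works):

#include <stdio.h>

int main(void)
{
        /* Example path; substitute the cache you are interested in. */
        const char *path = "/sys/kernel/slab/kmalloc-4096/order_fallback";
        char buf[128];
        FILE *f = fopen(path, "r");

        if (!f) {
                perror(path);
                return 1;
        }
        if (fgets(buf, sizeof(buf), f))
                printf("order_fallback: %s", buf);
        fclose(f);
        return 0;
}

A steadily climbing count signals that the preferred higher order is regularly unavailable, i.e. memory is fragmented enough that the cache keeps falling back to its minimal order.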