slub: free_list() cleanup

free_list looked a bit screwy so here is an attempt to clean it up.

free_list is only used for freeing partial lists. We do not need to return a
value if we decrement nr_partial within the function, which allows a
simplification of the whole thing.

The current version modifies nr_partial outside of the list_lock, which is
technically not correct. It was only OK because we should be the only user of
this slab cache at this point.
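
For reference, the function as it reads after this patch (assembled from the
diff below, with comments added); note that the nr_partial decrement now
happens with list_lock held:

/*
 * Attempt to free all partial slabs on a node.
 */
static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
{
	unsigned long flags;
	struct page *page, *h;

	/* Take list_lock so nr_partial is only modified under the lock. */
	spin_lock_irqsave(&n->list_lock, flags);
	list_for_each_entry_safe(page, h, &n->partial, lru)
		if (!page->inuse) {
			/* Empty partial slab: unlink, free, and account for it. */
			list_del(&page->lru);
			discard_slab(s, page);
			n->nr_partial--;
		}
	spin_unlock_irqrestore(&n->list_lock, flags);
}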

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Author: Christoph Lameter, 2008-04-23 12:36:52 -07:00 (committed by Pekka Enberg)
commit 599870b175, parent d629d81957
1 changed file with 7 additions and 11 deletions


@@ -2372,25 +2372,21 @@ const char *kmem_cache_name(struct kmem_cache *s)
 EXPORT_SYMBOL(kmem_cache_name);
 
 /*
- * Attempt to free all slabs on a node. Return the number of slabs we
- * were unable to free.
+ * Attempt to free all partial slabs on a node.
  */
-static int free_list(struct kmem_cache *s, struct kmem_cache_node *n,
-			struct list_head *list)
+static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 {
-	int slabs_inuse = 0;
 	unsigned long flags;
 	struct page *page, *h;
 
 	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry_safe(page, h, list, lru)
+	list_for_each_entry_safe(page, h, &n->partial, lru)
 		if (!page->inuse) {
 			list_del(&page->lru);
 			discard_slab(s, page);
-		} else
-			slabs_inuse++;
+			n->nr_partial--;
+		}
 	spin_unlock_irqrestore(&n->list_lock, flags);
-	return slabs_inuse;
 }
 
 /*
@@ -2407,8 +2403,8 @@ static inline int kmem_cache_close(struct kmem_cache *s)
 	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n = get_node(s, node);
 
-		n->nr_partial -= free_list(s, n, &n->partial);
-		if (slabs_node(s, node))
+		free_partial(s, n);
+		if (n->nr_partial || slabs_node(s, node))
 			return 1;
 	}
 	free_kmem_cache_nodes(s);
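
For clarity, the call site in kmem_cache_close() after this change, with
comments on why no return value is needed (same code as the hunk above):

	for_each_node_state(node, N_NORMAL_MEMORY) {
		struct kmem_cache_node *n = get_node(s, node);

		/* Drops empty partial slabs and decrements n->nr_partial itself. */
		free_partial(s, n);

		/* Any remaining partial or allocated slabs mean the cache is busy. */
		if (n->nr_partial || slabs_node(s, node))
			return 1;
	}
	free_kmem_cache_nodes(s);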