XArray: Mark xa_insert and xa_reserve as must_check

If the user doesn't care about the return value from xa_insert(), then
they should be using xa_store() instead.  The point of xa_reserve() is
to get the return value early before taking another lock, so this should
also be __must_check.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
commit f818b82b80 (parent 2fa044e51a)
Author: Matthew Wilcox
Date: 2019-02-08 14:02:45 -05:00
2 changed files with 17 additions and 16 deletions
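For context, a minimal, hypothetical sketch of the usage patterns the commit message contrasts (not part of this patch; the xarray `cache`, the lock, and the function names are invented for illustration):

	#include <linux/spinlock.h>
	#include <linux/xarray.h>

	static DEFINE_XARRAY(cache);		/* hypothetical xarray */
	static DEFINE_SPINLOCK(cache_lock);	/* hypothetical unrelated lock */

	/* xa_insert() refuses to overwrite an existing entry, so its return
	 * value (0, -EBUSY or -ENOMEM) carries real information. */
	static int cache_add(unsigned long index, void *obj)
	{
		return xa_insert(&cache, index, obj, GFP_KERNEL);
	}

	/* If silently replacing an existing entry is acceptable, xa_store()
	 * is the right call; its return value (the old entry) may be ignored. */
	static void cache_set(unsigned long index, void *obj)
	{
		xa_store(&cache, index, obj, GFP_KERNEL);
	}

	/* xa_reserve() surfaces the allocation failure *before* the other
	 * lock is taken; the later store into the reserved slot then needs
	 * no further memory. */
	static int cache_add_locked(unsigned long index, void *obj)
	{
		int err = xa_reserve(&cache, index, GFP_KERNEL);

		if (err)	/* -ENOMEM */
			return err;

		spin_lock(&cache_lock);
		xa_store(&cache, index, obj, GFP_NOWAIT);
		spin_unlock(&cache_lock);
		return 0;
	}

Ignoring the return value of xa_insert() or xa_reserve() defeats the point of calling them, which is exactly what __must_check now flags at compile time.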

--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h

@@ -497,12 +497,13 @@ void *__xa_erase(struct xarray *, unsigned long index);
 void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
 void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
 		void *entry, gfp_t);
-int __xa_insert(struct xarray *, unsigned long index, void *entry, gfp_t);
+int __must_check __xa_insert(struct xarray *, unsigned long index,
+		void *entry, gfp_t);
 int __must_check __xa_alloc(struct xarray *, u32 *id, void *entry,
 		struct xa_limit, gfp_t);
 int __must_check __xa_alloc_cyclic(struct xarray *, u32 *id, void *entry,
 		struct xa_limit, u32 *next, gfp_t);
-int __xa_reserve(struct xarray *, unsigned long index, gfp_t);
+int __must_check __xa_reserve(struct xarray *, unsigned long index, gfp_t);
 void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
 void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
@@ -704,8 +705,8 @@ static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index,
  * Return: 0 if the store succeeded. -EBUSY if another entry was present.
  * -ENOMEM if memory could not be allocated.
  */
-static inline int xa_insert(struct xarray *xa, unsigned long index,
-		void *entry, gfp_t gfp)
+static inline int __must_check xa_insert(struct xarray *xa,
+		unsigned long index, void *entry, gfp_t gfp)
 {
 	int err;
@@ -733,8 +734,8 @@ static inline int xa_insert(struct xarray *xa, unsigned long index,
  * Return: 0 if the store succeeded. -EBUSY if another entry was present.
  * -ENOMEM if memory could not be allocated.
  */
-static inline int xa_insert_bh(struct xarray *xa, unsigned long index,
-		void *entry, gfp_t gfp)
+static inline int __must_check xa_insert_bh(struct xarray *xa,
+		unsigned long index, void *entry, gfp_t gfp)
 {
 	int err;
@@ -762,8 +763,8 @@ static inline int xa_insert_bh(struct xarray *xa, unsigned long index,
  * Return: 0 if the store succeeded. -EBUSY if another entry was present.
  * -ENOMEM if memory could not be allocated.
  */
-static inline int xa_insert_irq(struct xarray *xa, unsigned long index,
-		void *entry, gfp_t gfp)
+static inline int __must_check xa_insert_irq(struct xarray *xa,
+		unsigned long index, void *entry, gfp_t gfp)
 {
 	int err;
@@ -978,7 +979,7 @@ static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry,
  * May sleep if the @gfp flags permit.
  * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
  */
-static inline
+static inline __must_check
 int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
 {
 	int ret;
@@ -1002,7 +1003,7 @@ int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
  * disabling softirqs.
  * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
  */
-static inline
+static inline __must_check
 int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp)
 {
 	int ret;
@@ -1026,7 +1027,7 @@ int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp)
  * disabling interrupts.
  * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
  */
-static inline
+static inline __must_check
 int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp)
 {
 	int ret;

--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c

@@ -364,21 +364,21 @@ static noinline void check_reserve(struct xarray *xa)
 
 	/* An array with a reserved entry is not empty */
 	XA_BUG_ON(xa, !xa_empty(xa));
-	xa_reserve(xa, 12345678, GFP_KERNEL);
+	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
 	XA_BUG_ON(xa, xa_empty(xa));
 	XA_BUG_ON(xa, xa_load(xa, 12345678));
 	xa_release(xa, 12345678);
 	XA_BUG_ON(xa, !xa_empty(xa));
 
 	/* Releasing a used entry does nothing */
-	xa_reserve(xa, 12345678, GFP_KERNEL);
+	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
 	XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL);
 	xa_release(xa, 12345678);
 	xa_erase_index(xa, 12345678);
 	XA_BUG_ON(xa, !xa_empty(xa));
 
 	/* cmpxchg sees a reserved entry as NULL */
-	xa_reserve(xa, 12345678, GFP_KERNEL);
+	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
 	XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, NULL, xa_mk_value(12345678),
 			GFP_NOWAIT) != NULL);
 	xa_release(xa, 12345678);
@@ -386,7 +386,7 @@ static noinline void check_reserve(struct xarray *xa)
 	XA_BUG_ON(xa, !xa_empty(xa));
 
 	/* But xa_insert does not */
-	xa_reserve(xa, 12345678, GFP_KERNEL);
+	XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0);
 	XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) !=
 			-EBUSY);
 	XA_BUG_ON(xa, xa_empty(xa));
@@ -395,7 +395,7 @@ static noinline void check_reserve(struct xarray *xa)
 	/* Can iterate through a reserved entry */
 	xa_store_index(xa, 5, GFP_KERNEL);
-	xa_reserve(xa, 6, GFP_KERNEL);
+	XA_BUG_ON(xa, xa_reserve(xa, 6, GFP_KERNEL) != 0);
 	xa_store_index(xa, 7, GFP_KERNEL);
 
 	xa_for_each(xa, index, entry) {