netns: protect netns ID lookups with RCU

commit 2dce224f469f060b9998a5a869151ef83c08ce77 upstream.

__peernet2id() can be protected by RCU as it only calls idr_for_each(),
which is RCU-safe, and never modifies the nsid table.
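
For illustration, a minimal sketch of that read-side pattern, condensed
from net/core/net_namespace.c (the wrapper name, the simplified return
handling, and the exact include list are assumptions of this sketch,
not upstream code):

#include <linux/idr.h>
#include <linux/rcupdate.h>
#include <linux/net_namespace.h>	/* NETNSA_NSID_NOT_ASSIGNED */
#include <net/net_namespace.h>		/* struct net, net_eq() */

#define NET_ID_ZERO -1	/* stand-in for nsid 0, since idr_for_each()
			 * uses a return of 0 to mean "keep walking" */

/* Match callback: return the nsid of @peer, NET_ID_ZERO for nsid 0,
 * or 0 to continue the walk. */
static int net_eq_idr(int id, void *net, void *peer)
{
	if (net_eq(net, peer))
		return id ? : NET_ID_ZERO;
	return 0;
}

/* Invented wrapper showing the pattern: the walk only reads the IDR,
 * so an RCU read-side critical section is enough. */
static int peernet2id_rcu_sketch(struct net *net, struct net *peer)
{
	int id;

	rcu_read_lock();
	id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
	rcu_read_unlock();

	if (id == NET_ID_ZERO)
		return 0;
	return id > 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
}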

rtnl_net_dumpid() can also do lockless lookups. It does two nested
idr_for_each() calls on nsid tables (one direct call and one indirect
call, because rtnl_net_dumpid_one() calls __peernet2id()). The netnsid
tables are only read during the dump, never updated, so it is safe to
skip the nsid_lock and run within an RCU-critical section instead.
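
To make the nesting concrete, here is a condensed, hypothetical sketch
(struct and helper names are invented; the real callback also fills a
netlink message and only translates ids in the add_ref case):

#include <linux/idr.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>

/* Invented, trimmed-down stand-in for struct rtnl_net_dump_cb. */
struct dump_cb_sketch {
	struct net *tgt_net;	/* namespace whose nsid table is walked */
	struct net *ref_net;	/* namespace the ids are translated against */
};

/* Simplified match callback: returns the nsid of @peer (ignores the
 * valid-id-0 corner case the real code handles with NET_ID_ZERO). */
static int match_peer_sketch(int id, void *net, void *peer)
{
	return net_eq(net, peer) ? id : 0;
}

/* Inner walk: translate each peer found by the outer walk into an id
 * in ref_net's table -- a second idr_for_each() nested in the first. */
static int dumpid_one_sketch(int id, void *peer, void *data)
{
	struct dump_cb_sketch *cb = data;
	int ref_id;

	ref_id = idr_for_each(&cb->ref_net->netns_ids, match_peer_sketch, peer);
	(void)ref_id;		/* the real code puts this in the dump */
	return 0;
}

/* Both walks are read-only, so one RCU read-side section covers them. */
static void dumpid_walk_sketch(struct dump_cb_sketch *cb)
{
	rcu_read_lock();
	idr_for_each(&cb->tgt_net->netns_ids, dumpid_one_sketch, cb);
	rcu_read_unlock();
}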

Signed-off-by: Guillaume Nault <gnault@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Håkon Bugge <haakon.bugge@oracle.com>
---
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -211,9 +211,9 @@ static int net_eq_idr(int id, void *net, void *peer)
 	return 0;
 }
 
-/* Should be called with nsid_lock held. If a new id is assigned, the bool alloc
- * is set to true, thus the caller knows that the new id must be notified via
- * rtnl.
+/* Must be called from RCU-critical section or with nsid_lock held. If
+ * a new id is assigned, the bool alloc is set to true, thus the
+ * caller knows that the new id must be notified via rtnl.
  */
 static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
 {
@@ -237,7 +237,7 @@ static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
 	return NETNSA_NSID_NOT_ASSIGNED;
 }
 
-/* should be called with nsid_lock held */
+/* Must be called from RCU-critical section or with nsid_lock held */
 static int __peernet2id(struct net *net, struct net *peer)
 {
 	bool no = false;
@@ -281,9 +281,10 @@ int peernet2id(struct net *net, struct net *peer)
 {
 	int id;
 
-	spin_lock_bh(&net->nsid_lock);
+	rcu_read_lock();
 	id = __peernet2id(net, peer);
-	spin_unlock_bh(&net->nsid_lock);
+	rcu_read_unlock();
+
 	return id;
 }
 EXPORT_SYMBOL(peernet2id);
@@ -962,6 +963,7 @@ struct rtnl_net_dump_cb {
 	int s_idx;
 };
 
+/* Runs in RCU-critical section. */
 static int rtnl_net_dumpid_one(int id, void *peer, void *data)
 {
 	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
@@ -1046,19 +1048,9 @@ static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
 			goto end;
 	}
 
-	spin_lock_bh(&net_cb.tgt_net->nsid_lock);
-	if (net_cb.fillargs.add_ref &&
-	    !net_eq(net_cb.ref_net, net_cb.tgt_net) &&
-	    !spin_trylock_bh(&net_cb.ref_net->nsid_lock)) {
-		spin_unlock_bh(&net_cb.tgt_net->nsid_lock);
-		err = -EAGAIN;
-		goto end;
-	}
+	rcu_read_lock();
 	idr_for_each(&net_cb.tgt_net->netns_ids, rtnl_net_dumpid_one, &net_cb);
-	if (net_cb.fillargs.add_ref &&
-	    !net_eq(net_cb.ref_net, net_cb.tgt_net))
-		spin_unlock_bh(&net_cb.ref_net->nsid_lock);
-	spin_unlock_bh(&net_cb.tgt_net->nsid_lock);
+	rcu_read_unlock();
 
 	cb->args[0] = net_cb.idx;
 end:
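
For context, the write side is untouched by this change: ids are still
inserted under nsid_lock, which is what keeps the lockless readers
above coherent. A hypothetical condensed sketch, loosely modeled on the
allocation path in the same file (function name invented, error
handling trimmed):

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <net/net_namespace.h>

/* Writers still serialize on nsid_lock. idr_alloc() updates the tree
 * in an RCU-friendly way, so concurrent lockless walkers see either
 * the old or the new state of the table, never a torn one. */
static int assign_nsid_sketch(struct net *net, struct net *peer, int reqid)
{
	int min = 0, max = 0, id;

	if (reqid >= 0) {	/* caller asked for a specific id */
		min = reqid;
		max = reqid + 1;
	}

	spin_lock_bh(&net->nsid_lock);
	id = idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
	spin_unlock_bh(&net->nsid_lock);

	return id;
}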