diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 41abfff6a6a3..2146372adff4 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -357,6 +357,18 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
 	return af;
 }
 
+static void sctp_auto_asconf_init(struct sctp_sock *sp)
+{
+	struct net *net = sock_net(&sp->inet.sk);
+
+	if (net->sctp.default_auto_asconf) {
+		spin_lock(&net->sctp.addr_wq_lock);
+		list_add_tail(&sp->auto_asconf_list, &net->sctp.auto_asconf_splist);
+		spin_unlock(&net->sctp.addr_wq_lock);
+		sp->do_auto_asconf = 1;
+	}
+}
+
 /* Bind a local address either to an endpoint or to an association.  */
 static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
 {
@@ -418,8 +430,10 @@ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
 		return -EADDRINUSE;
 
 	/* Refresh ephemeral port.  */
-	if (!bp->port)
+	if (!bp->port) {
 		bp->port = inet_sk(sk)->inet_num;
+		sctp_auto_asconf_init(sp);
+	}
 
 	/* Add the address to the bind address list.
 	 * Use GFP_ATOMIC since BHs will be disabled.
@@ -5115,19 +5129,6 @@ static int sctp_init_sock(struct sock *sk)
 	sk_sockets_allocated_inc(sk);
 	sock_prot_inuse_add(net, sk->sk_prot, 1);
 
-	/* Nothing can fail after this block, otherwise
-	 * sctp_destroy_sock() will be called without addr_wq_lock held
-	 */
-	if (net->sctp.default_auto_asconf) {
-		spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
-		list_add_tail(&sp->auto_asconf_list,
-			      &net->sctp.auto_asconf_splist);
-		sp->do_auto_asconf = 1;
-		spin_unlock(&sock_net(sk)->sctp.addr_wq_lock);
-	} else {
-		sp->do_auto_asconf = 0;
-	}
-
 	local_bh_enable();
 
 	return 0;
@@ -9402,6 +9403,8 @@ static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 		return err;
 	}
 
+	sctp_auto_asconf_init(newsp);
+
 	/* Move any messages in the old socket's receive queue that are for the
 	 * peeled off association to the new socket's receive queue.
 	 */