@@ -1819,6 +1819,78 @@ static void subflow_state_change(struct sock *sk)
 	}
 }
 
+void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk)
+{
+	struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;
+	struct mptcp_sock *msk, *next, *head = NULL;
+	struct request_sock *req;
+
+	/* build a list of all unaccepted mptcp sockets */
+	spin_lock_bh(&queue->rskq_lock);
+	for (req = queue->rskq_accept_head; req; req = req->dl_next) {
+		struct mptcp_subflow_context *subflow;
+		struct sock *ssk = req->sk;
+
+		if (!sk_is_mptcp(ssk))
+			continue;
+
+		subflow = mptcp_subflow_ctx(ssk);
+		if (!subflow || !subflow->conn)
+			continue;
+
+		/* skip if already in list */
+		msk = mptcp_sk(subflow->conn);
+		if (msk->dl_next || msk == head)
+			continue;
+
+		sock_hold(subflow->conn);
+		msk->dl_next = head;
+		head = msk;
+	}
+	spin_unlock_bh(&queue->rskq_lock);
+	if (!head)
+		return;
+
+	/* can't acquire the msk socket lock under the subflow one,
+	 * or will cause ABBA deadlock
+	 */
+	release_sock(listener_ssk);
+
+	for (msk = head; msk; msk = next) {
+		struct sock *sk = (struct sock *)msk;
+
+		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+		next = msk->dl_next;
+		msk->dl_next = NULL;
+
+		/* prevent the stack from later re-scheduling the worker
+		 * for this socket
+		 */
+		inet_sk_state_store(sk, TCP_CLOSE);
+		release_sock(sk);
+
+		/* lockdep will report a false positive ABBA deadlock
+		 * between cancel_work_sync and the listener socket.
+		 * The involved locks belong to different sockets WRT
+		 * the existing AB chain.
+		 * Using a per socket key is problematic as key
+		 * deregistration requires process context and must be
+		 * performed at socket disposal time, in atomic
+		 * context.
+		 * Just tell lockdep to consider the listener socket
+		 * released here.
+		 */
+		mutex_release(&listener_sk->sk_lock.dep_map, _RET_IP_);
+		mptcp_cancel_work(sk);
+		mutex_acquire(&listener_sk->sk_lock.dep_map, 0, 0, _RET_IP_);
+
+		sock_put(sk);
+	}
+
+	/* we are still under the listener msk socket lock */
+	lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING);
+}
+
 static int subflow_ulp_init(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
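For context, the new helper is meant to be called from a listener close path with both the MPTCP socket lock (`listener_sk`) and the listening subflow lock (`listener_ssk`) already held, which is why it re-acquires `listener_ssk` with `lock_sock_nested()` before returning. The sketch below is a hypothetical caller, not part of this patch: the function name `example_close_listener_subflow` is invented for illustration, and it assumes the usual net/mptcp compilation context (protocol.h, net/tcp.h).

```c
/* Hypothetical caller sketch (not from this patch): tear down a
 * TCP_LISTEN subflow of an MPTCP socket.  Assumes the msk lock (sk)
 * and the subflow lock (ssk) are already held, matching the locking
 * contract implied by mptcp_subflow_queue_clean() above.
 */
static void example_close_listener_subflow(struct sock *sk, struct sock *ssk)
{
	if (ssk->sk_state == TCP_LISTEN) {
		tcp_set_state(ssk, TCP_CLOSE);

		/* stop the MPTCP worker of every unaccepted child msk
		 * first, so the regular listener teardown below can
		 * safely drop the pending request sockets
		 */
		mptcp_subflow_queue_clean(sk, ssk);
		inet_csk_listen_stop(ssk);
	}
}
```

The ordering matters: cancelling the workers before `inet_csk_listen_stop()` prevents a still-queued worker from touching child msk sockets that the listener teardown is about to release.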