cxgbe(4): Stop work request queues in a reliable manner.

Clear the EQ_HW_ALLOCATED flag with the wrq lock held and discard all
work requests, pending or new, when it's not set.

Sponsored by:	Chelsio Communications

(cherry picked from commit 0a9d1da6e6cede5e9c0ff63240d724049ad72b5b)
Author:	Navdeep Parhar
Date:	2024-07-31 12:27:18 -07:00
Commit:	888858a7bf (parent 4016225138)

3 changed files with 37 additions and 3 deletions
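
Note: below is a minimal, self-contained sketch of the locking pattern this commit applies. The toy_* names and the pthread mutex are stand-ins for the driver's sge_wrq, TXQ_LOCK, and EQ_HW_ALLOCATED, not actual cxgbe code; the real hunks follow.

#include <stdbool.h>
#include <stdlib.h>
#include <pthread.h>

/* Toy stand-in for struct sge_wrq; hw_allocated plays EQ_HW_ALLOCATED. */
struct toy_wrq {
	pthread_mutex_t	lock;		/* plays the role of TXQ_LOCK */
	bool		hw_allocated;
};

/* Stop path: clear the flag with the queue lock held. */
static void
toy_wrq_stop(struct toy_wrq *q)
{
	pthread_mutex_lock(&q->lock);
	q->hw_allocated = false;
	pthread_mutex_unlock(&q->lock);
}

/*
 * Submit path: checks the flag under the same lock, so it either sees
 * the hardware queue alive and uses it, or discards the request.
 */
static void
toy_wrq_tx(struct toy_wrq *q, void *wr)
{
	pthread_mutex_lock(&q->lock);
	if (q->hw_allocated) {
		/* ... hand wr to the hardware ring as usual ... */
	} else {
		free(wr);	/* queue stopped: discard old or new work */
	}
	pthread_mutex_unlock(&q->lock);
}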

--- a/sys/dev/cxgbe/adapter.h
+++ b/sys/dev/cxgbe/adapter.h
@@ -1561,7 +1561,10 @@ t4_wrq_tx(struct adapter *sc, struct wrqe *wr)
 	struct sge_wrq *wrq = wr->wrq;
 
 	TXQ_LOCK(wrq);
-	t4_wrq_tx_locked(sc, wrq, wr);
+	if (__predict_true(wrq->eq.flags & EQ_HW_ALLOCATED))
+		t4_wrq_tx_locked(sc, wrq, wr);
+	else
+		free(wr, M_CXGBE);
 	TXQ_UNLOCK(wrq);
 }
 

--- a/sys/dev/cxgbe/t4_main.c
+++ b/sys/dev/cxgbe/t4_main.c
@@ -2060,7 +2060,9 @@ stop_lld(struct adapter *sc)
 			}
 #if defined(TCP_OFFLOAD) || defined(RATELIMIT)
 			for_each_ofld_txq(vi, k, ofld_txq) {
+				TXQ_LOCK(&ofld_txq->wrq);
 				ofld_txq->wrq.eq.flags &= ~EQ_HW_ALLOCATED;
+				TXQ_UNLOCK(&ofld_txq->wrq);
 			}
 #endif
 			for_each_rxq(vi, k, rxq) {
@@ -2078,7 +2080,9 @@ stop_lld(struct adapter *sc)
 		if (sc->flags & FULL_INIT_DONE) {
 			/* Control queue */
 			wrq = &sc->sge.ctrlq[i];
+			TXQ_LOCK(wrq);
 			wrq->eq.flags &= ~EQ_HW_ALLOCATED;
+			TXQ_UNLOCK(wrq);
 			quiesce_wrq(wrq);
 		}
 	}
@@ -7049,8 +7053,22 @@ quiesce_txq(struct sge_txq *txq)
 static void
 quiesce_wrq(struct sge_wrq *wrq)
 {
+	struct wrqe *wr;
 
-	/* XXXTX */
+	TXQ_LOCK(wrq);
+	while ((wr = STAILQ_FIRST(&wrq->wr_list)) != NULL) {
+		STAILQ_REMOVE_HEAD(&wrq->wr_list, link);
+#ifdef INVARIANTS
+		wrq->nwr_pending--;
+		wrq->ndesc_needed -= howmany(wr->wr_len, EQ_ESIZE);
+#endif
+		free(wr, M_CXGBE);
+	}
+	MPASS(wrq->nwr_pending == 0);
+	MPASS(wrq->ndesc_needed == 0);
+	wrq->nwr_pending = 0;
+	wrq->ndesc_needed = 0;
+	TXQ_UNLOCK(wrq);
 }
 
 static void
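
Note: a word on the INVARIANTS idiom in quiesce_wrq() above. With INVARIANTS the per-WR counters are decremented and then checked by MPASS(); without INVARIANTS the MPASS() lines compile away and the counters are simply reset by the unconditional stores. A simplified userspace stand-in (MY_MPASS is hypothetical, not the kernel's MPASS):

#include <assert.h>

#ifdef INVARIANTS
#define	MY_MPASS(x)	assert(x)
#else
#define	MY_MPASS(x)	do { } while (0)
#endif

struct toy_counters {
	int	nwr_pending;	/* work requests still queued in software */
	int	ndesc_needed;	/* ring descriptors those WRs would use */
};

/* Drain-time accounting: exact under INVARIANTS, plain reset otherwise. */
static void
toy_drain_done(struct toy_counters *c, int nwr, int ndesc)
{
#ifdef INVARIANTS
	c->nwr_pending -= nwr;	/* maintained per-WR on debug kernels */
	c->ndesc_needed -= ndesc;
#endif
	MY_MPASS(c->nwr_pending == 0);
	MY_MPASS(c->ndesc_needed == 0);
	c->nwr_pending = 0;	/* keep state sane on !INVARIANTS kernels */
	c->ndesc_needed = 0;
}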

--- a/sys/dev/cxgbe/t4_sge.c
+++ b/sys/dev/cxgbe/t4_sge.c
@@ -2921,6 +2921,10 @@ start_wrq_wr(struct sge_wrq *wrq, int len16, struct wrq_cookie *cookie)
 	MPASS(ndesc > 0 && ndesc <= SGE_MAX_WR_NDESC);
 
 	EQ_LOCK(eq);
+	if (__predict_false((eq->flags & EQ_HW_ALLOCATED) == 0)) {
+		EQ_UNLOCK(eq);
+		return (NULL);
+	}
 	if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list))
 		drain_wrq_wr_list(sc, wrq);
 
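Note: the early NULL return above reuses the existing caller contract; start_wrq_wr() could already return NULL when the ring had no room, so callers need no new error handling. A hypothetical in-driver caller (toy_send_wr is illustrative, not a real function):

static int
toy_send_wr(struct sge_wrq *wrq, int len16)
{
	struct wrq_cookie cookie;
	void *w;

	w = start_wrq_wr(wrq, len16, &cookie);
	if (w == NULL)
		return (ENOMEM);	/* no room, or queue now stopped */
	/* ... build the work request in the ring slot at w ... */
	commit_wrq_wr(wrq, w, &cookie);
	return (0);
}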
@@ -3016,7 +3020,10 @@ commit_wrq_wr(struct sge_wrq *wrq, void *w, struct wrq_cookie *cookie)
 			    F_FW_WR_EQUEQ);
 		}
 
-		ring_eq_db(wrq->adapter, eq, ndesc);
+		if (__predict_true(eq->flags & EQ_HW_ALLOCATED))
+			ring_eq_db(wrq->adapter, eq, ndesc);
+		else
+			IDXINCR(eq->dbidx, ndesc, eq->sidx);
 	} else {
 		MPASS(IDXDIFF(next->pidx, pidx, eq->sidx) == ndesc);
 		next->pidx = pidx;
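
Note: in the fallback branch above the doorbell write is skipped, but eq->dbidx still advances so the software ring accounting stays consistent with pidx. A standalone illustration of that wrap-around advance (ring_idx_incr is a stand-in, not the driver's IDXINCR macro):

#include <stdio.h>

/* Advance a ring index by incr, wrapping at size (requires incr < size). */
static unsigned
ring_idx_incr(unsigned idx, unsigned incr, unsigned size)
{
	idx += incr;
	return (idx >= size ? idx - size : idx);
}

int
main(void)
{
	/* A 512-descriptor ring: advancing 500 by 20 wraps to 8. */
	printf("%u\n", ring_idx_incr(500, 20, 512));
	return (0);
}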
@@ -3852,6 +3859,8 @@ alloc_ctrlq(struct adapter *sc, int idx)
 	if (!(ctrlq->eq.flags & EQ_HW_ALLOCATED)) {
 		MPASS(ctrlq->eq.flags & EQ_SW_ALLOCATED);
+		MPASS(ctrlq->nwr_pending == 0);
+		MPASS(ctrlq->ndesc_needed == 0);
 		rc = alloc_eq_hwq(sc, NULL, &ctrlq->eq);
 		if (rc != 0) {
@@ -4556,6 +4565,7 @@ free_wrq(struct adapter *sc, struct sge_wrq *wrq)
 {
 	free_eq(sc, &wrq->eq);
 	MPASS(wrq->nwr_pending == 0);
+	MPASS(wrq->ndesc_needed == 0);
 	MPASS(TAILQ_EMPTY(&wrq->incomplete_wrs));
 	MPASS(STAILQ_EMPTY(&wrq->wr_list));
 	bzero(wrq, sizeof(*wrq));
@@ -4850,6 +4860,9 @@ alloc_ofld_txq(struct vi_info *vi, struct sge_ofld_txq *ofld_txq, int idx)
 	}
 	if (!(eq->flags & EQ_HW_ALLOCATED)) {
 		MPASS(eq->flags & EQ_SW_ALLOCATED);
+		MPASS(ofld_txq->wrq.nwr_pending == 0);
+		MPASS(ofld_txq->wrq.ndesc_needed == 0);
 		rc = alloc_eq_hwq(sc, vi, eq);
 		if (rc != 0) {
 			CH_ERR(vi, "failed to create hw ofld_txq%d: %d\n", idx,