gve: Allocate qpl per ring at ring allocation time

Every tx and rx ring has its own queue-page-list (QPL) that serves as
the bounce buffer. Previously, QPLs for all queues were allocated before
the queues themselves, and each QPL was only later associated with its
queue. That was avoidable complexity: it is much more natural for each
queue to allocate and free its own QPL.

Signed-off-by: Vee Agarwal <veethebee@google.com>

Reviewed by:	markj
MFC after:	2 weeks
Differential Revision:	https://reviews.freebsd.org/D49426

(cherry picked from commit f8ed8382daf4b9a97056b1dba4fe4e5cb4f7485c)
Author:	Vee Agarwal
Date:	2025-04-04 22:53:31 +00:00
Committed by:	Mark Johnston
Parent:	2a88aad628
Commit:	890309a67b
7 changed files with 115 additions and 133 deletions

View file

@ -542,7 +542,6 @@ struct gve_priv {
struct gve_irq_db *irq_db_indices;
enum gve_queue_format queue_format;
struct gve_queue_page_list *qpls;
struct gve_queue_config tx_cfg;
struct gve_queue_config rx_cfg;
uint32_t num_queues;
@ -629,8 +628,9 @@ void gve_db_bar_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val);
void gve_db_bar_dqo_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val);
/* QPL (Queue Page List) functions defined in gve_qpl.c */
int gve_alloc_qpls(struct gve_priv *priv);
void gve_free_qpls(struct gve_priv *priv);
struct gve_queue_page_list *gve_alloc_qpl(struct gve_priv *priv, uint32_t id,
int npages, bool single_kva);
void gve_free_qpl(struct gve_priv *priv, struct gve_queue_page_list *qpl);
int gve_register_qpls(struct gve_priv *priv);
int gve_unregister_qpls(struct gve_priv *priv);
void gve_mextadd_free(struct mbuf *mbuf);

View file

@ -482,8 +482,6 @@ gve_free_rings(struct gve_priv *priv)
gve_free_irqs(priv);
gve_free_tx_rings(priv);
gve_free_rx_rings(priv);
if (gve_is_qpl(priv))
gve_free_qpls(priv);
}
static int
@ -491,12 +489,6 @@ gve_alloc_rings(struct gve_priv *priv)
{
int err;
if (gve_is_qpl(priv)) {
err = gve_alloc_qpls(priv);
if (err != 0)
goto abort;
}
err = gve_alloc_rx_rings(priv);
if (err != 0)
goto abort;

View file

@ -36,28 +36,9 @@
static MALLOC_DEFINE(M_GVE_QPL, "gve qpl", "gve qpl allocations");
static uint32_t
gve_num_tx_qpls(struct gve_priv *priv)
void
gve_free_qpl(struct gve_priv *priv, struct gve_queue_page_list *qpl)
{
if (!gve_is_qpl(priv))
return (0);
return (priv->tx_cfg.max_queues);
}
static uint32_t
gve_num_rx_qpls(struct gve_priv *priv)
{
if (!gve_is_qpl(priv))
return (0);
return (priv->rx_cfg.max_queues);
}
static void
gve_free_qpl(struct gve_priv *priv, uint32_t id)
{
struct gve_queue_page_list *qpl = &priv->qpls[id];
int i;
for (i = 0; i < qpl->num_dmas; i++) {
@ -92,12 +73,14 @@ gve_free_qpl(struct gve_priv *priv, uint32_t id)
if (qpl->dmas != NULL)
free(qpl->dmas, M_GVE_QPL);
free(qpl, M_GVE_QPL);
}
static int
struct gve_queue_page_list *
gve_alloc_qpl(struct gve_priv *priv, uint32_t id, int npages, bool single_kva)
{
struct gve_queue_page_list *qpl = &priv->qpls[id];
struct gve_queue_page_list *qpl;
int err;
int i;
@ -105,9 +88,12 @@ gve_alloc_qpl(struct gve_priv *priv, uint32_t id, int npages, bool single_kva)
device_printf(priv->dev, "Reached max number of registered pages %ju > %ju\n",
(uintmax_t)npages + priv->num_registered_pages,
(uintmax_t)priv->max_registered_pages);
return (EINVAL);
return (NULL);
}
qpl = malloc(sizeof(struct gve_queue_page_list), M_GVE_QPL,
M_WAITOK | M_ZERO);
qpl->id = id;
qpl->num_pages = 0;
qpl->num_dmas = 0;
@ -163,126 +149,90 @@ gve_alloc_qpl(struct gve_priv *priv, uint32_t id, int npages, bool single_kva)
priv->num_registered_pages++;
}
return (0);
return (qpl);
abort:
gve_free_qpl(priv, id);
return (err);
}
void
gve_free_qpls(struct gve_priv *priv)
{
int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
int i;
if (num_qpls == 0)
return;
if (priv->qpls != NULL) {
for (i = 0; i < num_qpls; i++)
gve_free_qpl(priv, i);
free(priv->qpls, M_GVE_QPL);
priv->qpls = NULL;
}
}
int gve_alloc_qpls(struct gve_priv *priv)
{
int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
int num_pages;
int err;
int i;
if (num_qpls == 0)
return (0);
priv->qpls = malloc(num_qpls * sizeof(*priv->qpls), M_GVE_QPL,
M_WAITOK | M_ZERO);
num_pages = gve_is_gqi(priv) ?
priv->tx_desc_cnt / GVE_QPL_DIVISOR :
GVE_TX_NUM_QPL_PAGES_DQO;
for (i = 0; i < gve_num_tx_qpls(priv); i++) {
err = gve_alloc_qpl(priv, i, num_pages,
/*single_kva=*/true);
if (err != 0)
goto abort;
}
num_pages = gve_is_gqi(priv) ? priv->rx_desc_cnt : GVE_RX_NUM_QPL_PAGES_DQO;
for (; i < num_qpls; i++) {
err = gve_alloc_qpl(priv, i, num_pages, /*single_kva=*/false);
if (err != 0)
goto abort;
}
return (0);
abort:
gve_free_qpls(priv);
return (err);
}
static int
gve_unregister_n_qpls(struct gve_priv *priv, int n)
{
int err;
int i;
for (i = 0; i < n; i++) {
err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
if (err != 0) {
device_printf(priv->dev,
"Failed to unregister qpl %d, err: %d\n",
priv->qpls[i].id, err);
}
}
if (err != 0)
return (err);
return (0);
gve_free_qpl(priv, qpl);
return (NULL);
}
int
gve_register_qpls(struct gve_priv *priv)
{
int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
struct gve_ring_com *com;
struct gve_tx_ring *tx;
struct gve_rx_ring *rx;
int err;
int i;
if (gve_get_state_flag(priv, GVE_STATE_FLAG_QPLREG_OK))
return (0);
for (i = 0; i < num_qpls; i++) {
err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
/* Register TX qpls */
for (i = 0; i < priv->tx_cfg.num_queues; i++) {
tx = &priv->tx[i];
com = &tx->com;
err = gve_adminq_register_page_list(priv, com->qpl);
if (err != 0) {
device_printf(priv->dev,
"Failed to register qpl %d, err: %d\n",
priv->qpls[i].id, err);
goto abort;
com->qpl->id, err);
/* Caller schedules a reset when this fails */
return (err);
}
}
/* Register RX qpls */
for (i = 0; i < priv->rx_cfg.num_queues; i++) {
rx = &priv->rx[i];
com = &rx->com;
err = gve_adminq_register_page_list(priv, com->qpl);
if (err != 0) {
device_printf(priv->dev,
"Failed to register qpl %d, err: %d\n",
com->qpl->id, err);
/* Caller schedules a reset when this fails */
return (err);
}
}
gve_set_state_flag(priv, GVE_STATE_FLAG_QPLREG_OK);
return (0);
abort:
gve_unregister_n_qpls(priv, i);
return (err);
}
int
gve_unregister_qpls(struct gve_priv *priv)
{
int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
int err;
int i;
struct gve_ring_com *com;
struct gve_tx_ring *tx;
struct gve_rx_ring *rx;
if (!gve_get_state_flag(priv, GVE_STATE_FLAG_QPLREG_OK))
return (0);
err = gve_unregister_n_qpls(priv, num_qpls);
for (i = 0; i < priv->tx_cfg.num_queues; i++) {
tx = &priv->tx[i];
com = &tx->com;
err = gve_adminq_unregister_page_list(priv, com->qpl->id);
if (err != 0) {
device_printf(priv->dev,
"Failed to unregister qpl %d, err: %d\n",
com->qpl->id, err);
}
}
for (i = 0; i < priv->rx_cfg.num_queues; i++) {
rx = &priv->rx[i];
com = &rx->com;
err = gve_adminq_unregister_page_list(priv, com->qpl->id);
if (err != 0) {
device_printf(priv->dev,
"Failed to unregister qpl %d, err: %d\n",
com->qpl->id, err);
}
}
if (err != 0)
return (err);

View file

@ -36,6 +36,7 @@ static void
gve_rx_free_ring_gqi(struct gve_priv *priv, int i)
{
struct gve_rx_ring *rx = &priv->rx[i];
struct gve_ring_com *com = &rx->com;
if (rx->page_info != NULL) {
free(rx->page_info, M_GVE);
@ -51,6 +52,11 @@ gve_rx_free_ring_gqi(struct gve_priv *priv, int i)
gve_dma_free_coherent(&rx->desc_ring_mem);
rx->desc_ring = NULL;
}
if (com->qpl != NULL) {
gve_free_qpl(priv, com->qpl);
com->qpl = NULL;
}
}
static void
@ -113,10 +119,13 @@ gve_rx_alloc_ring_gqi(struct gve_priv *priv, int i)
rx->mask = priv->rx_pages_per_qpl - 1;
rx->desc_ring = rx->desc_ring_mem.cpu_addr;
com->qpl = &priv->qpls[priv->tx_cfg.max_queues + i];
com->qpl = gve_alloc_qpl(priv, i + priv->tx_cfg.max_queues,
priv->rx_desc_cnt, /*single_kva=*/false);
if (com->qpl == NULL) {
device_printf(priv->dev, "No QPL left for rx ring %d", i);
return (ENOMEM);
device_printf(priv->dev,
"Failed to alloc QPL for rx ring %d", i);
err = ENOMEM;
goto abort;
}
rx->page_info = malloc(priv->rx_desc_cnt * sizeof(*rx->page_info),

View file

@ -58,6 +58,7 @@ void
gve_rx_free_ring_dqo(struct gve_priv *priv, int i)
{
struct gve_rx_ring *rx = &priv->rx[i];
struct gve_ring_com *com = &rx->com;
int j;
if (rx->dqo.compl_ring != NULL) {
@ -86,6 +87,11 @@ gve_rx_free_ring_dqo(struct gve_priv *priv, int i)
if (!gve_is_qpl(priv) && rx->dqo.buf_dmatag)
bus_dma_tag_destroy(rx->dqo.buf_dmatag);
if (com->qpl != NULL) {
gve_free_qpl(priv, com->qpl);
com->qpl = NULL;
}
}
int
@ -123,10 +129,13 @@ gve_rx_alloc_ring_dqo(struct gve_priv *priv, int i)
M_GVE, M_WAITOK | M_ZERO);
if (gve_is_qpl(priv)) {
rx->com.qpl = &priv->qpls[priv->tx_cfg.max_queues + i];
rx->com.qpl = gve_alloc_qpl(priv, i + priv->tx_cfg.max_queues,
GVE_RX_NUM_QPL_PAGES_DQO, /*single_kva=*/false);
if (rx->com.qpl == NULL) {
device_printf(priv->dev, "No QPL left for rx ring %d", i);
return (ENOMEM);
device_printf(priv->dev,
"Failed to alloc QPL for rx ring %d", i);
err = ENOMEM;
goto abort;
}
return (0);
}

View file

@ -52,6 +52,7 @@ static void
gve_tx_free_ring_gqi(struct gve_priv *priv, int i)
{
struct gve_tx_ring *tx = &priv->tx[i];
struct gve_ring_com *com = &tx->com;
if (tx->desc_ring != NULL) {
gve_dma_free_coherent(&tx->desc_ring_mem);
@ -62,6 +63,11 @@ gve_tx_free_ring_gqi(struct gve_priv *priv, int i)
free(tx->info, M_GVE);
tx->info = NULL;
}
if (com->qpl != NULL) {
gve_free_qpl(priv, com->qpl);
com->qpl = NULL;
}
}
static void
@ -109,9 +115,11 @@ gve_tx_alloc_ring_gqi(struct gve_priv *priv, int i)
}
tx->desc_ring = tx->desc_ring_mem.cpu_addr;
com->qpl = &priv->qpls[i];
com->qpl = gve_alloc_qpl(priv, i, priv->tx_desc_cnt / GVE_QPL_DIVISOR,
/*single_kva=*/true);
if (com->qpl == NULL) {
device_printf(priv->dev, "No QPL left for tx ring %d\n", i);
device_printf(priv->dev,
"Failed to alloc QPL for tx ring %d\n", i);
err = ENOMEM;
goto abort;
}

View file

@ -75,6 +75,7 @@ void
gve_tx_free_ring_dqo(struct gve_priv *priv, int i)
{
struct gve_tx_ring *tx = &priv->tx[i];
struct gve_ring_com *com = &tx->com;
int j;
if (tx->dqo.desc_ring != NULL) {
@ -109,6 +110,11 @@ gve_tx_free_ring_dqo(struct gve_priv *priv, int i)
free(tx->dqo.qpl_bufs, M_GVE);
tx->dqo.qpl_bufs = NULL;
}
if (com->qpl != NULL) {
gve_free_qpl(priv, com->qpl);
com->qpl = NULL;
}
}
static int
@ -210,7 +216,15 @@ gve_tx_alloc_ring_dqo(struct gve_priv *priv, int i)
if (gve_is_qpl(priv)) {
int qpl_buf_cnt;
tx->com.qpl = &priv->qpls[i];
tx->com.qpl = gve_alloc_qpl(priv, i, GVE_TX_NUM_QPL_PAGES_DQO,
/*single_kva*/false);
if (tx->com.qpl == NULL) {
device_printf(priv->dev,
"Failed to alloc QPL for tx ring %d", i);
err = ENOMEM;
goto abort;
}
qpl_buf_cnt = GVE_TX_BUFS_PER_PAGE_DQO *
tx->com.qpl->num_pages;