import dpdk-19.11.2-1.el8

This commit is contained in:
CentOS Sources 2020-07-28 07:47:37 -04:00 committed by Stepan Oksanichenko
parent 01b2eb7876
commit d7b3dca526
28 changed files with 347 additions and 3180 deletions

View File

@ -1 +1 @@
6e04c3e3a82f91ebe0360b8067df59e2b774924d SOURCES/dpdk-18.11.2.tar.xz
29e715804c5af6afc44ffeb128716628823c89d2 SOURCES/dpdk-19.11.2.tar.xz

2
.gitignore vendored
View File

@ -1 +1 @@
SOURCES/dpdk-18.11.2.tar.xz
SOURCES/dpdk-19.11.2.tar.xz

View File

@ -1,102 +0,0 @@
From 93f21370ca38ae61dc2d938adf569f6668381c32 Mon Sep 17 00:00:00 2001
From: Jens Freimann <jfreimann@redhat.com>
Date: Mon, 17 Dec 2018 22:31:30 +0100
Subject: [PATCH 01/18] net/virtio: add packed virtqueue defines
[ upstream commit 4c3f5822eb21476fbbd807a7c40584c1090695e5 ]
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit 4c3f5822eb21476fbbd807a7c40584c1090695e5)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_pci.h | 1 +
drivers/net/virtio/virtio_ring.h | 30 ++++++++++++++++++++++++++++++
drivers/net/virtio/virtqueue.h | 6 ++++++
3 files changed, 37 insertions(+)
diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h
index e961a58ca..4c975a531 100644
--- a/drivers/net/virtio/virtio_pci.h
+++ b/drivers/net/virtio/virtio_pci.h
@@ -113,6 +113,7 @@ struct virtnet_ctl;
#define VIRTIO_F_VERSION_1 32
#define VIRTIO_F_IOMMU_PLATFORM 33
+#define VIRTIO_F_RING_PACKED 34
/*
* Some VirtIO feature bits (currently bits 28 through 31) are
diff --git a/drivers/net/virtio/virtio_ring.h b/drivers/net/virtio/virtio_ring.h
index 9e3c2a015..464449074 100644
--- a/drivers/net/virtio/virtio_ring.h
+++ b/drivers/net/virtio/virtio_ring.h
@@ -15,6 +15,10 @@
#define VRING_DESC_F_WRITE 2
/* This means the buffer contains a list of buffer descriptors. */
#define VRING_DESC_F_INDIRECT 4
+/* This flag means the descriptor was made available by the driver */
+#define VRING_DESC_F_AVAIL(b) ((uint16_t)(b) << 7)
+/* This flag means the descriptor was used by the device */
+#define VRING_DESC_F_USED(b) ((uint16_t)(b) << 15)
/* The Host uses this in used->flags to advise the Guest: don't kick me
* when you add a buffer. It's unreliable, so it's simply an
@@ -54,6 +58,32 @@ struct vring_used {
struct vring_used_elem ring[0];
};
+/* For support of packed virtqueues in Virtio 1.1 the format of descriptors
+ * looks like this.
+ */
+struct vring_packed_desc {
+ uint64_t addr;
+ uint32_t len;
+ uint16_t id;
+ uint16_t flags;
+};
+
+#define RING_EVENT_FLAGS_ENABLE 0x0
+#define RING_EVENT_FLAGS_DISABLE 0x1
+#define RING_EVENT_FLAGS_DESC 0x2
+struct vring_packed_desc_event {
+ uint16_t desc_event_off_wrap;
+ uint16_t desc_event_flags;
+};
+
+struct vring_packed {
+ unsigned int num;
+ struct vring_packed_desc *desc_packed;
+ struct vring_packed_desc_event *driver_event;
+ struct vring_packed_desc_event *device_event;
+
+};
+
struct vring {
unsigned int num;
struct vring_desc *desc;
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 2e2abf15b..1525c7d10 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -161,11 +161,17 @@ struct virtio_pmd_ctrl {
struct vq_desc_extra {
void *cookie;
uint16_t ndescs;
+ uint16_t next;
};
struct virtqueue {
struct virtio_hw *hw; /**< virtio_hw structure pointer. */
struct vring vq_ring; /**< vring keeping desc, used and avail */
+ struct vring_packed ring_packed; /**< vring keeping descs */
+ bool avail_wrap_counter;
+ bool used_wrap_counter;
+ uint16_t event_flags_shadow;
+ uint16_t avail_used_flags;
/**
* Last consumed descriptor in the used table,
* trails vq_ring.used->idx.
--
2.21.0

View File

@ -1,78 +0,0 @@
From 8093f82b3e52efe012e46c429b7af4e82492f71c Mon Sep 17 00:00:00 2001
From: Maxime Coquelin <maxime.coquelin@redhat.com>
Date: Tue, 27 Nov 2018 11:54:27 +0100
Subject: [PATCH] net/virtio: allocate vrings on device NUMA node
[ upstream commit 4a5140ab17d29e77eefa47b5cb514238e8e0c132 ]
When a guest is spanned on multiple NUMA nodes and
multiple Virtio devices are spanned onto these nodes,
we expect that their ring memory is allocated in the
right memory node.
Otherwise, vCPUs from node A may be polling Virtio rings
allocated on node B, which would increase QPI bandwidth
and impact performance.
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Reviewed-by: David Marchand <david.marchand@redhat.com>
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
drivers/net/virtio/virtio_ethdev.c | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 2ba66d291..cb2b2e0bf 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -335,8 +335,10 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
void *sw_ring = NULL;
int queue_type = virtio_get_queue_type(hw, vtpci_queue_idx);
int ret;
+ int numa_node = dev->device->numa_node;
- PMD_INIT_LOG(DEBUG, "setting up queue: %u", vtpci_queue_idx);
+ PMD_INIT_LOG(INFO, "setting up queue: %u on NUMA node %d",
+ vtpci_queue_idx, numa_node);
/*
* Read the virtqueue size from the Queue Size field
@@ -372,7 +374,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
}
vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
- SOCKET_ID_ANY);
+ numa_node);
if (vq == NULL) {
PMD_INIT_LOG(ERR, "can not allocate vq");
return -ENOMEM;
@@ -392,7 +394,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
size, vq->vq_ring_size);
mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
- SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG,
+ numa_node, RTE_MEMZONE_IOVA_CONTIG,
VIRTIO_PCI_VRING_ALIGN);
if (mz == NULL) {
if (rte_errno == EEXIST)
@@ -418,7 +420,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_vq%d_hdr",
dev->data->port_id, vtpci_queue_idx);
hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz,
- SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG,
+ numa_node, RTE_MEMZONE_IOVA_CONTIG,
RTE_CACHE_LINE_SIZE);
if (hdr_mz == NULL) {
if (rte_errno == EEXIST)
@@ -435,7 +437,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
sizeof(vq->sw_ring[0]);
sw_ring = rte_zmalloc_socket("sw_ring", sz_sw,
- RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+ RTE_CACHE_LINE_SIZE, numa_node);
if (!sw_ring) {
PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
ret = -ENOMEM;
--
2.20.1

View File

@ -1,141 +0,0 @@
From 652a2e3a1ba0db81ae1814e8c3cb989e9e89c4e0 Mon Sep 17 00:00:00 2001
From: Jens Freimann <jfreimann@redhat.com>
Date: Mon, 17 Dec 2018 22:31:31 +0100
Subject: [PATCH 02/18] net/virtio: add packed virtqueue helpers
[ upstream commit e9f4feb7e6225f671b59375aff44b9d576121577 ]
Add helper functions to set/clear and check descriptor flags.
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit e9f4feb7e6225f671b59375aff44b9d576121577)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_pci.h | 6 +++
drivers/net/virtio/virtqueue.h | 72 ++++++++++++++++++++++++++++++++-
2 files changed, 76 insertions(+), 2 deletions(-)
diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h
index 4c975a531..b22b62dad 100644
--- a/drivers/net/virtio/virtio_pci.h
+++ b/drivers/net/virtio/virtio_pci.h
@@ -315,6 +315,12 @@ vtpci_with_feature(struct virtio_hw *hw, uint64_t bit)
return (hw->guest_features & (1ULL << bit)) != 0;
}
+static inline int
+vtpci_packed_queue(struct virtio_hw *hw)
+{
+ return vtpci_with_feature(hw, VIRTIO_F_RING_PACKED);
+}
+
/*
* Function declaration from virtio_pci.c
*/
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 1525c7d10..c32812427 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -251,6 +251,31 @@ struct virtio_tx_region {
__attribute__((__aligned__(16)));
};
+static inline int
+desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
+{
+ uint16_t used, avail, flags;
+
+ flags = desc->flags;
+ used = !!(flags & VRING_DESC_F_USED(1));
+ avail = !!(flags & VRING_DESC_F_AVAIL(1));
+
+ return avail == used && used == vq->used_wrap_counter;
+}
+
+
+static inline void
+vring_desc_init_packed(struct virtqueue *vq, int n)
+{
+ int i;
+ for (i = 0; i < n - 1; i++) {
+ vq->ring_packed.desc_packed[i].id = i;
+ vq->vq_descx[i].next = i + 1;
+ }
+ vq->ring_packed.desc_packed[i].id = i;
+ vq->vq_descx[i].next = VQ_RING_DESC_CHAIN_END;
+}
+
/* Chain all the descriptors in the ring with an END */
static inline void
vring_desc_init(struct vring_desc *dp, uint16_t n)
@@ -262,13 +287,53 @@ vring_desc_init(struct vring_desc *dp, uint16_t n)
dp[i].next = VQ_RING_DESC_CHAIN_END;
}
+/**
+ * Tell the backend not to interrupt us.
+ */
+static inline void
+virtqueue_disable_intr_packed(struct virtqueue *vq)
+{
+ uint16_t *event_flags = &vq->ring_packed.driver_event->desc_event_flags;
+
+ *event_flags = RING_EVENT_FLAGS_DISABLE;
+}
+
+
/**
* Tell the backend not to interrupt us.
*/
static inline void
virtqueue_disable_intr(struct virtqueue *vq)
{
- vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+ if (vtpci_packed_queue(vq->hw))
+ virtqueue_disable_intr_packed(vq);
+ else
+ vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+}
+
+/**
+ * Tell the backend to interrupt. Implementation for packed virtqueues.
+ */
+static inline void
+virtqueue_enable_intr_packed(struct virtqueue *vq)
+{
+ uint16_t *event_flags = &vq->ring_packed.driver_event->desc_event_flags;
+
+
+ if (vq->event_flags_shadow == RING_EVENT_FLAGS_DISABLE) {
+ virtio_wmb();
+ vq->event_flags_shadow = RING_EVENT_FLAGS_ENABLE;
+ *event_flags = vq->event_flags_shadow;
+ }
+}
+
+/**
+ * Tell the backend to interrupt. Implementation for split virtqueues.
+ */
+static inline void
+virtqueue_enable_intr_split(struct virtqueue *vq)
+{
+ vq->vq_ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT);
}
/**
@@ -277,7 +342,10 @@ virtqueue_disable_intr(struct virtqueue *vq)
static inline void
virtqueue_enable_intr(struct virtqueue *vq)
{
- vq->vq_ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT);
+ if (vtpci_packed_queue(vq->hw))
+ virtqueue_enable_intr_packed(vq);
+ else
+ virtqueue_enable_intr_split(vq);
}
/**
--
2.21.0

View File

@ -1,175 +0,0 @@
From 4e832cad1879f87a694e2f78b8718f986f7c76e2 Mon Sep 17 00:00:00 2001
From: Jens Freimann <jfreimann@redhat.com>
Date: Mon, 17 Dec 2018 22:31:32 +0100
Subject: [PATCH 03/18] net/virtio: vring init for packed queues
[ upstream commit f803734b0f2e6c556d9bf7fe8f11638429e3a00f ]
Add and initialize descriptor data structures.
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit f803734b0f2e6c556d9bf7fe8f11638429e3a00f)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_ethdev.c | 32 ++++++++++++++++++++----------
drivers/net/virtio/virtio_ring.h | 28 ++++++++++++++++++++++----
drivers/net/virtio/virtqueue.h | 2 +-
3 files changed, 46 insertions(+), 16 deletions(-)
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 2ba66d291..ee52e3cdb 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -299,20 +299,22 @@ virtio_init_vring(struct virtqueue *vq)
PMD_INIT_FUNC_TRACE();
- /*
- * Reinitialise since virtio port might have been stopped and restarted
- */
memset(ring_mem, 0, vq->vq_ring_size);
- vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
+
vq->vq_used_cons_idx = 0;
vq->vq_desc_head_idx = 0;
vq->vq_avail_idx = 0;
vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
vq->vq_free_cnt = vq->vq_nentries;
memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
-
- vring_desc_init(vr->desc, size);
-
+ if (vtpci_packed_queue(vq->hw)) {
+ vring_init_packed(&vq->ring_packed, ring_mem,
+ VIRTIO_PCI_VRING_ALIGN, size);
+ vring_desc_init_packed(vq, size);
+ } else {
+ vring_init_split(vr, ring_mem, VIRTIO_PCI_VRING_ALIGN, size);
+ vring_desc_init_split(vr->desc, size);
+ }
/*
* Disable device(host) interrupting guest
*/
@@ -382,11 +384,16 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
vq->hw = hw;
vq->vq_queue_index = vtpci_queue_idx;
vq->vq_nentries = vq_size;
+ vq->event_flags_shadow = 0;
+ if (vtpci_packed_queue(hw)) {
+ vq->avail_wrap_counter = 1;
+ vq->used_wrap_counter = 1;
+ }
/*
* Reserve a memzone for vring elements
*/
- size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
+ size = vring_size(hw, vq_size, VIRTIO_PCI_VRING_ALIGN);
vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d",
size, vq->vq_ring_size);
@@ -489,7 +496,8 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
for (i = 0; i < vq_size; i++) {
struct vring_desc *start_dp = txr[i].tx_indir;
- vring_desc_init(start_dp, RTE_DIM(txr[i].tx_indir));
+ vring_desc_init_split(start_dp,
+ RTE_DIM(txr[i].tx_indir));
/* first indirect descriptor is always the tx header */
start_dp->addr = txvq->virtio_net_hdr_mem
@@ -1486,7 +1494,8 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
/* Setting up rx_header size for the device */
if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
- vtpci_with_feature(hw, VIRTIO_F_VERSION_1))
+ vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
+ vtpci_with_feature(hw, VIRTIO_F_RING_PACKED))
hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
else
hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
@@ -1906,7 +1915,8 @@ virtio_dev_configure(struct rte_eth_dev *dev)
if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
hw->use_inorder_tx = 1;
- if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) &&
+ !vtpci_packed_queue(hw)) {
hw->use_inorder_rx = 1;
hw->use_simple_rx = 0;
} else {
diff --git a/drivers/net/virtio/virtio_ring.h b/drivers/net/virtio/virtio_ring.h
index 464449074..1760823c6 100644
--- a/drivers/net/virtio/virtio_ring.h
+++ b/drivers/net/virtio/virtio_ring.h
@@ -125,10 +125,18 @@ struct vring {
#define vring_avail_event(vr) (*(uint16_t *)&(vr)->used->ring[(vr)->num])
static inline size_t
-vring_size(unsigned int num, unsigned long align)
+vring_size(struct virtio_hw *hw, unsigned int num, unsigned long align)
{
size_t size;
+ if (vtpci_packed_queue(hw)) {
+ size = num * sizeof(struct vring_packed_desc);
+ size += sizeof(struct vring_packed_desc_event);
+ size = RTE_ALIGN_CEIL(size, align);
+ size += sizeof(struct vring_packed_desc_event);
+ return size;
+ }
+
size = num * sizeof(struct vring_desc);
size += sizeof(struct vring_avail) + (num * sizeof(uint16_t));
size = RTE_ALIGN_CEIL(size, align);
@@ -136,10 +144,9 @@ vring_size(unsigned int num, unsigned long align)
(num * sizeof(struct vring_used_elem));
return size;
}
-
static inline void
-vring_init(struct vring *vr, unsigned int num, uint8_t *p,
- unsigned long align)
+vring_init_split(struct vring *vr, uint8_t *p, unsigned long align,
+ unsigned int num)
{
vr->num = num;
vr->desc = (struct vring_desc *) p;
@@ -149,6 +156,19 @@ vring_init(struct vring *vr, unsigned int num, uint8_t *p,
RTE_ALIGN_CEIL((uintptr_t)(&vr->avail->ring[num]), align);
}
+static inline void
+vring_init_packed(struct vring_packed *vr, uint8_t *p, unsigned long align,
+ unsigned int num)
+{
+ vr->num = num;
+ vr->desc_packed = (struct vring_packed_desc *)p;
+ vr->driver_event = (struct vring_packed_desc_event *)(p +
+ vr->num * sizeof(struct vring_packed_desc));
+ vr->device_event = (struct vring_packed_desc_event *)
+ RTE_ALIGN_CEIL((uintptr_t)(vr->driver_event +
+ sizeof(struct vring_packed_desc_event)), align);
+}
+
/*
* The following is used with VIRTIO_RING_F_EVENT_IDX.
* Assuming a given event_idx value from the other size, if we have
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index c32812427..d08ef9112 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -278,7 +278,7 @@ vring_desc_init_packed(struct virtqueue *vq, int n)
/* Chain all the descriptors in the ring with an END */
static inline void
-vring_desc_init(struct vring_desc *dp, uint16_t n)
+vring_desc_init_split(struct vring_desc *dp, uint16_t n)
{
uint16_t i;
--
2.21.0

View File

@ -1,41 +0,0 @@
From 2dc70f1db67091cc3a9131d2093da464738b31d8 Mon Sep 17 00:00:00 2001
From: Jens Freimann <jfreimann@redhat.com>
Date: Mon, 17 Dec 2018 22:31:33 +0100
Subject: [PATCH 04/18] net/virtio: dump packed virtqueue data
[ upstream commit 56785a2d6fad987c025278909307db776df59bd9 ]
Add support to dump packed virtqueue data to the
VIRTQUEUE_DUMP() macro.
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit 56785a2d6fad987c025278909307db776df59bd9)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtqueue.h | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index d08ef9112..e9c35a553 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -434,6 +434,15 @@ virtqueue_notify(struct virtqueue *vq)
uint16_t used_idx, nused; \
used_idx = (vq)->vq_ring.used->idx; \
nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
+ if (vtpci_packed_queue((vq)->hw)) { \
+ PMD_INIT_LOG(DEBUG, \
+ "VQ: - size=%d; free=%d; used_cons_idx=%d; avail_idx=%d;" \
+ "VQ: - avail_wrap_counter=%d; used_wrap_counter=%d", \
+ (vq)->vq_nentries, (vq)->vq_free_cnt, (vq)->vq_used_cons_idx, \
+ (vq)->vq_avail_idx, (vq)->avail_wrap_counter, \
+ (vq)->used_wrap_counter); \
+ break; \
+ } \
PMD_INIT_LOG(DEBUG, \
"VQ: - size=%d; free=%d; used=%d; desc_head_idx=%d;" \
" avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \
--
2.21.0

View File

@ -1,448 +0,0 @@
From 97ee69c836bfb08e674fd0f28d1fc7a14f2d4de0 Mon Sep 17 00:00:00 2001
From: Jens Freimann <jfreimann@redhat.com>
Date: Mon, 17 Dec 2018 22:31:34 +0100
Subject: [PATCH 05/18] net/virtio: implement Tx path for packed queues
[ upstream commit 892dc798fa9c24e6172b8bcecc9586f2f9a7a49e ]
This implements the transmit path for devices with
support for packed virtqueues.
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit 892dc798fa9c24e6172b8bcecc9586f2f9a7a49e)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_ethdev.c | 56 ++++---
drivers/net/virtio/virtio_ethdev.h | 2 +
drivers/net/virtio/virtio_rxtx.c | 236 ++++++++++++++++++++++++++++-
drivers/net/virtio/virtqueue.h | 20 ++-
4 files changed, 292 insertions(+), 22 deletions(-)
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index ee52e3cdb..6023d6f2c 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -388,6 +388,9 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
if (vtpci_packed_queue(hw)) {
vq->avail_wrap_counter = 1;
vq->used_wrap_counter = 1;
+ vq->avail_used_flags =
+ VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
+ VRING_DESC_F_USED(!vq->avail_wrap_counter);
}
/*
@@ -495,17 +498,26 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
memset(txr, 0, vq_size * sizeof(*txr));
for (i = 0; i < vq_size; i++) {
struct vring_desc *start_dp = txr[i].tx_indir;
-
- vring_desc_init_split(start_dp,
- RTE_DIM(txr[i].tx_indir));
+ struct vring_packed_desc *start_dp_packed =
+ txr[i].tx_indir_pq;
/* first indirect descriptor is always the tx header */
- start_dp->addr = txvq->virtio_net_hdr_mem
- + i * sizeof(*txr)
- + offsetof(struct virtio_tx_region, tx_hdr);
-
- start_dp->len = hw->vtnet_hdr_size;
- start_dp->flags = VRING_DESC_F_NEXT;
+ if (vtpci_packed_queue(hw)) {
+ start_dp_packed->addr = txvq->virtio_net_hdr_mem
+ + i * sizeof(*txr)
+ + offsetof(struct virtio_tx_region,
+ tx_hdr);
+ start_dp_packed->len = hw->vtnet_hdr_size;
+ } else {
+ vring_desc_init_split(start_dp,
+ RTE_DIM(txr[i].tx_indir));
+ start_dp->addr = txvq->virtio_net_hdr_mem
+ + i * sizeof(*txr)
+ + offsetof(struct virtio_tx_region,
+ tx_hdr);
+ start_dp->len = hw->vtnet_hdr_size;
+ start_dp->flags = VRING_DESC_F_NEXT;
+ }
}
}
@@ -1334,6 +1346,23 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
{
struct virtio_hw *hw = eth_dev->data->dev_private;
+ if (vtpci_packed_queue(hw)) {
+ PMD_INIT_LOG(INFO,
+ "virtio: using packed ring standard Tx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed;
+ } else {
+ if (hw->use_inorder_tx) {
+ PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->tx_pkt_burst = virtio_xmit_pkts_inorder;
+ } else {
+ PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->tx_pkt_burst = virtio_xmit_pkts;
+ }
+ }
+
if (hw->use_simple_rx) {
PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
eth_dev->data->port_id);
@@ -1354,15 +1383,6 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
eth_dev->rx_pkt_burst = &virtio_recv_pkts;
}
- if (hw->use_inorder_tx) {
- PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
- eth_dev->data->port_id);
- eth_dev->tx_pkt_burst = virtio_xmit_pkts_inorder;
- } else {
- PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u",
- eth_dev->data->port_id);
- eth_dev->tx_pkt_burst = virtio_xmit_pkts;
- }
}
/* Only support 1:1 queue/interrupt mapping so far.
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index e0f80e5a4..05d355180 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -82,6 +82,8 @@ uint16_t virtio_recv_mergeable_pkts_inorder(void *rx_queue,
uint16_t virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+uint16_t virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
uint16_t virtio_xmit_pkts_inorder(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index eb891433e..ab74917a8 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -88,6 +88,23 @@ vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
dp->next = VQ_RING_DESC_CHAIN_END;
}
+static void
+vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)
+{
+ struct vq_desc_extra *dxp;
+
+ dxp = &vq->vq_descx[id];
+ vq->vq_free_cnt += dxp->ndescs;
+
+ if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END)
+ vq->vq_desc_head_idx = id;
+ else
+ vq->vq_descx[vq->vq_desc_tail_idx].next = id;
+
+ vq->vq_desc_tail_idx = id;
+ dxp->next = VQ_RING_DESC_CHAIN_END;
+}
+
static uint16_t
virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
uint32_t *len, uint16_t num)
@@ -165,6 +182,33 @@ virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
#endif
/* Cleanup from completed transmits. */
+static void
+virtio_xmit_cleanup_packed(struct virtqueue *vq, int num)
+{
+ uint16_t used_idx, id;
+ uint16_t size = vq->vq_nentries;
+ struct vring_packed_desc *desc = vq->ring_packed.desc_packed;
+ struct vq_desc_extra *dxp;
+
+ used_idx = vq->vq_used_cons_idx;
+ while (num-- && desc_is_used(&desc[used_idx], vq)) {
+ used_idx = vq->vq_used_cons_idx;
+ id = desc[used_idx].id;
+ dxp = &vq->vq_descx[id];
+ vq->vq_used_cons_idx += dxp->ndescs;
+ if (vq->vq_used_cons_idx >= size) {
+ vq->vq_used_cons_idx -= size;
+ vq->used_wrap_counter ^= 1;
+ }
+ vq_ring_free_id_packed(vq, id);
+ if (dxp->cookie != NULL) {
+ rte_pktmbuf_free(dxp->cookie);
+ dxp->cookie = NULL;
+ }
+ used_idx = vq->vq_used_cons_idx;
+ }
+}
+
static void
virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
{
@@ -456,6 +500,107 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);
}
+static inline void
+virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
+ uint16_t needed, int can_push)
+{
+ struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
+ struct vq_desc_extra *dxp;
+ struct virtqueue *vq = txvq->vq;
+ struct vring_packed_desc *start_dp, *head_dp;
+ uint16_t idx, id, head_idx, head_flags;
+ uint16_t head_size = vq->hw->vtnet_hdr_size;
+ struct virtio_net_hdr *hdr;
+ uint16_t prev;
+
+ id = vq->vq_desc_head_idx;
+
+ dxp = &vq->vq_descx[id];
+ dxp->ndescs = needed;
+ dxp->cookie = cookie;
+
+ head_idx = vq->vq_avail_idx;
+ idx = head_idx;
+ prev = head_idx;
+ start_dp = vq->ring_packed.desc_packed;
+
+ head_dp = &vq->ring_packed.desc_packed[idx];
+ head_flags = cookie->next ? VRING_DESC_F_NEXT : 0;
+ head_flags |= vq->avail_used_flags;
+
+ if (can_push) {
+ /* prepend cannot fail, checked by caller */
+ hdr = (struct virtio_net_hdr *)
+ rte_pktmbuf_prepend(cookie, head_size);
+ /* rte_pktmbuf_prepend() counts the hdr size to the pkt length,
+ * which is wrong. Below subtract restores correct pkt size.
+ */
+ cookie->pkt_len -= head_size;
+
+ /* if offload disabled, it is not zeroed below, do it now */
+ if (!vq->hw->has_tx_offload) {
+ ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
+ }
+ } else {
+ /* setup first tx ring slot to point to header
+ * stored in reserved region.
+ */
+ start_dp[idx].addr = txvq->virtio_net_hdr_mem +
+ RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
+ start_dp[idx].len = vq->hw->vtnet_hdr_size;
+ hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
+ idx++;
+ if (idx >= vq->vq_nentries) {
+ idx -= vq->vq_nentries;
+ vq->avail_wrap_counter ^= 1;
+ vq->avail_used_flags =
+ VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
+ VRING_DESC_F_USED(!vq->avail_wrap_counter);
+ }
+ }
+
+ virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
+
+ do {
+ uint16_t flags;
+
+ start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
+ start_dp[idx].len = cookie->data_len;
+ if (likely(idx != head_idx)) {
+ flags = cookie->next ? VRING_DESC_F_NEXT : 0;
+ flags |= vq->avail_used_flags;
+ start_dp[idx].flags = flags;
+ }
+ prev = idx;
+ idx++;
+ if (idx >= vq->vq_nentries) {
+ idx -= vq->vq_nentries;
+ vq->avail_wrap_counter ^= 1;
+ vq->avail_used_flags =
+ VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
+ VRING_DESC_F_USED(!vq->avail_wrap_counter);
+ }
+ } while ((cookie = cookie->next) != NULL);
+
+ start_dp[prev].id = id;
+
+ vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
+
+ vq->vq_desc_head_idx = dxp->next;
+ if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
+ vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
+
+ vq->vq_avail_idx = idx;
+
+ rte_smp_wmb();
+ head_dp->flags = head_flags;
+}
+
static inline void
virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
uint16_t needed, int use_indirect, int can_push,
@@ -733,8 +878,10 @@ virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
- if (hw->use_inorder_tx)
- vq->vq_ring.desc[vq->vq_nentries - 1].next = 0;
+ if (!vtpci_packed_queue(hw)) {
+ if (hw->use_inorder_tx)
+ vq->vq_ring.desc[vq->vq_nentries - 1].next = 0;
+ }
VIRTQUEUE_DUMP(vq);
@@ -1346,6 +1493,91 @@ virtio_recv_mergeable_pkts(void *rx_queue,
return nb_rx;
}
+uint16_t
+virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct virtnet_tx *txvq = tx_queue;
+ struct virtqueue *vq = txvq->vq;
+ struct virtio_hw *hw = vq->hw;
+ uint16_t hdr_size = hw->vtnet_hdr_size;
+ uint16_t nb_tx = 0;
+ int error;
+
+ if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
+ return nb_tx;
+
+ if (unlikely(nb_pkts < 1))
+ return nb_pkts;
+
+ PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
+
+ if (nb_pkts > vq->vq_free_cnt)
+ virtio_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt);
+
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ struct rte_mbuf *txm = tx_pkts[nb_tx];
+ int can_push = 0, slots, need;
+
+ /* Do VLAN tag insertion */
+ if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
+ error = rte_vlan_insert(&txm);
+ if (unlikely(error)) {
+ rte_pktmbuf_free(txm);
+ continue;
+ }
+ }
+
+ /* optimize ring usage */
+ if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
+ vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
+ rte_mbuf_refcnt_read(txm) == 1 &&
+ RTE_MBUF_DIRECT(txm) &&
+ txm->nb_segs == 1 &&
+ rte_pktmbuf_headroom(txm) >= hdr_size &&
+ rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
+ __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
+ can_push = 1;
+
+ /* How many main ring entries are needed to this Tx?
+ * any_layout => number of segments
+ * default => number of segments + 1
+ */
+ slots = txm->nb_segs + !can_push;
+ need = slots - vq->vq_free_cnt;
+
+ /* Positive value indicates it need free vring descriptors */
+ if (unlikely(need > 0)) {
+ virtio_rmb();
+ need = RTE_MIN(need, (int)nb_pkts);
+ virtio_xmit_cleanup_packed(vq, need);
+ need = slots - vq->vq_free_cnt;
+ if (unlikely(need > 0)) {
+ PMD_TX_LOG(ERR,
+ "No free tx descriptors to transmit");
+ break;
+ }
+ }
+
+ /* Enqueue Packet buffers */
+ virtqueue_enqueue_xmit_packed(txvq, txm, slots, can_push);
+
+ txvq->stats.bytes += txm->pkt_len;
+ virtio_update_packet_stats(&txvq->stats, txm);
+ }
+
+ txvq->stats.packets += nb_tx;
+
+ if (likely(nb_tx)) {
+ if (unlikely(virtqueue_kick_prepare_packed(vq))) {
+ virtqueue_notify(vq);
+ PMD_TX_LOG(DEBUG, "Notified backend after xmit");
+ }
+ }
+
+ return nb_tx;
+}
+
uint16_t
virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index e9c35a553..b142fd488 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -247,8 +247,12 @@ struct virtio_net_hdr_mrg_rxbuf {
#define VIRTIO_MAX_TX_INDIRECT 8
struct virtio_tx_region {
struct virtio_net_hdr_mrg_rxbuf tx_hdr;
- struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT]
- __attribute__((__aligned__(16)));
+ union {
+ struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT]
+ __attribute__((__aligned__(16)));
+ struct vring_packed_desc tx_indir_pq[VIRTIO_MAX_TX_INDIRECT]
+ __attribute__((__aligned__(16)));
+ };
};
static inline int
@@ -380,6 +384,7 @@ virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx);
+void vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t used_idx);
void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
uint16_t num);
@@ -418,6 +423,17 @@ virtqueue_kick_prepare(struct virtqueue *vq)
return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
}
+static inline int
+virtqueue_kick_prepare_packed(struct virtqueue *vq)
+{
+ uint16_t flags;
+
+ virtio_mb();
+ flags = vq->ring_packed.device_event->desc_event_flags;
+
+ return flags != RING_EVENT_FLAGS_DISABLE;
+}
+
static inline void
virtqueue_notify(struct virtqueue *vq)
{
--
2.21.0

View File

@ -1,613 +0,0 @@
From a1168f29a051eba2344407d72267b5d5f648d80c Mon Sep 17 00:00:00 2001
From: Jens Freimann <jfreimann@redhat.com>
Date: Mon, 17 Dec 2018 22:31:35 +0100
Subject: [PATCH 06/18] net/virtio: implement Rx path for packed queues
[ upstream commit a76290c8f1cf9c4774c23592921302a04a90bded ]
Implement the receive part.
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit a76290c8f1cf9c4774c23592921302a04a90bded)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_ethdev.c | 56 +++--
drivers/net/virtio/virtio_ethdev.h | 5 +
drivers/net/virtio/virtio_rxtx.c | 375 ++++++++++++++++++++++++++++-
drivers/net/virtio/virtqueue.c | 43 +++-
4 files changed, 457 insertions(+), 22 deletions(-)
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 6023d6f2c..4ef1da393 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -1363,24 +1363,40 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
}
}
- if (hw->use_simple_rx) {
- PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
- eth_dev->data->port_id);
- eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
- } else if (hw->use_inorder_rx) {
- PMD_INIT_LOG(INFO,
- "virtio: using inorder mergeable buffer Rx path on port %u",
- eth_dev->data->port_id);
- eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts_inorder;
- } else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
- PMD_INIT_LOG(INFO,
- "virtio: using mergeable buffer Rx path on port %u",
- eth_dev->data->port_id);
- eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
+ if (vtpci_packed_queue(hw)) {
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ PMD_INIT_LOG(INFO,
+ "virtio: using packed ring mergeable buffer Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst =
+ &virtio_recv_mergeable_pkts_packed;
+ } else {
+ PMD_INIT_LOG(INFO,
+ "virtio: using packed ring standard Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
+ }
} else {
- PMD_INIT_LOG(INFO, "virtio: using standard Rx path on port %u",
- eth_dev->data->port_id);
- eth_dev->rx_pkt_burst = &virtio_recv_pkts;
+ if (hw->use_simple_rx) {
+ PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
+ } else if (hw->use_inorder_rx) {
+ PMD_INIT_LOG(INFO,
+ "virtio: using inorder mergeable buffer Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst =
+ &virtio_recv_mergeable_pkts_inorder;
+ } else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ PMD_INIT_LOG(INFO,
+ "virtio: using mergeable buffer Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
+ } else {
+ PMD_INIT_LOG(INFO, "virtio: using standard Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst = &virtio_recv_pkts;
+ }
}
}
@@ -1944,6 +1960,12 @@ virtio_dev_configure(struct rte_eth_dev *dev)
}
}
+ if (vtpci_packed_queue(hw)) {
+ hw->use_simple_rx = 0;
+ hw->use_inorder_rx = 0;
+ hw->use_inorder_tx = 0;
+ }
+
#if defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
hw->use_simple_rx = 0;
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index 05d355180..88b8c42a3 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -73,10 +73,15 @@ int virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+uint16_t virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
uint16_t virtio_recv_mergeable_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+uint16_t virtio_recv_mergeable_pkts_packed(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
uint16_t virtio_recv_mergeable_pkts_inorder(void *rx_queue,
struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index ab74917a8..0bcf3b08a 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -31,6 +31,7 @@
#include "virtqueue.h"
#include "virtio_rxtx.h"
#include "virtio_rxtx_simple.h"
+#include "virtio_ring.h"
#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
@@ -105,6 +106,47 @@ vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)
dxp->next = VQ_RING_DESC_CHAIN_END;
}
+static uint16_t
+virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
+ struct rte_mbuf **rx_pkts,
+ uint32_t *len,
+ uint16_t num)
+{
+ struct rte_mbuf *cookie;
+ uint16_t used_idx;
+ uint16_t id;
+ struct vring_packed_desc *desc;
+ uint16_t i;
+
+ desc = vq->ring_packed.desc_packed;
+
+ for (i = 0; i < num; i++) {
+ used_idx = vq->vq_used_cons_idx;
+ if (!desc_is_used(&desc[used_idx], vq))
+ return i;
+ len[i] = desc[used_idx].len;
+ id = desc[used_idx].id;
+ cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
+ if (unlikely(cookie == NULL)) {
+ PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
+ vq->vq_used_cons_idx);
+ break;
+ }
+ rte_prefetch0(cookie);
+ rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
+ rx_pkts[i] = cookie;
+
+ vq->vq_free_cnt++;
+ vq->vq_used_cons_idx++;
+ if (vq->vq_used_cons_idx >= vq->vq_nentries) {
+ vq->vq_used_cons_idx -= vq->vq_nentries;
+ vq->used_wrap_counter ^= 1;
+ }
+ }
+
+ return i;
+}
+
static uint16_t
virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
uint32_t *len, uint16_t num)
@@ -350,6 +392,51 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
return 0;
}
+static inline int
+virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
+ struct rte_mbuf **cookie, uint16_t num)
+{
+ struct vring_packed_desc *start_dp = vq->ring_packed.desc_packed;
+ uint16_t flags = VRING_DESC_F_WRITE | vq->avail_used_flags;
+ struct virtio_hw *hw = vq->hw;
+ struct vq_desc_extra *dxp;
+ uint16_t idx;
+ int i;
+
+ if (unlikely(vq->vq_free_cnt == 0))
+ return -ENOSPC;
+ if (unlikely(vq->vq_free_cnt < num))
+ return -EMSGSIZE;
+
+ for (i = 0; i < num; i++) {
+ idx = vq->vq_avail_idx;
+ dxp = &vq->vq_descx[idx];
+ dxp->cookie = (void *)cookie[i];
+ dxp->ndescs = 1;
+
+ start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookie[i], vq) +
+ RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
+ start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM
+ + hw->vtnet_hdr_size;
+
+ vq->vq_desc_head_idx = dxp->next;
+ if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
+ vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
+ rte_smp_wmb();
+ start_dp[idx].flags = flags;
+ if (++vq->vq_avail_idx >= vq->vq_nentries) {
+ vq->vq_avail_idx -= vq->vq_nentries;
+ vq->avail_wrap_counter ^= 1;
+ vq->avail_used_flags =
+ VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
+ VRING_DESC_F_USED(!vq->avail_wrap_counter);
+ flags = VRING_DESC_F_WRITE | vq->avail_used_flags;
+ }
+ }
+ vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
+ return 0;
+}
+
/* When doing TSO, the IP length is not included in the pseudo header
* checksum of the packet given to the PMD, but for virtio it is
* expected.
@@ -801,7 +888,11 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
break;
/* Enqueue allocated buffers */
- error = virtqueue_enqueue_recv_refill(vq, m);
+ if (vtpci_packed_queue(vq->hw))
+ error = virtqueue_enqueue_recv_refill_packed(vq,
+ &m, 1);
+ else
+ error = virtqueue_enqueue_recv_refill(vq, m);
if (error) {
rte_pktmbuf_free(m);
break;
@@ -809,7 +900,8 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
nbufs++;
}
- vq_update_avail_idx(vq);
+ if (!vtpci_packed_queue(vq->hw))
+ vq_update_avail_idx(vq);
}
PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
@@ -896,7 +988,10 @@ virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
* Requeue the discarded mbuf. This should always be
* successful since it was just dequeued.
*/
- error = virtqueue_enqueue_recv_refill(vq, m);
+ if (vtpci_packed_queue(vq->hw))
+ error = virtqueue_enqueue_recv_refill_packed(vq, &m, 1);
+ else
+ error = virtqueue_enqueue_recv_refill(vq, m);
if (unlikely(error)) {
RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
@@ -1135,6 +1230,104 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
return nb_rx;
}
+uint16_t
+virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct virtnet_rx *rxvq = rx_queue;
+ struct virtqueue *vq = rxvq->vq;
+ struct virtio_hw *hw = vq->hw;
+ struct rte_mbuf *rxm, *new_mbuf;
+ uint16_t num, nb_rx;
+ uint32_t len[VIRTIO_MBUF_BURST_SZ];
+ struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
+ int error;
+ uint32_t i, nb_enqueued;
+ uint32_t hdr_size;
+ struct virtio_net_hdr *hdr;
+
+ nb_rx = 0;
+ if (unlikely(hw->started == 0))
+ return nb_rx;
+
+ num = RTE_MIN(VIRTIO_MBUF_BURST_SZ, nb_pkts);
+ if (likely(num > DESC_PER_CACHELINE))
+ num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
+
+ num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
+ PMD_RX_LOG(DEBUG, "dequeue:%d", num);
+
+ nb_enqueued = 0;
+ hdr_size = hw->vtnet_hdr_size;
+
+ for (i = 0; i < num; i++) {
+ rxm = rcv_pkts[i];
+
+ PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
+
+ if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ PMD_RX_LOG(ERR, "Packet drop");
+ nb_enqueued++;
+ virtio_discard_rxbuf(vq, rxm);
+ rxvq->stats.errors++;
+ continue;
+ }
+
+ rxm->port = rxvq->port_id;
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rxm->ol_flags = 0;
+ rxm->vlan_tci = 0;
+
+ rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
+ rxm->data_len = (uint16_t)(len[i] - hdr_size);
+
+ hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
+ RTE_PKTMBUF_HEADROOM - hdr_size);
+
+ if (hw->vlan_strip)
+ rte_vlan_strip(rxm);
+
+ if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
+ virtio_discard_rxbuf(vq, rxm);
+ rxvq->stats.errors++;
+ continue;
+ }
+
+ virtio_rx_stats_updated(rxvq, rxm);
+
+ rx_pkts[nb_rx++] = rxm;
+ }
+
+ rxvq->stats.packets += nb_rx;
+
+ /* Allocate new mbuf for the used descriptor */
+ while (likely(!virtqueue_full(vq))) {
+ new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
+ if (unlikely(new_mbuf == NULL)) {
+ struct rte_eth_dev *dev =
+ &rte_eth_devices[rxvq->port_id];
+ dev->data->rx_mbuf_alloc_failed++;
+ break;
+ }
+ error = virtqueue_enqueue_recv_refill_packed(vq, &new_mbuf, 1);
+ if (unlikely(error)) {
+ rte_pktmbuf_free(new_mbuf);
+ break;
+ }
+ nb_enqueued++;
+ }
+
+ if (likely(nb_enqueued)) {
+ if (unlikely(virtqueue_kick_prepare_packed(vq))) {
+ virtqueue_notify(vq);
+ PMD_RX_LOG(DEBUG, "Notified");
+ }
+ }
+
+ return nb_rx;
+}
+
+
uint16_t
virtio_recv_mergeable_pkts_inorder(void *rx_queue,
struct rte_mbuf **rx_pkts,
@@ -1493,6 +1686,182 @@ virtio_recv_mergeable_pkts(void *rx_queue,
return nb_rx;
}
+uint16_t
+virtio_recv_mergeable_pkts_packed(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct virtnet_rx *rxvq = rx_queue;
+ struct virtqueue *vq = rxvq->vq;
+ struct virtio_hw *hw = vq->hw;
+ struct rte_mbuf *rxm;
+ struct rte_mbuf *prev = NULL;
+ uint16_t num, nb_rx = 0;
+ uint32_t len[VIRTIO_MBUF_BURST_SZ];
+ struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
+ uint32_t nb_enqueued = 0;
+ uint32_t seg_num = 0;
+ uint32_t seg_res = 0;
+ uint32_t hdr_size = hw->vtnet_hdr_size;
+ int32_t i;
+ int error;
+
+ if (unlikely(hw->started == 0))
+ return nb_rx;
+
+
+ num = nb_pkts;
+ if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
+ num = VIRTIO_MBUF_BURST_SZ;
+ if (likely(num > DESC_PER_CACHELINE))
+ num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
+
+ num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
+
+ for (i = 0; i < num; i++) {
+ struct virtio_net_hdr_mrg_rxbuf *header;
+
+ PMD_RX_LOG(DEBUG, "dequeue:%d", num);
+ PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
+
+ rxm = rcv_pkts[i];
+
+ if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ PMD_RX_LOG(ERR, "Packet drop");
+ nb_enqueued++;
+ virtio_discard_rxbuf(vq, rxm);
+ rxvq->stats.errors++;
+ continue;
+ }
+
+ header = (struct virtio_net_hdr_mrg_rxbuf *)((char *)
+ rxm->buf_addr + RTE_PKTMBUF_HEADROOM - hdr_size);
+ seg_num = header->num_buffers;
+
+ if (seg_num == 0)
+ seg_num = 1;
+
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rxm->nb_segs = seg_num;
+ rxm->ol_flags = 0;
+ rxm->vlan_tci = 0;
+ rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
+ rxm->data_len = (uint16_t)(len[i] - hdr_size);
+
+ rxm->port = rxvq->port_id;
+ rx_pkts[nb_rx] = rxm;
+ prev = rxm;
+
+ if (hw->has_rx_offload &&
+ virtio_rx_offload(rxm, &header->hdr) < 0) {
+ virtio_discard_rxbuf(vq, rxm);
+ rxvq->stats.errors++;
+ continue;
+ }
+
+ if (hw->vlan_strip)
+ rte_vlan_strip(rx_pkts[nb_rx]);
+
+ seg_res = seg_num - 1;
+
+ /* Merge remaining segments */
+ while (seg_res != 0 && i < (num - 1)) {
+ i++;
+
+ rxm = rcv_pkts[i];
+ rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
+ rxm->pkt_len = (uint32_t)(len[i]);
+ rxm->data_len = (uint16_t)(len[i]);
+
+ rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
+ rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);
+
+ if (prev)
+ prev->next = rxm;
+
+ prev = rxm;
+ seg_res -= 1;
+ }
+
+ if (!seg_res) {
+ virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
+ nb_rx++;
+ }
+ }
+
+ /* Last packet still need merge segments */
+ while (seg_res != 0) {
+ uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
+ VIRTIO_MBUF_BURST_SZ);
+ if (likely(vq->vq_free_cnt >= rcv_cnt)) {
+ num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
+ len, rcv_cnt);
+ uint16_t extra_idx = 0;
+
+ rcv_cnt = num;
+
+ while (extra_idx < rcv_cnt) {
+ rxm = rcv_pkts[extra_idx];
+
+ rxm->data_off =
+ RTE_PKTMBUF_HEADROOM - hdr_size;
+ rxm->pkt_len = (uint32_t)(len[extra_idx]);
+ rxm->data_len = (uint16_t)(len[extra_idx]);
+
+ prev->next = rxm;
+ prev = rxm;
+ rx_pkts[nb_rx]->pkt_len += len[extra_idx];
+ rx_pkts[nb_rx]->data_len += len[extra_idx];
+ extra_idx += 1;
+ }
+ seg_res -= rcv_cnt;
+ if (!seg_res) {
+ virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
+ nb_rx++;
+ }
+ } else {
+ PMD_RX_LOG(ERR,
+ "No enough segments for packet.");
+ if (prev)
+ virtio_discard_rxbuf(vq, prev);
+ rxvq->stats.errors++;
+ break;
+ }
+ }
+
+ rxvq->stats.packets += nb_rx;
+
+ /* Allocate new mbuf for the used descriptor */
+ if (likely(!virtqueue_full(vq))) {
+ /* free_cnt may include mrg descs */
+ uint16_t free_cnt = vq->vq_free_cnt;
+ struct rte_mbuf *new_pkts[free_cnt];
+
+ if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
+ error = virtqueue_enqueue_recv_refill_packed(vq,
+ new_pkts, free_cnt);
+ if (unlikely(error)) {
+ for (i = 0; i < free_cnt; i++)
+ rte_pktmbuf_free(new_pkts[i]);
+ }
+ nb_enqueued += free_cnt;
+ } else {
+ struct rte_eth_dev *dev =
+ &rte_eth_devices[rxvq->port_id];
+ dev->data->rx_mbuf_alloc_failed += free_cnt;
+ }
+ }
+
+ if (likely(nb_enqueued)) {
+ if (unlikely(virtqueue_kick_prepare_packed(vq))) {
+ virtqueue_notify(vq);
+ PMD_RX_LOG(DEBUG, "Notified");
+ }
+ }
+
+ return nb_rx;
+}
+
uint16_t
virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
index 56a77cc71..5b03f7a27 100644
--- a/drivers/net/virtio/virtqueue.c
+++ b/drivers/net/virtio/virtqueue.c
@@ -54,9 +54,36 @@ virtqueue_detach_unused(struct virtqueue *vq)
return NULL;
}
+/* Flush used descs */
+static void
+virtqueue_rxvq_flush_packed(struct virtqueue *vq)
+{
+ struct vq_desc_extra *dxp;
+ uint16_t i;
+
+ struct vring_packed_desc *descs = vq->ring_packed.desc_packed;
+ int cnt = 0;
+
+ i = vq->vq_used_cons_idx;
+ while (desc_is_used(&descs[i], vq) && cnt++ < vq->vq_nentries) {
+ dxp = &vq->vq_descx[descs[i].id];
+ if (dxp->cookie != NULL) {
+ rte_pktmbuf_free(dxp->cookie);
+ dxp->cookie = NULL;
+ }
+ vq->vq_free_cnt++;
+ vq->vq_used_cons_idx++;
+ if (vq->vq_used_cons_idx >= vq->vq_nentries) {
+ vq->vq_used_cons_idx -= vq->vq_nentries;
+ vq->used_wrap_counter ^= 1;
+ }
+ i = vq->vq_used_cons_idx;
+ }
+}
+
/* Flush the elements in the used ring. */
-void
-virtqueue_rxvq_flush(struct virtqueue *vq)
+static void
+virtqueue_rxvq_flush_split(struct virtqueue *vq)
{
struct virtnet_rx *rxq = &vq->rxq;
struct virtio_hw *hw = vq->hw;
@@ -102,3 +129,15 @@ virtqueue_rxvq_flush(struct virtqueue *vq)
}
}
}
+
+/* Flush the elements in the used ring. */
+void
+virtqueue_rxvq_flush(struct virtqueue *vq)
+{
+ struct virtio_hw *hw = vq->hw;
+
+ if (vtpci_packed_queue(hw))
+ virtqueue_rxvq_flush_packed(vq);
+ else
+ virtqueue_rxvq_flush_split(vq);
+}
--
2.21.0

View File

@ -1,142 +0,0 @@
From d8d854a2f1814e10cf51ce88bf00b020167c772e Mon Sep 17 00:00:00 2001
From: Jens Freimann <jfreimann@redhat.com>
Date: Mon, 17 Dec 2018 22:31:36 +0100
Subject: [PATCH 07/18] net/virtio: support packed queue in send command
[ upstream commit ec194c2f189525b2fb4be5604422a28ea5f08acd ]
Use packed virtqueue format when reading and writing descriptors
to/from the ring.
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit ec194c2f189525b2fb4be5604422a28ea5f08acd)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_ethdev.c | 96 ++++++++++++++++++++++++++++++
1 file changed, 96 insertions(+)
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 4ef1da393..53773445b 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -141,6 +141,96 @@ static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
+static struct virtio_pmd_ctrl *
+virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
+ int *dlen, int pkt_num)
+{
+ struct virtqueue *vq = cvq->vq;
+ int head;
+ struct vring_packed_desc *desc = vq->ring_packed.desc_packed;
+ struct virtio_pmd_ctrl *result;
+ int wrap_counter;
+ uint16_t flags;
+ int sum = 0;
+ int k;
+
+ /*
+ * Format is enforced in qemu code:
+ * One TX packet for header;
+ * At least one TX packet per argument;
+ * One RX packet for ACK.
+ */
+ head = vq->vq_avail_idx;
+ wrap_counter = vq->avail_wrap_counter;
+ desc[head].flags = VRING_DESC_F_NEXT;
+ desc[head].addr = cvq->virtio_net_hdr_mem;
+ desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
+ vq->vq_free_cnt--;
+ if (++vq->vq_avail_idx >= vq->vq_nentries) {
+ vq->vq_avail_idx -= vq->vq_nentries;
+ vq->avail_wrap_counter ^= 1;
+ }
+
+ for (k = 0; k < pkt_num; k++) {
+ desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
+ + sizeof(struct virtio_net_ctrl_hdr)
+ + sizeof(ctrl->status) + sizeof(uint8_t) * sum;
+ desc[vq->vq_avail_idx].len = dlen[k];
+ flags = VRING_DESC_F_NEXT;
+ sum += dlen[k];
+ vq->vq_free_cnt--;
+ flags |= VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
+ VRING_DESC_F_USED(!vq->avail_wrap_counter);
+ desc[vq->vq_avail_idx].flags = flags;
+ rte_smp_wmb();
+ vq->vq_free_cnt--;
+ if (++vq->vq_avail_idx >= vq->vq_nentries) {
+ vq->vq_avail_idx -= vq->vq_nentries;
+ vq->avail_wrap_counter ^= 1;
+ }
+ }
+
+
+ desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
+ + sizeof(struct virtio_net_ctrl_hdr);
+ desc[vq->vq_avail_idx].len = sizeof(ctrl->status);
+ flags = VRING_DESC_F_WRITE;
+ flags |= VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
+ VRING_DESC_F_USED(!vq->avail_wrap_counter);
+ desc[vq->vq_avail_idx].flags = flags;
+ flags = VRING_DESC_F_NEXT;
+ flags |= VRING_DESC_F_AVAIL(wrap_counter) |
+ VRING_DESC_F_USED(!wrap_counter);
+ desc[head].flags = flags;
+ rte_smp_wmb();
+
+ vq->vq_free_cnt--;
+ if (++vq->vq_avail_idx >= vq->vq_nentries) {
+ vq->vq_avail_idx -= vq->vq_nentries;
+ vq->avail_wrap_counter ^= 1;
+ }
+
+ virtqueue_notify(vq);
+
+ /* wait for used descriptors in virtqueue */
+ do {
+ rte_rmb();
+ usleep(100);
+ } while (!desc_is_used(&desc[head], vq));
+
+ /* now get used descriptors */
+ while (desc_is_used(&desc[vq->vq_used_cons_idx], vq)) {
+ vq->vq_free_cnt++;
+ if (++vq->vq_used_cons_idx >= vq->vq_nentries) {
+ vq->vq_used_cons_idx -= vq->vq_nentries;
+ vq->used_wrap_counter ^= 1;
+ }
+ }
+
+ result = cvq->virtio_net_hdr_mz->addr;
+ return result;
+}
+
static int
virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
int *dlen, int pkt_num)
@@ -174,6 +264,11 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
sizeof(struct virtio_pmd_ctrl));
+ if (vtpci_packed_queue(vq->hw)) {
+ result = virtio_pq_send_command(cvq, ctrl, dlen, pkt_num);
+ goto out_unlock;
+ }
+
/*
* Format is enforced in qemu code:
* One TX packet for header;
@@ -245,6 +340,7 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
result = cvq->virtio_net_hdr_mz->addr;
+out_unlock:
rte_spinlock_unlock(&cvq->lock);
return result->status;
}
--
2.21.0

View File

@ -1,139 +0,0 @@
From 0cdcdd50e4cbb88737abfee1e545019500f11e38 Mon Sep 17 00:00:00 2001
From: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Date: Mon, 17 Dec 2018 22:31:37 +0100
Subject: [PATCH] net/virtio-user: add option to use packed queues
[ upstream commit 34f3966c7f81f947e9eebb347dec6a9f68eec4e6 ]
From: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Add option to enable packed queue support for virtio-user
devices.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit 34f3966c7f81f947e9eebb347dec6a9f68eec4e6)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
.../net/virtio/virtio_user/virtio_user_dev.c | 20 ++++++++++++++-----
.../net/virtio/virtio_user/virtio_user_dev.h | 2 +-
drivers/net/virtio/virtio_user_ethdev.c | 14 ++++++++++++-
3 files changed, 29 insertions(+), 7 deletions(-)
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index f0051f887..7d0acaeb7 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-1-Clause
* Copyright(c) 2010-2016 Intel Corporation
*/
@@ -58,6 +58,8 @@ virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
state.index = queue_sel;
state.num = 0; /* no reservation */
+ if (dev->features & (1ULL << VIRTIO_F_RING_PACKED))
+ state.num |= (1 << 15);
dev->ops->send_request(dev, VHOST_USER_SET_VRING_BASE, &state);
dev->ops->send_request(dev, VHOST_USER_SET_VRING_ADDR, &addr);
@@ -407,12 +409,13 @@ virtio_user_dev_setup(struct virtio_user_dev *dev)
1ULL << VIRTIO_NET_F_GUEST_TSO4 | \
1ULL << VIRTIO_NET_F_GUEST_TSO6 | \
1ULL << VIRTIO_F_IN_ORDER | \
- 1ULL << VIRTIO_F_VERSION_1)
+ 1ULL << VIRTIO_F_VERSION_1 | \
+ 1ULL << VIRTIO_F_RING_PACKED)
int
virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
int cq, int queue_size, const char *mac, char **ifname,
- int server, int mrg_rxbuf, int in_order)
+ int server, int mrg_rxbuf, int in_order, int packed_vq)
{
pthread_mutex_init(&dev->mutex, NULL);
snprintf(dev->path, PATH_MAX, "%s", path);
@@ -465,10 +468,17 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
if (!in_order)
dev->unsupported_features |= (1ull << VIRTIO_F_IN_ORDER);
- if (dev->mac_specified)
- dev->frontend_features |= (1ull << VIRTIO_NET_F_MAC);
+ if (packed_vq)
+ dev->device_features |= (1ull << VIRTIO_F_RING_PACKED);
else
+ dev->device_features &= ~(1ull << VIRTIO_F_RING_PACKED);
+
+ if (dev->mac_specified) {
+ dev->device_features |= (1ull << VIRTIO_NET_F_MAC);
+ } else {
+ dev->device_features &= ~(1ull << VIRTIO_NET_F_MAC);
dev->unsupported_features |= (1ull << VIRTIO_NET_F_MAC);
+ }
if (cq) {
/* device does not really need to know anything about CQ,
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.h b/drivers/net/virtio/virtio_user/virtio_user_dev.h
index 3e3a7b787..67a9c01ac 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.h
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.h
@@ -50,7 +50,7 @@ int virtio_user_start_device(struct virtio_user_dev *dev);
int virtio_user_stop_device(struct virtio_user_dev *dev);
int virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
int cq, int queue_size, const char *mac, char **ifname,
- int server, int mrg_rxbuf, int in_order);
+ int server, int mrg_rxbuf, int in_order, int packed_vq);
void virtio_user_dev_uninit(struct virtio_user_dev *dev);
void virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx);
uint8_t virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs);
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index 5781c0948..daad8f452 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -361,6 +361,8 @@ static const char *valid_args[] = {
VIRTIO_USER_ARG_MRG_RXBUF,
#define VIRTIO_USER_ARG_IN_ORDER "in_order"
VIRTIO_USER_ARG_IN_ORDER,
+#define VIRTIO_USER_ARG_PACKED_VQ "packed_vq"
+ VIRTIO_USER_ARG_PACKED_VQ,
NULL
};
@@ -468,6 +470,7 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
char *ifname = NULL;
char *mac_addr = NULL;
int ret = -1;
+ uint64_t packed_vq = 0;
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
const char *name = rte_vdev_device_name(dev);
@@ -571,6 +574,15 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
cq = 1;
}
+ if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PACKED_VQ) == 1) {
+ if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PACKED_VQ,
+ &get_integer_arg, &packed_vq) < 0) {
+ PMD_INIT_LOG(ERR, "error to parse %s",
+ VIRTIO_USER_ARG_PACKED_VQ);
+ goto end;
+ }
+ }
+
if (queues > 1 && cq == 0) {
PMD_INIT_LOG(ERR, "multi-q requires ctrl-q");
goto end;
@@ -610,7 +622,7 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
hw = eth_dev->data->dev_private;
if (virtio_user_dev_init(hw->virtio_user_dev, path, queues, cq,
queue_size, mac_addr, &ifname, server_mode,
- mrg_rxbuf, in_order) < 0) {
+ mrg_rxbuf, in_order, packed_vq) < 0) {
PMD_INIT_LOG(ERR, "virtio_user_dev_init fails");
virtio_user_eth_dev_free(eth_dev);
goto end;
--
2.21.0

View File

@ -1,44 +0,0 @@
From f5302062cbc98b3b8b1002cc48e7125a48ead96c Mon Sep 17 00:00:00 2001
From: Jens Freimann <jfreimann@redhat.com>
Date: Mon, 17 Dec 2018 22:31:38 +0100
Subject: [PATCH 09/18] net/virtio-user: fail if cq used with packed vq
[ upstream commit 07dd7e250d0128bf1edfd73e9d83bde09cdb11e9 ]
Until we have support for control virtqueues let's disable it and
fail device initalization if specified as a parameter.
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit 07dd7e250d0128bf1edfd73e9d83bde09cdb11e9)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_user/virtio_user_dev.c | 10 ++++++++--
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 77cec1d3c..2f75091d5 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -467,10 +467,16 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
if (!in_order)
dev->unsupported_features |= (1ull << VIRTIO_F_IN_ORDER);
- if (packed_vq)
+ if (packed_vq) {
+ if (cq) {
+ PMD_INIT_LOG(ERR, "control vq not supported yet with "
+ "packed virtqueues\n");
+ return -1;
+ }
dev->device_features |= (1ull << VIRTIO_F_RING_PACKED);
- else
+ } else {
dev->device_features &= ~(1ull << VIRTIO_F_RING_PACKED);
+ }
if (dev->mac_specified) {
dev->device_features |= (1ull << VIRTIO_NET_F_MAC);
--
2.21.0

View File

@ -1,45 +0,0 @@
From d1b8c268219498c865511b375b0c0c89244046f9 Mon Sep 17 00:00:00 2001
From: Jens Freimann <jfreimann@redhat.com>
Date: Mon, 17 Dec 2018 22:31:39 +0100
Subject: [PATCH 10/18] net/virtio: enable packed virtqueues by default
[ upstream commit aea29aa5d37b40080cfc1f9a1acba239bf03922f ]
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit aea29aa5d37b40080cfc1f9a1acba239bf03922f)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_ethdev.h | 1 +
drivers/net/virtio/virtio_user/virtio_user_dev.c | 3 ++-
2 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index 88b8c42a3..364ecbb50 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -34,6 +34,7 @@
1u << VIRTIO_RING_F_INDIRECT_DESC | \
1ULL << VIRTIO_F_VERSION_1 | \
1ULL << VIRTIO_F_IN_ORDER | \
+ 1ULL << VIRTIO_F_RING_PACKED | \
1ULL << VIRTIO_F_IOMMU_PLATFORM)
#define VIRTIO_PMD_SUPPORTED_GUEST_FEATURES \
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 2f75091d5..5999b7d9d 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -410,7 +410,8 @@ virtio_user_dev_setup(struct virtio_user_dev *dev)
1ULL << VIRTIO_NET_F_GUEST_TSO6 | \
1ULL << VIRTIO_F_IN_ORDER | \
1ULL << VIRTIO_F_VERSION_1 | \
- 1ULL << VIRTIO_F_RING_PACKED)
+ 1ULL << VIRTIO_F_RING_PACKED | \
+ 1ULL << VIRTIO_RING_F_EVENT_IDX)
int
virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
--
2.21.0

View File

@ -1,33 +0,0 @@
From 440731f30a1257c3318badfcf17f5ab9e5085317 Mon Sep 17 00:00:00 2001
From: Jens Freimann <jfreimann@redhat.com>
Date: Thu, 20 Dec 2018 11:56:24 +0100
Subject: [PATCH 11/18] net/virtio: avoid double accounting of bytes
[ upstream commit 517ad3e018e31ab2596d1ece5369894703c850c2 ]
Accounting of bytes was moved to a common function, so at the moment we do
it twice. This patches fixes it for sending packets with packed virtqueues.
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit 517ad3e018e31ab2596d1ece5369894703c850c2)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_rxtx.c | 1 -
1 file changed, 1 deletion(-)
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 0bcf3b08a..50eb4c694 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -1931,7 +1931,6 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
/* Enqueue Packet buffers */
virtqueue_enqueue_xmit_packed(txvq, txm, slots, can_push);
- txvq->stats.bytes += txm->pkt_len;
virtio_update_packet_stats(&txvq->stats, txm);
}
--
2.21.0

View File

@ -1,85 +0,0 @@
From ec53a1992df973607cbb10db6a0816ed2ef498dd Mon Sep 17 00:00:00 2001
From: Tiwei Bie <tiwei.bie@intel.com>
Date: Thu, 3 Jan 2019 10:40:06 +0800
Subject: [PATCH] net/virtio-user: fix packed vq option parsing
[ upstream commit 9070f88b81dab42739fb169265e3ea727e47dfa2 ]
Add the RING_PACKED feature to dev->unsupported_features
when it's disabled, and add the missing packed vq param
string. And also revert the unexpected change to MAC option
introduced when adding packed vq option.
Fixes: 34f3966c7f81 ("net/virtio-user: add option to use packed queues")
Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit 9070f88b81dab42739fb169265e3ea727e47dfa2)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_user/virtio_user_dev.c | 11 ++++-------
drivers/net/virtio/virtio_user_ethdev.c | 7 ++++---
2 files changed, 8 insertions(+), 10 deletions(-)
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 811b95c45..426682c93 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -475,17 +475,14 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
"packed virtqueues\n");
return -1;
}
- dev->device_features |= (1ull << VIRTIO_F_RING_PACKED);
} else {
- dev->device_features &= ~(1ull << VIRTIO_F_RING_PACKED);
+ dev->unsupported_features |= (1ull << VIRTIO_F_RING_PACKED);
}
- if (dev->mac_specified) {
- dev->device_features |= (1ull << VIRTIO_NET_F_MAC);
- } else {
- dev->device_features &= ~(1ull << VIRTIO_NET_F_MAC);
+ if (dev->mac_specified)
+ dev->frontend_features |= (1ull << VIRTIO_NET_F_MAC);
+ else
dev->unsupported_features |= (1ull << VIRTIO_NET_F_MAC);
- }
if (cq) {
/* device does not really need to know anything about CQ,
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index daad8f452..a2911febf 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -361,7 +361,7 @@ static const char *valid_args[] = {
VIRTIO_USER_ARG_MRG_RXBUF,
#define VIRTIO_USER_ARG_IN_ORDER "in_order"
VIRTIO_USER_ARG_IN_ORDER,
-#define VIRTIO_USER_ARG_PACKED_VQ "packed_vq"
+#define VIRTIO_USER_ARG_PACKED_VQ "packed_vq"
VIRTIO_USER_ARG_PACKED_VQ,
NULL
};
@@ -466,11 +466,11 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
uint64_t server_mode = VIRTIO_USER_DEF_SERVER_MODE;
uint64_t mrg_rxbuf = 1;
uint64_t in_order = 1;
+ uint64_t packed_vq = 0;
char *path = NULL;
char *ifname = NULL;
char *mac_addr = NULL;
int ret = -1;
- uint64_t packed_vq = 0;
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
const char *name = rte_vdev_device_name(dev);
@@ -698,4 +698,5 @@ RTE_PMD_REGISTER_PARAM_STRING(net_virtio_user,
"iface=<string> "
"server=<0|1> "
"mrg_rxbuf=<0|1> "
- "in_order=<0|1>");
+ "in_order=<0|1> "
+ "packed_vq=<0|1>");
--
2.21.0

View File

@ -1,36 +0,0 @@
From b6da125960fb1fb017427af5910b43ac81586850 Mon Sep 17 00:00:00 2001
From: Tiwei Bie <tiwei.bie@intel.com>
Date: Thu, 3 Jan 2019 10:40:07 +0800
Subject: [PATCH 13/18] net/virtio-user: fix supported features list
[ upstream commit 8532a0fcd8f2cf3a5d3189b453bd90a69991b1b1 ]
Currently virtio-user doesn't support event idx.
Fixes: aea29aa5d37b ("net/virtio: enable packed virtqueues by default")
Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit 8532a0fcd8f2cf3a5d3189b453bd90a69991b1b1)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_user/virtio_user_dev.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index c4e026096..77341f895 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -410,8 +410,7 @@ virtio_user_dev_setup(struct virtio_user_dev *dev)
1ULL << VIRTIO_NET_F_GUEST_TSO6 | \
1ULL << VIRTIO_F_IN_ORDER | \
1ULL << VIRTIO_F_VERSION_1 | \
- 1ULL << VIRTIO_F_RING_PACKED | \
- 1ULL << VIRTIO_RING_F_EVENT_IDX)
+ 1ULL << VIRTIO_F_RING_PACKED)
int
virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
--
2.21.0

View File

@ -1,98 +0,0 @@
From 82b43dd199d5492527b73002d4c3b009a98ca7a0 Mon Sep 17 00:00:00 2001
From: Jens Freimann <jfreimann@redhat.com>
Date: Fri, 11 Jan 2019 10:39:28 +0100
Subject: [PATCH 14/18] net/virtio: check head desc with correct wrap counter
[ upstream commit a4270ea4ff79b46280dd542f4ab3eb45f8c9685a ]
In virtio_pq_send_command() we check for a used descriptor
and wait in an idle loop until it becomes used. We can't use
vq->used_wrap_counter here to check for the first descriptor
we made available because the ring could have wrapped. Let's use
the used_wrap_counter that matches the state of the head descriptor.
Fixes: ec194c2f1895 ("net/virtio: support packed queue in send command")
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit a4270ea4ff79b46280dd542f4ab3eb45f8c9685a)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_ethdev.c | 11 ++++++-----
drivers/net/virtio/virtqueue.h | 10 ++++++++--
2 files changed, 14 insertions(+), 7 deletions(-)
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 53773445b..7bd38a292 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -149,7 +149,7 @@ virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
int head;
struct vring_packed_desc *desc = vq->ring_packed.desc_packed;
struct virtio_pmd_ctrl *result;
- int wrap_counter;
+ bool avail_wrap_counter, used_wrap_counter;
uint16_t flags;
int sum = 0;
int k;
@@ -161,7 +161,8 @@ virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
* One RX packet for ACK.
*/
head = vq->vq_avail_idx;
- wrap_counter = vq->avail_wrap_counter;
+ avail_wrap_counter = vq->avail_wrap_counter;
+ used_wrap_counter = vq->used_wrap_counter;
desc[head].flags = VRING_DESC_F_NEXT;
desc[head].addr = cvq->virtio_net_hdr_mem;
desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
@@ -199,8 +200,8 @@ virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
VRING_DESC_F_USED(!vq->avail_wrap_counter);
desc[vq->vq_avail_idx].flags = flags;
flags = VRING_DESC_F_NEXT;
- flags |= VRING_DESC_F_AVAIL(wrap_counter) |
- VRING_DESC_F_USED(!wrap_counter);
+ flags |= VRING_DESC_F_AVAIL(avail_wrap_counter) |
+ VRING_DESC_F_USED(!avail_wrap_counter);
desc[head].flags = flags;
rte_smp_wmb();
@@ -216,7 +217,7 @@ virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
do {
rte_rmb();
usleep(100);
- } while (!desc_is_used(&desc[head], vq));
+ } while (!__desc_is_used(&desc[head], used_wrap_counter));
/* now get used descriptors */
while (desc_is_used(&desc[vq->vq_used_cons_idx], vq)) {
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index b142fd488..75f5782bc 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -256,7 +256,7 @@ struct virtio_tx_region {
};
static inline int
-desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
+__desc_is_used(struct vring_packed_desc *desc, bool wrap_counter)
{
uint16_t used, avail, flags;
@@ -264,7 +264,13 @@ desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
used = !!(flags & VRING_DESC_F_USED(1));
avail = !!(flags & VRING_DESC_F_AVAIL(1));
- return avail == used && used == vq->used_wrap_counter;
+ return avail == used && used == wrap_counter;
+}
+
+static inline int
+desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
+{
+ return __desc_is_used(desc, vq->used_wrap_counter);
}
--
2.21.0

View File

@ -1,277 +0,0 @@
From 74bbcd238093edc81b1a1f0b9b6e0d3c3fe32584 Mon Sep 17 00:00:00 2001
From: Jens Freimann <jfreimann@redhat.com>
Date: Fri, 11 Jan 2019 10:39:29 +0100
Subject: [PATCH] net/virtio-user: support control VQ for packed
[ upstream commit 48a4464029a7f76dfb2c1f09146a391917b075e5 ]
Add support to virtio-user for control virtqueues.
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit 48a4464029a7f76dfb2c1f09146a391917b075e5)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
.../net/virtio/virtio_user/virtio_user_dev.c | 102 ++++++++++++++++--
.../net/virtio/virtio_user/virtio_user_dev.h | 15 ++-
drivers/net/virtio/virtio_user_ethdev.c | 56 +++++++++-
3 files changed, 157 insertions(+), 16 deletions(-)
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 2caaaad5f..83d3fb531 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -43,15 +43,26 @@ virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
struct vhost_vring_file file;
struct vhost_vring_state state;
struct vring *vring = &dev->vrings[queue_sel];
+ struct vring_packed *pq_vring = &dev->packed_vrings[queue_sel];
struct vhost_vring_addr addr = {
.index = queue_sel,
- .desc_user_addr = (uint64_t)(uintptr_t)vring->desc,
- .avail_user_addr = (uint64_t)(uintptr_t)vring->avail,
- .used_user_addr = (uint64_t)(uintptr_t)vring->used,
.log_guest_addr = 0,
.flags = 0, /* disable log */
};
+ if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
+ addr.desc_user_addr =
+ (uint64_t)(uintptr_t)pq_vring->desc_packed;
+ addr.avail_user_addr =
+ (uint64_t)(uintptr_t)pq_vring->driver_event;
+ addr.used_user_addr =
+ (uint64_t)(uintptr_t)pq_vring->device_event;
+ } else {
+ addr.desc_user_addr = (uint64_t)(uintptr_t)vring->desc;
+ addr.avail_user_addr = (uint64_t)(uintptr_t)vring->avail;
+ addr.used_user_addr = (uint64_t)(uintptr_t)vring->used;
+ }
+
state.index = queue_sel;
state.num = vring->num;
dev->ops->send_request(dev, VHOST_USER_SET_VRING_NUM, &state);
@@ -468,15 +479,8 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
if (!in_order)
dev->unsupported_features |= (1ull << VIRTIO_F_IN_ORDER);
- if (packed_vq) {
- if (cq) {
- PMD_INIT_LOG(ERR, "control vq not supported yet with "
- "packed virtqueues\n");
- return -1;
- }
- } else {
+ if (!packed_vq)
dev->unsupported_features |= (1ull << VIRTIO_F_RING_PACKED);
- }
if (dev->mac_specified)
dev->frontend_features |= (1ull << VIRTIO_NET_F_MAC);
@@ -621,6 +625,82 @@ virtio_user_handle_ctrl_msg(struct virtio_user_dev *dev, struct vring *vring,
return n_descs;
}
+static inline int
+desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
+{
+ return wrap_counter == !!(desc->flags & VRING_DESC_F_AVAIL(1)) &&
+ wrap_counter != !!(desc->flags & VRING_DESC_F_USED(1));
+}
+
+static uint32_t
+virtio_user_handle_ctrl_msg_pq(struct virtio_user_dev *dev,
+ struct vring_packed *vring,
+ uint16_t idx_hdr)
+{
+ struct virtio_net_ctrl_hdr *hdr;
+ virtio_net_ctrl_ack status = ~0;
+ uint16_t idx_data, idx_status;
+ /* initialize to one, header is first */
+ uint32_t n_descs = 1;
+
+ /* locate desc for header, data, and status */
+ idx_data = idx_hdr + 1;
+ if (idx_data >= dev->queue_size)
+ idx_data -= dev->queue_size;
+
+ n_descs++;
+
+ idx_status = idx_data;
+ while (vring->desc_packed[idx_status].flags & VRING_DESC_F_NEXT) {
+ idx_status++;
+ if (idx_status >= dev->queue_size)
+ idx_status -= dev->queue_size;
+ n_descs++;
+ }
+
+ hdr = (void *)(uintptr_t)vring->desc_packed[idx_hdr].addr;
+ if (hdr->class == VIRTIO_NET_CTRL_MQ &&
+ hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
+ uint16_t queues;
+
+ queues = *(uint16_t *)(uintptr_t)
+ vring->desc_packed[idx_data].addr;
+ status = virtio_user_handle_mq(dev, queues);
+ }
+
+ /* Update status */
+ *(virtio_net_ctrl_ack *)(uintptr_t)
+ vring->desc_packed[idx_status].addr = status;
+
+ return n_descs;
+}
+
+void
+virtio_user_handle_cq_packed(struct virtio_user_dev *dev, uint16_t queue_idx)
+{
+ struct virtio_user_queue *vq = &dev->packed_queues[queue_idx];
+ struct vring_packed *vring = &dev->packed_vrings[queue_idx];
+ uint16_t id, n_descs;
+
+ while (desc_is_avail(&vring->desc_packed[vq->used_idx],
+ vq->used_wrap_counter)) {
+ id = vring->desc_packed[vq->used_idx].id;
+
+ n_descs = virtio_user_handle_ctrl_msg_pq(dev, vring, id);
+
+ do {
+ vring->desc_packed[vq->used_idx].flags =
+ VRING_DESC_F_AVAIL(vq->used_wrap_counter) |
+ VRING_DESC_F_USED(vq->used_wrap_counter);
+ if (++vq->used_idx >= dev->queue_size) {
+ vq->used_idx -= dev->queue_size;
+ vq->used_wrap_counter ^= 1;
+ }
+ n_descs--;
+ } while (n_descs);
+ }
+}
+
void
virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx)
{
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.h b/drivers/net/virtio/virtio_user/virtio_user_dev.h
index 67a9c01ac..c6c2f7d6e 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.h
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.h
@@ -11,6 +11,12 @@
#include "../virtio_ring.h"
#include "vhost.h"
+struct virtio_user_queue {
+ uint16_t used_idx;
+ bool avail_wrap_counter;
+ bool used_wrap_counter;
+};
+
struct virtio_user_dev {
/* for vhost_user backend */
int vhostfd;
@@ -39,7 +45,12 @@ struct virtio_user_dev {
uint16_t port_id;
uint8_t mac_addr[ETHER_ADDR_LEN];
char path[PATH_MAX];
- struct vring vrings[VIRTIO_MAX_VIRTQUEUES];
+ union {
+ struct vring vrings[VIRTIO_MAX_VIRTQUEUES];
+ struct vring_packed packed_vrings[VIRTIO_MAX_VIRTQUEUES];
+ };
+ struct virtio_user_queue packed_queues[VIRTIO_MAX_VIRTQUEUES];
+
struct virtio_user_backend_ops *ops;
pthread_mutex_t mutex;
bool started;
@@ -53,5 +64,7 @@ int virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
int server, int mrg_rxbuf, int in_order, int packed_vq);
void virtio_user_dev_uninit(struct virtio_user_dev *dev);
void virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx);
+void virtio_user_handle_cq_packed(struct virtio_user_dev *dev,
+ uint16_t queue_idx);
uint8_t virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs);
#endif
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index a2911febf..dddb7dd23 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -271,10 +271,44 @@ virtio_user_get_queue_num(struct virtio_hw *hw, uint16_t queue_id __rte_unused)
return dev->queue_size;
}
-static int
-virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
+static void
+virtio_user_setup_queue_packed(struct virtqueue *vq,
+ struct virtio_user_dev *dev)
+
+{
+ uint16_t queue_idx = vq->vq_queue_index;
+ struct vring_packed *vring;
+ uint64_t desc_addr;
+ uint64_t avail_addr;
+ uint64_t used_addr;
+ uint16_t i;
+
+ vring = &dev->packed_vrings[queue_idx];
+ desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
+ avail_addr = desc_addr + vq->vq_nentries *
+ sizeof(struct vring_packed_desc);
+ used_addr = RTE_ALIGN_CEIL(avail_addr +
+ sizeof(struct vring_packed_desc_event),
+ VIRTIO_PCI_VRING_ALIGN);
+ vring->num = vq->vq_nentries;
+ vring->desc_packed =
+ (void *)(uintptr_t)desc_addr;
+ vring->driver_event =
+ (void *)(uintptr_t)avail_addr;
+ vring->device_event =
+ (void *)(uintptr_t)used_addr;
+ dev->packed_queues[queue_idx].avail_wrap_counter = true;
+ dev->packed_queues[queue_idx].used_wrap_counter = true;
+
+ for (i = 0; i < vring->num; i++) {
+ vring->desc_packed[i].flags = VRING_DESC_F_USED(1) |
+ VRING_DESC_F_AVAIL(1);
+ }
+}
+
+static void
+virtio_user_setup_queue_split(struct virtqueue *vq, struct virtio_user_dev *dev)
{
- struct virtio_user_dev *dev = virtio_user_get_dev(hw);
uint16_t queue_idx = vq->vq_queue_index;
uint64_t desc_addr, avail_addr, used_addr;
@@ -288,6 +322,17 @@ virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
dev->vrings[queue_idx].desc = (void *)(uintptr_t)desc_addr;
dev->vrings[queue_idx].avail = (void *)(uintptr_t)avail_addr;
dev->vrings[queue_idx].used = (void *)(uintptr_t)used_addr;
+}
+
+static int
+virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
+{
+ struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+ if (vtpci_packed_queue(hw))
+ virtio_user_setup_queue_packed(vq, dev);
+ else
+ virtio_user_setup_queue_split(vq, dev);
return 0;
}
@@ -317,7 +362,10 @@ virtio_user_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
struct virtio_user_dev *dev = virtio_user_get_dev(hw);
if (hw->cvq && (hw->cvq->vq == vq)) {
- virtio_user_handle_cq(dev, vq->vq_queue_index);
+ if (vtpci_packed_queue(vq->hw))
+ virtio_user_handle_cq_packed(dev, vq->vq_queue_index);
+ else
+ virtio_user_handle_cq(dev, vq->vq_queue_index);
return;
}
--
2.21.0

View File

@ -1,197 +0,0 @@
From c276398e43bec444eb207c3184f667b3d97361f8 Mon Sep 17 00:00:00 2001
From: Tiwei Bie <tiwei.bie@intel.com>
Date: Wed, 23 Jan 2019 01:01:40 +0800
Subject: [PATCH 16/18] net/virtio: fix control VQ
[ upstream commit 2923b8f9c41da37d63bd196ba2f037c154a6ebd5 ]
This patch mainly fixed below issues in the packed ring based
control vq support in virtio driver:
1. When parsing the used descriptors, we have to track the
number of descs that we need to skip;
2. vq->vq_free_cnt was decreased twice for a same desc;
Meanwhile, make the function name consistent with other parts.
Fixes: ec194c2f1895 ("net/virtio: support packed queue in send command")
Fixes: a4270ea4ff79 ("net/virtio: check head desc with correct wrap counter")
Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
[changed parameters to virtio_rmb/_wmb()]
(cherry picked from commit 2923b8f9c41da37d63bd196ba2f037c154a6ebd5)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_ethdev.c | 62 ++++++++++++++----------------
drivers/net/virtio/virtqueue.h | 12 +-----
2 files changed, 31 insertions(+), 43 deletions(-)
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 7bd38a292..c12fb157e 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -142,16 +142,17 @@ static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
static struct virtio_pmd_ctrl *
-virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
- int *dlen, int pkt_num)
+virtio_send_command_packed(struct virtnet_ctl *cvq,
+ struct virtio_pmd_ctrl *ctrl,
+ int *dlen, int pkt_num)
{
struct virtqueue *vq = cvq->vq;
int head;
struct vring_packed_desc *desc = vq->ring_packed.desc_packed;
struct virtio_pmd_ctrl *result;
- bool avail_wrap_counter, used_wrap_counter;
- uint16_t flags;
+ bool avail_wrap_counter;
int sum = 0;
+ int nb_descs = 0;
int k;
/*
@@ -162,11 +163,10 @@ virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
*/
head = vq->vq_avail_idx;
avail_wrap_counter = vq->avail_wrap_counter;
- used_wrap_counter = vq->used_wrap_counter;
- desc[head].flags = VRING_DESC_F_NEXT;
desc[head].addr = cvq->virtio_net_hdr_mem;
desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
vq->vq_free_cnt--;
+ nb_descs++;
if (++vq->vq_avail_idx >= vq->vq_nentries) {
vq->vq_avail_idx -= vq->vq_nentries;
vq->avail_wrap_counter ^= 1;
@@ -177,55 +177,51 @@ virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
+ sizeof(struct virtio_net_ctrl_hdr)
+ sizeof(ctrl->status) + sizeof(uint8_t) * sum;
desc[vq->vq_avail_idx].len = dlen[k];
- flags = VRING_DESC_F_NEXT;
+ desc[vq->vq_avail_idx].flags = VRING_DESC_F_NEXT |
+ VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
+ VRING_DESC_F_USED(!vq->avail_wrap_counter);
sum += dlen[k];
vq->vq_free_cnt--;
- flags |= VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
- VRING_DESC_F_USED(!vq->avail_wrap_counter);
- desc[vq->vq_avail_idx].flags = flags;
- rte_smp_wmb();
- vq->vq_free_cnt--;
+ nb_descs++;
if (++vq->vq_avail_idx >= vq->vq_nentries) {
vq->vq_avail_idx -= vq->vq_nentries;
vq->avail_wrap_counter ^= 1;
}
}
-
desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
+ sizeof(struct virtio_net_ctrl_hdr);
desc[vq->vq_avail_idx].len = sizeof(ctrl->status);
- flags = VRING_DESC_F_WRITE;
- flags |= VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
- VRING_DESC_F_USED(!vq->avail_wrap_counter);
- desc[vq->vq_avail_idx].flags = flags;
- flags = VRING_DESC_F_NEXT;
- flags |= VRING_DESC_F_AVAIL(avail_wrap_counter) |
- VRING_DESC_F_USED(!avail_wrap_counter);
- desc[head].flags = flags;
- rte_smp_wmb();
-
+ desc[vq->vq_avail_idx].flags = VRING_DESC_F_WRITE |
+ VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
+ VRING_DESC_F_USED(!vq->avail_wrap_counter);
vq->vq_free_cnt--;
+ nb_descs++;
if (++vq->vq_avail_idx >= vq->vq_nentries) {
vq->vq_avail_idx -= vq->vq_nentries;
vq->avail_wrap_counter ^= 1;
}
+ virtio_wmb();
+ desc[head].flags = VRING_DESC_F_NEXT |
+ VRING_DESC_F_AVAIL(avail_wrap_counter) |
+ VRING_DESC_F_USED(!avail_wrap_counter);
+
+ virtio_wmb();
virtqueue_notify(vq);
/* wait for used descriptors in virtqueue */
- do {
- rte_rmb();
+ while (!desc_is_used(&desc[head], vq))
usleep(100);
- } while (!__desc_is_used(&desc[head], used_wrap_counter));
+
+ virtio_rmb();
/* now get used descriptors */
- while (desc_is_used(&desc[vq->vq_used_cons_idx], vq)) {
- vq->vq_free_cnt++;
- if (++vq->vq_used_cons_idx >= vq->vq_nentries) {
- vq->vq_used_cons_idx -= vq->vq_nentries;
- vq->used_wrap_counter ^= 1;
- }
+ vq->vq_free_cnt += nb_descs;
+ vq->vq_used_cons_idx += nb_descs;
+ if (vq->vq_used_cons_idx >= vq->vq_nentries) {
+ vq->vq_used_cons_idx -= vq->vq_nentries;
+ vq->used_wrap_counter ^= 1;
}
result = cvq->virtio_net_hdr_mz->addr;
@@ -266,7 +262,7 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
sizeof(struct virtio_pmd_ctrl));
if (vtpci_packed_queue(vq->hw)) {
- result = virtio_pq_send_command(cvq, ctrl, dlen, pkt_num);
+ result = virtio_send_command_packed(cvq, ctrl, dlen, pkt_num);
goto out_unlock;
}
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 75f5782bc..9e74b7bd0 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -256,7 +256,7 @@ struct virtio_tx_region {
};
static inline int
-__desc_is_used(struct vring_packed_desc *desc, bool wrap_counter)
+desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
{
uint16_t used, avail, flags;
@@ -264,16 +264,9 @@ __desc_is_used(struct vring_packed_desc *desc, bool wrap_counter)
used = !!(flags & VRING_DESC_F_USED(1));
avail = !!(flags & VRING_DESC_F_AVAIL(1));
- return avail == used && used == wrap_counter;
-}
-
-static inline int
-desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
-{
- return __desc_is_used(desc, vq->used_wrap_counter);
+ return avail == used && used == vq->used_wrap_counter;
}
-
static inline void
vring_desc_init_packed(struct virtqueue *vq, int n)
{
@@ -329,7 +322,6 @@ virtqueue_enable_intr_packed(struct virtqueue *vq)
{
uint16_t *event_flags = &vq->ring_packed.driver_event->desc_event_flags;
-
if (vq->event_flags_shadow == RING_EVENT_FLAGS_DISABLE) {
virtio_wmb();
vq->event_flags_shadow = RING_EVENT_FLAGS_ENABLE;
--
2.21.0

View File

@ -1,146 +0,0 @@
From e5ee642672921b9e83aaa558067b6b685a7af0a3 Mon Sep 17 00:00:00 2001
From: Tiwei Bie <tiwei.bie@intel.com>
Date: Wed, 23 Jan 2019 01:01:41 +0800
Subject: [PATCH 17/18] net/virtio-user: fix control VQ
[ upstream commit 45c224e73a3057bf62cb04f83fc1e97457a21ffa ]
This patch fixed below issues in the packed ring based control
vq support in virtio user:
1. The idx_hdr should be used_idx instead of the id in the desc;
2. We just need to write out a single used descriptor for each
descriptor list;
3. The avail/used bits should be initialized to 0;
Meanwhile, make the function name consistent with other parts.
Fixes: 48a4464029a7 ("net/virtio-user: support control VQ for packed")
Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit 45c224e73a3057bf62cb04f83fc1e97457a21ffa)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_ethdev.c | 11 ++++++
.../net/virtio/virtio_user/virtio_user_dev.c | 37 +++++++++++--------
drivers/net/virtio/virtio_user_ethdev.c | 7 +---
3 files changed, 34 insertions(+), 21 deletions(-)
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index c12fb157e..a31129484 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -224,6 +224,17 @@ virtio_send_command_packed(struct virtnet_ctl *cvq,
vq->used_wrap_counter ^= 1;
}
+ PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\n"
+ "vq->vq_avail_idx=%d\n"
+ "vq->vq_used_cons_idx=%d\n"
+ "vq->avail_wrap_counter=%d\n"
+ "vq->used_wrap_counter=%d\n",
+ vq->vq_free_cnt,
+ vq->vq_avail_idx,
+ vq->vq_used_cons_idx,
+ vq->avail_wrap_counter,
+ vq->used_wrap_counter);
+
result = cvq->virtio_net_hdr_mz->addr;
return result;
}
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index ea5149929..d1157378d 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -632,9 +632,9 @@ desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
}
static uint32_t
-virtio_user_handle_ctrl_msg_pq(struct virtio_user_dev *dev,
- struct vring_packed *vring,
- uint16_t idx_hdr)
+virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
+ struct vring_packed *vring,
+ uint16_t idx_hdr)
{
struct virtio_net_ctrl_hdr *hdr;
virtio_net_ctrl_ack status = ~0;
@@ -671,6 +671,10 @@ virtio_user_handle_ctrl_msg_pq(struct virtio_user_dev *dev,
*(virtio_net_ctrl_ack *)(uintptr_t)
vring->desc_packed[idx_status].addr = status;
+ /* Update used descriptor */
+ vring->desc_packed[idx_hdr].id = vring->desc_packed[idx_status].id;
+ vring->desc_packed[idx_hdr].len = sizeof(status);
+
return n_descs;
}
@@ -679,24 +683,25 @@ virtio_user_handle_cq_packed(struct virtio_user_dev *dev, uint16_t queue_idx)
{
struct virtio_user_queue *vq = &dev->packed_queues[queue_idx];
struct vring_packed *vring = &dev->packed_vrings[queue_idx];
- uint16_t id, n_descs;
+ uint16_t n_descs;
while (desc_is_avail(&vring->desc_packed[vq->used_idx],
vq->used_wrap_counter)) {
- id = vring->desc_packed[vq->used_idx].id;
- n_descs = virtio_user_handle_ctrl_msg_pq(dev, vring, id);
+ n_descs = virtio_user_handle_ctrl_msg_packed(dev, vring,
+ vq->used_idx);
- do {
- vring->desc_packed[vq->used_idx].flags =
- VRING_DESC_F_AVAIL(vq->used_wrap_counter) |
- VRING_DESC_F_USED(vq->used_wrap_counter);
- if (++vq->used_idx >= dev->queue_size) {
- vq->used_idx -= dev->queue_size;
- vq->used_wrap_counter ^= 1;
- }
- n_descs--;
- } while (n_descs);
+ rte_smp_wmb();
+ vring->desc_packed[vq->used_idx].flags =
+ VRING_DESC_F_WRITE |
+ VRING_DESC_F_AVAIL(vq->used_wrap_counter) |
+ VRING_DESC_F_USED(vq->used_wrap_counter);
+
+ vq->used_idx += n_descs;
+ if (vq->used_idx >= dev->queue_size) {
+ vq->used_idx -= dev->queue_size;
+ vq->used_wrap_counter ^= 1;
+ }
}
}
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index c01f45cab..6423e1f61 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -274,7 +274,6 @@ virtio_user_get_queue_num(struct virtio_hw *hw, uint16_t queue_id __rte_unused)
static void
virtio_user_setup_queue_packed(struct virtqueue *vq,
struct virtio_user_dev *dev)
-
{
uint16_t queue_idx = vq->vq_queue_index;
struct vring_packed *vring;
@@ -300,10 +299,8 @@ virtio_user_setup_queue_packed(struct virtqueue *vq,
dev->packed_queues[queue_idx].avail_wrap_counter = true;
dev->packed_queues[queue_idx].used_wrap_counter = true;
- for (i = 0; i < vring->num; i++) {
- vring->desc_packed[i].flags = VRING_DESC_F_USED(1) |
- VRING_DESC_F_AVAIL(1);
- }
+ for (i = 0; i < vring->num; i++)
+ vring->desc_packed[i].flags = 0;
}
static void
--
2.21.0

View File

@ -1,97 +0,0 @@
From f3bf9a1a9b1ad3419b436855306ad8b5d8efab2f Mon Sep 17 00:00:00 2001
From: Maxime Coquelin <maxime.coquelin@redhat.com>
Date: Thu, 20 Dec 2018 17:47:55 +0100
Subject: [PATCH 18/18] vhost: batch used descs chains write-back with packed
ring
[ upstream commit b473ec1131ee44ee25e0536a04be65246b93f4f3 ]
Instead of writing back descriptors chains in order, let's
write the first chain flags last in order to improve batching.
Also, move the write barrier in logging cache sync, so that it
is done only when logging is enabled. It means there is now
one more barrier for split ring when logging is enabled.
With Kernel's pktgen benchmark, ~3% performance gain is measured.
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Tiwei Bie <tiwei.bie@intel.com>
(cherry picked from commit b473ec1131ee44ee25e0536a04be65246b93f4f3)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
lib/librte_vhost/vhost.h | 7 ++-----
lib/librte_vhost/virtio_net.c | 19 ++++++++++++++++---
2 files changed, 18 insertions(+), 8 deletions(-)
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 552b9298d..adc2fb78e 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -456,12 +456,9 @@ vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
!dev->log_base))
return;
- log_base = (unsigned long *)(uintptr_t)dev->log_base;
+ rte_smp_wmb();
- /*
- * It is expected a write memory barrier has been issued
- * before this function is called.
- */
+ log_base = (unsigned long *)(uintptr_t)dev->log_base;
for (i = 0; i < vq->log_cache_nb_elem; i++) {
struct log_cache_entry *elem = vq->log_cache + i;
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 15d682c3c..ec70ef947 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -136,6 +136,8 @@ flush_shadow_used_ring_packed(struct virtio_net *dev,
{
int i;
uint16_t used_idx = vq->last_used_idx;
+ uint16_t head_idx = vq->last_used_idx;
+ uint16_t head_flags = 0;
/* Split loop in two to save memory barriers */
for (i = 0; i < vq->shadow_used_idx; i++) {
@@ -165,12 +167,17 @@ flush_shadow_used_ring_packed(struct virtio_net *dev,
flags &= ~VRING_DESC_F_AVAIL;
}
- vq->desc_packed[vq->last_used_idx].flags = flags;
+ if (i > 0) {
+ vq->desc_packed[vq->last_used_idx].flags = flags;
- vhost_log_cache_used_vring(dev, vq,
+ vhost_log_cache_used_vring(dev, vq,
vq->last_used_idx *
sizeof(struct vring_packed_desc),
sizeof(struct vring_packed_desc));
+ } else {
+ head_idx = vq->last_used_idx;
+ head_flags = flags;
+ }
vq->last_used_idx += vq->shadow_used_packed[i].count;
if (vq->last_used_idx >= vq->size) {
@@ -179,7 +186,13 @@ flush_shadow_used_ring_packed(struct virtio_net *dev,
}
}
- rte_smp_wmb();
+ vq->desc_packed[head_idx].flags = head_flags;
+
+ vhost_log_cache_used_vring(dev, vq,
+ head_idx *
+ sizeof(struct vring_packed_desc),
+ sizeof(struct vring_packed_desc));
+
vq->shadow_used_idx = 0;
vhost_log_cache_sync(dev, vq);
}
--
2.21.0

View File

@ -1,42 +0,0 @@
From daa23dec25e8e418cd4e921531c82b5aae39b362 Mon Sep 17 00:00:00 2001
From: Tiwei Bie <tiwei.bie@intel.com>
Date: Tue, 19 Mar 2019 14:43:04 +0800
Subject: [PATCH] net/virtio: fix interrupt helper for packed ring
When disabling interrupt, the shadow event flags should also be
updated accordingly. The unnecessary wmb is also dropped.
Fixes: e9f4feb7e622 ("net/virtio: add packed virtqueue helpers")
Cc: stable@dpdk.org
Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
drivers/net/virtio/virtqueue.h | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 9e74b7bd0..c9f1c0afa 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -296,12 +296,13 @@ vring_desc_init_split(struct vring_desc *dp, uint16_t n)
static inline void
virtqueue_disable_intr_packed(struct virtqueue *vq)
{
- uint16_t *event_flags = &vq->ring_packed.driver_event->desc_event_flags;
-
- *event_flags = RING_EVENT_FLAGS_DISABLE;
+ if (vq->event_flags_shadow != RING_EVENT_FLAGS_DISABLE) {
+ vq->event_flags_shadow = RING_EVENT_FLAGS_DISABLE;
+ vq->ring_packed.driver_event->desc_event_flags =
+ vq->event_flags_shadow;
+ }
}
-
/**
* Tell the backend not to interrupt us.
*/
--
2.21.0

View File

@ -1,30 +0,0 @@
From f2e20b51ac6432390ea545e2b6247419dfcaab40 Mon Sep 17 00:00:00 2001
From: Jens Freimann <jfreimann@redhat.com>
Date: Mon, 16 Sep 2019 17:26:16 +0200
Subject: [PATCH] net/virtio: fix calculation of device_event ptr
Fix wrong pointer arithmetic. We only need to increment by 1 if we want
to advance it by the size of the driver event area.
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_ring.h | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/drivers/net/virtio/virtio_ring.h b/drivers/net/virtio/virtio_ring.h
index 1760823c6..fdc62194e 100644
--- a/drivers/net/virtio/virtio_ring.h
+++ b/drivers/net/virtio/virtio_ring.h
@@ -165,8 +165,7 @@ vring_init_packed(struct vring_packed *vr, uint8_t *p, unsigned long align,
vr->driver_event = (struct vring_packed_desc_event *)(p +
vr->num * sizeof(struct vring_packed_desc));
vr->device_event = (struct vring_packed_desc_event *)
- RTE_ALIGN_CEIL((uintptr_t)(vr->driver_event +
- sizeof(struct vring_packed_desc_event)), align);
+ RTE_ALIGN_CEIL((uintptr_t)(vr->driver_event + 1), align);
}
/*
--
2.21.0

View File

@ -1,4 +1,4 @@
# -*- cfg-sha: 9fc8b53ccd53cc8b64391f6252e1dba558ae660a73a72f10dcadff2ca5462243
# -*- cfg-sha: fedc4fc78a46e35070ea61c62f51c1cb9283062b4bad46fb91f6c2eb9ec536c5
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2015 Cavium, Inc
# SPDX-License-Identifier: BSD-3-Clause
@ -7,7 +7,15 @@
# Copyright(c) 2010-2016 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2010-2017 Intel Corporation
# RTE_EXEC_ENV values are the directories in mk/exec-env/
# String that appears before the version number
CONFIG_RTE_VER_PREFIX="DPDK"
# Version information completed when this file is processed for a build
CONFIG_RTE_VER_YEAR=19
CONFIG_RTE_VER_MONTH=11
CONFIG_RTE_VER_MINOR=2
CONFIG_RTE_VER_SUFFIX=""
CONFIG_RTE_VER_RELEASE=99
# RTE_EXEC_ENV values are the directories in mk/exec-env/
CONFIG_RTE_EXEC_ENV="linuxapp"
# RTE_ARCH values are architecture we compile for. directories in mk/arch/
CONFIG_RTE_ARCH="arm64"
@ -21,12 +29,12 @@ CONFIG_RTE_TOOLCHAIN="gcc"
CONFIG_RTE_FORCE_INTRINSICS=y
# Machine forces strict alignment constraints.
CONFIG_RTE_ARCH_STRICT_ALIGN=n
# Enable link time optimization
CONFIG_RTE_ENABLE_LTO=n
# Compile to share library
CONFIG_RTE_BUILD_SHARED_LIB=y
# Use newest code breaking previous ABI
CONFIG_RTE_NEXT_ABI=n
# Major ABI to overwrite library specific LIBABIVER
CONFIG_RTE_MAJOR_ABI=
# Machine's cache line size
CONFIG_RTE_CACHE_LINE_SIZE=128
# Memory model
@ -57,7 +65,6 @@ CONFIG_RTE_LOG_DP_LEVEL=RTE_LOG_INFO
CONFIG_RTE_LOG_HISTORY=256
CONFIG_RTE_BACKTRACE=y
CONFIG_RTE_LIBEAL_USE_HPET=n
CONFIG_RTE_EAL_ALLOW_INV_SOCKET_ID=n
CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
CONFIG_RTE_EAL_IGB_UIO=n
CONFIG_RTE_EAL_VFIO=y
@ -71,6 +78,8 @@ CONFIG_RTE_USE_LIBBSD=n
# field test and possible optimization.
CONFIG_RTE_ENABLE_AVX=y
CONFIG_RTE_ENABLE_AVX512=n
# Use ARM LSE ATOMIC instructions
CONFIG_RTE_ARM_FEATURE_ATOMICS=n
# Default driver path (or "" to disable)
CONFIG_RTE_EAL_PMD_PATH="/usr/lib64/dpdk-pmds"
# Compile Environment Abstraction Layer to support Vmware TSC map
@ -122,12 +131,8 @@ CONFIG_RTE_LIBRTE_BNX2X_DEBUG_PERIODIC=n
CONFIG_RTE_LIBRTE_BNXT_PMD=n
# Compile burst-oriented Chelsio Terminator (CXGBE) PMD
CONFIG_RTE_LIBRTE_CXGBE_PMD=n
CONFIG_RTE_LIBRTE_CXGBE_DEBUG=n
CONFIG_RTE_LIBRTE_CXGBE_DEBUG_REG=n
CONFIG_RTE_LIBRTE_CXGBE_DEBUG_MBOX=n
CONFIG_RTE_LIBRTE_CXGBE_DEBUG_TX=n
CONFIG_RTE_LIBRTE_CXGBE_DEBUG_RX=n
CONFIG_RTE_LIBRTE_CXGBE_TPUT=y
# Compile burst-oriented NXP PFE PMD driver
CONFIG_RTE_LIBRTE_PFE_PMD=n
# NXP DPAA Bus
CONFIG_RTE_LIBRTE_DPAA_BUS=n
CONFIG_RTE_LIBRTE_DPAA_MEMPOOL=n
@ -158,6 +163,10 @@ CONFIG_RTE_LIBRTE_E1000_DEBUG_RX=n
CONFIG_RTE_LIBRTE_E1000_DEBUG_TX=n
CONFIG_RTE_LIBRTE_E1000_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC=n
# Compile burst-oriented HINIC PMD driver
CONFIG_RTE_LIBRTE_HINIC_PMD=n
# Compile burst-oriented HNS3 PMD driver
CONFIG_RTE_LIBRTE_HNS3_PMD=n
# Compile burst-oriented IXGBE PMD driver
CONFIG_RTE_LIBRTE_IXGBE_PMD=y
CONFIG_RTE_LIBRTE_IXGBE_DEBUG_RX=n
@ -183,22 +192,34 @@ CONFIG_RTE_LIBRTE_FM10K_DEBUG_TX=n
CONFIG_RTE_LIBRTE_FM10K_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE=y
CONFIG_RTE_LIBRTE_FM10K_INC_VECTOR=y
# Compile burst-oriented AVF PMD driver
CONFIG_RTE_LIBRTE_AVF_PMD=n
CONFIG_RTE_LIBRTE_AVF_INC_VECTOR=y
CONFIG_RTE_LIBRTE_AVF_DEBUG_TX=n
CONFIG_RTE_LIBRTE_AVF_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_AVF_DEBUG_RX=n
CONFIG_RTE_LIBRTE_AVF_16BYTE_RX_DESC=n
# Compile burst-oriented ICE PMD driver
CONFIG_RTE_LIBRTE_ICE_PMD=n
CONFIG_RTE_LIBRTE_ICE_DEBUG_RX=n
CONFIG_RTE_LIBRTE_ICE_DEBUG_TX=n
CONFIG_RTE_LIBRTE_ICE_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC=y
CONFIG_RTE_LIBRTE_ICE_16BYTE_RX_DESC=n
# Compile burst-oriented IAVF PMD driver
CONFIG_RTE_LIBRTE_IAVF_PMD=n
CONFIG_RTE_LIBRTE_IAVF_DEBUG_TX=n
CONFIG_RTE_LIBRTE_IAVF_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_IAVF_DEBUG_RX=n
CONFIG_RTE_LIBRTE_IAVF_DEBUG_DUMP_DESC=n
CONFIG_RTE_LIBRTE_IAVF_16BYTE_RX_DESC=n
# Compile burst-oriented IPN3KE PMD driver
CONFIG_RTE_LIBRTE_IPN3KE_PMD=n
# Compile burst-oriented Mellanox ConnectX-3 (MLX4) PMD
CONFIG_RTE_LIBRTE_MLX4_PMD=n
CONFIG_RTE_LIBRTE_MLX4_DEBUG=n
CONFIG_RTE_LIBRTE_MLX4_DLOPEN_DEPS=n
# Compile burst-oriented Mellanox ConnectX-4, ConnectX-5 & Bluefield
# (MLX5) PMD
# Compile burst-oriented Mellanox ConnectX-4, ConnectX-5,
# ConnectX-6 & BlueField (MLX5) PMD
CONFIG_RTE_LIBRTE_MLX5_PMD=n
CONFIG_RTE_LIBRTE_MLX5_DEBUG=n
CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS=n
# Linking method for mlx4/5 dependency on ibverbs and related libraries
# Default linking is dynamic by linker.
# Other options are: dynamic by dlopen at run-time, or statically embedded.
CONFIG_RTE_IBVERBS_LINK_DLOPEN=n
CONFIG_RTE_IBVERBS_LINK_STATIC=n
# Compile burst-oriented Netronome NFP PMD driver
CONFIG_RTE_LIBRTE_NFP_PMD=n
CONFIG_RTE_LIBRTE_NFP_DEBUG_TX=n
@ -215,6 +236,8 @@ CONFIG_RTE_LIBRTE_SFC_EFX_PMD=n
CONFIG_RTE_LIBRTE_SFC_EFX_DEBUG=n
# Compile software PMD backed by SZEDATA2 device
CONFIG_RTE_LIBRTE_PMD_SZEDATA2=n
# Compile software PMD backed by NFB device
CONFIG_RTE_LIBRTE_NFB_PMD=n
# Compile burst-oriented Cavium Thunderx NICVF PMD driver
CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD=n
CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_RX=n
@ -227,6 +250,8 @@ CONFIG_RTE_LIBRTE_LIO_DEBUG_MBOX=n
CONFIG_RTE_LIBRTE_LIO_DEBUG_REGS=n
# Compile burst-oriented Cavium OCTEONTX network PMD driver
CONFIG_RTE_LIBRTE_OCTEONTX_PMD=n
# Compile burst-oriented Marvell OCTEON TX2 network PMD driver
CONFIG_RTE_LIBRTE_OCTEONTX2_PMD=n
# Compile WRS accelerated virtual port (AVP) guest PMD driver
CONFIG_RTE_LIBRTE_AVP_PMD=n
CONFIG_RTE_LIBRTE_AVP_DEBUG_RX=n
@ -246,6 +271,10 @@ CONFIG_RTE_LIBRTE_VMXNET3_DEBUG_TX=n
CONFIG_RTE_LIBRTE_VMXNET3_DEBUG_TX_FREE=n
# Compile software PMD backed by AF_PACKET sockets (Linux only)
CONFIG_RTE_LIBRTE_PMD_AF_PACKET=n
# Compile software PMD backed by AF_XDP sockets (Linux only)
CONFIG_RTE_LIBRTE_PMD_AF_XDP=n
# Compile Memory Interface PMD driver (Linux only)
CONFIG_RTE_LIBRTE_PMD_MEMIF=n
# Compile link bonding PMD library
CONFIG_RTE_LIBRTE_PMD_BOND=n
CONFIG_RTE_LIBRTE_BOND_DEBUG_ALB=n
@ -283,12 +312,17 @@ CONFIG_RTE_PMD_PACKET_PREFETCH=y
# Compile generic wireless base band device library
# EXPERIMENTAL: API may change without prior notice
CONFIG_RTE_LIBRTE_BBDEV=n
CONFIG_RTE_LIBRTE_BBDEV_DEBUG=n
CONFIG_RTE_BBDEV_MAX_DEVS=128
CONFIG_RTE_BBDEV_OFFLOAD_COST=n
CONFIG_RTE_BBDEV_OFFLOAD_COST=y
CONFIG_RTE_BBDEV_SDK_AVX2=n
CONFIG_RTE_BBDEV_SDK_AVX512=n
# Compile PMD for NULL bbdev device
CONFIG_RTE_LIBRTE_PMD_BBDEV_NULL=y
CONFIG_RTE_LIBRTE_PMD_BBDEV_NULL=n
# Compile PMD for turbo software bbdev device
CONFIG_RTE_LIBRTE_PMD_BBDEV_TURBO_SW=n
# Compile PMD for Intel FPGA LTE FEC bbdev device
CONFIG_RTE_LIBRTE_PMD_BBDEV_FPGA_LTE_FEC=n
# Compile generic crypto device library
CONFIG_RTE_LIBRTE_CRYPTODEV=n
CONFIG_RTE_CRYPTO_MAX_DEVS=64
@ -304,13 +338,15 @@ CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC=n
CONFIG_RTE_LIBRTE_PMD_DPAA_SEC=n
CONFIG_RTE_LIBRTE_DPAA_MAX_CRYPTODEV=4
# Compile PMD for Cavium OCTEON TX crypto device
CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO=y
CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO=n
# Compile PMD for Marvell OCTEON TX2 crypto device
CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_CRYPTO=n
# Compile PMD for QuickAssist based devices - see docs for details
CONFIG_RTE_LIBRTE_PMD_QAT=n
CONFIG_RTE_LIBRTE_PMD_QAT_SYM=n
CONFIG_RTE_LIBRTE_PMD_QAT_ASYM=n
# Max. number of QuickAssist devices, which can be detected and attached
CONFIG_RTE_PMD_QAT_MAX_PCI_DEVICES=48
CONFIG_RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS=16
CONFIG_RTE_PMD_QAT_COMP_IM_BUFFER_SIZE=65536
# Compile PMD for virtio crypto devices
CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO=n
@ -337,6 +373,8 @@ CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=n
CONFIG_RTE_LIBRTE_PMD_CCP=n
# Compile PMD for Marvell Crypto device
CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO=n
# Compile PMD for NITROX crypto device
CONFIG_RTE_LIBRTE_PMD_NITROX=n
# Compile generic security library
CONFIG_RTE_LIBRTE_SECURITY=n
# Compile generic compression device library
@ -368,6 +406,8 @@ CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV=n
CONFIG_RTE_LIBRTE_PMD_DSW_EVENTDEV=n
# Compile PMD for octeontx sso event device
CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF=n
# Compile PMD for octeontx2 sso event device
CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EVENTDEV=n
# Compile PMD for OPDL event device
CONFIG_RTE_LIBRTE_PMD_OPDL_EVENTDEV=n
# Compile PMD for NXP DPAA event device
@ -377,7 +417,7 @@ CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV=n
# Compile raw device support
# EXPERIMENTAL: API may change without prior notice
CONFIG_RTE_LIBRTE_RAWDEV=n
CONFIG_RTE_RAWDEV_MAX_DEVS=10
CONFIG_RTE_RAWDEV_MAX_DEVS=64
CONFIG_RTE_LIBRTE_PMD_SKELETON_RAWDEV=n
# Compile PMD for NXP DPAA2 CMDIF raw device
CONFIG_RTE_LIBRTE_PMD_DPAA2_CMDIF_RAWDEV=n
@ -385,8 +425,16 @@ CONFIG_RTE_LIBRTE_PMD_DPAA2_CMDIF_RAWDEV=n
CONFIG_RTE_LIBRTE_PMD_DPAA2_QDMA_RAWDEV=n
# Compile PMD for Intel FPGA raw device
CONFIG_RTE_LIBRTE_PMD_IFPGA_RAWDEV=n
# Compile PMD for Intel IOAT raw device
CONFIG_RTE_LIBRTE_PMD_IOAT_RAWDEV=n
# Compile PMD for octeontx2 DMA raw device
CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_DMA_RAWDEV=n
# Compile PMD for NTB raw device
CONFIG_RTE_LIBRTE_PMD_NTB_RAWDEV=n
# Compile librte_ring
CONFIG_RTE_LIBRTE_RING=y
# Compile librte_stack
CONFIG_RTE_LIBRTE_STACK=y
# Compile librte_mempool
CONFIG_RTE_LIBRTE_MEMPOOL=y
CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE=512
@ -398,6 +446,8 @@ CONFIG_RTE_DRIVER_MEMPOOL_RING=y
CONFIG_RTE_DRIVER_MEMPOOL_STACK=y
# Compile PMD for octeontx fpa mempool device
CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL=n
# Compile PMD for octeontx2 npa mempool device
CONFIG_RTE_LIBRTE_OCTEONTX2_MEMPOOL=n
# Compile librte_mbuf
CONFIG_RTE_LIBRTE_MBUF=y
CONFIG_RTE_LIBRTE_MBUF_DEBUG=n
@ -429,6 +479,14 @@ CONFIG_RTE_LIBRTE_BITRATE=y
CONFIG_RTE_LIBRTE_LATENCY_STATS=y
# Compile librte_telemetry
CONFIG_RTE_LIBRTE_TELEMETRY=n
# Compile librte_rcu
CONFIG_RTE_LIBRTE_RCU=n
CONFIG_RTE_LIBRTE_RCU_DEBUG=n
# Compile librte_rib
CONFIG_RTE_LIBRTE_RIB=n
# Compile librte_fib
CONFIG_RTE_LIBRTE_FIB=n
CONFIG_RTE_LIBRTE_FIB_DEBUG=n
# Compile librte_lpm
CONFIG_RTE_LIBRTE_LPM=n
CONFIG_RTE_LIBRTE_LPM_DEBUG=n
@ -480,7 +538,6 @@ CONFIG_RTE_PIPELINE_STATS_COLLECT=n
CONFIG_RTE_LIBRTE_KNI=n
CONFIG_RTE_LIBRTE_PMD_KNI=n
CONFIG_RTE_KNI_KMOD=n
CONFIG_RTE_KNI_KMOD_ETHTOOL=n
CONFIG_RTE_KNI_PREEMPT_DEFAULT=y
# Compile architecture we compile for. pdump library
CONFIG_RTE_LIBRTE_PDUMP=y
@ -499,27 +556,36 @@ CONFIG_RTE_LIBRTE_IFC_PMD=n
CONFIG_RTE_LIBRTE_BPF=n
# allow load BPF from ELF files (requires libelf)
CONFIG_RTE_LIBRTE_BPF_ELF=n
# Compile librte_ipsec
CONFIG_RTE_LIBRTE_IPSEC=n
# Compile architecture we compile for. test application
CONFIG_RTE_APP_TEST=y
CONFIG_RTE_APP_TEST_RESOURCE_TAR=n
# Compile architecture we compile for. procinfo application
CONFIG_RTE_PROC_INFO=y
CONFIG_RTE_PROC_INFO=n
# Compile architecture we compile for. PMD test application
CONFIG_RTE_TEST_PMD=y
CONFIG_RTE_TEST_PMD_RECORD_CORE_CYCLES=n
CONFIG_RTE_TEST_PMD_RECORD_BURST_STATS=n
# Compile architecture we compile for. bbdev test application
CONFIG_RTE_TEST_BBDEV=n
# Compile architecture we compile for. compression performance application
CONFIG_RTE_APP_COMPRESS_PERF=n
# Compile architecture we compile for. crypto performance application
CONFIG_RTE_APP_CRYPTO_PERF=n
# Compile architecture we compile for. eventdev application
CONFIG_RTE_APP_EVENTDEV=n
CONFIG_RTE_EXEC_ENV_LINUX=y
CONFIG_RTE_EXEC_ENV_LINUXAPP=y
CONFIG_RTE_LIBRTE_VHOST_POSTCOPY=n
# Common libraries, before Bus/PMDs
# NXP DPAA BUS and drivers
# NXP FSLMC BUS and DPAA2 drivers
# NXP ENETC PMD Driver
# HINIC PMD driver
# Hisilicon HNS3 PMD driver
# Compile PMD for Intel FPGA raw device
# To compile, CONFIG_RTE_EAL_VFIO should be enabled.
CONFIG_RTE_ARCH_ARM64=y
CONFIG_RTE_ARCH_64=y
# Maximum available cache line size in arm64 implementations.
@ -536,5 +602,5 @@ CONFIG_RTE_ARCH_ARM64_MEMCPY=n
#CONFIG_RTE_ARM64_MEMCPY_SKIP_GCC_VER_CHECK=n
#CONFIG_RTE_ARM64_MEMCPY_ALIGN_MASK=0xF
#CONFIG_RTE_ARM64_MEMCPY_STRICT_ALIGN=n
# NXP PFE PMD Driver
CONFIG_RTE_TOOLCHAIN_GCC=y
CONFIG_RTE_LIBRTE_PMD_XENVIRT=n

View File

@ -93,9 +93,15 @@ do
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_DPAA_MEMPOOL n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_OCTEONTX2_MEMPOOL n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_CFGFILE n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_EFD n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_FLOW_CLASSIFY n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_RCU n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_RIB n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_FIB n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_IPSEC n
# Disable all eventdevs
for eventdev in $(grep _EVENTDEV= "${OUTDIR}/.config" | sed 's@=\(y\|n\)@@g')
@ -129,7 +135,6 @@ do
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_OCTEONTX_ZIPVF n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_VHOST n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_KNI n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_XENVIRT n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_NULL n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER n
@ -145,6 +150,13 @@ do
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_COMMON_DPAAX n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_CAAM_JR n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_CAAM_JR_BE n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_BBDEV_NULL n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_MEMIF n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_BBDEV_TURBO_SW n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_BBDEV_FPGA_LTE_FEC n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_CRYPTO n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_NITROX n
# whitelist of enabled PMDs
# Soft PMDs to enable
@ -173,6 +185,8 @@ do
# Disable some other miscellanous items related to test apps
set_conf "${OUTDIR}" CONFIG_RTE_TEST_BBDEV n
set_conf "${OUTDIR}" CONFIG_RTE_APP_CRYPTO_PERF n
set_conf "${OUTDIR}" CONFIG_RTE_APP_COMPRESS_PERF n
set_conf "${OUTDIR}" CONFIG_RTE_PROC_INFO n
# Disable kernel modules
set_conf "${OUTDIR}" CONFIG_RTE_EAL_IGB_UIO n
@ -195,9 +209,7 @@ do
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_BNXT_PMD y
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_ENIC_PMD y
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_MLX4_PMD y
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_MLX4_DLOPEN_DEPS y
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_MLX5_PMD y
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS y
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_NFP_PMD y
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_QEDE_PMD y
# Sw PMD

View File

@ -1,34 +1,19 @@
# -*- cfg-sha: ac783e64ca20c977a7c1c42e72e6dce151b31aa9aecfbfa121b45e49e938f418
# BSD LICENSE
# -*- cfg-sha: 46dcea384e8aae2041f0f78fedef7f7da8906ed14c1ce3812b7e02ed4f2a3baf
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (C) IBM Corporation 2014.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of IBM Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2010-2016 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2010-2017 Intel Corporation
# RTE_EXEC_ENV values are the directories in mk/exec-env/
# String that appears before the version number
CONFIG_RTE_VER_PREFIX="DPDK"
# Version information completed when this file is processed for a build
CONFIG_RTE_VER_YEAR=19
CONFIG_RTE_VER_MONTH=11
CONFIG_RTE_VER_MINOR=2
CONFIG_RTE_VER_SUFFIX=""
CONFIG_RTE_VER_RELEASE=99
# RTE_EXEC_ENV values are the directories in mk/exec-env/
CONFIG_RTE_EXEC_ENV="linuxapp"
# RTE_ARCH values are architecture we compile for. directories in mk/arch/
CONFIG_RTE_ARCH="ppc_64"
@ -42,19 +27,19 @@ CONFIG_RTE_TOOLCHAIN="gcc"
CONFIG_RTE_FORCE_INTRINSICS=n
# Machine forces strict alignment constraints.
CONFIG_RTE_ARCH_STRICT_ALIGN=n
# Enable link time optimization
CONFIG_RTE_ENABLE_LTO=n
# Compile to share library
CONFIG_RTE_BUILD_SHARED_LIB=y
# Use newest code breaking previous ABI
CONFIG_RTE_NEXT_ABI=n
# Major ABI to overwrite library specific LIBABIVER
CONFIG_RTE_MAJOR_ABI=
# Machine's cache line size
CONFIG_RTE_CACHE_LINE_SIZE=128
# Memory model
CONFIG_RTE_USE_C11_MEM_MODEL=n
# Compile Environment Abstraction Layer
CONFIG_RTE_LIBRTE_EAL=y
CONFIG_RTE_MAX_LCORE=256
CONFIG_RTE_MAX_LCORE=1536
CONFIG_RTE_MAX_NUMA_NODES=32
CONFIG_RTE_MAX_HEAPS=32
CONFIG_RTE_MAX_MEMSEG_LISTS=64
@ -78,7 +63,6 @@ CONFIG_RTE_LOG_DP_LEVEL=RTE_LOG_INFO
CONFIG_RTE_LOG_HISTORY=256
CONFIG_RTE_BACKTRACE=y
CONFIG_RTE_LIBEAL_USE_HPET=n
CONFIG_RTE_EAL_ALLOW_INV_SOCKET_ID=n
CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
CONFIG_RTE_EAL_IGB_UIO=n
CONFIG_RTE_EAL_VFIO=y
@ -92,6 +76,8 @@ CONFIG_RTE_USE_LIBBSD=n
# field test and possible optimization.
CONFIG_RTE_ENABLE_AVX=y
CONFIG_RTE_ENABLE_AVX512=n
# Use ARM LSE ATOMIC instructions
CONFIG_RTE_ARM_FEATURE_ATOMICS=n
# Default driver path (or "" to disable)
CONFIG_RTE_EAL_PMD_PATH="/usr/lib64/dpdk-pmds"
# Compile Environment Abstraction Layer to support Vmware TSC map
@ -143,12 +129,8 @@ CONFIG_RTE_LIBRTE_BNX2X_DEBUG_PERIODIC=n
CONFIG_RTE_LIBRTE_BNXT_PMD=n
# Compile burst-oriented Chelsio Terminator (CXGBE) PMD
CONFIG_RTE_LIBRTE_CXGBE_PMD=n
CONFIG_RTE_LIBRTE_CXGBE_DEBUG=n
CONFIG_RTE_LIBRTE_CXGBE_DEBUG_REG=n
CONFIG_RTE_LIBRTE_CXGBE_DEBUG_MBOX=n
CONFIG_RTE_LIBRTE_CXGBE_DEBUG_TX=n
CONFIG_RTE_LIBRTE_CXGBE_DEBUG_RX=n
CONFIG_RTE_LIBRTE_CXGBE_TPUT=y
# Compile burst-oriented NXP PFE PMD driver
CONFIG_RTE_LIBRTE_PFE_PMD=n
# NXP DPAA Bus
CONFIG_RTE_LIBRTE_DPAA_BUS=n
CONFIG_RTE_LIBRTE_DPAA_MEMPOOL=n
@ -179,6 +161,10 @@ CONFIG_RTE_LIBRTE_E1000_DEBUG_RX=n
CONFIG_RTE_LIBRTE_E1000_DEBUG_TX=n
CONFIG_RTE_LIBRTE_E1000_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC=n
# Compile burst-oriented HINIC PMD driver
CONFIG_RTE_LIBRTE_HINIC_PMD=n
# Compile burst-oriented HNS3 PMD driver
CONFIG_RTE_LIBRTE_HNS3_PMD=n
# Compile burst-oriented IXGBE PMD driver
CONFIG_RTE_LIBRTE_IXGBE_PMD=n
CONFIG_RTE_LIBRTE_IXGBE_DEBUG_RX=n
@ -204,22 +190,34 @@ CONFIG_RTE_LIBRTE_FM10K_DEBUG_TX=n
CONFIG_RTE_LIBRTE_FM10K_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE=y
CONFIG_RTE_LIBRTE_FM10K_INC_VECTOR=y
# Compile burst-oriented AVF PMD driver
CONFIG_RTE_LIBRTE_AVF_PMD=n
CONFIG_RTE_LIBRTE_AVF_INC_VECTOR=y
CONFIG_RTE_LIBRTE_AVF_DEBUG_TX=n
CONFIG_RTE_LIBRTE_AVF_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_AVF_DEBUG_RX=n
CONFIG_RTE_LIBRTE_AVF_16BYTE_RX_DESC=n
# Compile burst-oriented ICE PMD driver
CONFIG_RTE_LIBRTE_ICE_PMD=n
CONFIG_RTE_LIBRTE_ICE_DEBUG_RX=n
CONFIG_RTE_LIBRTE_ICE_DEBUG_TX=n
CONFIG_RTE_LIBRTE_ICE_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC=y
CONFIG_RTE_LIBRTE_ICE_16BYTE_RX_DESC=n
# Compile burst-oriented IAVF PMD driver
CONFIG_RTE_LIBRTE_IAVF_PMD=n
CONFIG_RTE_LIBRTE_IAVF_DEBUG_TX=n
CONFIG_RTE_LIBRTE_IAVF_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_IAVF_DEBUG_RX=n
CONFIG_RTE_LIBRTE_IAVF_DEBUG_DUMP_DESC=n
CONFIG_RTE_LIBRTE_IAVF_16BYTE_RX_DESC=n
# Compile burst-oriented IPN3KE PMD driver
CONFIG_RTE_LIBRTE_IPN3KE_PMD=n
# Compile burst-oriented Mellanox ConnectX-3 (MLX4) PMD
CONFIG_RTE_LIBRTE_MLX4_PMD=n
CONFIG_RTE_LIBRTE_MLX4_DEBUG=n
CONFIG_RTE_LIBRTE_MLX4_DLOPEN_DEPS=n
# Compile burst-oriented Mellanox ConnectX-4, ConnectX-5 & Bluefield
# (MLX5) PMD
# Compile burst-oriented Mellanox ConnectX-4, ConnectX-5,
# ConnectX-6 & BlueField (MLX5) PMD
CONFIG_RTE_LIBRTE_MLX5_PMD=n
CONFIG_RTE_LIBRTE_MLX5_DEBUG=n
CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS=n
# Linking method for mlx4/5 dependency on ibverbs and related libraries
# Default linking is dynamic by linker.
# Other options are: dynamic by dlopen at run-time, or statically embedded.
CONFIG_RTE_IBVERBS_LINK_DLOPEN=n
CONFIG_RTE_IBVERBS_LINK_STATIC=n
# Compile burst-oriented Netronome NFP PMD driver
CONFIG_RTE_LIBRTE_NFP_PMD=n
CONFIG_RTE_LIBRTE_NFP_DEBUG_TX=n
@ -236,6 +234,8 @@ CONFIG_RTE_LIBRTE_SFC_EFX_PMD=n
CONFIG_RTE_LIBRTE_SFC_EFX_DEBUG=n
# Compile software PMD backed by SZEDATA2 device
CONFIG_RTE_LIBRTE_PMD_SZEDATA2=n
# Compile software PMD backed by NFB device
CONFIG_RTE_LIBRTE_NFB_PMD=n
# Compile burst-oriented Cavium Thunderx NICVF PMD driver
CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD=n
CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_RX=n
@ -248,6 +248,8 @@ CONFIG_RTE_LIBRTE_LIO_DEBUG_MBOX=n
CONFIG_RTE_LIBRTE_LIO_DEBUG_REGS=n
# Compile burst-oriented Cavium OCTEONTX network PMD driver
CONFIG_RTE_LIBRTE_OCTEONTX_PMD=n
# Compile burst-oriented Marvell OCTEON TX2 network PMD driver
CONFIG_RTE_LIBRTE_OCTEONTX2_PMD=n
# Compile WRS accelerated virtual port (AVP) guest PMD driver
CONFIG_RTE_LIBRTE_AVP_PMD=n
CONFIG_RTE_LIBRTE_AVP_DEBUG_RX=n
@ -267,6 +269,10 @@ CONFIG_RTE_LIBRTE_VMXNET3_DEBUG_TX=n
CONFIG_RTE_LIBRTE_VMXNET3_DEBUG_TX_FREE=n
# Compile software PMD backed by AF_PACKET sockets (Linux only)
CONFIG_RTE_LIBRTE_PMD_AF_PACKET=n
# Compile software PMD backed by AF_XDP sockets (Linux only)
CONFIG_RTE_LIBRTE_PMD_AF_XDP=n
# Compile Memory Interface PMD driver (Linux only)
CONFIG_RTE_LIBRTE_PMD_MEMIF=n
# Compile link bonding PMD library
CONFIG_RTE_LIBRTE_PMD_BOND=n
CONFIG_RTE_LIBRTE_BOND_DEBUG_ALB=n
@ -304,12 +310,17 @@ CONFIG_RTE_PMD_PACKET_PREFETCH=y
# Compile generic wireless base band device library
# EXPERIMENTAL: API may change without prior notice
CONFIG_RTE_LIBRTE_BBDEV=n
CONFIG_RTE_LIBRTE_BBDEV_DEBUG=n
CONFIG_RTE_BBDEV_MAX_DEVS=128
CONFIG_RTE_BBDEV_OFFLOAD_COST=n
CONFIG_RTE_BBDEV_OFFLOAD_COST=y
CONFIG_RTE_BBDEV_SDK_AVX2=n
CONFIG_RTE_BBDEV_SDK_AVX512=n
# Compile PMD for NULL bbdev device
CONFIG_RTE_LIBRTE_PMD_BBDEV_NULL=y
CONFIG_RTE_LIBRTE_PMD_BBDEV_NULL=n
# Compile PMD for turbo software bbdev device
CONFIG_RTE_LIBRTE_PMD_BBDEV_TURBO_SW=n
# Compile PMD for Intel FPGA LTE FEC bbdev device
CONFIG_RTE_LIBRTE_PMD_BBDEV_FPGA_LTE_FEC=n
# Compile generic crypto device library
CONFIG_RTE_LIBRTE_CRYPTODEV=n
CONFIG_RTE_CRYPTO_MAX_DEVS=64
@ -325,13 +336,15 @@ CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC=n
CONFIG_RTE_LIBRTE_PMD_DPAA_SEC=n
CONFIG_RTE_LIBRTE_DPAA_MAX_CRYPTODEV=4
# Compile PMD for Cavium OCTEON TX crypto device
CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO=y
CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO=n
# Compile PMD for Marvell OCTEON TX2 crypto device
CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_CRYPTO=n
# Compile PMD for QuickAssist based devices - see docs for details
CONFIG_RTE_LIBRTE_PMD_QAT=n
CONFIG_RTE_LIBRTE_PMD_QAT_SYM=n
CONFIG_RTE_LIBRTE_PMD_QAT_ASYM=n
# Max. number of QuickAssist devices, which can be detected and attached
CONFIG_RTE_PMD_QAT_MAX_PCI_DEVICES=48
CONFIG_RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS=16
CONFIG_RTE_PMD_QAT_COMP_IM_BUFFER_SIZE=65536
# Compile PMD for virtio crypto devices
CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO=n
@ -358,6 +371,8 @@ CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=n
CONFIG_RTE_LIBRTE_PMD_CCP=n
# Compile PMD for Marvell Crypto device
CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO=n
# Compile PMD for NITROX crypto device
CONFIG_RTE_LIBRTE_PMD_NITROX=n
# Compile generic security library
CONFIG_RTE_LIBRTE_SECURITY=n
# Compile generic compression device library
@ -389,6 +404,8 @@ CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV=n
CONFIG_RTE_LIBRTE_PMD_DSW_EVENTDEV=n
# Compile PMD for octeontx sso event device
CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF=n
# Compile PMD for octeontx2 sso event device
CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EVENTDEV=n
# Compile PMD for OPDL event device
CONFIG_RTE_LIBRTE_PMD_OPDL_EVENTDEV=n
# Compile PMD for NXP DPAA event device
@ -398,7 +415,7 @@ CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV=n
# Compile raw device support
# EXPERIMENTAL: API may change without prior notice
CONFIG_RTE_LIBRTE_RAWDEV=n
CONFIG_RTE_RAWDEV_MAX_DEVS=10
CONFIG_RTE_RAWDEV_MAX_DEVS=64
CONFIG_RTE_LIBRTE_PMD_SKELETON_RAWDEV=n
# Compile PMD for NXP DPAA2 CMDIF raw device
CONFIG_RTE_LIBRTE_PMD_DPAA2_CMDIF_RAWDEV=n
@ -406,8 +423,16 @@ CONFIG_RTE_LIBRTE_PMD_DPAA2_CMDIF_RAWDEV=n
CONFIG_RTE_LIBRTE_PMD_DPAA2_QDMA_RAWDEV=n
# Compile PMD for Intel FPGA raw device
CONFIG_RTE_LIBRTE_PMD_IFPGA_RAWDEV=n
# Compile PMD for Intel IOAT raw device
CONFIG_RTE_LIBRTE_PMD_IOAT_RAWDEV=n
# Compile PMD for octeontx2 DMA raw device
CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_DMA_RAWDEV=n
# Compile PMD for NTB raw device
CONFIG_RTE_LIBRTE_PMD_NTB_RAWDEV=n
# Compile librte_ring
CONFIG_RTE_LIBRTE_RING=y
# Compile librte_stack
CONFIG_RTE_LIBRTE_STACK=y
# Compile librte_mempool
CONFIG_RTE_LIBRTE_MEMPOOL=y
CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE=512
@ -419,6 +444,8 @@ CONFIG_RTE_DRIVER_MEMPOOL_RING=y
CONFIG_RTE_DRIVER_MEMPOOL_STACK=y
# Compile PMD for octeontx fpa mempool device
CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL=n
# Compile PMD for octeontx2 npa mempool device
CONFIG_RTE_LIBRTE_OCTEONTX2_MEMPOOL=n
# Compile librte_mbuf
CONFIG_RTE_LIBRTE_MBUF=y
CONFIG_RTE_LIBRTE_MBUF_DEBUG=n
@ -450,6 +477,14 @@ CONFIG_RTE_LIBRTE_BITRATE=y
CONFIG_RTE_LIBRTE_LATENCY_STATS=y
# Compile librte_telemetry
CONFIG_RTE_LIBRTE_TELEMETRY=n
# Compile librte_rcu
CONFIG_RTE_LIBRTE_RCU=n
CONFIG_RTE_LIBRTE_RCU_DEBUG=n
# Compile librte_rib
CONFIG_RTE_LIBRTE_RIB=n
# Compile librte_fib
CONFIG_RTE_LIBRTE_FIB=n
CONFIG_RTE_LIBRTE_FIB_DEBUG=n
# Compile librte_lpm
CONFIG_RTE_LIBRTE_LPM=n
CONFIG_RTE_LIBRTE_LPM_DEBUG=n
@ -501,7 +536,6 @@ CONFIG_RTE_PIPELINE_STATS_COLLECT=n
CONFIG_RTE_LIBRTE_KNI=n
CONFIG_RTE_LIBRTE_PMD_KNI=n
CONFIG_RTE_KNI_KMOD=n
CONFIG_RTE_KNI_KMOD_ETHTOOL=n
CONFIG_RTE_KNI_PREEMPT_DEFAULT=y
# Compile architecture we compile for. pdump library
CONFIG_RTE_LIBRTE_PDUMP=y
@ -520,31 +554,39 @@ CONFIG_RTE_LIBRTE_IFC_PMD=n
CONFIG_RTE_LIBRTE_BPF=n
# allow load BPF from ELF files (requires libelf)
CONFIG_RTE_LIBRTE_BPF_ELF=n
# Compile librte_ipsec
CONFIG_RTE_LIBRTE_IPSEC=n
# Compile architecture we compile for. test application
CONFIG_RTE_APP_TEST=y
CONFIG_RTE_APP_TEST_RESOURCE_TAR=n
# Compile architecture we compile for. procinfo application
CONFIG_RTE_PROC_INFO=y
CONFIG_RTE_PROC_INFO=n
# Compile architecture we compile for. PMD test application
CONFIG_RTE_TEST_PMD=y
CONFIG_RTE_TEST_PMD_RECORD_CORE_CYCLES=n
CONFIG_RTE_TEST_PMD_RECORD_BURST_STATS=n
# Compile architecture we compile for. bbdev test application
CONFIG_RTE_TEST_BBDEV=n
# Compile architecture we compile for. compression performance application
CONFIG_RTE_APP_COMPRESS_PERF=n
# Compile architecture we compile for. crypto performance application
CONFIG_RTE_APP_CRYPTO_PERF=n
# Compile architecture we compile for. eventdev application
CONFIG_RTE_APP_EVENTDEV=n
CONFIG_RTE_EXEC_ENV_LINUX=y
CONFIG_RTE_EXEC_ENV_LINUXAPP=y
CONFIG_RTE_LIBRTE_VHOST_POSTCOPY=n
# Common libraries, before Bus/PMDs
# NXP DPAA BUS and drivers
# NXP FSLMC BUS and DPAA2 drivers
# NXP ENETC PMD Driver
# HINIC PMD driver
# Hisilicon HNS3 PMD driver
# Compile PMD for Intel FPGA raw device
# To compile, CONFIG_RTE_EAL_VFIO should be enabled.
CONFIG_RTE_ARCH_PPC_64=y
CONFIG_RTE_ARCH_64=y
CONFIG_RTE_TOOLCHAIN_GCC=y
# Note: Power doesn't have this support
# Note: Initially, all of architecture we compile for. PMD drivers compilation are turned off on Power
# Will turn on them only after architecture we compile for. successful testing on Power
CONFIG_RTE_LIBRTE_PMD_XENVIRT=n

View File

@ -1,11 +1,19 @@
# -*- cfg-sha: 2ba93102021dc5d38494cf5090c3ecaca37db13153dd558b1511a56f2a3d9b10
# -*- cfg-sha: fa840957ea3dbcff81dd55098161c11d90c3cad399b89e6089f793e7362a5c48
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2010-2014 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2010-2016 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2010-2017 Intel Corporation
# RTE_EXEC_ENV values are the directories in mk/exec-env/
# String that appears before the version number
CONFIG_RTE_VER_PREFIX="DPDK"
# Version information completed when this file is processed for a build
CONFIG_RTE_VER_YEAR=19
CONFIG_RTE_VER_MONTH=11
CONFIG_RTE_VER_MINOR=2
CONFIG_RTE_VER_SUFFIX=""
CONFIG_RTE_VER_RELEASE=99
# RTE_EXEC_ENV values are the directories in mk/exec-env/
CONFIG_RTE_EXEC_ENV="linuxapp"
# RTE_ARCH values are architecture we compile for. directories in mk/arch/
CONFIG_RTE_ARCH="x86_64"
@ -19,12 +27,12 @@ CONFIG_RTE_TOOLCHAIN="gcc"
CONFIG_RTE_FORCE_INTRINSICS=n
# Machine forces strict alignment constraints.
CONFIG_RTE_ARCH_STRICT_ALIGN=n
# Enable link time optimization
CONFIG_RTE_ENABLE_LTO=n
# Compile to share library
CONFIG_RTE_BUILD_SHARED_LIB=y
# Use newest code breaking previous ABI
CONFIG_RTE_NEXT_ABI=n
# Major ABI to overwrite library specific LIBABIVER
CONFIG_RTE_MAJOR_ABI=
# Machine's cache line size
CONFIG_RTE_CACHE_LINE_SIZE=64
# Memory model
@ -55,7 +63,6 @@ CONFIG_RTE_LOG_DP_LEVEL=RTE_LOG_INFO
CONFIG_RTE_LOG_HISTORY=256
CONFIG_RTE_BACKTRACE=y
CONFIG_RTE_LIBEAL_USE_HPET=n
CONFIG_RTE_EAL_ALLOW_INV_SOCKET_ID=n
CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
CONFIG_RTE_EAL_IGB_UIO=n
CONFIG_RTE_EAL_VFIO=y
@ -69,6 +76,8 @@ CONFIG_RTE_USE_LIBBSD=n
# field test and possible optimization.
CONFIG_RTE_ENABLE_AVX=y
CONFIG_RTE_ENABLE_AVX512=n
# Use ARM LSE ATOMIC instructions
CONFIG_RTE_ARM_FEATURE_ATOMICS=n
# Default driver path (or "" to disable)
CONFIG_RTE_EAL_PMD_PATH="/usr/lib64/dpdk-pmds"
# Compile Environment Abstraction Layer to support Vmware TSC map
@ -120,12 +129,8 @@ CONFIG_RTE_LIBRTE_BNX2X_DEBUG_PERIODIC=n
CONFIG_RTE_LIBRTE_BNXT_PMD=y
# Compile burst-oriented Chelsio Terminator (CXGBE) PMD
CONFIG_RTE_LIBRTE_CXGBE_PMD=n
CONFIG_RTE_LIBRTE_CXGBE_DEBUG=n
CONFIG_RTE_LIBRTE_CXGBE_DEBUG_REG=n
CONFIG_RTE_LIBRTE_CXGBE_DEBUG_MBOX=n
CONFIG_RTE_LIBRTE_CXGBE_DEBUG_TX=n
CONFIG_RTE_LIBRTE_CXGBE_DEBUG_RX=n
CONFIG_RTE_LIBRTE_CXGBE_TPUT=y
# Compile burst-oriented NXP PFE PMD driver
CONFIG_RTE_LIBRTE_PFE_PMD=n
# NXP DPAA Bus
CONFIG_RTE_LIBRTE_DPAA_BUS=n
CONFIG_RTE_LIBRTE_DPAA_MEMPOOL=n
@ -156,6 +161,10 @@ CONFIG_RTE_LIBRTE_E1000_DEBUG_RX=n
CONFIG_RTE_LIBRTE_E1000_DEBUG_TX=n
CONFIG_RTE_LIBRTE_E1000_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC=n
# Compile burst-oriented HINIC PMD driver
CONFIG_RTE_LIBRTE_HINIC_PMD=n
# Compile burst-oriented HNS3 PMD driver
CONFIG_RTE_LIBRTE_HNS3_PMD=n
# Compile burst-oriented IXGBE PMD driver
CONFIG_RTE_LIBRTE_IXGBE_PMD=y
CONFIG_RTE_LIBRTE_IXGBE_DEBUG_RX=n
@ -181,22 +190,34 @@ CONFIG_RTE_LIBRTE_FM10K_DEBUG_TX=n
CONFIG_RTE_LIBRTE_FM10K_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE=y
CONFIG_RTE_LIBRTE_FM10K_INC_VECTOR=y
# Compile burst-oriented AVF PMD driver
CONFIG_RTE_LIBRTE_AVF_PMD=n
CONFIG_RTE_LIBRTE_AVF_INC_VECTOR=y
CONFIG_RTE_LIBRTE_AVF_DEBUG_TX=n
CONFIG_RTE_LIBRTE_AVF_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_AVF_DEBUG_RX=n
CONFIG_RTE_LIBRTE_AVF_16BYTE_RX_DESC=n
# Compile burst-oriented ICE PMD driver
CONFIG_RTE_LIBRTE_ICE_PMD=n
CONFIG_RTE_LIBRTE_ICE_DEBUG_RX=n
CONFIG_RTE_LIBRTE_ICE_DEBUG_TX=n
CONFIG_RTE_LIBRTE_ICE_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_ICE_RX_ALLOW_BULK_ALLOC=y
CONFIG_RTE_LIBRTE_ICE_16BYTE_RX_DESC=n
# Compile burst-oriented IAVF PMD driver
CONFIG_RTE_LIBRTE_IAVF_PMD=n
CONFIG_RTE_LIBRTE_IAVF_DEBUG_TX=n
CONFIG_RTE_LIBRTE_IAVF_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_IAVF_DEBUG_RX=n
CONFIG_RTE_LIBRTE_IAVF_DEBUG_DUMP_DESC=n
CONFIG_RTE_LIBRTE_IAVF_16BYTE_RX_DESC=n
# Compile burst-oriented IPN3KE PMD driver
CONFIG_RTE_LIBRTE_IPN3KE_PMD=n
# Compile burst-oriented Mellanox ConnectX-3 (MLX4) PMD
CONFIG_RTE_LIBRTE_MLX4_PMD=y
CONFIG_RTE_LIBRTE_MLX4_DEBUG=n
CONFIG_RTE_LIBRTE_MLX4_DLOPEN_DEPS=y
# Compile burst-oriented Mellanox ConnectX-4, ConnectX-5 & Bluefield
# (MLX5) PMD
# Compile burst-oriented Mellanox ConnectX-4, ConnectX-5,
# ConnectX-6 & BlueField (MLX5) PMD
CONFIG_RTE_LIBRTE_MLX5_PMD=y
CONFIG_RTE_LIBRTE_MLX5_DEBUG=n
CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS=y
# Linking method for mlx4/5 dependency on ibverbs and related libraries
# Default linking is dynamic by linker.
# Other options are: dynamic by dlopen at run-time, or statically embedded.
CONFIG_RTE_IBVERBS_LINK_DLOPEN=n
CONFIG_RTE_IBVERBS_LINK_STATIC=n
# Compile burst-oriented Netronome NFP PMD driver
CONFIG_RTE_LIBRTE_NFP_PMD=y
CONFIG_RTE_LIBRTE_NFP_DEBUG_TX=n
@ -213,6 +234,8 @@ CONFIG_RTE_LIBRTE_SFC_EFX_PMD=n
CONFIG_RTE_LIBRTE_SFC_EFX_DEBUG=n
# Compile software PMD backed by SZEDATA2 device
CONFIG_RTE_LIBRTE_PMD_SZEDATA2=n
# Compile software PMD backed by NFB device
CONFIG_RTE_LIBRTE_NFB_PMD=n
# Compile burst-oriented Cavium Thunderx NICVF PMD driver
CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD=n
CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_RX=n
@ -225,6 +248,8 @@ CONFIG_RTE_LIBRTE_LIO_DEBUG_MBOX=n
CONFIG_RTE_LIBRTE_LIO_DEBUG_REGS=n
# Compile burst-oriented Cavium OCTEONTX network PMD driver
CONFIG_RTE_LIBRTE_OCTEONTX_PMD=n
# Compile burst-oriented Marvell OCTEON TX2 network PMD driver
CONFIG_RTE_LIBRTE_OCTEONTX2_PMD=n
# Compile WRS accelerated virtual port (AVP) guest PMD driver
CONFIG_RTE_LIBRTE_AVP_PMD=n
CONFIG_RTE_LIBRTE_AVP_DEBUG_RX=n
@ -244,6 +269,10 @@ CONFIG_RTE_LIBRTE_VMXNET3_DEBUG_TX=n
CONFIG_RTE_LIBRTE_VMXNET3_DEBUG_TX_FREE=n
# Compile software PMD backed by AF_PACKET sockets (Linux only)
CONFIG_RTE_LIBRTE_PMD_AF_PACKET=n
# Compile software PMD backed by AF_XDP sockets (Linux only)
CONFIG_RTE_LIBRTE_PMD_AF_XDP=n
# Compile Memory Interface PMD driver (Linux only)
CONFIG_RTE_LIBRTE_PMD_MEMIF=n
# Compile link bonding PMD library
CONFIG_RTE_LIBRTE_PMD_BOND=n
CONFIG_RTE_LIBRTE_BOND_DEBUG_ALB=n
@ -281,12 +310,17 @@ CONFIG_RTE_PMD_PACKET_PREFETCH=y
# Compile generic wireless base band device library
# EXPERIMENTAL: API may change without prior notice
CONFIG_RTE_LIBRTE_BBDEV=n
CONFIG_RTE_LIBRTE_BBDEV_DEBUG=n
CONFIG_RTE_BBDEV_MAX_DEVS=128
CONFIG_RTE_BBDEV_OFFLOAD_COST=n
CONFIG_RTE_BBDEV_OFFLOAD_COST=y
CONFIG_RTE_BBDEV_SDK_AVX2=n
CONFIG_RTE_BBDEV_SDK_AVX512=n
# Compile PMD for NULL bbdev device
CONFIG_RTE_LIBRTE_PMD_BBDEV_NULL=y
CONFIG_RTE_LIBRTE_PMD_BBDEV_NULL=n
# Compile PMD for turbo software bbdev device
CONFIG_RTE_LIBRTE_PMD_BBDEV_TURBO_SW=n
# Compile PMD for Intel FPGA LTE FEC bbdev device
CONFIG_RTE_LIBRTE_PMD_BBDEV_FPGA_LTE_FEC=n
# Compile generic crypto device library
CONFIG_RTE_LIBRTE_CRYPTODEV=n
CONFIG_RTE_CRYPTO_MAX_DEVS=64
@ -302,13 +336,15 @@ CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC=n
CONFIG_RTE_LIBRTE_PMD_DPAA_SEC=n
CONFIG_RTE_LIBRTE_DPAA_MAX_CRYPTODEV=4
# Compile PMD for Cavium OCTEON TX crypto device
CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO=y
CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO=n
# Compile PMD for Marvell OCTEON TX2 crypto device
CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_CRYPTO=n
# Compile PMD for QuickAssist based devices - see docs for details
CONFIG_RTE_LIBRTE_PMD_QAT=n
CONFIG_RTE_LIBRTE_PMD_QAT_SYM=n
CONFIG_RTE_LIBRTE_PMD_QAT_ASYM=n
# Max. number of QuickAssist devices, which can be detected and attached
CONFIG_RTE_PMD_QAT_MAX_PCI_DEVICES=48
CONFIG_RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS=16
CONFIG_RTE_PMD_QAT_COMP_IM_BUFFER_SIZE=65536
# Compile PMD for virtio crypto devices
CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO=n
@ -335,6 +371,8 @@ CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=n
CONFIG_RTE_LIBRTE_PMD_CCP=n
# Compile PMD for Marvell Crypto device
CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO=n
# Compile PMD for NITROX crypto device
CONFIG_RTE_LIBRTE_PMD_NITROX=n
# Compile generic security library
CONFIG_RTE_LIBRTE_SECURITY=n
# Compile generic compression device library
@ -366,6 +404,8 @@ CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV=n
CONFIG_RTE_LIBRTE_PMD_DSW_EVENTDEV=n
# Compile PMD for octeontx sso event device
CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF=n
# Compile PMD for octeontx2 sso event device
CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EVENTDEV=n
# Compile PMD for OPDL event device
CONFIG_RTE_LIBRTE_PMD_OPDL_EVENTDEV=n
# Compile PMD for NXP DPAA event device
@ -375,7 +415,7 @@ CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV=n
# Compile raw device support
# EXPERIMENTAL: API may change without prior notice
CONFIG_RTE_LIBRTE_RAWDEV=n
CONFIG_RTE_RAWDEV_MAX_DEVS=10
CONFIG_RTE_RAWDEV_MAX_DEVS=64
CONFIG_RTE_LIBRTE_PMD_SKELETON_RAWDEV=n
# Compile PMD for NXP DPAA2 CMDIF raw device
CONFIG_RTE_LIBRTE_PMD_DPAA2_CMDIF_RAWDEV=n
@ -383,8 +423,16 @@ CONFIG_RTE_LIBRTE_PMD_DPAA2_CMDIF_RAWDEV=n
CONFIG_RTE_LIBRTE_PMD_DPAA2_QDMA_RAWDEV=n
# Compile PMD for Intel FPGA raw device
CONFIG_RTE_LIBRTE_PMD_IFPGA_RAWDEV=n
# Compile PMD for Intel IOAT raw device
CONFIG_RTE_LIBRTE_PMD_IOAT_RAWDEV=n
# Compile PMD for octeontx2 DMA raw device
CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_DMA_RAWDEV=n
# Compile PMD for NTB raw device
CONFIG_RTE_LIBRTE_PMD_NTB_RAWDEV=n
# Compile librte_ring
CONFIG_RTE_LIBRTE_RING=y
# Compile librte_stack
CONFIG_RTE_LIBRTE_STACK=y
# Compile librte_mempool
CONFIG_RTE_LIBRTE_MEMPOOL=y
CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE=512
@ -396,6 +444,8 @@ CONFIG_RTE_DRIVER_MEMPOOL_RING=y
CONFIG_RTE_DRIVER_MEMPOOL_STACK=y
# Compile PMD for octeontx fpa mempool device
CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL=n
# Compile PMD for octeontx2 npa mempool device
CONFIG_RTE_LIBRTE_OCTEONTX2_MEMPOOL=n
# Compile librte_mbuf
CONFIG_RTE_LIBRTE_MBUF=y
CONFIG_RTE_LIBRTE_MBUF_DEBUG=n
@ -427,6 +477,14 @@ CONFIG_RTE_LIBRTE_BITRATE=y
CONFIG_RTE_LIBRTE_LATENCY_STATS=y
# Compile librte_telemetry
CONFIG_RTE_LIBRTE_TELEMETRY=n
# Compile librte_rcu
CONFIG_RTE_LIBRTE_RCU=n
CONFIG_RTE_LIBRTE_RCU_DEBUG=n
# Compile librte_rib
CONFIG_RTE_LIBRTE_RIB=n
# Compile librte_fib
CONFIG_RTE_LIBRTE_FIB=n
CONFIG_RTE_LIBRTE_FIB_DEBUG=n
# Compile librte_lpm
CONFIG_RTE_LIBRTE_LPM=n
CONFIG_RTE_LIBRTE_LPM_DEBUG=n
@ -478,7 +536,6 @@ CONFIG_RTE_PIPELINE_STATS_COLLECT=n
CONFIG_RTE_LIBRTE_KNI=n
CONFIG_RTE_LIBRTE_PMD_KNI=n
CONFIG_RTE_KNI_KMOD=n
CONFIG_RTE_KNI_KMOD_ETHTOOL=n
CONFIG_RTE_KNI_PREEMPT_DEFAULT=y
# Compile the pdump library
CONFIG_RTE_LIBRTE_PDUMP=y
@ -497,29 +554,37 @@ CONFIG_RTE_LIBRTE_IFC_PMD=n
CONFIG_RTE_LIBRTE_BPF=n
# allow load BPF from ELF files (requires libelf)
CONFIG_RTE_LIBRTE_BPF_ELF=n
# Compile librte_ipsec
CONFIG_RTE_LIBRTE_IPSEC=n
# Compile the test application
CONFIG_RTE_APP_TEST=y
CONFIG_RTE_APP_TEST_RESOURCE_TAR=n
# Compile the procinfo application
CONFIG_RTE_PROC_INFO=y
CONFIG_RTE_PROC_INFO=n
# Compile the PMD test application
CONFIG_RTE_TEST_PMD=y
CONFIG_RTE_TEST_PMD_RECORD_CORE_CYCLES=n
CONFIG_RTE_TEST_PMD_RECORD_BURST_STATS=n
# Compile the bbdev test application
CONFIG_RTE_TEST_BBDEV=n
# Compile the compression performance application
CONFIG_RTE_APP_COMPRESS_PERF=n
# Compile the crypto performance application
CONFIG_RTE_APP_CRYPTO_PERF=n
# Compile the eventdev application
CONFIG_RTE_APP_EVENTDEV=n
CONFIG_RTE_EXEC_ENV_LINUX=y
CONFIG_RTE_EXEC_ENV_LINUXAPP=y
CONFIG_RTE_LIBRTE_VHOST_POSTCOPY=n
# Common libraries, before Bus/PMDs
# NXP DPAA BUS and drivers
# NXP FSLMC BUS and DPAA2 drivers
# NXP ENETC PMD Driver
# HINIC PMD driver
# Hisilicon HNS3 PMD driver
# Compile PMD for Intel FPGA raw device
# To compile, CONFIG_RTE_EAL_VFIO should be enabled.
CONFIG_RTE_ARCH_X86_64=y
CONFIG_RTE_ARCH_X86=y
CONFIG_RTE_ARCH_64=y
CONFIG_RTE_TOOLCHAIN_GCC=y
CONFIG_RTE_LIBRTE_PMD_XENVIRT=n

View File

@ -4,12 +4,12 @@
%bcond_without tools
# Don't edit Version: and Release: directly, only these:
#% define commit0 0da7f445df445630c794897347ee360d6fe6348b
#% define date 20181127
#% define commit0 7001c8fdb27357c67147c0a13cb3826e48c0f2bf
#% define date 20191128
#% define shortcommit0 %(c=%{commit0}; echo ${c:0:7})
%define ver 18.11.2
%define rel 3
%define ver 19.11.2
%define rel 1
%define srcname dpdk-stable
@ -39,31 +39,6 @@ Source506: x86_64-native-linuxapp-gcc-config
# Patches only in dpdk package
# Bug 1525039
Patch10: 0001-net-virtio-allocate-vrings-on-device-NUMA-node.patch
# Bug 1700373
Patch11: 0001-net-virtio-add-packed-virtqueue-defines.patch
Patch12: 0002-net-virtio-add-packed-virtqueue-helpers.patch
Patch13: 0003-net-virtio-vring-init-for-packed-queues.patch
Patch14: 0004-net-virtio-dump-packed-virtqueue-data.patch
Patch15: 0005-net-virtio-implement-Tx-path-for-packed-queues.patch
Patch16: 0006-net-virtio-implement-Rx-path-for-packed-queues.patch
Patch17: 0007-net-virtio-support-packed-queue-in-send-command.patch
Patch18: 0008-net-virtio-user-add-option-to-use-packed-queues.patch
Patch19: 0009-net-virtio-user-fail-if-cq-used-with-packed-vq.patch
Patch20: 0010-net-virtio-enable-packed-virtqueues-by-default.patch
Patch21: 0011-net-virtio-avoid-double-accounting-of-bytes.patch
Patch22: 0012-net-virtio-user-fix-packed-vq-option-parsing.patch
Patch23: 0013-net-virtio-user-fix-supported-features-list.patch
Patch24: 0014-net-virtio-check-head-desc-with-correct-wrap-counter.patch
Patch25: 0015-net-virtio-user-support-control-VQ-for-packed.patch
Patch26: 0016-net-virtio-fix-control-VQ.patch
Patch27: 0017-net-virtio-user-fix-control-VQ.patch
Patch28: 0018-vhost-batch-used-descs-chains-write-back-with-packed.patch
Patch29: 0019-net-virtio-fix-interrupt-helper-for-packed-ring.patch
Patch30: 0020-net-virtio-fix-calculation-of-device_event-ptr.patch
Summary: Set of libraries and drivers for fast packet processing
#
@ -122,7 +97,6 @@ BuildRequires: gcc, kernel-headers, zlib-devel, numactl-devel
BuildRequires: doxygen, %{_py}-devel, %{_py}-sphinx
%ifarch x86_64
BuildRequires: rdma-core-devel >= 15 libmnl-devel
%global __requires_exclude_from ^%{_libdir}/librte_pmd_mlx[45]_glue\.so.*$
%endif
%description
@ -132,6 +106,9 @@ fast packet processing in the user space.
%package devel
Summary: Data Plane Development Kit development files
Requires: %{name}%{?_isa} = %{version}-%{release}
%ifarch x86_64
Requires: rdma-core-devel libmnl-devel
%endif
%description devel
This package contains the headers and other files needed for developing
@ -187,7 +164,7 @@ rm -f obj.o
export EXTRA_CFLAGS="$(echo %{optflags} | sed -e 's:-Wall::g' -e 's:-march=[[:alnum:]]* ::g') -Wformat -fPIC %{_hardening_ldflags}"
export EXTRA_LDFLAGS=$(echo %{__global_ldflags} | sed -e's/-Wl,//g' -e's/-spec.*//')
export HOST_EXTRA_CFLAGS="$EXTRA_CFLAGS $EXTRA_RPM_LDFLAGS"
export EXTRA_HOST_LDFLAGS=$(echo %{__global_ldflags} | sed -e's/-spec.*//')
export EXTRA_HOST_LDFLAGS="$EXTRA_RPM_LDFLAGS $(echo %{__global_ldflags} | sed -e's/-spec.*//')"
# DPDK defaults to using builder-specific compiler flags. However,
# the config has been changed by specifying CONFIG_RTE_MACHINE=default
@ -200,7 +177,7 @@ make V=1 O=%{target} T=%{target} %{?_smp_mflags} config
cp -f %{SOURCE500} %{SOURCE502} "%{_sourcedir}/%{target}-config" .
%{SOURCE502} %{target}-config "%{target}/.config"
make V=1 O=%{target} %{?_smp_mflags}
make V=1 O=%{target} %{?_smp_mflags}
# Creating PDF's has excessive build-requirements, html docs suffice fine
make V=1 O=%{target} %{?_smp_mflags} doc-api-html doc-guides-html
@ -223,15 +200,6 @@ find %{buildroot}%{sdkdir}/ -name "*.py" -exec \
mkdir -p %{buildroot}/%{pmddir}
for f in %{buildroot}/%{_libdir}/*_pmd_*.so.*; do
bn=$(basename ${f})
%ifarch x86_64
case $bn in
librte_pmd_mlx[45]_glue.so.*)
mkdir -p %{buildroot}/%{pmddir}-glue
ln -s ../${bn} %{buildroot}%{pmddir}-glue/${bn}
continue
;;
esac
%endif
ln -s ../${bn} %{buildroot}%{pmddir}/${bn}
done
@ -239,8 +207,10 @@ done
rm -rf %{buildroot}%{sdkdir}/usertools
rm -rf %{buildroot}%{_sbindir}/dpdk-devbind
%endif
rm -f %{buildroot}%{sdkdir}/usertools/dpdk-pmdinfo.py
rm -f %{buildroot}%{sdkdir}/usertools/dpdk-setup.sh
rm -f %{buildroot}%{sdkdir}/usertools/meson.build
rm -f %{buildroot}%{_bindir}/dpdk-pdump
rm -f %{buildroot}%{_bindir}/dpdk-pmdinfo
rm -f %{buildroot}%{_bindir}/dpdk-test-crypto-perf
rm -f %{buildroot}%{_bindir}/dpdk-test-eventdev
@ -255,6 +225,10 @@ done
rm -rf %{buildroot}%{sdkdir}/examples
%endif
# Due to RPM limitations delete the backwards compatibility symlinks
rm -f %{buildroot}%{sdkdir}/mk/exec-env/bsdapp
rm -f %{buildroot}%{sdkdir}/mk/exec-env/linuxapp
# Setup RTE_SDK environment as expected by apps etc
mkdir -p %{buildroot}/%{_sysconfdir}/profile.d
cat << EOF > %{buildroot}/%{_sysconfdir}/profile.d/dpdk-sdk-%{_arch}.sh
@ -280,15 +254,9 @@ sed -i -e 's:-%{machine_tmpl}-:-%{machine}-:g' %{buildroot}/%{_sysconfdir}/profi
# BSD
%doc README MAINTAINERS
%{_bindir}/testpmd
%{_bindir}/dpdk-procinfo
%{_bindir}/dpdk-pdump
%dir %{pmddir}
%{_libdir}/*.so.*
%{pmddir}/*.so.*
%ifarch x86_64
%dir %{pmddir}-glue
%{pmddir}-glue/*.so.*
%endif
%files doc
#BSD
@ -310,8 +278,6 @@ sed -i -e 's:-%{machine_tmpl}-:-%{machine}-:g' %{buildroot}/%{_sysconfdir}/profi
%{_libdir}/*.so
%if %{with examples}
%files examples
%exclude %{_bindir}/dpdk-procinfo
%exclude %{_bindir}/dpdk-pdump
%{_bindir}/dpdk-*
%doc %{sdkdir}/examples/
%endif
@ -323,6 +289,31 @@ sed -i -e 's:-%{machine_tmpl}-:-%{machine}-:g' %{buildroot}/%{_sysconfdir}/profi
%endif
%changelog
* Wed May 20 2020 Timothy Redaelli <tredaelli@redhat.com> - 19.11.2-1
- Rebase DPDK to 19.11.2 (#1836830, #1837024, #1837030, #1837022)
* Fri Apr 17 2020 Timothy Redaelli <tredaelli@redhat.com> - 19.11.1-1
- Rebase DPDK to 19.11.1 (#1824905)
- Remove dpdk-pmdinfo.py (#1801361)
- Add Requires: rdma-core-devel libmnl-devel on x86_64 for dpdk-devel (#1813252)
* Thu Feb 20 2020 Timothy Redaelli <tredaelli@redhat.com> - 19.11-4
- Remove MLX{4,5} glue libraries since RHEL 8 ships the correct libibverbs
library. (#1805140)
* Mon Feb 17 2020 Timothy Redaelli <tredaelli@redhat.com> - 19.11-3
- Remove /usr/share/dpdk/mk/exec-env/{bsd,linux}app symlinks (#1773889)
* Thu Feb 13 2020 Timothy Redaelli <tredaelli@redhat.com> - 19.11-2
- Add pretrans to handle /usr/share/dpdk/mk/exec-env/{bsd,linux}app (#1773889)
* Thu Nov 28 2019 David Marchand <david.marchand@redhat.com> - 19.11-1
- Rebase to 19.11 (#1773889)
- Remove dpdk-pdump (#1779229)
* Mon Nov 04 2019 Timothy Redaelli <tredaelli@redhat.com> - 18.11.2-4
- Pass the correct LDFLAGS to host apps (dpdk-pmdinfogen) too (#1755538)
* Mon Sep 16 2019 Jens Freimann <jfreimann@redhat.com> - 18.11.2-3
- Add fix for wrong pointer calculation to fix Covscan issue
- https://cov01.lab.eng.brq.redhat.com/covscanhub/task/135452/log/added.html
@ -538,7 +529,7 @@ sed -i -e 's:-%{machine_tmpl}-:-%{machine}-:g' %{buildroot}/%{_sysconfdir}/profi
- New snapshot
- Add spec option for enabling vhost-user instead of vhost-cuse
- Build requires fuse-devel only with vhost-cuse
- Add virtual provide for vhost user/cuse tracking
- Add virtual provide for vhost user/cuse tracking
* Fri Mar 27 2015 Panu Matilainen <pmatilai@redhat.com> - 2.0.0-0.2038.git91a8743e.3
- Disable vhost-user for now to get vhost-cuse support, argh.
@ -695,7 +686,7 @@ sed -i -e 's:-%{machine_tmpl}-:-%{machine}-:g' %{buildroot}/%{_sysconfdir}/profi
- Remove ix86 from ExclusiveArch -- it does not build with above changes
* Thu Jul 10 2014 - Neil Horman <nhorman@tuxdriver.com> - 1.7.0-1.0
- Update source to official 1.7.0 release
- Update source to official 1.7.0 release
* Thu Jul 03 2014 - Neil Horman <nhorman@tuxdriver.com>
- Fixing up release numbering