Compare commits

...

7 Commits

Author SHA1 Message Date
eabdullin d00e656bc5 import CS dpdk-23.11-1.el8 2024-05-22 10:42:15 +00:00
CentOS Sources b13a48cb43 import dpdk-21.11-3.el8 2023-05-16 07:00:31 +00:00
CentOS Sources b3fb893773 import dpdk-21.11-2.el8_7 2023-01-16 09:02:35 +00:00
CentOS Sources 1cde36c233 import dpdk-21.11-1.el8 2022-05-10 07:22:17 +00:00
CentOS Sources 2b29899c87 import dpdk-20.11-3.el8 2021-09-09 16:11:34 +00:00
CentOS Sources 4dd2700a76 import dpdk-19.11.3-1.el8 2021-09-09 16:11:31 +00:00
CentOS Sources 36880af702 import dpdk-19.11-4.el8 2021-09-09 16:11:28 +00:00
30 changed files with 218 additions and 5206 deletions

View File

@@ -1 +1,2 @@
6e04c3e3a82f91ebe0360b8067df59e2b774924d SOURCES/dpdk-18.11.2.tar.xz
061198752d3d8b64d33113b7c8c1e272c973403d SOURCES/dpdk-23.11.tar.xz
3cc45b133677fbff08e89e65a2120be52ebb27a5 SOURCES/pyelftools-0.27.tar.gz

3
.gitignore vendored
View File

@@ -1 +1,2 @@
SOURCES/dpdk-18.11.2.tar.xz
SOURCES/dpdk-23.11.tar.xz
SOURCES/pyelftools-0.27.tar.gz

View File

@@ -1,102 +0,0 @@
From 93f21370ca38ae61dc2d938adf569f6668381c32 Mon Sep 17 00:00:00 2001
From: Jens Freimann <jfreimann@redhat.com>
Date: Mon, 17 Dec 2018 22:31:30 +0100
Subject: [PATCH 01/18] net/virtio: add packed virtqueue defines
[ upstream commit 4c3f5822eb21476fbbd807a7c40584c1090695e5 ]
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit 4c3f5822eb21476fbbd807a7c40584c1090695e5)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_pci.h | 1 +
drivers/net/virtio/virtio_ring.h | 30 ++++++++++++++++++++++++++++++
drivers/net/virtio/virtqueue.h | 6 ++++++
3 files changed, 37 insertions(+)
diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h
index e961a58ca..4c975a531 100644
--- a/drivers/net/virtio/virtio_pci.h
+++ b/drivers/net/virtio/virtio_pci.h
@@ -113,6 +113,7 @@ struct virtnet_ctl;
#define VIRTIO_F_VERSION_1 32
#define VIRTIO_F_IOMMU_PLATFORM 33
+#define VIRTIO_F_RING_PACKED 34
/*
* Some VirtIO feature bits (currently bits 28 through 31) are
diff --git a/drivers/net/virtio/virtio_ring.h b/drivers/net/virtio/virtio_ring.h
index 9e3c2a015..464449074 100644
--- a/drivers/net/virtio/virtio_ring.h
+++ b/drivers/net/virtio/virtio_ring.h
@@ -15,6 +15,10 @@
#define VRING_DESC_F_WRITE 2
/* This means the buffer contains a list of buffer descriptors. */
#define VRING_DESC_F_INDIRECT 4
+/* This flag means the descriptor was made available by the driver */
+#define VRING_DESC_F_AVAIL(b) ((uint16_t)(b) << 7)
+/* This flag means the descriptor was used by the device */
+#define VRING_DESC_F_USED(b) ((uint16_t)(b) << 15)
/* The Host uses this in used->flags to advise the Guest: don't kick me
* when you add a buffer. It's unreliable, so it's simply an
@@ -54,6 +58,32 @@ struct vring_used {
struct vring_used_elem ring[0];
};
+/* For support of packed virtqueues in Virtio 1.1 the format of descriptors
+ * looks like this.
+ */
+struct vring_packed_desc {
+ uint64_t addr;
+ uint32_t len;
+ uint16_t id;
+ uint16_t flags;
+};
+
+#define RING_EVENT_FLAGS_ENABLE 0x0
+#define RING_EVENT_FLAGS_DISABLE 0x1
+#define RING_EVENT_FLAGS_DESC 0x2
+struct vring_packed_desc_event {
+ uint16_t desc_event_off_wrap;
+ uint16_t desc_event_flags;
+};
+
+struct vring_packed {
+ unsigned int num;
+ struct vring_packed_desc *desc_packed;
+ struct vring_packed_desc_event *driver_event;
+ struct vring_packed_desc_event *device_event;
+
+};
+
struct vring {
unsigned int num;
struct vring_desc *desc;
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 2e2abf15b..1525c7d10 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -161,11 +161,17 @@ struct virtio_pmd_ctrl {
struct vq_desc_extra {
void *cookie;
uint16_t ndescs;
+ uint16_t next;
};
struct virtqueue {
struct virtio_hw *hw; /**< virtio_hw structure pointer. */
struct vring vq_ring; /**< vring keeping desc, used and avail */
+ struct vring_packed ring_packed; /**< vring keeping descs */
+ bool avail_wrap_counter;
+ bool used_wrap_counter;
+ uint16_t event_flags_shadow;
+ uint16_t avail_used_flags;
/**
* Last consumed descriptor in the used table,
* trails vq_ring.used->idx.
--
2.21.0

View File

@@ -1,78 +0,0 @@
From 8093f82b3e52efe012e46c429b7af4e82492f71c Mon Sep 17 00:00:00 2001
From: Maxime Coquelin <maxime.coquelin@redhat.com>
Date: Tue, 27 Nov 2018 11:54:27 +0100
Subject: [PATCH] net/virtio: allocate vrings on device NUMA node
[ upstream commit 4a5140ab17d29e77eefa47b5cb514238e8e0c132 ]
When a guest is spanned on multiple NUMA nodes and
multiple Virtio devices are spanned onto these nodes,
we expect that their ring memory is allocated in the
right memory node.
Otherwise, vCPUs from node A may be polling Virtio rings
allocated on node B, which would increase QPI bandwidth
and impact performance.
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Reviewed-by: David Marchand <david.marchand@redhat.com>
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
drivers/net/virtio/virtio_ethdev.c | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 2ba66d291..cb2b2e0bf 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -335,8 +335,10 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
void *sw_ring = NULL;
int queue_type = virtio_get_queue_type(hw, vtpci_queue_idx);
int ret;
+ int numa_node = dev->device->numa_node;
- PMD_INIT_LOG(DEBUG, "setting up queue: %u", vtpci_queue_idx);
+ PMD_INIT_LOG(INFO, "setting up queue: %u on NUMA node %d",
+ vtpci_queue_idx, numa_node);
/*
* Read the virtqueue size from the Queue Size field
@@ -372,7 +374,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
}
vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
- SOCKET_ID_ANY);
+ numa_node);
if (vq == NULL) {
PMD_INIT_LOG(ERR, "can not allocate vq");
return -ENOMEM;
@@ -392,7 +394,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
size, vq->vq_ring_size);
mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
- SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG,
+ numa_node, RTE_MEMZONE_IOVA_CONTIG,
VIRTIO_PCI_VRING_ALIGN);
if (mz == NULL) {
if (rte_errno == EEXIST)
@@ -418,7 +420,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_vq%d_hdr",
dev->data->port_id, vtpci_queue_idx);
hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz,
- SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG,
+ numa_node, RTE_MEMZONE_IOVA_CONTIG,
RTE_CACHE_LINE_SIZE);
if (hdr_mz == NULL) {
if (rte_errno == EEXIST)
@@ -435,7 +437,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
sizeof(vq->sw_ring[0]);
sw_ring = rte_zmalloc_socket("sw_ring", sz_sw,
- RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+ RTE_CACHE_LINE_SIZE, numa_node);
if (!sw_ring) {
PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
ret = -ENOMEM;
--
2.20.1

View File

@@ -1,141 +0,0 @@
From 652a2e3a1ba0db81ae1814e8c3cb989e9e89c4e0 Mon Sep 17 00:00:00 2001
From: Jens Freimann <jfreimann@redhat.com>
Date: Mon, 17 Dec 2018 22:31:31 +0100
Subject: [PATCH 02/18] net/virtio: add packed virtqueue helpers
[ upstream commit e9f4feb7e6225f671b59375aff44b9d576121577 ]
Add helper functions to set/clear and check descriptor flags.
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit e9f4feb7e6225f671b59375aff44b9d576121577)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_pci.h | 6 +++
drivers/net/virtio/virtqueue.h | 72 ++++++++++++++++++++++++++++++++-
2 files changed, 76 insertions(+), 2 deletions(-)
diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h
index 4c975a531..b22b62dad 100644
--- a/drivers/net/virtio/virtio_pci.h
+++ b/drivers/net/virtio/virtio_pci.h
@@ -315,6 +315,12 @@ vtpci_with_feature(struct virtio_hw *hw, uint64_t bit)
return (hw->guest_features & (1ULL << bit)) != 0;
}
+static inline int
+vtpci_packed_queue(struct virtio_hw *hw)
+{
+ return vtpci_with_feature(hw, VIRTIO_F_RING_PACKED);
+}
+
/*
* Function declaration from virtio_pci.c
*/
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 1525c7d10..c32812427 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -251,6 +251,31 @@ struct virtio_tx_region {
__attribute__((__aligned__(16)));
};
+static inline int
+desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
+{
+ uint16_t used, avail, flags;
+
+ flags = desc->flags;
+ used = !!(flags & VRING_DESC_F_USED(1));
+ avail = !!(flags & VRING_DESC_F_AVAIL(1));
+
+ return avail == used && used == vq->used_wrap_counter;
+}
+
+
+static inline void
+vring_desc_init_packed(struct virtqueue *vq, int n)
+{
+ int i;
+ for (i = 0; i < n - 1; i++) {
+ vq->ring_packed.desc_packed[i].id = i;
+ vq->vq_descx[i].next = i + 1;
+ }
+ vq->ring_packed.desc_packed[i].id = i;
+ vq->vq_descx[i].next = VQ_RING_DESC_CHAIN_END;
+}
+
/* Chain all the descriptors in the ring with an END */
static inline void
vring_desc_init(struct vring_desc *dp, uint16_t n)
@@ -262,13 +287,53 @@ vring_desc_init(struct vring_desc *dp, uint16_t n)
dp[i].next = VQ_RING_DESC_CHAIN_END;
}
+/**
+ * Tell the backend not to interrupt us.
+ */
+static inline void
+virtqueue_disable_intr_packed(struct virtqueue *vq)
+{
+ uint16_t *event_flags = &vq->ring_packed.driver_event->desc_event_flags;
+
+ *event_flags = RING_EVENT_FLAGS_DISABLE;
+}
+
+
/**
* Tell the backend not to interrupt us.
*/
static inline void
virtqueue_disable_intr(struct virtqueue *vq)
{
- vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+ if (vtpci_packed_queue(vq->hw))
+ virtqueue_disable_intr_packed(vq);
+ else
+ vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+}
+
+/**
+ * Tell the backend to interrupt. Implementation for packed virtqueues.
+ */
+static inline void
+virtqueue_enable_intr_packed(struct virtqueue *vq)
+{
+ uint16_t *event_flags = &vq->ring_packed.driver_event->desc_event_flags;
+
+
+ if (vq->event_flags_shadow == RING_EVENT_FLAGS_DISABLE) {
+ virtio_wmb();
+ vq->event_flags_shadow = RING_EVENT_FLAGS_ENABLE;
+ *event_flags = vq->event_flags_shadow;
+ }
+}
+
+/**
+ * Tell the backend to interrupt. Implementation for split virtqueues.
+ */
+static inline void
+virtqueue_enable_intr_split(struct virtqueue *vq)
+{
+ vq->vq_ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT);
}
/**
@@ -277,7 +342,10 @@ virtqueue_disable_intr(struct virtqueue *vq)
static inline void
virtqueue_enable_intr(struct virtqueue *vq)
{
- vq->vq_ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT);
+ if (vtpci_packed_queue(vq->hw))
+ virtqueue_enable_intr_packed(vq);
+ else
+ virtqueue_enable_intr_split(vq);
}
/**
--
2.21.0

View File

@@ -1,175 +0,0 @@
From 4e832cad1879f87a694e2f78b8718f986f7c76e2 Mon Sep 17 00:00:00 2001
From: Jens Freimann <jfreimann@redhat.com>
Date: Mon, 17 Dec 2018 22:31:32 +0100
Subject: [PATCH 03/18] net/virtio: vring init for packed queues
[ upstream commit f803734b0f2e6c556d9bf7fe8f11638429e3a00f ]
Add and initialize descriptor data structures.
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit f803734b0f2e6c556d9bf7fe8f11638429e3a00f)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_ethdev.c | 32 ++++++++++++++++++++----------
drivers/net/virtio/virtio_ring.h | 28 ++++++++++++++++++++++----
drivers/net/virtio/virtqueue.h | 2 +-
3 files changed, 46 insertions(+), 16 deletions(-)
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 2ba66d291..ee52e3cdb 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -299,20 +299,22 @@ virtio_init_vring(struct virtqueue *vq)
PMD_INIT_FUNC_TRACE();
- /*
- * Reinitialise since virtio port might have been stopped and restarted
- */
memset(ring_mem, 0, vq->vq_ring_size);
- vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
+
vq->vq_used_cons_idx = 0;
vq->vq_desc_head_idx = 0;
vq->vq_avail_idx = 0;
vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
vq->vq_free_cnt = vq->vq_nentries;
memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
-
- vring_desc_init(vr->desc, size);
-
+ if (vtpci_packed_queue(vq->hw)) {
+ vring_init_packed(&vq->ring_packed, ring_mem,
+ VIRTIO_PCI_VRING_ALIGN, size);
+ vring_desc_init_packed(vq, size);
+ } else {
+ vring_init_split(vr, ring_mem, VIRTIO_PCI_VRING_ALIGN, size);
+ vring_desc_init_split(vr->desc, size);
+ }
/*
* Disable device(host) interrupting guest
*/
@@ -382,11 +384,16 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
vq->hw = hw;
vq->vq_queue_index = vtpci_queue_idx;
vq->vq_nentries = vq_size;
+ vq->event_flags_shadow = 0;
+ if (vtpci_packed_queue(hw)) {
+ vq->avail_wrap_counter = 1;
+ vq->used_wrap_counter = 1;
+ }
/*
* Reserve a memzone for vring elements
*/
- size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
+ size = vring_size(hw, vq_size, VIRTIO_PCI_VRING_ALIGN);
vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d",
size, vq->vq_ring_size);
@@ -489,7 +496,8 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
for (i = 0; i < vq_size; i++) {
struct vring_desc *start_dp = txr[i].tx_indir;
- vring_desc_init(start_dp, RTE_DIM(txr[i].tx_indir));
+ vring_desc_init_split(start_dp,
+ RTE_DIM(txr[i].tx_indir));
/* first indirect descriptor is always the tx header */
start_dp->addr = txvq->virtio_net_hdr_mem
@@ -1486,7 +1494,8 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
/* Setting up rx_header size for the device */
if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
- vtpci_with_feature(hw, VIRTIO_F_VERSION_1))
+ vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
+ vtpci_with_feature(hw, VIRTIO_F_RING_PACKED))
hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
else
hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
@@ -1906,7 +1915,8 @@ virtio_dev_configure(struct rte_eth_dev *dev)
if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
hw->use_inorder_tx = 1;
- if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) &&
+ !vtpci_packed_queue(hw)) {
hw->use_inorder_rx = 1;
hw->use_simple_rx = 0;
} else {
diff --git a/drivers/net/virtio/virtio_ring.h b/drivers/net/virtio/virtio_ring.h
index 464449074..1760823c6 100644
--- a/drivers/net/virtio/virtio_ring.h
+++ b/drivers/net/virtio/virtio_ring.h
@@ -125,10 +125,18 @@ struct vring {
#define vring_avail_event(vr) (*(uint16_t *)&(vr)->used->ring[(vr)->num])
static inline size_t
-vring_size(unsigned int num, unsigned long align)
+vring_size(struct virtio_hw *hw, unsigned int num, unsigned long align)
{
size_t size;
+ if (vtpci_packed_queue(hw)) {
+ size = num * sizeof(struct vring_packed_desc);
+ size += sizeof(struct vring_packed_desc_event);
+ size = RTE_ALIGN_CEIL(size, align);
+ size += sizeof(struct vring_packed_desc_event);
+ return size;
+ }
+
size = num * sizeof(struct vring_desc);
size += sizeof(struct vring_avail) + (num * sizeof(uint16_t));
size = RTE_ALIGN_CEIL(size, align);
@@ -136,10 +144,9 @@ vring_size(unsigned int num, unsigned long align)
(num * sizeof(struct vring_used_elem));
return size;
}
-
static inline void
-vring_init(struct vring *vr, unsigned int num, uint8_t *p,
- unsigned long align)
+vring_init_split(struct vring *vr, uint8_t *p, unsigned long align,
+ unsigned int num)
{
vr->num = num;
vr->desc = (struct vring_desc *) p;
@@ -149,6 +156,19 @@ vring_init(struct vring *vr, unsigned int num, uint8_t *p,
RTE_ALIGN_CEIL((uintptr_t)(&vr->avail->ring[num]), align);
}
+static inline void
+vring_init_packed(struct vring_packed *vr, uint8_t *p, unsigned long align,
+ unsigned int num)
+{
+ vr->num = num;
+ vr->desc_packed = (struct vring_packed_desc *)p;
+ vr->driver_event = (struct vring_packed_desc_event *)(p +
+ vr->num * sizeof(struct vring_packed_desc));
+ vr->device_event = (struct vring_packed_desc_event *)
+ RTE_ALIGN_CEIL((uintptr_t)(vr->driver_event +
+ sizeof(struct vring_packed_desc_event)), align);
+}
+
/*
* The following is used with VIRTIO_RING_F_EVENT_IDX.
* Assuming a given event_idx value from the other size, if we have
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index c32812427..d08ef9112 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -278,7 +278,7 @@ vring_desc_init_packed(struct virtqueue *vq, int n)
/* Chain all the descriptors in the ring with an END */
static inline void
-vring_desc_init(struct vring_desc *dp, uint16_t n)
+vring_desc_init_split(struct vring_desc *dp, uint16_t n)
{
uint16_t i;
--
2.21.0

View File

@@ -1,41 +0,0 @@
From 2dc70f1db67091cc3a9131d2093da464738b31d8 Mon Sep 17 00:00:00 2001
From: Jens Freimann <jfreimann@redhat.com>
Date: Mon, 17 Dec 2018 22:31:33 +0100
Subject: [PATCH 04/18] net/virtio: dump packed virtqueue data
[ upstream commit 56785a2d6fad987c025278909307db776df59bd9 ]
Add support to dump packed virtqueue data to the
VIRTQUEUE_DUMP() macro.
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit 56785a2d6fad987c025278909307db776df59bd9)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtqueue.h | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index d08ef9112..e9c35a553 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -434,6 +434,15 @@ virtqueue_notify(struct virtqueue *vq)
uint16_t used_idx, nused; \
used_idx = (vq)->vq_ring.used->idx; \
nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
+ if (vtpci_packed_queue((vq)->hw)) { \
+ PMD_INIT_LOG(DEBUG, \
+ "VQ: - size=%d; free=%d; used_cons_idx=%d; avail_idx=%d;" \
+ "VQ: - avail_wrap_counter=%d; used_wrap_counter=%d", \
+ (vq)->vq_nentries, (vq)->vq_free_cnt, (vq)->vq_used_cons_idx, \
+ (vq)->vq_avail_idx, (vq)->avail_wrap_counter, \
+ (vq)->used_wrap_counter); \
+ break; \
+ } \
PMD_INIT_LOG(DEBUG, \
"VQ: - size=%d; free=%d; used=%d; desc_head_idx=%d;" \
" avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \
--
2.21.0

View File

@@ -1,448 +0,0 @@
From 97ee69c836bfb08e674fd0f28d1fc7a14f2d4de0 Mon Sep 17 00:00:00 2001
From: Jens Freimann <jfreimann@redhat.com>
Date: Mon, 17 Dec 2018 22:31:34 +0100
Subject: [PATCH 05/18] net/virtio: implement Tx path for packed queues
[ upstream commit 892dc798fa9c24e6172b8bcecc9586f2f9a7a49e ]
This implements the transmit path for devices with
support for packed virtqueues.
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit 892dc798fa9c24e6172b8bcecc9586f2f9a7a49e)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_ethdev.c | 56 ++++---
drivers/net/virtio/virtio_ethdev.h | 2 +
drivers/net/virtio/virtio_rxtx.c | 236 ++++++++++++++++++++++++++++-
drivers/net/virtio/virtqueue.h | 20 ++-
4 files changed, 292 insertions(+), 22 deletions(-)
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index ee52e3cdb..6023d6f2c 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -388,6 +388,9 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
if (vtpci_packed_queue(hw)) {
vq->avail_wrap_counter = 1;
vq->used_wrap_counter = 1;
+ vq->avail_used_flags =
+ VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
+ VRING_DESC_F_USED(!vq->avail_wrap_counter);
}
/*
@@ -495,17 +498,26 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
memset(txr, 0, vq_size * sizeof(*txr));
for (i = 0; i < vq_size; i++) {
struct vring_desc *start_dp = txr[i].tx_indir;
-
- vring_desc_init_split(start_dp,
- RTE_DIM(txr[i].tx_indir));
+ struct vring_packed_desc *start_dp_packed =
+ txr[i].tx_indir_pq;
/* first indirect descriptor is always the tx header */
- start_dp->addr = txvq->virtio_net_hdr_mem
- + i * sizeof(*txr)
- + offsetof(struct virtio_tx_region, tx_hdr);
-
- start_dp->len = hw->vtnet_hdr_size;
- start_dp->flags = VRING_DESC_F_NEXT;
+ if (vtpci_packed_queue(hw)) {
+ start_dp_packed->addr = txvq->virtio_net_hdr_mem
+ + i * sizeof(*txr)
+ + offsetof(struct virtio_tx_region,
+ tx_hdr);
+ start_dp_packed->len = hw->vtnet_hdr_size;
+ } else {
+ vring_desc_init_split(start_dp,
+ RTE_DIM(txr[i].tx_indir));
+ start_dp->addr = txvq->virtio_net_hdr_mem
+ + i * sizeof(*txr)
+ + offsetof(struct virtio_tx_region,
+ tx_hdr);
+ start_dp->len = hw->vtnet_hdr_size;
+ start_dp->flags = VRING_DESC_F_NEXT;
+ }
}
}
@@ -1334,6 +1346,23 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
{
struct virtio_hw *hw = eth_dev->data->dev_private;
+ if (vtpci_packed_queue(hw)) {
+ PMD_INIT_LOG(INFO,
+ "virtio: using packed ring standard Tx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed;
+ } else {
+ if (hw->use_inorder_tx) {
+ PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->tx_pkt_burst = virtio_xmit_pkts_inorder;
+ } else {
+ PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->tx_pkt_burst = virtio_xmit_pkts;
+ }
+ }
+
if (hw->use_simple_rx) {
PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
eth_dev->data->port_id);
@@ -1354,15 +1383,6 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
eth_dev->rx_pkt_burst = &virtio_recv_pkts;
}
- if (hw->use_inorder_tx) {
- PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
- eth_dev->data->port_id);
- eth_dev->tx_pkt_burst = virtio_xmit_pkts_inorder;
- } else {
- PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u",
- eth_dev->data->port_id);
- eth_dev->tx_pkt_burst = virtio_xmit_pkts;
- }
}
/* Only support 1:1 queue/interrupt mapping so far.
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index e0f80e5a4..05d355180 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -82,6 +82,8 @@ uint16_t virtio_recv_mergeable_pkts_inorder(void *rx_queue,
uint16_t virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+uint16_t virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
uint16_t virtio_xmit_pkts_inorder(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index eb891433e..ab74917a8 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -88,6 +88,23 @@ vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
dp->next = VQ_RING_DESC_CHAIN_END;
}
+static void
+vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)
+{
+ struct vq_desc_extra *dxp;
+
+ dxp = &vq->vq_descx[id];
+ vq->vq_free_cnt += dxp->ndescs;
+
+ if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END)
+ vq->vq_desc_head_idx = id;
+ else
+ vq->vq_descx[vq->vq_desc_tail_idx].next = id;
+
+ vq->vq_desc_tail_idx = id;
+ dxp->next = VQ_RING_DESC_CHAIN_END;
+}
+
static uint16_t
virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
uint32_t *len, uint16_t num)
@@ -165,6 +182,33 @@ virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
#endif
/* Cleanup from completed transmits. */
+static void
+virtio_xmit_cleanup_packed(struct virtqueue *vq, int num)
+{
+ uint16_t used_idx, id;
+ uint16_t size = vq->vq_nentries;
+ struct vring_packed_desc *desc = vq->ring_packed.desc_packed;
+ struct vq_desc_extra *dxp;
+
+ used_idx = vq->vq_used_cons_idx;
+ while (num-- && desc_is_used(&desc[used_idx], vq)) {
+ used_idx = vq->vq_used_cons_idx;
+ id = desc[used_idx].id;
+ dxp = &vq->vq_descx[id];
+ vq->vq_used_cons_idx += dxp->ndescs;
+ if (vq->vq_used_cons_idx >= size) {
+ vq->vq_used_cons_idx -= size;
+ vq->used_wrap_counter ^= 1;
+ }
+ vq_ring_free_id_packed(vq, id);
+ if (dxp->cookie != NULL) {
+ rte_pktmbuf_free(dxp->cookie);
+ dxp->cookie = NULL;
+ }
+ used_idx = vq->vq_used_cons_idx;
+ }
+}
+
static void
virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
{
@@ -456,6 +500,107 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);
}
+static inline void
+virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
+ uint16_t needed, int can_push)
+{
+ struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
+ struct vq_desc_extra *dxp;
+ struct virtqueue *vq = txvq->vq;
+ struct vring_packed_desc *start_dp, *head_dp;
+ uint16_t idx, id, head_idx, head_flags;
+ uint16_t head_size = vq->hw->vtnet_hdr_size;
+ struct virtio_net_hdr *hdr;
+ uint16_t prev;
+
+ id = vq->vq_desc_head_idx;
+
+ dxp = &vq->vq_descx[id];
+ dxp->ndescs = needed;
+ dxp->cookie = cookie;
+
+ head_idx = vq->vq_avail_idx;
+ idx = head_idx;
+ prev = head_idx;
+ start_dp = vq->ring_packed.desc_packed;
+
+ head_dp = &vq->ring_packed.desc_packed[idx];
+ head_flags = cookie->next ? VRING_DESC_F_NEXT : 0;
+ head_flags |= vq->avail_used_flags;
+
+ if (can_push) {
+ /* prepend cannot fail, checked by caller */
+ hdr = (struct virtio_net_hdr *)
+ rte_pktmbuf_prepend(cookie, head_size);
+ /* rte_pktmbuf_prepend() counts the hdr size to the pkt length,
+ * which is wrong. Below subtract restores correct pkt size.
+ */
+ cookie->pkt_len -= head_size;
+
+ /* if offload disabled, it is not zeroed below, do it now */
+ if (!vq->hw->has_tx_offload) {
+ ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
+ }
+ } else {
+ /* setup first tx ring slot to point to header
+ * stored in reserved region.
+ */
+ start_dp[idx].addr = txvq->virtio_net_hdr_mem +
+ RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
+ start_dp[idx].len = vq->hw->vtnet_hdr_size;
+ hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
+ idx++;
+ if (idx >= vq->vq_nentries) {
+ idx -= vq->vq_nentries;
+ vq->avail_wrap_counter ^= 1;
+ vq->avail_used_flags =
+ VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
+ VRING_DESC_F_USED(!vq->avail_wrap_counter);
+ }
+ }
+
+ virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
+
+ do {
+ uint16_t flags;
+
+ start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
+ start_dp[idx].len = cookie->data_len;
+ if (likely(idx != head_idx)) {
+ flags = cookie->next ? VRING_DESC_F_NEXT : 0;
+ flags |= vq->avail_used_flags;
+ start_dp[idx].flags = flags;
+ }
+ prev = idx;
+ idx++;
+ if (idx >= vq->vq_nentries) {
+ idx -= vq->vq_nentries;
+ vq->avail_wrap_counter ^= 1;
+ vq->avail_used_flags =
+ VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
+ VRING_DESC_F_USED(!vq->avail_wrap_counter);
+ }
+ } while ((cookie = cookie->next) != NULL);
+
+ start_dp[prev].id = id;
+
+ vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
+
+ vq->vq_desc_head_idx = dxp->next;
+ if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
+ vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
+
+ vq->vq_avail_idx = idx;
+
+ rte_smp_wmb();
+ head_dp->flags = head_flags;
+}
+
static inline void
virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
uint16_t needed, int use_indirect, int can_push,
@@ -733,8 +878,10 @@ virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
- if (hw->use_inorder_tx)
- vq->vq_ring.desc[vq->vq_nentries - 1].next = 0;
+ if (!vtpci_packed_queue(hw)) {
+ if (hw->use_inorder_tx)
+ vq->vq_ring.desc[vq->vq_nentries - 1].next = 0;
+ }
VIRTQUEUE_DUMP(vq);
@@ -1346,6 +1493,91 @@ virtio_recv_mergeable_pkts(void *rx_queue,
return nb_rx;
}
+uint16_t
+virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct virtnet_tx *txvq = tx_queue;
+ struct virtqueue *vq = txvq->vq;
+ struct virtio_hw *hw = vq->hw;
+ uint16_t hdr_size = hw->vtnet_hdr_size;
+ uint16_t nb_tx = 0;
+ int error;
+
+ if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
+ return nb_tx;
+
+ if (unlikely(nb_pkts < 1))
+ return nb_pkts;
+
+ PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
+
+ if (nb_pkts > vq->vq_free_cnt)
+ virtio_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt);
+
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ struct rte_mbuf *txm = tx_pkts[nb_tx];
+ int can_push = 0, slots, need;
+
+ /* Do VLAN tag insertion */
+ if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
+ error = rte_vlan_insert(&txm);
+ if (unlikely(error)) {
+ rte_pktmbuf_free(txm);
+ continue;
+ }
+ }
+
+ /* optimize ring usage */
+ if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
+ vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
+ rte_mbuf_refcnt_read(txm) == 1 &&
+ RTE_MBUF_DIRECT(txm) &&
+ txm->nb_segs == 1 &&
+ rte_pktmbuf_headroom(txm) >= hdr_size &&
+ rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
+ __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
+ can_push = 1;
+
+ /* How many main ring entries are needed to this Tx?
+ * any_layout => number of segments
+ * default => number of segments + 1
+ */
+ slots = txm->nb_segs + !can_push;
+ need = slots - vq->vq_free_cnt;
+
+ /* Positive value indicates it need free vring descriptors */
+ if (unlikely(need > 0)) {
+ virtio_rmb();
+ need = RTE_MIN(need, (int)nb_pkts);
+ virtio_xmit_cleanup_packed(vq, need);
+ need = slots - vq->vq_free_cnt;
+ if (unlikely(need > 0)) {
+ PMD_TX_LOG(ERR,
+ "No free tx descriptors to transmit");
+ break;
+ }
+ }
+
+ /* Enqueue Packet buffers */
+ virtqueue_enqueue_xmit_packed(txvq, txm, slots, can_push);
+
+ txvq->stats.bytes += txm->pkt_len;
+ virtio_update_packet_stats(&txvq->stats, txm);
+ }
+
+ txvq->stats.packets += nb_tx;
+
+ if (likely(nb_tx)) {
+ if (unlikely(virtqueue_kick_prepare_packed(vq))) {
+ virtqueue_notify(vq);
+ PMD_TX_LOG(DEBUG, "Notified backend after xmit");
+ }
+ }
+
+ return nb_tx;
+}
+
uint16_t
virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index e9c35a553..b142fd488 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -247,8 +247,12 @@ struct virtio_net_hdr_mrg_rxbuf {
#define VIRTIO_MAX_TX_INDIRECT 8
struct virtio_tx_region {
struct virtio_net_hdr_mrg_rxbuf tx_hdr;
- struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT]
- __attribute__((__aligned__(16)));
+ union {
+ struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT]
+ __attribute__((__aligned__(16)));
+ struct vring_packed_desc tx_indir_pq[VIRTIO_MAX_TX_INDIRECT]
+ __attribute__((__aligned__(16)));
+ };
};
static inline int
@@ -380,6 +384,7 @@ virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx);
+void vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t used_idx);
void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
uint16_t num);
@@ -418,6 +423,17 @@ virtqueue_kick_prepare(struct virtqueue *vq)
return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
}
+static inline int
+virtqueue_kick_prepare_packed(struct virtqueue *vq)
+{
+ uint16_t flags;
+
+ virtio_mb();
+ flags = vq->ring_packed.device_event->desc_event_flags;
+
+ return flags != RING_EVENT_FLAGS_DISABLE;
+}
+
static inline void
virtqueue_notify(struct virtqueue *vq)
{
--
2.21.0

View File

@ -1,613 +0,0 @@
From a1168f29a051eba2344407d72267b5d5f648d80c Mon Sep 17 00:00:00 2001
From: Jens Freimann <jfreimann@redhat.com>
Date: Mon, 17 Dec 2018 22:31:35 +0100
Subject: [PATCH 06/18] net/virtio: implement Rx path for packed queues
[ upstream commit a76290c8f1cf9c4774c23592921302a04a90bded ]
Implement the receive part.
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit a76290c8f1cf9c4774c23592921302a04a90bded)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_ethdev.c | 56 +++--
drivers/net/virtio/virtio_ethdev.h | 5 +
drivers/net/virtio/virtio_rxtx.c | 375 ++++++++++++++++++++++++++++-
drivers/net/virtio/virtqueue.c | 43 +++-
4 files changed, 457 insertions(+), 22 deletions(-)
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 6023d6f2c..4ef1da393 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -1363,24 +1363,40 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
}
}
- if (hw->use_simple_rx) {
- PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
- eth_dev->data->port_id);
- eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
- } else if (hw->use_inorder_rx) {
- PMD_INIT_LOG(INFO,
- "virtio: using inorder mergeable buffer Rx path on port %u",
- eth_dev->data->port_id);
- eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts_inorder;
- } else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
- PMD_INIT_LOG(INFO,
- "virtio: using mergeable buffer Rx path on port %u",
- eth_dev->data->port_id);
- eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
+ if (vtpci_packed_queue(hw)) {
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ PMD_INIT_LOG(INFO,
+ "virtio: using packed ring mergeable buffer Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst =
+ &virtio_recv_mergeable_pkts_packed;
+ } else {
+ PMD_INIT_LOG(INFO,
+ "virtio: using packed ring standard Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
+ }
} else {
- PMD_INIT_LOG(INFO, "virtio: using standard Rx path on port %u",
- eth_dev->data->port_id);
- eth_dev->rx_pkt_burst = &virtio_recv_pkts;
+ if (hw->use_simple_rx) {
+ PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
+ } else if (hw->use_inorder_rx) {
+ PMD_INIT_LOG(INFO,
+ "virtio: using inorder mergeable buffer Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst =
+ &virtio_recv_mergeable_pkts_inorder;
+ } else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ PMD_INIT_LOG(INFO,
+ "virtio: using mergeable buffer Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
+ } else {
+ PMD_INIT_LOG(INFO, "virtio: using standard Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst = &virtio_recv_pkts;
+ }
}
}
@@ -1944,6 +1960,12 @@ virtio_dev_configure(struct rte_eth_dev *dev)
}
}
+ if (vtpci_packed_queue(hw)) {
+ hw->use_simple_rx = 0;
+ hw->use_inorder_rx = 0;
+ hw->use_inorder_tx = 0;
+ }
+
#if defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
hw->use_simple_rx = 0;
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index 05d355180..88b8c42a3 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -73,10 +73,15 @@ int virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+uint16_t virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
uint16_t virtio_recv_mergeable_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+uint16_t virtio_recv_mergeable_pkts_packed(void *rx_queue,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
uint16_t virtio_recv_mergeable_pkts_inorder(void *rx_queue,
struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index ab74917a8..0bcf3b08a 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -31,6 +31,7 @@
#include "virtqueue.h"
#include "virtio_rxtx.h"
#include "virtio_rxtx_simple.h"
+#include "virtio_ring.h"
#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
@@ -105,6 +106,47 @@ vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)
dxp->next = VQ_RING_DESC_CHAIN_END;
}
+static uint16_t
+virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
+ struct rte_mbuf **rx_pkts,
+ uint32_t *len,
+ uint16_t num)
+{
+ struct rte_mbuf *cookie;
+ uint16_t used_idx;
+ uint16_t id;
+ struct vring_packed_desc *desc;
+ uint16_t i;
+
+ desc = vq->ring_packed.desc_packed;
+
+ for (i = 0; i < num; i++) {
+ used_idx = vq->vq_used_cons_idx;
+ if (!desc_is_used(&desc[used_idx], vq))
+ return i;
+ len[i] = desc[used_idx].len;
+ id = desc[used_idx].id;
+ cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
+ if (unlikely(cookie == NULL)) {
+ PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
+ vq->vq_used_cons_idx);
+ break;
+ }
+ rte_prefetch0(cookie);
+ rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
+ rx_pkts[i] = cookie;
+
+ vq->vq_free_cnt++;
+ vq->vq_used_cons_idx++;
+ if (vq->vq_used_cons_idx >= vq->vq_nentries) {
+ vq->vq_used_cons_idx -= vq->vq_nentries;
+ vq->used_wrap_counter ^= 1;
+ }
+ }
+
+ return i;
+}
+
static uint16_t
virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
uint32_t *len, uint16_t num)
@@ -350,6 +392,51 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
return 0;
}
+static inline int
+virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
+ struct rte_mbuf **cookie, uint16_t num)
+{
+ struct vring_packed_desc *start_dp = vq->ring_packed.desc_packed;
+ uint16_t flags = VRING_DESC_F_WRITE | vq->avail_used_flags;
+ struct virtio_hw *hw = vq->hw;
+ struct vq_desc_extra *dxp;
+ uint16_t idx;
+ int i;
+
+ if (unlikely(vq->vq_free_cnt == 0))
+ return -ENOSPC;
+ if (unlikely(vq->vq_free_cnt < num))
+ return -EMSGSIZE;
+
+ for (i = 0; i < num; i++) {
+ idx = vq->vq_avail_idx;
+ dxp = &vq->vq_descx[idx];
+ dxp->cookie = (void *)cookie[i];
+ dxp->ndescs = 1;
+
+ start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookie[i], vq) +
+ RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
+ start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM
+ + hw->vtnet_hdr_size;
+
+ vq->vq_desc_head_idx = dxp->next;
+ if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
+ vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
+ rte_smp_wmb();
+ start_dp[idx].flags = flags;
+ if (++vq->vq_avail_idx >= vq->vq_nentries) {
+ vq->vq_avail_idx -= vq->vq_nentries;
+ vq->avail_wrap_counter ^= 1;
+ vq->avail_used_flags =
+ VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
+ VRING_DESC_F_USED(!vq->avail_wrap_counter);
+ flags = VRING_DESC_F_WRITE | vq->avail_used_flags;
+ }
+ }
+ vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
+ return 0;
+}
+
/* When doing TSO, the IP length is not included in the pseudo header
* checksum of the packet given to the PMD, but for virtio it is
* expected.
@@ -801,7 +888,11 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
break;
/* Enqueue allocated buffers */
- error = virtqueue_enqueue_recv_refill(vq, m);
+ if (vtpci_packed_queue(vq->hw))
+ error = virtqueue_enqueue_recv_refill_packed(vq,
+ &m, 1);
+ else
+ error = virtqueue_enqueue_recv_refill(vq, m);
if (error) {
rte_pktmbuf_free(m);
break;
@@ -809,7 +900,8 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
nbufs++;
}
- vq_update_avail_idx(vq);
+ if (!vtpci_packed_queue(vq->hw))
+ vq_update_avail_idx(vq);
}
PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
@@ -896,7 +988,10 @@ virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
* Requeue the discarded mbuf. This should always be
* successful since it was just dequeued.
*/
- error = virtqueue_enqueue_recv_refill(vq, m);
+ if (vtpci_packed_queue(vq->hw))
+ error = virtqueue_enqueue_recv_refill_packed(vq, &m, 1);
+ else
+ error = virtqueue_enqueue_recv_refill(vq, m);
if (unlikely(error)) {
RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
@@ -1135,6 +1230,104 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
return nb_rx;
}
+uint16_t
+virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct virtnet_rx *rxvq = rx_queue;
+ struct virtqueue *vq = rxvq->vq;
+ struct virtio_hw *hw = vq->hw;
+ struct rte_mbuf *rxm, *new_mbuf;
+ uint16_t num, nb_rx;
+ uint32_t len[VIRTIO_MBUF_BURST_SZ];
+ struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
+ int error;
+ uint32_t i, nb_enqueued;
+ uint32_t hdr_size;
+ struct virtio_net_hdr *hdr;
+
+ nb_rx = 0;
+ if (unlikely(hw->started == 0))
+ return nb_rx;
+
+ num = RTE_MIN(VIRTIO_MBUF_BURST_SZ, nb_pkts);
+ if (likely(num > DESC_PER_CACHELINE))
+ num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
+
+ num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
+ PMD_RX_LOG(DEBUG, "dequeue:%d", num);
+
+ nb_enqueued = 0;
+ hdr_size = hw->vtnet_hdr_size;
+
+ for (i = 0; i < num; i++) {
+ rxm = rcv_pkts[i];
+
+ PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
+
+ if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ PMD_RX_LOG(ERR, "Packet drop");
+ nb_enqueued++;
+ virtio_discard_rxbuf(vq, rxm);
+ rxvq->stats.errors++;
+ continue;
+ }
+
+ rxm->port = rxvq->port_id;
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rxm->ol_flags = 0;
+ rxm->vlan_tci = 0;
+
+ rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
+ rxm->data_len = (uint16_t)(len[i] - hdr_size);
+
+ hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
+ RTE_PKTMBUF_HEADROOM - hdr_size);
+
+ if (hw->vlan_strip)
+ rte_vlan_strip(rxm);
+
+ if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
+ virtio_discard_rxbuf(vq, rxm);
+ rxvq->stats.errors++;
+ continue;
+ }
+
+ virtio_rx_stats_updated(rxvq, rxm);
+
+ rx_pkts[nb_rx++] = rxm;
+ }
+
+ rxvq->stats.packets += nb_rx;
+
+ /* Allocate new mbuf for the used descriptor */
+ while (likely(!virtqueue_full(vq))) {
+ new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
+ if (unlikely(new_mbuf == NULL)) {
+ struct rte_eth_dev *dev =
+ &rte_eth_devices[rxvq->port_id];
+ dev->data->rx_mbuf_alloc_failed++;
+ break;
+ }
+ error = virtqueue_enqueue_recv_refill_packed(vq, &new_mbuf, 1);
+ if (unlikely(error)) {
+ rte_pktmbuf_free(new_mbuf);
+ break;
+ }
+ nb_enqueued++;
+ }
+
+ if (likely(nb_enqueued)) {
+ if (unlikely(virtqueue_kick_prepare_packed(vq))) {
+ virtqueue_notify(vq);
+ PMD_RX_LOG(DEBUG, "Notified");
+ }
+ }
+
+ return nb_rx;
+}
+
+
uint16_t
virtio_recv_mergeable_pkts_inorder(void *rx_queue,
struct rte_mbuf **rx_pkts,
@@ -1493,6 +1686,182 @@ virtio_recv_mergeable_pkts(void *rx_queue,
return nb_rx;
}
+uint16_t
+virtio_recv_mergeable_pkts_packed(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct virtnet_rx *rxvq = rx_queue;
+ struct virtqueue *vq = rxvq->vq;
+ struct virtio_hw *hw = vq->hw;
+ struct rte_mbuf *rxm;
+ struct rte_mbuf *prev = NULL;
+ uint16_t num, nb_rx = 0;
+ uint32_t len[VIRTIO_MBUF_BURST_SZ];
+ struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
+ uint32_t nb_enqueued = 0;
+ uint32_t seg_num = 0;
+ uint32_t seg_res = 0;
+ uint32_t hdr_size = hw->vtnet_hdr_size;
+ int32_t i;
+ int error;
+
+ if (unlikely(hw->started == 0))
+ return nb_rx;
+
+
+ num = nb_pkts;
+ if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
+ num = VIRTIO_MBUF_BURST_SZ;
+ if (likely(num > DESC_PER_CACHELINE))
+ num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
+
+ num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
+
+ for (i = 0; i < num; i++) {
+ struct virtio_net_hdr_mrg_rxbuf *header;
+
+ PMD_RX_LOG(DEBUG, "dequeue:%d", num);
+ PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
+
+ rxm = rcv_pkts[i];
+
+ if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+ PMD_RX_LOG(ERR, "Packet drop");
+ nb_enqueued++;
+ virtio_discard_rxbuf(vq, rxm);
+ rxvq->stats.errors++;
+ continue;
+ }
+
+ header = (struct virtio_net_hdr_mrg_rxbuf *)((char *)
+ rxm->buf_addr + RTE_PKTMBUF_HEADROOM - hdr_size);
+ seg_num = header->num_buffers;
+
+ if (seg_num == 0)
+ seg_num = 1;
+
+ rxm->data_off = RTE_PKTMBUF_HEADROOM;
+ rxm->nb_segs = seg_num;
+ rxm->ol_flags = 0;
+ rxm->vlan_tci = 0;
+ rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
+ rxm->data_len = (uint16_t)(len[i] - hdr_size);
+
+ rxm->port = rxvq->port_id;
+ rx_pkts[nb_rx] = rxm;
+ prev = rxm;
+
+ if (hw->has_rx_offload &&
+ virtio_rx_offload(rxm, &header->hdr) < 0) {
+ virtio_discard_rxbuf(vq, rxm);
+ rxvq->stats.errors++;
+ continue;
+ }
+
+ if (hw->vlan_strip)
+ rte_vlan_strip(rx_pkts[nb_rx]);
+
+ seg_res = seg_num - 1;
+
+ /* Merge remaining segments */
+ while (seg_res != 0 && i < (num - 1)) {
+ i++;
+
+ rxm = rcv_pkts[i];
+ rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
+ rxm->pkt_len = (uint32_t)(len[i]);
+ rxm->data_len = (uint16_t)(len[i]);
+
+ rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
+ rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);
+
+ if (prev)
+ prev->next = rxm;
+
+ prev = rxm;
+ seg_res -= 1;
+ }
+
+ if (!seg_res) {
+ virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
+ nb_rx++;
+ }
+ }
+
+ /* Last packet still need merge segments */
+ while (seg_res != 0) {
+ uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
+ VIRTIO_MBUF_BURST_SZ);
+ if (likely(vq->vq_free_cnt >= rcv_cnt)) {
+ num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
+ len, rcv_cnt);
+ uint16_t extra_idx = 0;
+
+ rcv_cnt = num;
+
+ while (extra_idx < rcv_cnt) {
+ rxm = rcv_pkts[extra_idx];
+
+ rxm->data_off =
+ RTE_PKTMBUF_HEADROOM - hdr_size;
+ rxm->pkt_len = (uint32_t)(len[extra_idx]);
+ rxm->data_len = (uint16_t)(len[extra_idx]);
+
+ prev->next = rxm;
+ prev = rxm;
+ rx_pkts[nb_rx]->pkt_len += len[extra_idx];
+ rx_pkts[nb_rx]->data_len += len[extra_idx];
+ extra_idx += 1;
+ }
+ seg_res -= rcv_cnt;
+ if (!seg_res) {
+ virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
+ nb_rx++;
+ }
+ } else {
+ PMD_RX_LOG(ERR,
+ "No enough segments for packet.");
+ if (prev)
+ virtio_discard_rxbuf(vq, prev);
+ rxvq->stats.errors++;
+ break;
+ }
+ }
+
+ rxvq->stats.packets += nb_rx;
+
+ /* Allocate new mbuf for the used descriptor */
+ if (likely(!virtqueue_full(vq))) {
+ /* free_cnt may include mrg descs */
+ uint16_t free_cnt = vq->vq_free_cnt;
+ struct rte_mbuf *new_pkts[free_cnt];
+
+ if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
+ error = virtqueue_enqueue_recv_refill_packed(vq,
+ new_pkts, free_cnt);
+ if (unlikely(error)) {
+ for (i = 0; i < free_cnt; i++)
+ rte_pktmbuf_free(new_pkts[i]);
+ }
+ nb_enqueued += free_cnt;
+ } else {
+ struct rte_eth_dev *dev =
+ &rte_eth_devices[rxvq->port_id];
+ dev->data->rx_mbuf_alloc_failed += free_cnt;
+ }
+ }
+
+ if (likely(nb_enqueued)) {
+ if (unlikely(virtqueue_kick_prepare_packed(vq))) {
+ virtqueue_notify(vq);
+ PMD_RX_LOG(DEBUG, "Notified");
+ }
+ }
+
+ return nb_rx;
+}
+
uint16_t
virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
index 56a77cc71..5b03f7a27 100644
--- a/drivers/net/virtio/virtqueue.c
+++ b/drivers/net/virtio/virtqueue.c
@@ -54,9 +54,36 @@ virtqueue_detach_unused(struct virtqueue *vq)
return NULL;
}
+/* Flush used descs */
+static void
+virtqueue_rxvq_flush_packed(struct virtqueue *vq)
+{
+ struct vq_desc_extra *dxp;
+ uint16_t i;
+
+ struct vring_packed_desc *descs = vq->ring_packed.desc_packed;
+ int cnt = 0;
+
+ i = vq->vq_used_cons_idx;
+ while (desc_is_used(&descs[i], vq) && cnt++ < vq->vq_nentries) {
+ dxp = &vq->vq_descx[descs[i].id];
+ if (dxp->cookie != NULL) {
+ rte_pktmbuf_free(dxp->cookie);
+ dxp->cookie = NULL;
+ }
+ vq->vq_free_cnt++;
+ vq->vq_used_cons_idx++;
+ if (vq->vq_used_cons_idx >= vq->vq_nentries) {
+ vq->vq_used_cons_idx -= vq->vq_nentries;
+ vq->used_wrap_counter ^= 1;
+ }
+ i = vq->vq_used_cons_idx;
+ }
+}
+
/* Flush the elements in the used ring. */
-void
-virtqueue_rxvq_flush(struct virtqueue *vq)
+static void
+virtqueue_rxvq_flush_split(struct virtqueue *vq)
{
struct virtnet_rx *rxq = &vq->rxq;
struct virtio_hw *hw = vq->hw;
@@ -102,3 +129,15 @@ virtqueue_rxvq_flush(struct virtqueue *vq)
}
}
}
+
+/* Flush the elements in the used ring. */
+void
+virtqueue_rxvq_flush(struct virtqueue *vq)
+{
+ struct virtio_hw *hw = vq->hw;
+
+ if (vtpci_packed_queue(hw))
+ virtqueue_rxvq_flush_packed(vq);
+ else
+ virtqueue_rxvq_flush_split(vq);
+}
--
2.21.0

View File

@ -1,142 +0,0 @@
From d8d854a2f1814e10cf51ce88bf00b020167c772e Mon Sep 17 00:00:00 2001
From: Jens Freimann <jfreimann@redhat.com>
Date: Mon, 17 Dec 2018 22:31:36 +0100
Subject: [PATCH 07/18] net/virtio: support packed queue in send command
[ upstream commit ec194c2f189525b2fb4be5604422a28ea5f08acd ]
Use packed virtqueue format when reading and writing descriptors
to/from the ring.
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit ec194c2f189525b2fb4be5604422a28ea5f08acd)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_ethdev.c | 96 ++++++++++++++++++++++++++++++
1 file changed, 96 insertions(+)
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 4ef1da393..53773445b 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -141,6 +141,96 @@ static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
+static struct virtio_pmd_ctrl *
+virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
+ int *dlen, int pkt_num)
+{
+ struct virtqueue *vq = cvq->vq;
+ int head;
+ struct vring_packed_desc *desc = vq->ring_packed.desc_packed;
+ struct virtio_pmd_ctrl *result;
+ int wrap_counter;
+ uint16_t flags;
+ int sum = 0;
+ int k;
+
+ /*
+ * Format is enforced in qemu code:
+ * One TX packet for header;
+ * At least one TX packet per argument;
+ * One RX packet for ACK.
+ */
+ head = vq->vq_avail_idx;
+ wrap_counter = vq->avail_wrap_counter;
+ desc[head].flags = VRING_DESC_F_NEXT;
+ desc[head].addr = cvq->virtio_net_hdr_mem;
+ desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
+ vq->vq_free_cnt--;
+ if (++vq->vq_avail_idx >= vq->vq_nentries) {
+ vq->vq_avail_idx -= vq->vq_nentries;
+ vq->avail_wrap_counter ^= 1;
+ }
+
+ for (k = 0; k < pkt_num; k++) {
+ desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
+ + sizeof(struct virtio_net_ctrl_hdr)
+ + sizeof(ctrl->status) + sizeof(uint8_t) * sum;
+ desc[vq->vq_avail_idx].len = dlen[k];
+ flags = VRING_DESC_F_NEXT;
+ sum += dlen[k];
+ vq->vq_free_cnt--;
+ flags |= VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
+ VRING_DESC_F_USED(!vq->avail_wrap_counter);
+ desc[vq->vq_avail_idx].flags = flags;
+ rte_smp_wmb();
+ vq->vq_free_cnt--;
+ if (++vq->vq_avail_idx >= vq->vq_nentries) {
+ vq->vq_avail_idx -= vq->vq_nentries;
+ vq->avail_wrap_counter ^= 1;
+ }
+ }
+
+
+ desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
+ + sizeof(struct virtio_net_ctrl_hdr);
+ desc[vq->vq_avail_idx].len = sizeof(ctrl->status);
+ flags = VRING_DESC_F_WRITE;
+ flags |= VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
+ VRING_DESC_F_USED(!vq->avail_wrap_counter);
+ desc[vq->vq_avail_idx].flags = flags;
+ flags = VRING_DESC_F_NEXT;
+ flags |= VRING_DESC_F_AVAIL(wrap_counter) |
+ VRING_DESC_F_USED(!wrap_counter);
+ desc[head].flags = flags;
+ rte_smp_wmb();
+
+ vq->vq_free_cnt--;
+ if (++vq->vq_avail_idx >= vq->vq_nentries) {
+ vq->vq_avail_idx -= vq->vq_nentries;
+ vq->avail_wrap_counter ^= 1;
+ }
+
+ virtqueue_notify(vq);
+
+ /* wait for used descriptors in virtqueue */
+ do {
+ rte_rmb();
+ usleep(100);
+ } while (!desc_is_used(&desc[head], vq));
+
+ /* now get used descriptors */
+ while (desc_is_used(&desc[vq->vq_used_cons_idx], vq)) {
+ vq->vq_free_cnt++;
+ if (++vq->vq_used_cons_idx >= vq->vq_nentries) {
+ vq->vq_used_cons_idx -= vq->vq_nentries;
+ vq->used_wrap_counter ^= 1;
+ }
+ }
+
+ result = cvq->virtio_net_hdr_mz->addr;
+ return result;
+}
+
static int
virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
int *dlen, int pkt_num)
@@ -174,6 +264,11 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
sizeof(struct virtio_pmd_ctrl));
+ if (vtpci_packed_queue(vq->hw)) {
+ result = virtio_pq_send_command(cvq, ctrl, dlen, pkt_num);
+ goto out_unlock;
+ }
+
/*
* Format is enforced in qemu code:
* One TX packet for header;
@@ -245,6 +340,7 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
result = cvq->virtio_net_hdr_mz->addr;
+out_unlock:
rte_spinlock_unlock(&cvq->lock);
return result->status;
}
--
2.21.0

View File

@ -1,139 +0,0 @@
From 0cdcdd50e4cbb88737abfee1e545019500f11e38 Mon Sep 17 00:00:00 2001
From: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Date: Mon, 17 Dec 2018 22:31:37 +0100
Subject: [PATCH] net/virtio-user: add option to use packed queues
[ upstream commit 34f3966c7f81f947e9eebb347dec6a9f68eec4e6 ]
From: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Add option to enable packed queue support for virtio-user
devices.
Signed-off-by: Yuanhan Liu <yuanhan.liu@linux.intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit 34f3966c7f81f947e9eebb347dec6a9f68eec4e6)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
.../net/virtio/virtio_user/virtio_user_dev.c | 20 ++++++++++++++-----
.../net/virtio/virtio_user/virtio_user_dev.h | 2 +-
drivers/net/virtio/virtio_user_ethdev.c | 14 ++++++++++++-
3 files changed, 29 insertions(+), 7 deletions(-)
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index f0051f887..7d0acaeb7 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-1-Clause
* Copyright(c) 2010-2016 Intel Corporation
*/
@@ -58,6 +58,8 @@ virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
state.index = queue_sel;
state.num = 0; /* no reservation */
+ if (dev->features & (1ULL << VIRTIO_F_RING_PACKED))
+ state.num |= (1 << 15);
dev->ops->send_request(dev, VHOST_USER_SET_VRING_BASE, &state);
dev->ops->send_request(dev, VHOST_USER_SET_VRING_ADDR, &addr);
@@ -407,12 +409,13 @@ virtio_user_dev_setup(struct virtio_user_dev *dev)
1ULL << VIRTIO_NET_F_GUEST_TSO4 | \
1ULL << VIRTIO_NET_F_GUEST_TSO6 | \
1ULL << VIRTIO_F_IN_ORDER | \
- 1ULL << VIRTIO_F_VERSION_1)
+ 1ULL << VIRTIO_F_VERSION_1 | \
+ 1ULL << VIRTIO_F_RING_PACKED)
int
virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
int cq, int queue_size, const char *mac, char **ifname,
- int server, int mrg_rxbuf, int in_order)
+ int server, int mrg_rxbuf, int in_order, int packed_vq)
{
pthread_mutex_init(&dev->mutex, NULL);
snprintf(dev->path, PATH_MAX, "%s", path);
@@ -465,10 +468,17 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
if (!in_order)
dev->unsupported_features |= (1ull << VIRTIO_F_IN_ORDER);
- if (dev->mac_specified)
- dev->frontend_features |= (1ull << VIRTIO_NET_F_MAC);
+ if (packed_vq)
+ dev->device_features |= (1ull << VIRTIO_F_RING_PACKED);
else
+ dev->device_features &= ~(1ull << VIRTIO_F_RING_PACKED);
+
+ if (dev->mac_specified) {
+ dev->device_features |= (1ull << VIRTIO_NET_F_MAC);
+ } else {
+ dev->device_features &= ~(1ull << VIRTIO_NET_F_MAC);
dev->unsupported_features |= (1ull << VIRTIO_NET_F_MAC);
+ }
if (cq) {
/* device does not really need to know anything about CQ,
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.h b/drivers/net/virtio/virtio_user/virtio_user_dev.h
index 3e3a7b787..67a9c01ac 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.h
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.h
@@ -50,7 +50,7 @@ int virtio_user_start_device(struct virtio_user_dev *dev);
int virtio_user_stop_device(struct virtio_user_dev *dev);
int virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
int cq, int queue_size, const char *mac, char **ifname,
- int server, int mrg_rxbuf, int in_order);
+ int server, int mrg_rxbuf, int in_order, int packed_vq);
void virtio_user_dev_uninit(struct virtio_user_dev *dev);
void virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx);
uint8_t virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs);
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index 5781c0948..daad8f452 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -361,6 +361,8 @@ static const char *valid_args[] = {
VIRTIO_USER_ARG_MRG_RXBUF,
#define VIRTIO_USER_ARG_IN_ORDER "in_order"
VIRTIO_USER_ARG_IN_ORDER,
+#define VIRTIO_USER_ARG_PACKED_VQ "packed_vq"
+ VIRTIO_USER_ARG_PACKED_VQ,
NULL
};
@@ -468,6 +470,7 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
char *ifname = NULL;
char *mac_addr = NULL;
int ret = -1;
+ uint64_t packed_vq = 0;
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
const char *name = rte_vdev_device_name(dev);
@@ -571,6 +574,15 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
cq = 1;
}
+ if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PACKED_VQ) == 1) {
+ if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PACKED_VQ,
+ &get_integer_arg, &packed_vq) < 0) {
+ PMD_INIT_LOG(ERR, "error to parse %s",
+ VIRTIO_USER_ARG_PACKED_VQ);
+ goto end;
+ }
+ }
+
if (queues > 1 && cq == 0) {
PMD_INIT_LOG(ERR, "multi-q requires ctrl-q");
goto end;
@@ -610,7 +622,7 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
hw = eth_dev->data->dev_private;
if (virtio_user_dev_init(hw->virtio_user_dev, path, queues, cq,
queue_size, mac_addr, &ifname, server_mode,
- mrg_rxbuf, in_order) < 0) {
+ mrg_rxbuf, in_order, packed_vq) < 0) {
PMD_INIT_LOG(ERR, "virtio_user_dev_init fails");
virtio_user_eth_dev_free(eth_dev);
goto end;
--
2.21.0

View File

@ -1,44 +0,0 @@
From f5302062cbc98b3b8b1002cc48e7125a48ead96c Mon Sep 17 00:00:00 2001
From: Jens Freimann <jfreimann@redhat.com>
Date: Mon, 17 Dec 2018 22:31:38 +0100
Subject: [PATCH 09/18] net/virtio-user: fail if cq used with packed vq
[ upstream commit 07dd7e250d0128bf1edfd73e9d83bde09cdb11e9 ]
Until we have support for control virtqueues let's disable it and
fail device initalization if specified as a parameter.
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit 07dd7e250d0128bf1edfd73e9d83bde09cdb11e9)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_user/virtio_user_dev.c | 10 ++++++++--
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 77cec1d3c..2f75091d5 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -467,10 +467,16 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
if (!in_order)
dev->unsupported_features |= (1ull << VIRTIO_F_IN_ORDER);
- if (packed_vq)
+ if (packed_vq) {
+ if (cq) {
+ PMD_INIT_LOG(ERR, "control vq not supported yet with "
+ "packed virtqueues\n");
+ return -1;
+ }
dev->device_features |= (1ull << VIRTIO_F_RING_PACKED);
- else
+ } else {
dev->device_features &= ~(1ull << VIRTIO_F_RING_PACKED);
+ }
if (dev->mac_specified) {
dev->device_features |= (1ull << VIRTIO_NET_F_MAC);
--
2.21.0

View File

@ -1,45 +0,0 @@
From d1b8c268219498c865511b375b0c0c89244046f9 Mon Sep 17 00:00:00 2001
From: Jens Freimann <jfreimann@redhat.com>
Date: Mon, 17 Dec 2018 22:31:39 +0100
Subject: [PATCH 10/18] net/virtio: enable packed virtqueues by default
[ upstream commit aea29aa5d37b40080cfc1f9a1acba239bf03922f ]
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit aea29aa5d37b40080cfc1f9a1acba239bf03922f)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_ethdev.h | 1 +
drivers/net/virtio/virtio_user/virtio_user_dev.c | 3 ++-
2 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index 88b8c42a3..364ecbb50 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -34,6 +34,7 @@
1u << VIRTIO_RING_F_INDIRECT_DESC | \
1ULL << VIRTIO_F_VERSION_1 | \
1ULL << VIRTIO_F_IN_ORDER | \
+ 1ULL << VIRTIO_F_RING_PACKED | \
1ULL << VIRTIO_F_IOMMU_PLATFORM)
#define VIRTIO_PMD_SUPPORTED_GUEST_FEATURES \
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 2f75091d5..5999b7d9d 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -410,7 +410,8 @@ virtio_user_dev_setup(struct virtio_user_dev *dev)
1ULL << VIRTIO_NET_F_GUEST_TSO6 | \
1ULL << VIRTIO_F_IN_ORDER | \
1ULL << VIRTIO_F_VERSION_1 | \
- 1ULL << VIRTIO_F_RING_PACKED)
+ 1ULL << VIRTIO_F_RING_PACKED | \
+ 1ULL << VIRTIO_RING_F_EVENT_IDX)
int
virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
--
2.21.0

View File

@ -1,33 +0,0 @@
From 440731f30a1257c3318badfcf17f5ab9e5085317 Mon Sep 17 00:00:00 2001
From: Jens Freimann <jfreimann@redhat.com>
Date: Thu, 20 Dec 2018 11:56:24 +0100
Subject: [PATCH 11/18] net/virtio: avoid double accounting of bytes
[ upstream commit 517ad3e018e31ab2596d1ece5369894703c850c2 ]
Accounting of bytes was moved to a common function, so at the moment we do
it twice. This patches fixes it for sending packets with packed virtqueues.
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit 517ad3e018e31ab2596d1ece5369894703c850c2)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_rxtx.c | 1 -
1 file changed, 1 deletion(-)
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 0bcf3b08a..50eb4c694 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -1931,7 +1931,6 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
/* Enqueue Packet buffers */
virtqueue_enqueue_xmit_packed(txvq, txm, slots, can_push);
- txvq->stats.bytes += txm->pkt_len;
virtio_update_packet_stats(&txvq->stats, txm);
}
--
2.21.0

View File

@ -1,85 +0,0 @@
From ec53a1992df973607cbb10db6a0816ed2ef498dd Mon Sep 17 00:00:00 2001
From: Tiwei Bie <tiwei.bie@intel.com>
Date: Thu, 3 Jan 2019 10:40:06 +0800
Subject: [PATCH] net/virtio-user: fix packed vq option parsing
[ upstream commit 9070f88b81dab42739fb169265e3ea727e47dfa2 ]
Add the RING_PACKED feature to dev->unsupported_features
when it's disabled, and add the missing packed vq param
string. And also revert the unexpected change to MAC option
introduced when adding packed vq option.
Fixes: 34f3966c7f81 ("net/virtio-user: add option to use packed queues")
Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit 9070f88b81dab42739fb169265e3ea727e47dfa2)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_user/virtio_user_dev.c | 11 ++++-------
drivers/net/virtio/virtio_user_ethdev.c | 7 ++++---
2 files changed, 8 insertions(+), 10 deletions(-)
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 811b95c45..426682c93 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -475,17 +475,14 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
"packed virtqueues\n");
return -1;
}
- dev->device_features |= (1ull << VIRTIO_F_RING_PACKED);
} else {
- dev->device_features &= ~(1ull << VIRTIO_F_RING_PACKED);
+ dev->unsupported_features |= (1ull << VIRTIO_F_RING_PACKED);
}
- if (dev->mac_specified) {
- dev->device_features |= (1ull << VIRTIO_NET_F_MAC);
- } else {
- dev->device_features &= ~(1ull << VIRTIO_NET_F_MAC);
+ if (dev->mac_specified)
+ dev->frontend_features |= (1ull << VIRTIO_NET_F_MAC);
+ else
dev->unsupported_features |= (1ull << VIRTIO_NET_F_MAC);
- }
if (cq) {
/* device does not really need to know anything about CQ,
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index daad8f452..a2911febf 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -361,7 +361,7 @@ static const char *valid_args[] = {
VIRTIO_USER_ARG_MRG_RXBUF,
#define VIRTIO_USER_ARG_IN_ORDER "in_order"
VIRTIO_USER_ARG_IN_ORDER,
-#define VIRTIO_USER_ARG_PACKED_VQ "packed_vq"
+#define VIRTIO_USER_ARG_PACKED_VQ "packed_vq"
VIRTIO_USER_ARG_PACKED_VQ,
NULL
};
@@ -466,11 +466,11 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
uint64_t server_mode = VIRTIO_USER_DEF_SERVER_MODE;
uint64_t mrg_rxbuf = 1;
uint64_t in_order = 1;
+ uint64_t packed_vq = 0;
char *path = NULL;
char *ifname = NULL;
char *mac_addr = NULL;
int ret = -1;
- uint64_t packed_vq = 0;
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
const char *name = rte_vdev_device_name(dev);
@@ -698,4 +698,5 @@ RTE_PMD_REGISTER_PARAM_STRING(net_virtio_user,
"iface=<string> "
"server=<0|1> "
"mrg_rxbuf=<0|1> "
- "in_order=<0|1>");
+ "in_order=<0|1> "
+ "packed_vq=<0|1>");
--
2.21.0

View File

@ -1,36 +0,0 @@
From b6da125960fb1fb017427af5910b43ac81586850 Mon Sep 17 00:00:00 2001
From: Tiwei Bie <tiwei.bie@intel.com>
Date: Thu, 3 Jan 2019 10:40:07 +0800
Subject: [PATCH 13/18] net/virtio-user: fix supported features list
[ upstream commit 8532a0fcd8f2cf3a5d3189b453bd90a69991b1b1 ]
Currently virtio-user doesn't support event idx.
Fixes: aea29aa5d37b ("net/virtio: enable packed virtqueues by default")
Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit 8532a0fcd8f2cf3a5d3189b453bd90a69991b1b1)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_user/virtio_user_dev.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index c4e026096..77341f895 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -410,8 +410,7 @@ virtio_user_dev_setup(struct virtio_user_dev *dev)
1ULL << VIRTIO_NET_F_GUEST_TSO6 | \
1ULL << VIRTIO_F_IN_ORDER | \
1ULL << VIRTIO_F_VERSION_1 | \
- 1ULL << VIRTIO_F_RING_PACKED | \
- 1ULL << VIRTIO_RING_F_EVENT_IDX)
+ 1ULL << VIRTIO_F_RING_PACKED)
int
virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
--
2.21.0

View File

@ -1,98 +0,0 @@
From 82b43dd199d5492527b73002d4c3b009a98ca7a0 Mon Sep 17 00:00:00 2001
From: Jens Freimann <jfreimann@redhat.com>
Date: Fri, 11 Jan 2019 10:39:28 +0100
Subject: [PATCH 14/18] net/virtio: check head desc with correct wrap counter
[ upstream commit a4270ea4ff79b46280dd542f4ab3eb45f8c9685a ]
In virtio_pq_send_command() we check for a used descriptor
and wait in an idle loop until it becomes used. We can't use
vq->used_wrap_counter here to check for the first descriptor
we made available because the ring could have wrapped. Let's use
the used_wrap_counter that matches the state of the head descriptor.
Fixes: ec194c2f1895 ("net/virtio: support packed queue in send command")
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit a4270ea4ff79b46280dd542f4ab3eb45f8c9685a)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_ethdev.c | 11 ++++++-----
drivers/net/virtio/virtqueue.h | 10 ++++++++--
2 files changed, 14 insertions(+), 7 deletions(-)
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 53773445b..7bd38a292 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -149,7 +149,7 @@ virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
int head;
struct vring_packed_desc *desc = vq->ring_packed.desc_packed;
struct virtio_pmd_ctrl *result;
- int wrap_counter;
+ bool avail_wrap_counter, used_wrap_counter;
uint16_t flags;
int sum = 0;
int k;
@@ -161,7 +161,8 @@ virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
* One RX packet for ACK.
*/
head = vq->vq_avail_idx;
- wrap_counter = vq->avail_wrap_counter;
+ avail_wrap_counter = vq->avail_wrap_counter;
+ used_wrap_counter = vq->used_wrap_counter;
desc[head].flags = VRING_DESC_F_NEXT;
desc[head].addr = cvq->virtio_net_hdr_mem;
desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
@@ -199,8 +200,8 @@ virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
VRING_DESC_F_USED(!vq->avail_wrap_counter);
desc[vq->vq_avail_idx].flags = flags;
flags = VRING_DESC_F_NEXT;
- flags |= VRING_DESC_F_AVAIL(wrap_counter) |
- VRING_DESC_F_USED(!wrap_counter);
+ flags |= VRING_DESC_F_AVAIL(avail_wrap_counter) |
+ VRING_DESC_F_USED(!avail_wrap_counter);
desc[head].flags = flags;
rte_smp_wmb();
@@ -216,7 +217,7 @@ virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
do {
rte_rmb();
usleep(100);
- } while (!desc_is_used(&desc[head], vq));
+ } while (!__desc_is_used(&desc[head], used_wrap_counter));
/* now get used descriptors */
while (desc_is_used(&desc[vq->vq_used_cons_idx], vq)) {
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index b142fd488..75f5782bc 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -256,7 +256,7 @@ struct virtio_tx_region {
};
static inline int
-desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
+__desc_is_used(struct vring_packed_desc *desc, bool wrap_counter)
{
uint16_t used, avail, flags;
@@ -264,7 +264,13 @@ desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
used = !!(flags & VRING_DESC_F_USED(1));
avail = !!(flags & VRING_DESC_F_AVAIL(1));
- return avail == used && used == vq->used_wrap_counter;
+ return avail == used && used == wrap_counter;
+}
+
+static inline int
+desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
+{
+ return __desc_is_used(desc, vq->used_wrap_counter);
}
--
2.21.0

View File

@ -1,277 +0,0 @@
From 74bbcd238093edc81b1a1f0b9b6e0d3c3fe32584 Mon Sep 17 00:00:00 2001
From: Jens Freimann <jfreimann@redhat.com>
Date: Fri, 11 Jan 2019 10:39:29 +0100
Subject: [PATCH] net/virtio-user: support control VQ for packed
[ upstream commit 48a4464029a7f76dfb2c1f09146a391917b075e5 ]
Add support to virtio-user for control virtqueues.
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit 48a4464029a7f76dfb2c1f09146a391917b075e5)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
.../net/virtio/virtio_user/virtio_user_dev.c | 102 ++++++++++++++++--
.../net/virtio/virtio_user/virtio_user_dev.h | 15 ++-
drivers/net/virtio/virtio_user_ethdev.c | 56 +++++++++-
3 files changed, 157 insertions(+), 16 deletions(-)
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 2caaaad5f..83d3fb531 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -43,15 +43,26 @@ virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
struct vhost_vring_file file;
struct vhost_vring_state state;
struct vring *vring = &dev->vrings[queue_sel];
+ struct vring_packed *pq_vring = &dev->packed_vrings[queue_sel];
struct vhost_vring_addr addr = {
.index = queue_sel,
- .desc_user_addr = (uint64_t)(uintptr_t)vring->desc,
- .avail_user_addr = (uint64_t)(uintptr_t)vring->avail,
- .used_user_addr = (uint64_t)(uintptr_t)vring->used,
.log_guest_addr = 0,
.flags = 0, /* disable log */
};
+ if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
+ addr.desc_user_addr =
+ (uint64_t)(uintptr_t)pq_vring->desc_packed;
+ addr.avail_user_addr =
+ (uint64_t)(uintptr_t)pq_vring->driver_event;
+ addr.used_user_addr =
+ (uint64_t)(uintptr_t)pq_vring->device_event;
+ } else {
+ addr.desc_user_addr = (uint64_t)(uintptr_t)vring->desc;
+ addr.avail_user_addr = (uint64_t)(uintptr_t)vring->avail;
+ addr.used_user_addr = (uint64_t)(uintptr_t)vring->used;
+ }
+
state.index = queue_sel;
state.num = vring->num;
dev->ops->send_request(dev, VHOST_USER_SET_VRING_NUM, &state);
@@ -468,15 +479,8 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
if (!in_order)
dev->unsupported_features |= (1ull << VIRTIO_F_IN_ORDER);
- if (packed_vq) {
- if (cq) {
- PMD_INIT_LOG(ERR, "control vq not supported yet with "
- "packed virtqueues\n");
- return -1;
- }
- } else {
+ if (!packed_vq)
dev->unsupported_features |= (1ull << VIRTIO_F_RING_PACKED);
- }
if (dev->mac_specified)
dev->frontend_features |= (1ull << VIRTIO_NET_F_MAC);
@@ -621,6 +625,82 @@ virtio_user_handle_ctrl_msg(struct virtio_user_dev *dev, struct vring *vring,
return n_descs;
}
+static inline int
+desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
+{
+ return wrap_counter == !!(desc->flags & VRING_DESC_F_AVAIL(1)) &&
+ wrap_counter != !!(desc->flags & VRING_DESC_F_USED(1));
+}
+
+static uint32_t
+virtio_user_handle_ctrl_msg_pq(struct virtio_user_dev *dev,
+ struct vring_packed *vring,
+ uint16_t idx_hdr)
+{
+ struct virtio_net_ctrl_hdr *hdr;
+ virtio_net_ctrl_ack status = ~0;
+ uint16_t idx_data, idx_status;
+ /* initialize to one, header is first */
+ uint32_t n_descs = 1;
+
+ /* locate desc for header, data, and status */
+ idx_data = idx_hdr + 1;
+ if (idx_data >= dev->queue_size)
+ idx_data -= dev->queue_size;
+
+ n_descs++;
+
+ idx_status = idx_data;
+ while (vring->desc_packed[idx_status].flags & VRING_DESC_F_NEXT) {
+ idx_status++;
+ if (idx_status >= dev->queue_size)
+ idx_status -= dev->queue_size;
+ n_descs++;
+ }
+
+ hdr = (void *)(uintptr_t)vring->desc_packed[idx_hdr].addr;
+ if (hdr->class == VIRTIO_NET_CTRL_MQ &&
+ hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
+ uint16_t queues;
+
+ queues = *(uint16_t *)(uintptr_t)
+ vring->desc_packed[idx_data].addr;
+ status = virtio_user_handle_mq(dev, queues);
+ }
+
+ /* Update status */
+ *(virtio_net_ctrl_ack *)(uintptr_t)
+ vring->desc_packed[idx_status].addr = status;
+
+ return n_descs;
+}
+
+void
+virtio_user_handle_cq_packed(struct virtio_user_dev *dev, uint16_t queue_idx)
+{
+ struct virtio_user_queue *vq = &dev->packed_queues[queue_idx];
+ struct vring_packed *vring = &dev->packed_vrings[queue_idx];
+ uint16_t id, n_descs;
+
+ while (desc_is_avail(&vring->desc_packed[vq->used_idx],
+ vq->used_wrap_counter)) {
+ id = vring->desc_packed[vq->used_idx].id;
+
+ n_descs = virtio_user_handle_ctrl_msg_pq(dev, vring, id);
+
+ do {
+ vring->desc_packed[vq->used_idx].flags =
+ VRING_DESC_F_AVAIL(vq->used_wrap_counter) |
+ VRING_DESC_F_USED(vq->used_wrap_counter);
+ if (++vq->used_idx >= dev->queue_size) {
+ vq->used_idx -= dev->queue_size;
+ vq->used_wrap_counter ^= 1;
+ }
+ n_descs--;
+ } while (n_descs);
+ }
+}
+
void
virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx)
{
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.h b/drivers/net/virtio/virtio_user/virtio_user_dev.h
index 67a9c01ac..c6c2f7d6e 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.h
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.h
@@ -11,6 +11,12 @@
#include "../virtio_ring.h"
#include "vhost.h"
+struct virtio_user_queue {
+ uint16_t used_idx;
+ bool avail_wrap_counter;
+ bool used_wrap_counter;
+};
+
struct virtio_user_dev {
/* for vhost_user backend */
int vhostfd;
@@ -39,7 +45,12 @@ struct virtio_user_dev {
uint16_t port_id;
uint8_t mac_addr[ETHER_ADDR_LEN];
char path[PATH_MAX];
- struct vring vrings[VIRTIO_MAX_VIRTQUEUES];
+ union {
+ struct vring vrings[VIRTIO_MAX_VIRTQUEUES];
+ struct vring_packed packed_vrings[VIRTIO_MAX_VIRTQUEUES];
+ };
+ struct virtio_user_queue packed_queues[VIRTIO_MAX_VIRTQUEUES];
+
struct virtio_user_backend_ops *ops;
pthread_mutex_t mutex;
bool started;
@@ -53,5 +64,7 @@ int virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
int server, int mrg_rxbuf, int in_order, int packed_vq);
void virtio_user_dev_uninit(struct virtio_user_dev *dev);
void virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx);
+void virtio_user_handle_cq_packed(struct virtio_user_dev *dev,
+ uint16_t queue_idx);
uint8_t virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs);
#endif
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index a2911febf..dddb7dd23 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -271,10 +271,44 @@ virtio_user_get_queue_num(struct virtio_hw *hw, uint16_t queue_id __rte_unused)
return dev->queue_size;
}
-static int
-virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
+static void
+virtio_user_setup_queue_packed(struct virtqueue *vq,
+ struct virtio_user_dev *dev)
+
+{
+ uint16_t queue_idx = vq->vq_queue_index;
+ struct vring_packed *vring;
+ uint64_t desc_addr;
+ uint64_t avail_addr;
+ uint64_t used_addr;
+ uint16_t i;
+
+ vring = &dev->packed_vrings[queue_idx];
+ desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
+ avail_addr = desc_addr + vq->vq_nentries *
+ sizeof(struct vring_packed_desc);
+ used_addr = RTE_ALIGN_CEIL(avail_addr +
+ sizeof(struct vring_packed_desc_event),
+ VIRTIO_PCI_VRING_ALIGN);
+ vring->num = vq->vq_nentries;
+ vring->desc_packed =
+ (void *)(uintptr_t)desc_addr;
+ vring->driver_event =
+ (void *)(uintptr_t)avail_addr;
+ vring->device_event =
+ (void *)(uintptr_t)used_addr;
+ dev->packed_queues[queue_idx].avail_wrap_counter = true;
+ dev->packed_queues[queue_idx].used_wrap_counter = true;
+
+ for (i = 0; i < vring->num; i++) {
+ vring->desc_packed[i].flags = VRING_DESC_F_USED(1) |
+ VRING_DESC_F_AVAIL(1);
+ }
+}
+
+static void
+virtio_user_setup_queue_split(struct virtqueue *vq, struct virtio_user_dev *dev)
{
- struct virtio_user_dev *dev = virtio_user_get_dev(hw);
uint16_t queue_idx = vq->vq_queue_index;
uint64_t desc_addr, avail_addr, used_addr;
@@ -288,6 +322,17 @@ virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
dev->vrings[queue_idx].desc = (void *)(uintptr_t)desc_addr;
dev->vrings[queue_idx].avail = (void *)(uintptr_t)avail_addr;
dev->vrings[queue_idx].used = (void *)(uintptr_t)used_addr;
+}
+
+static int
+virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
+{
+ struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+ if (vtpci_packed_queue(hw))
+ virtio_user_setup_queue_packed(vq, dev);
+ else
+ virtio_user_setup_queue_split(vq, dev);
return 0;
}
@@ -317,7 +362,10 @@ virtio_user_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
struct virtio_user_dev *dev = virtio_user_get_dev(hw);
if (hw->cvq && (hw->cvq->vq == vq)) {
- virtio_user_handle_cq(dev, vq->vq_queue_index);
+ if (vtpci_packed_queue(vq->hw))
+ virtio_user_handle_cq_packed(dev, vq->vq_queue_index);
+ else
+ virtio_user_handle_cq(dev, vq->vq_queue_index);
return;
}
--
2.21.0

View File

@ -1,197 +0,0 @@
From c276398e43bec444eb207c3184f667b3d97361f8 Mon Sep 17 00:00:00 2001
From: Tiwei Bie <tiwei.bie@intel.com>
Date: Wed, 23 Jan 2019 01:01:40 +0800
Subject: [PATCH 16/18] net/virtio: fix control VQ
[ upstream commit 2923b8f9c41da37d63bd196ba2f037c154a6ebd5 ]
This patch mainly fixed below issues in the packed ring based
control vq support in virtio driver:
1. When parsing the used descriptors, we have to track the
number of descs that we need to skip;
2. vq->vq_free_cnt was decreased twice for a same desc;
Meanwhile, make the function name consistent with other parts.
Fixes: ec194c2f1895 ("net/virtio: support packed queue in send command")
Fixes: a4270ea4ff79 ("net/virtio: check head desc with correct wrap counter")
Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
[changed parameters to virtio_rmb/_wmb()]
(cherry picked from commit 2923b8f9c41da37d63bd196ba2f037c154a6ebd5)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_ethdev.c | 62 ++++++++++++++----------------
drivers/net/virtio/virtqueue.h | 12 +-----
2 files changed, 31 insertions(+), 43 deletions(-)
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 7bd38a292..c12fb157e 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -142,16 +142,17 @@ static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
static struct virtio_pmd_ctrl *
-virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
- int *dlen, int pkt_num)
+virtio_send_command_packed(struct virtnet_ctl *cvq,
+ struct virtio_pmd_ctrl *ctrl,
+ int *dlen, int pkt_num)
{
struct virtqueue *vq = cvq->vq;
int head;
struct vring_packed_desc *desc = vq->ring_packed.desc_packed;
struct virtio_pmd_ctrl *result;
- bool avail_wrap_counter, used_wrap_counter;
- uint16_t flags;
+ bool avail_wrap_counter;
int sum = 0;
+ int nb_descs = 0;
int k;
/*
@@ -162,11 +163,10 @@ virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
*/
head = vq->vq_avail_idx;
avail_wrap_counter = vq->avail_wrap_counter;
- used_wrap_counter = vq->used_wrap_counter;
- desc[head].flags = VRING_DESC_F_NEXT;
desc[head].addr = cvq->virtio_net_hdr_mem;
desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
vq->vq_free_cnt--;
+ nb_descs++;
if (++vq->vq_avail_idx >= vq->vq_nentries) {
vq->vq_avail_idx -= vq->vq_nentries;
vq->avail_wrap_counter ^= 1;
@@ -177,55 +177,51 @@ virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
+ sizeof(struct virtio_net_ctrl_hdr)
+ sizeof(ctrl->status) + sizeof(uint8_t) * sum;
desc[vq->vq_avail_idx].len = dlen[k];
- flags = VRING_DESC_F_NEXT;
+ desc[vq->vq_avail_idx].flags = VRING_DESC_F_NEXT |
+ VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
+ VRING_DESC_F_USED(!vq->avail_wrap_counter);
sum += dlen[k];
vq->vq_free_cnt--;
- flags |= VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
- VRING_DESC_F_USED(!vq->avail_wrap_counter);
- desc[vq->vq_avail_idx].flags = flags;
- rte_smp_wmb();
- vq->vq_free_cnt--;
+ nb_descs++;
if (++vq->vq_avail_idx >= vq->vq_nentries) {
vq->vq_avail_idx -= vq->vq_nentries;
vq->avail_wrap_counter ^= 1;
}
}
-
desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
+ sizeof(struct virtio_net_ctrl_hdr);
desc[vq->vq_avail_idx].len = sizeof(ctrl->status);
- flags = VRING_DESC_F_WRITE;
- flags |= VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
- VRING_DESC_F_USED(!vq->avail_wrap_counter);
- desc[vq->vq_avail_idx].flags = flags;
- flags = VRING_DESC_F_NEXT;
- flags |= VRING_DESC_F_AVAIL(avail_wrap_counter) |
- VRING_DESC_F_USED(!avail_wrap_counter);
- desc[head].flags = flags;
- rte_smp_wmb();
-
+ desc[vq->vq_avail_idx].flags = VRING_DESC_F_WRITE |
+ VRING_DESC_F_AVAIL(vq->avail_wrap_counter) |
+ VRING_DESC_F_USED(!vq->avail_wrap_counter);
vq->vq_free_cnt--;
+ nb_descs++;
if (++vq->vq_avail_idx >= vq->vq_nentries) {
vq->vq_avail_idx -= vq->vq_nentries;
vq->avail_wrap_counter ^= 1;
}
+ virtio_wmb();
+ desc[head].flags = VRING_DESC_F_NEXT |
+ VRING_DESC_F_AVAIL(avail_wrap_counter) |
+ VRING_DESC_F_USED(!avail_wrap_counter);
+
+ virtio_wmb();
virtqueue_notify(vq);
/* wait for used descriptors in virtqueue */
- do {
- rte_rmb();
+ while (!desc_is_used(&desc[head], vq))
usleep(100);
- } while (!__desc_is_used(&desc[head], used_wrap_counter));
+
+ virtio_rmb();
/* now get used descriptors */
- while (desc_is_used(&desc[vq->vq_used_cons_idx], vq)) {
- vq->vq_free_cnt++;
- if (++vq->vq_used_cons_idx >= vq->vq_nentries) {
- vq->vq_used_cons_idx -= vq->vq_nentries;
- vq->used_wrap_counter ^= 1;
- }
+ vq->vq_free_cnt += nb_descs;
+ vq->vq_used_cons_idx += nb_descs;
+ if (vq->vq_used_cons_idx >= vq->vq_nentries) {
+ vq->vq_used_cons_idx -= vq->vq_nentries;
+ vq->used_wrap_counter ^= 1;
}
result = cvq->virtio_net_hdr_mz->addr;
@@ -266,7 +262,7 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
sizeof(struct virtio_pmd_ctrl));
if (vtpci_packed_queue(vq->hw)) {
- result = virtio_pq_send_command(cvq, ctrl, dlen, pkt_num);
+ result = virtio_send_command_packed(cvq, ctrl, dlen, pkt_num);
goto out_unlock;
}
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 75f5782bc..9e74b7bd0 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -256,7 +256,7 @@ struct virtio_tx_region {
};
static inline int
-__desc_is_used(struct vring_packed_desc *desc, bool wrap_counter)
+desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
{
uint16_t used, avail, flags;
@@ -264,16 +264,9 @@ __desc_is_used(struct vring_packed_desc *desc, bool wrap_counter)
used = !!(flags & VRING_DESC_F_USED(1));
avail = !!(flags & VRING_DESC_F_AVAIL(1));
- return avail == used && used == wrap_counter;
-}
-
-static inline int
-desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
-{
- return __desc_is_used(desc, vq->used_wrap_counter);
+ return avail == used && used == vq->used_wrap_counter;
}
-
static inline void
vring_desc_init_packed(struct virtqueue *vq, int n)
{
@@ -329,7 +322,6 @@ virtqueue_enable_intr_packed(struct virtqueue *vq)
{
uint16_t *event_flags = &vq->ring_packed.driver_event->desc_event_flags;
-
if (vq->event_flags_shadow == RING_EVENT_FLAGS_DISABLE) {
virtio_wmb();
vq->event_flags_shadow = RING_EVENT_FLAGS_ENABLE;
--
2.21.0

View File

@ -1,146 +0,0 @@
From e5ee642672921b9e83aaa558067b6b685a7af0a3 Mon Sep 17 00:00:00 2001
From: Tiwei Bie <tiwei.bie@intel.com>
Date: Wed, 23 Jan 2019 01:01:41 +0800
Subject: [PATCH 17/18] net/virtio-user: fix control VQ
[ upstream commit 45c224e73a3057bf62cb04f83fc1e97457a21ffa ]
This patch fixed below issues in the packed ring based control
vq support in virtio user:
1. The idx_hdr should be used_idx instead of the id in the desc;
2. We just need to write out a single used descriptor for each
descriptor list;
3. The avail/used bits should be initialized to 0;
Meanwhile, make the function name consistent with other parts.
Fixes: 48a4464029a7 ("net/virtio-user: support control VQ for packed")
Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
(cherry picked from commit 45c224e73a3057bf62cb04f83fc1e97457a21ffa)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_ethdev.c | 11 ++++++
.../net/virtio/virtio_user/virtio_user_dev.c | 37 +++++++++++--------
drivers/net/virtio/virtio_user_ethdev.c | 7 +---
3 files changed, 34 insertions(+), 21 deletions(-)
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index c12fb157e..a31129484 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -224,6 +224,17 @@ virtio_send_command_packed(struct virtnet_ctl *cvq,
vq->used_wrap_counter ^= 1;
}
+ PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\n"
+ "vq->vq_avail_idx=%d\n"
+ "vq->vq_used_cons_idx=%d\n"
+ "vq->avail_wrap_counter=%d\n"
+ "vq->used_wrap_counter=%d\n",
+ vq->vq_free_cnt,
+ vq->vq_avail_idx,
+ vq->vq_used_cons_idx,
+ vq->avail_wrap_counter,
+ vq->used_wrap_counter);
+
result = cvq->virtio_net_hdr_mz->addr;
return result;
}
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index ea5149929..d1157378d 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -632,9 +632,9 @@ desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
}
static uint32_t
-virtio_user_handle_ctrl_msg_pq(struct virtio_user_dev *dev,
- struct vring_packed *vring,
- uint16_t idx_hdr)
+virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev,
+ struct vring_packed *vring,
+ uint16_t idx_hdr)
{
struct virtio_net_ctrl_hdr *hdr;
virtio_net_ctrl_ack status = ~0;
@@ -671,6 +671,10 @@ virtio_user_handle_ctrl_msg_pq(struct virtio_user_dev *dev,
*(virtio_net_ctrl_ack *)(uintptr_t)
vring->desc_packed[idx_status].addr = status;
+ /* Update used descriptor */
+ vring->desc_packed[idx_hdr].id = vring->desc_packed[idx_status].id;
+ vring->desc_packed[idx_hdr].len = sizeof(status);
+
return n_descs;
}
@@ -679,24 +683,25 @@ virtio_user_handle_cq_packed(struct virtio_user_dev *dev, uint16_t queue_idx)
{
struct virtio_user_queue *vq = &dev->packed_queues[queue_idx];
struct vring_packed *vring = &dev->packed_vrings[queue_idx];
- uint16_t id, n_descs;
+ uint16_t n_descs;
while (desc_is_avail(&vring->desc_packed[vq->used_idx],
vq->used_wrap_counter)) {
- id = vring->desc_packed[vq->used_idx].id;
- n_descs = virtio_user_handle_ctrl_msg_pq(dev, vring, id);
+ n_descs = virtio_user_handle_ctrl_msg_packed(dev, vring,
+ vq->used_idx);
- do {
- vring->desc_packed[vq->used_idx].flags =
- VRING_DESC_F_AVAIL(vq->used_wrap_counter) |
- VRING_DESC_F_USED(vq->used_wrap_counter);
- if (++vq->used_idx >= dev->queue_size) {
- vq->used_idx -= dev->queue_size;
- vq->used_wrap_counter ^= 1;
- }
- n_descs--;
- } while (n_descs);
+ rte_smp_wmb();
+ vring->desc_packed[vq->used_idx].flags =
+ VRING_DESC_F_WRITE |
+ VRING_DESC_F_AVAIL(vq->used_wrap_counter) |
+ VRING_DESC_F_USED(vq->used_wrap_counter);
+
+ vq->used_idx += n_descs;
+ if (vq->used_idx >= dev->queue_size) {
+ vq->used_idx -= dev->queue_size;
+ vq->used_wrap_counter ^= 1;
+ }
}
}
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index c01f45cab..6423e1f61 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -274,7 +274,6 @@ virtio_user_get_queue_num(struct virtio_hw *hw, uint16_t queue_id __rte_unused)
static void
virtio_user_setup_queue_packed(struct virtqueue *vq,
struct virtio_user_dev *dev)
-
{
uint16_t queue_idx = vq->vq_queue_index;
struct vring_packed *vring;
@@ -300,10 +299,8 @@ virtio_user_setup_queue_packed(struct virtqueue *vq,
dev->packed_queues[queue_idx].avail_wrap_counter = true;
dev->packed_queues[queue_idx].used_wrap_counter = true;
- for (i = 0; i < vring->num; i++) {
- vring->desc_packed[i].flags = VRING_DESC_F_USED(1) |
- VRING_DESC_F_AVAIL(1);
- }
+ for (i = 0; i < vring->num; i++)
+ vring->desc_packed[i].flags = 0;
}
static void
--
2.21.0

View File

@ -1,97 +0,0 @@
From f3bf9a1a9b1ad3419b436855306ad8b5d8efab2f Mon Sep 17 00:00:00 2001
From: Maxime Coquelin <maxime.coquelin@redhat.com>
Date: Thu, 20 Dec 2018 17:47:55 +0100
Subject: [PATCH 18/18] vhost: batch used descs chains write-back with packed
ring
[ upstream commit b473ec1131ee44ee25e0536a04be65246b93f4f3 ]
Instead of writing back descriptors chains in order, let's
write the first chain flags last in order to improve batching.
Also, move the write barrier in logging cache sync, so that it
is done only when logging is enabled. It means there is now
one more barrier for split ring when logging is enabled.
With Kernel's pktgen benchmark, ~3% performance gain is measured.
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Tiwei Bie <tiwei.bie@intel.com>
(cherry picked from commit b473ec1131ee44ee25e0536a04be65246b93f4f3)
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
lib/librte_vhost/vhost.h | 7 ++-----
lib/librte_vhost/virtio_net.c | 19 ++++++++++++++++---
2 files changed, 18 insertions(+), 8 deletions(-)
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 552b9298d..adc2fb78e 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -456,12 +456,9 @@ vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
!dev->log_base))
return;
- log_base = (unsigned long *)(uintptr_t)dev->log_base;
+ rte_smp_wmb();
- /*
- * It is expected a write memory barrier has been issued
- * before this function is called.
- */
+ log_base = (unsigned long *)(uintptr_t)dev->log_base;
for (i = 0; i < vq->log_cache_nb_elem; i++) {
struct log_cache_entry *elem = vq->log_cache + i;
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 15d682c3c..ec70ef947 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -136,6 +136,8 @@ flush_shadow_used_ring_packed(struct virtio_net *dev,
{
int i;
uint16_t used_idx = vq->last_used_idx;
+ uint16_t head_idx = vq->last_used_idx;
+ uint16_t head_flags = 0;
/* Split loop in two to save memory barriers */
for (i = 0; i < vq->shadow_used_idx; i++) {
@@ -165,12 +167,17 @@ flush_shadow_used_ring_packed(struct virtio_net *dev,
flags &= ~VRING_DESC_F_AVAIL;
}
- vq->desc_packed[vq->last_used_idx].flags = flags;
+ if (i > 0) {
+ vq->desc_packed[vq->last_used_idx].flags = flags;
- vhost_log_cache_used_vring(dev, vq,
+ vhost_log_cache_used_vring(dev, vq,
vq->last_used_idx *
sizeof(struct vring_packed_desc),
sizeof(struct vring_packed_desc));
+ } else {
+ head_idx = vq->last_used_idx;
+ head_flags = flags;
+ }
vq->last_used_idx += vq->shadow_used_packed[i].count;
if (vq->last_used_idx >= vq->size) {
@@ -179,7 +186,13 @@ flush_shadow_used_ring_packed(struct virtio_net *dev,
}
}
- rte_smp_wmb();
+ vq->desc_packed[head_idx].flags = head_flags;
+
+ vhost_log_cache_used_vring(dev, vq,
+ head_idx *
+ sizeof(struct vring_packed_desc),
+ sizeof(struct vring_packed_desc));
+
vq->shadow_used_idx = 0;
vhost_log_cache_sync(dev, vq);
}
--
2.21.0

View File

@ -1,42 +0,0 @@
From daa23dec25e8e418cd4e921531c82b5aae39b362 Mon Sep 17 00:00:00 2001
From: Tiwei Bie <tiwei.bie@intel.com>
Date: Tue, 19 Mar 2019 14:43:04 +0800
Subject: [PATCH] net/virtio: fix interrupt helper for packed ring
When disabling interrupt, the shadow event flags should also be
updated accordingly. The unnecessary wmb is also dropped.
Fixes: e9f4feb7e622 ("net/virtio: add packed virtqueue helpers")
Cc: stable@dpdk.org
Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
drivers/net/virtio/virtqueue.h | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 9e74b7bd0..c9f1c0afa 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -296,12 +296,13 @@ vring_desc_init_split(struct vring_desc *dp, uint16_t n)
static inline void
virtqueue_disable_intr_packed(struct virtqueue *vq)
{
- uint16_t *event_flags = &vq->ring_packed.driver_event->desc_event_flags;
-
- *event_flags = RING_EVENT_FLAGS_DISABLE;
+ if (vq->event_flags_shadow != RING_EVENT_FLAGS_DISABLE) {
+ vq->event_flags_shadow = RING_EVENT_FLAGS_DISABLE;
+ vq->ring_packed.driver_event->desc_event_flags =
+ vq->event_flags_shadow;
+ }
}
-
/**
* Tell the backend not to interrupt us.
*/
--
2.21.0

View File

@ -1,30 +0,0 @@
From f2e20b51ac6432390ea545e2b6247419dfcaab40 Mon Sep 17 00:00:00 2001
From: Jens Freimann <jfreimann@redhat.com>
Date: Mon, 16 Sep 2019 17:26:16 +0200
Subject: [PATCH] net/virtio: fix calculation of device_event ptr
Fix wrong pointer arithmetic. We only need to increment by 1 if we want
to advance it by the size of the driver event area.
Signed-off-by: Jens Freimann <jfreimann@redhat.com>
---
drivers/net/virtio/virtio_ring.h | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/drivers/net/virtio/virtio_ring.h b/drivers/net/virtio/virtio_ring.h
index 1760823c6..fdc62194e 100644
--- a/drivers/net/virtio/virtio_ring.h
+++ b/drivers/net/virtio/virtio_ring.h
@@ -165,8 +165,7 @@ vring_init_packed(struct vring_packed *vr, uint8_t *p, unsigned long align,
vr->driver_event = (struct vring_packed_desc_event *)(p +
vr->num * sizeof(struct vring_packed_desc));
vr->device_event = (struct vring_packed_desc_event *)
- RTE_ALIGN_CEIL((uintptr_t)(vr->driver_event +
- sizeof(struct vring_packed_desc_event)), align);
+ RTE_ALIGN_CEIL((uintptr_t)(vr->driver_event + 1), align);
}
/*
--
2.21.0

View File

@ -1,540 +0,0 @@
# -*- cfg-sha: 9fc8b53ccd53cc8b64391f6252e1dba558ae660a73a72f10dcadff2ca5462243
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2015 Cavium, Inc
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Cavium, Inc
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2010-2016 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2010-2017 Intel Corporation
# RTE_EXEC_ENV values are the directories in mk/exec-env/
CONFIG_RTE_EXEC_ENV="linuxapp"
# RTE_ARCH values are architecture we compile for. directories in mk/arch/
CONFIG_RTE_ARCH="arm64"
# machine can define specific variables or action for a specific board
# RTE_MACHINE values are architecture we compile for. directories in mk/machine/
CONFIG_RTE_MACHINE="armv8a"
# The compiler we use.
# RTE_TOOLCHAIN values are architecture we compile for. directories in mk/toolchain/
CONFIG_RTE_TOOLCHAIN="gcc"
# Use intrinsics or assembly code for key routines
CONFIG_RTE_FORCE_INTRINSICS=y
# Machine forces strict alignment constraints.
CONFIG_RTE_ARCH_STRICT_ALIGN=n
# Compile to share library
CONFIG_RTE_BUILD_SHARED_LIB=y
# Use newest code breaking previous ABI
CONFIG_RTE_NEXT_ABI=n
# Major ABI to overwrite library specific LIBABIVER
CONFIG_RTE_MAJOR_ABI=
# Machine's cache line size
CONFIG_RTE_CACHE_LINE_SIZE=128
# Memory model
CONFIG_RTE_USE_C11_MEM_MODEL=y
# Compile Environment Abstraction Layer
CONFIG_RTE_LIBRTE_EAL=y
CONFIG_RTE_MAX_LCORE=256
CONFIG_RTE_MAX_NUMA_NODES=8
CONFIG_RTE_MAX_HEAPS=32
CONFIG_RTE_MAX_MEMSEG_LISTS=64
# each memseg list will be limited to either RTE_MAX_MEMSEG_PER_LIST pages
# or RTE_MAX_MEM_MB_PER_LIST megabytes worth of memory, whichever is smaller
CONFIG_RTE_MAX_MEMSEG_PER_LIST=8192
CONFIG_RTE_MAX_MEM_MB_PER_LIST=32768
# a "type" is a combination of page size and NUMA node. total number of memseg
# lists per type will be limited to either RTE_MAX_MEMSEG_PER_TYPE pages (split
# over multiple lists of RTE_MAX_MEMSEG_PER_LIST pages), or
# RTE_MAX_MEM_MB_PER_TYPE megabytes of memory (split over multiple lists of
# RTE_MAX_MEM_MB_PER_LIST), whichever is smaller
CONFIG_RTE_MAX_MEMSEG_PER_TYPE=32768
CONFIG_RTE_MAX_MEM_MB_PER_TYPE=131072
# global maximum usable amount of VA, in megabytes
CONFIG_RTE_MAX_MEM_MB=524288
CONFIG_RTE_MAX_MEMZONE=2560
CONFIG_RTE_MAX_TAILQ=32
CONFIG_RTE_ENABLE_ASSERT=n
CONFIG_RTE_LOG_DP_LEVEL=RTE_LOG_INFO
CONFIG_RTE_LOG_HISTORY=256
CONFIG_RTE_BACKTRACE=y
CONFIG_RTE_LIBEAL_USE_HPET=n
CONFIG_RTE_EAL_ALLOW_INV_SOCKET_ID=n
CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
CONFIG_RTE_EAL_IGB_UIO=n
CONFIG_RTE_EAL_VFIO=y
CONFIG_RTE_MAX_VFIO_GROUPS=64
CONFIG_RTE_MAX_VFIO_CONTAINERS=64
CONFIG_RTE_MALLOC_DEBUG=n
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
CONFIG_RTE_USE_LIBBSD=n
# Recognize/ignore architecture we compile for. AVX/AVX512 CPU flags for performance/power testing.
# AVX512 is marked as experimental for now, will enable it after enough
# field test and possible optimization.
CONFIG_RTE_ENABLE_AVX=y
CONFIG_RTE_ENABLE_AVX512=n
# Default driver path (or "" to disable)
CONFIG_RTE_EAL_PMD_PATH="/usr/lib64/dpdk-pmds"
# Compile Environment Abstraction Layer to support Vmware TSC map
CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=y
# Compile the PCI library
CONFIG_RTE_LIBRTE_PCI=y
# Compile architecture we compile for. argument parser library
CONFIG_RTE_LIBRTE_KVARGS=y
# Compile generic ethernet library
CONFIG_RTE_LIBRTE_ETHER=y
CONFIG_RTE_LIBRTE_ETHDEV_DEBUG=n
CONFIG_RTE_MAX_ETHPORTS=32
CONFIG_RTE_MAX_QUEUES_PER_PORT=1024
CONFIG_RTE_LIBRTE_IEEE1588=n
CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS=16
CONFIG_RTE_ETHDEV_RXTX_CALLBACKS=y
CONFIG_RTE_ETHDEV_PROFILE_WITH_VTUNE=n
# Turn off Tx preparation stage
# Warning: rte_eth_tx_prepare() can be safely disabled only if using a
# driver which do not implement any Tx preparation.
CONFIG_RTE_ETHDEV_TX_PREPARE_NOOP=n
# Common libraries, before Bus/PMDs
CONFIG_RTE_LIBRTE_COMMON_DPAAX=n
# Compile architecture we compile for. Intel FPGA bus
CONFIG_RTE_LIBRTE_IFPGA_BUS=n
# Compile PCI bus driver
CONFIG_RTE_LIBRTE_PCI_BUS=y
# Compile architecture we compile for. vdev bus
CONFIG_RTE_LIBRTE_VDEV_BUS=y
# Compile ARK PMD
CONFIG_RTE_LIBRTE_ARK_PMD=n
CONFIG_RTE_LIBRTE_ARK_PAD_TX=y
CONFIG_RTE_LIBRTE_ARK_DEBUG_RX=n
CONFIG_RTE_LIBRTE_ARK_DEBUG_TX=n
CONFIG_RTE_LIBRTE_ARK_DEBUG_STATS=n
CONFIG_RTE_LIBRTE_ARK_DEBUG_TRACE=n
# Compile Aquantia Atlantic PMD driver
CONFIG_RTE_LIBRTE_ATLANTIC_PMD=n
# Compile AMD PMD
CONFIG_RTE_LIBRTE_AXGBE_PMD=n
CONFIG_RTE_LIBRTE_AXGBE_PMD_DEBUG=n
# Compile burst-oriented Broadcom PMD driver
CONFIG_RTE_LIBRTE_BNX2X_PMD=n
CONFIG_RTE_LIBRTE_BNX2X_DEBUG_RX=n
CONFIG_RTE_LIBRTE_BNX2X_DEBUG_TX=n
CONFIG_RTE_LIBRTE_BNX2X_MF_SUPPORT=n
CONFIG_RTE_LIBRTE_BNX2X_DEBUG_PERIODIC=n
# Compile burst-oriented Broadcom BNXT PMD driver
CONFIG_RTE_LIBRTE_BNXT_PMD=n
# Compile burst-oriented Chelsio Terminator (CXGBE) PMD
CONFIG_RTE_LIBRTE_CXGBE_PMD=n
CONFIG_RTE_LIBRTE_CXGBE_DEBUG=n
CONFIG_RTE_LIBRTE_CXGBE_DEBUG_REG=n
CONFIG_RTE_LIBRTE_CXGBE_DEBUG_MBOX=n
CONFIG_RTE_LIBRTE_CXGBE_DEBUG_TX=n
CONFIG_RTE_LIBRTE_CXGBE_DEBUG_RX=n
CONFIG_RTE_LIBRTE_CXGBE_TPUT=y
# NXP DPAA Bus
CONFIG_RTE_LIBRTE_DPAA_BUS=n
CONFIG_RTE_LIBRTE_DPAA_MEMPOOL=n
CONFIG_RTE_LIBRTE_DPAA_PMD=n
CONFIG_RTE_LIBRTE_DPAA_HWDEBUG=n
# Compile NXP DPAA2 FSL-MC Bus
CONFIG_RTE_LIBRTE_FSLMC_BUS=n
# Compile Support Libraries for NXP DPAA2
CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL=n
CONFIG_RTE_LIBRTE_DPAA2_USE_PHYS_IOVA=y
# Compile burst-oriented NXP DPAA2 PMD driver
CONFIG_RTE_LIBRTE_DPAA2_PMD=n
CONFIG_RTE_LIBRTE_DPAA2_DEBUG_DRIVER=n
# Compile NXP ENETC PMD Driver
CONFIG_RTE_LIBRTE_ENETC_PMD=n
# Compile burst-oriented Amazon ENA PMD driver
CONFIG_RTE_LIBRTE_ENA_PMD=n
CONFIG_RTE_LIBRTE_ENA_DEBUG_RX=n
CONFIG_RTE_LIBRTE_ENA_DEBUG_TX=n
CONFIG_RTE_LIBRTE_ENA_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_ENA_COM_DEBUG=n
# Compile burst-oriented Cisco ENIC PMD driver
CONFIG_RTE_LIBRTE_ENIC_PMD=n
# Compile burst-oriented IGB & EM PMD drivers
CONFIG_RTE_LIBRTE_EM_PMD=n
CONFIG_RTE_LIBRTE_IGB_PMD=y
CONFIG_RTE_LIBRTE_E1000_DEBUG_RX=n
CONFIG_RTE_LIBRTE_E1000_DEBUG_TX=n
CONFIG_RTE_LIBRTE_E1000_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC=n
# Compile burst-oriented IXGBE PMD driver
CONFIG_RTE_LIBRTE_IXGBE_PMD=y
CONFIG_RTE_LIBRTE_IXGBE_DEBUG_RX=n
CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX=n
CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC=n
CONFIG_RTE_IXGBE_INC_VECTOR=y
CONFIG_RTE_LIBRTE_IXGBE_BYPASS=n
# Compile burst-oriented I40E PMD driver
CONFIG_RTE_LIBRTE_I40E_PMD=y
CONFIG_RTE_LIBRTE_I40E_DEBUG_RX=n
CONFIG_RTE_LIBRTE_I40E_DEBUG_TX=n
CONFIG_RTE_LIBRTE_I40E_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC=y
CONFIG_RTE_LIBRTE_I40E_INC_VECTOR=y
CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=n
CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF=64
CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM=4
# Compile burst-oriented FM10K PMD
CONFIG_RTE_LIBRTE_FM10K_PMD=n
CONFIG_RTE_LIBRTE_FM10K_DEBUG_RX=n
CONFIG_RTE_LIBRTE_FM10K_DEBUG_TX=n
CONFIG_RTE_LIBRTE_FM10K_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE=y
CONFIG_RTE_LIBRTE_FM10K_INC_VECTOR=y
# Compile burst-oriented AVF PMD driver
CONFIG_RTE_LIBRTE_AVF_PMD=n
CONFIG_RTE_LIBRTE_AVF_INC_VECTOR=y
CONFIG_RTE_LIBRTE_AVF_DEBUG_TX=n
CONFIG_RTE_LIBRTE_AVF_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_AVF_DEBUG_RX=n
CONFIG_RTE_LIBRTE_AVF_16BYTE_RX_DESC=n
# Compile burst-oriented Mellanox ConnectX-3 (MLX4) PMD
CONFIG_RTE_LIBRTE_MLX4_PMD=n
CONFIG_RTE_LIBRTE_MLX4_DEBUG=n
CONFIG_RTE_LIBRTE_MLX4_DLOPEN_DEPS=n
# Compile burst-oriented Mellanox ConnectX-4, ConnectX-5 & Bluefield
# (MLX5) PMD
CONFIG_RTE_LIBRTE_MLX5_PMD=n
CONFIG_RTE_LIBRTE_MLX5_DEBUG=n
CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS=n
# Compile burst-oriented Netronome NFP PMD driver
CONFIG_RTE_LIBRTE_NFP_PMD=n
CONFIG_RTE_LIBRTE_NFP_DEBUG_TX=n
CONFIG_RTE_LIBRTE_NFP_DEBUG_RX=n
# QLogic 10G/25G/40G/50G/100G PMD
CONFIG_RTE_LIBRTE_QEDE_PMD=n
CONFIG_RTE_LIBRTE_QEDE_DEBUG_TX=n
CONFIG_RTE_LIBRTE_QEDE_DEBUG_RX=n
# Provides absolute path/name of the firmware file.
#Empty string denotes driver will use default firmware
CONFIG_RTE_LIBRTE_QEDE_FW=""
# Compile burst-oriented Solarflare libefx-based PMD
CONFIG_RTE_LIBRTE_SFC_EFX_PMD=n
CONFIG_RTE_LIBRTE_SFC_EFX_DEBUG=n
# Compile software PMD backed by SZEDATA2 device
CONFIG_RTE_LIBRTE_PMD_SZEDATA2=n
# Compile burst-oriented Cavium Thunderx NICVF PMD driver
CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD=n
CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_RX=n
CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_TX=n
# Compile burst-oriented Cavium LiquidIO PMD driver
CONFIG_RTE_LIBRTE_LIO_PMD=n
CONFIG_RTE_LIBRTE_LIO_DEBUG_RX=n
CONFIG_RTE_LIBRTE_LIO_DEBUG_TX=n
CONFIG_RTE_LIBRTE_LIO_DEBUG_MBOX=n
CONFIG_RTE_LIBRTE_LIO_DEBUG_REGS=n
# Compile burst-oriented Cavium OCTEONTX network PMD driver
CONFIG_RTE_LIBRTE_OCTEONTX_PMD=n
# Compile WRS accelerated virtual port (AVP) guest PMD driver
CONFIG_RTE_LIBRTE_AVP_PMD=n
CONFIG_RTE_LIBRTE_AVP_DEBUG_RX=n
CONFIG_RTE_LIBRTE_AVP_DEBUG_TX=n
CONFIG_RTE_LIBRTE_AVP_DEBUG_BUFFERS=n
# Compile burst-oriented VIRTIO PMD driver
CONFIG_RTE_LIBRTE_VIRTIO_PMD=y
CONFIG_RTE_LIBRTE_VIRTIO_DEBUG_RX=n
CONFIG_RTE_LIBRTE_VIRTIO_DEBUG_TX=n
CONFIG_RTE_LIBRTE_VIRTIO_DEBUG_DUMP=n
# Compile virtio device emulation inside virtio PMD driver
CONFIG_RTE_VIRTIO_USER=n
# Compile burst-oriented VMXNET3 PMD driver
CONFIG_RTE_LIBRTE_VMXNET3_PMD=n
CONFIG_RTE_LIBRTE_VMXNET3_DEBUG_RX=n
CONFIG_RTE_LIBRTE_VMXNET3_DEBUG_TX=n
CONFIG_RTE_LIBRTE_VMXNET3_DEBUG_TX_FREE=n
# Compile software PMD backed by AF_PACKET sockets (Linux only)
CONFIG_RTE_LIBRTE_PMD_AF_PACKET=n
# Compile link bonding PMD library
CONFIG_RTE_LIBRTE_PMD_BOND=n
CONFIG_RTE_LIBRTE_BOND_DEBUG_ALB=n
CONFIG_RTE_LIBRTE_BOND_DEBUG_ALB_L1=n
# Compile fail-safe PMD
CONFIG_RTE_LIBRTE_PMD_FAILSAFE=y
# Compile Marvell PMD driver
CONFIG_RTE_LIBRTE_MVPP2_PMD=n
# Compile Marvell MVNETA PMD driver
CONFIG_RTE_LIBRTE_MVNETA_PMD=n
# Compile support for VMBus library
CONFIG_RTE_LIBRTE_VMBUS=n
# Compile native PMD for Hyper-V/Azure
CONFIG_RTE_LIBRTE_NETVSC_PMD=n
CONFIG_RTE_LIBRTE_NETVSC_DEBUG_RX=n
CONFIG_RTE_LIBRTE_NETVSC_DEBUG_TX=n
CONFIG_RTE_LIBRTE_NETVSC_DEBUG_DUMP=n
# Compile virtual device driver for NetVSC on Hyper-V/Azure
CONFIG_RTE_LIBRTE_VDEV_NETVSC_PMD=n
# Compile null PMD
CONFIG_RTE_LIBRTE_PMD_NULL=n
# Compile software PMD backed by PCAP files
CONFIG_RTE_LIBRTE_PMD_PCAP=n
# Compile example software rings based PMD
CONFIG_RTE_LIBRTE_PMD_RING=y
CONFIG_RTE_PMD_RING_MAX_RX_RINGS=16
CONFIG_RTE_PMD_RING_MAX_TX_RINGS=16
# Compile SOFTNIC PMD
CONFIG_RTE_LIBRTE_PMD_SOFTNIC=n
# Compile architecture we compile for. TAP PMD
# It is enabled by default for Linux only.
CONFIG_RTE_LIBRTE_PMD_TAP=y
# Do prefetch of packet data within PMD driver receive function
CONFIG_RTE_PMD_PACKET_PREFETCH=y
# Compile generic wireless base band device library
# EXPERIMENTAL: API may change without prior notice
CONFIG_RTE_LIBRTE_BBDEV=n
CONFIG_RTE_BBDEV_MAX_DEVS=128
CONFIG_RTE_BBDEV_OFFLOAD_COST=n
# Compile PMD for NULL bbdev device
CONFIG_RTE_LIBRTE_PMD_BBDEV_NULL=y
# Compile PMD for turbo software bbdev device
CONFIG_RTE_LIBRTE_PMD_BBDEV_TURBO_SW=n
# Compile generic crypto device library
CONFIG_RTE_LIBRTE_CRYPTODEV=n
CONFIG_RTE_CRYPTO_MAX_DEVS=64
# Compile PMD for ARMv8 Crypto device
CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO=n
CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO_DEBUG=n
# Compile NXP CAAM JR crypto Driver
CONFIG_RTE_LIBRTE_PMD_CAAM_JR=n
CONFIG_RTE_LIBRTE_PMD_CAAM_JR_BE=n
# Compile NXP DPAA2 crypto sec driver for CAAM HW
CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC=n
# NXP DPAA caam - crypto driver
CONFIG_RTE_LIBRTE_PMD_DPAA_SEC=n
CONFIG_RTE_LIBRTE_DPAA_MAX_CRYPTODEV=4
# Compile PMD for Cavium OCTEON TX crypto device
CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO=y
# Compile PMD for QuickAssist based devices - see docs for details
CONFIG_RTE_LIBRTE_PMD_QAT=n
CONFIG_RTE_LIBRTE_PMD_QAT_SYM=n
# Max. number of QuickAssist devices, which can be detected and attached
CONFIG_RTE_PMD_QAT_MAX_PCI_DEVICES=48
CONFIG_RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS=16
CONFIG_RTE_PMD_QAT_COMP_IM_BUFFER_SIZE=65536
# Compile PMD for virtio crypto devices
CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO=n
# Number of maximum virtio crypto devices
CONFIG_RTE_MAX_VIRTIO_CRYPTO=32
# Compile PMD for AESNI backed device
CONFIG_RTE_LIBRTE_PMD_AESNI_MB=n
# Compile PMD for Software backed device
CONFIG_RTE_LIBRTE_PMD_OPENSSL=n
# Compile PMD for AESNI GCM device
CONFIG_RTE_LIBRTE_PMD_AESNI_GCM=n
# Compile PMD for SNOW 3G device
CONFIG_RTE_LIBRTE_PMD_SNOW3G=n
CONFIG_RTE_LIBRTE_PMD_SNOW3G_DEBUG=n
# Compile PMD for KASUMI device
CONFIG_RTE_LIBRTE_PMD_KASUMI=n
# Compile PMD for ZUC device
CONFIG_RTE_LIBRTE_PMD_ZUC=n
# Compile PMD for Crypto Scheduler device
CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER=n
# Compile PMD for NULL Crypto device
CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=n
# Compile PMD for AMD CCP crypto device
CONFIG_RTE_LIBRTE_PMD_CCP=n
# Compile PMD for Marvell Crypto device
CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO=n
# Compile generic security library
CONFIG_RTE_LIBRTE_SECURITY=n
# Compile generic compression device library
CONFIG_RTE_LIBRTE_COMPRESSDEV=n
CONFIG_RTE_COMPRESS_MAX_DEVS=64
# Compile compressdev unit test
CONFIG_RTE_COMPRESSDEV_TEST=n
# Compile PMD for Octeontx ZIPVF compression device
CONFIG_RTE_LIBRTE_PMD_OCTEONTX_ZIPVF=n
# Compile PMD for ISA-L compression device
CONFIG_RTE_LIBRTE_PMD_ISAL=n
# Compile PMD for ZLIB compression device
CONFIG_RTE_LIBRTE_PMD_ZLIB=n
# Compile generic event device library
CONFIG_RTE_LIBRTE_EVENTDEV=n
CONFIG_RTE_LIBRTE_EVENTDEV_DEBUG=n
CONFIG_RTE_EVENT_MAX_DEVS=16
CONFIG_RTE_EVENT_MAX_QUEUES_PER_DEV=64
CONFIG_RTE_EVENT_TIMER_ADAPTER_NUM_MAX=32
CONFIG_RTE_EVENT_ETH_INTR_RING_SIZE=1024
CONFIG_RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE=32
CONFIG_RTE_EVENT_ETH_TX_ADAPTER_MAX_INSTANCE=32
# Compile PMD for skeleton event device
CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV=n
CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV_DEBUG=n
# Compile PMD for software event device
CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV=n
# Compile PMD for distributed software event device
CONFIG_RTE_LIBRTE_PMD_DSW_EVENTDEV=n
# Compile PMD for octeontx sso event device
CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF=n
# Compile PMD for OPDL event device
CONFIG_RTE_LIBRTE_PMD_OPDL_EVENTDEV=n
# Compile PMD for NXP DPAA event device
CONFIG_RTE_LIBRTE_PMD_DPAA_EVENTDEV=n
# Compile PMD for NXP DPAA2 event device
CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV=n
# Compile raw device support
# EXPERIMENTAL: API may change without prior notice
CONFIG_RTE_LIBRTE_RAWDEV=n
CONFIG_RTE_RAWDEV_MAX_DEVS=10
CONFIG_RTE_LIBRTE_PMD_SKELETON_RAWDEV=n
# Compile PMD for NXP DPAA2 CMDIF raw device
CONFIG_RTE_LIBRTE_PMD_DPAA2_CMDIF_RAWDEV=n
# Compile PMD for NXP DPAA2 QDMA raw device
CONFIG_RTE_LIBRTE_PMD_DPAA2_QDMA_RAWDEV=n
# Compile PMD for Intel FPGA raw device
CONFIG_RTE_LIBRTE_PMD_IFPGA_RAWDEV=n
# Compile librte_ring
CONFIG_RTE_LIBRTE_RING=y
# Compile librte_mempool
CONFIG_RTE_LIBRTE_MEMPOOL=y
CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE=512
CONFIG_RTE_LIBRTE_MEMPOOL_DEBUG=n
# Compile Mempool drivers
CONFIG_RTE_DRIVER_MEMPOOL_BUCKET=y
CONFIG_RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB=64
CONFIG_RTE_DRIVER_MEMPOOL_RING=y
CONFIG_RTE_DRIVER_MEMPOOL_STACK=y
# Compile PMD for octeontx fpa mempool device
CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL=n
# Compile librte_mbuf
CONFIG_RTE_LIBRTE_MBUF=y
CONFIG_RTE_LIBRTE_MBUF_DEBUG=n
CONFIG_RTE_MBUF_DEFAULT_MEMPOOL_OPS="ring_mp_mc"
CONFIG_RTE_MBUF_REFCNT_ATOMIC=y
CONFIG_RTE_PKTMBUF_HEADROOM=128
# Compile librte_timer
CONFIG_RTE_LIBRTE_TIMER=n
CONFIG_RTE_LIBRTE_TIMER_DEBUG=n
# Compile librte_cfgfile
CONFIG_RTE_LIBRTE_CFGFILE=n
# Compile librte_cmdline
CONFIG_RTE_LIBRTE_CMDLINE=y
CONFIG_RTE_LIBRTE_CMDLINE_DEBUG=n
# Compile librte_hash
CONFIG_RTE_LIBRTE_HASH=y
CONFIG_RTE_LIBRTE_HASH_DEBUG=n
# Compile librte_efd
CONFIG_RTE_LIBRTE_EFD=n
# Compile librte_member
CONFIG_RTE_LIBRTE_MEMBER=y
# Compile librte_jobstats
CONFIG_RTE_LIBRTE_JOBSTATS=n
# Compile architecture we compile for. device metrics library
CONFIG_RTE_LIBRTE_METRICS=y
# Compile architecture we compile for. bitrate statistics library
CONFIG_RTE_LIBRTE_BITRATE=y
# Compile architecture we compile for. latency statistics library
CONFIG_RTE_LIBRTE_LATENCY_STATS=y
# Compile librte_telemetry
CONFIG_RTE_LIBRTE_TELEMETRY=n
# Compile librte_lpm
CONFIG_RTE_LIBRTE_LPM=n
CONFIG_RTE_LIBRTE_LPM_DEBUG=n
# Compile librte_acl
CONFIG_RTE_LIBRTE_ACL=n
CONFIG_RTE_LIBRTE_ACL_DEBUG=n
# Compile librte_power
CONFIG_RTE_LIBRTE_POWER=n
CONFIG_RTE_LIBRTE_POWER_DEBUG=n
CONFIG_RTE_MAX_LCORE_FREQS=64
# Compile librte_net
CONFIG_RTE_LIBRTE_NET=y
# Compile librte_ip_frag
CONFIG_RTE_LIBRTE_IP_FRAG=y
CONFIG_RTE_LIBRTE_IP_FRAG_DEBUG=n
CONFIG_RTE_LIBRTE_IP_FRAG_MAX_FRAG=4
CONFIG_RTE_LIBRTE_IP_FRAG_TBL_STAT=n
# Compile GRO library
CONFIG_RTE_LIBRTE_GRO=y
# Compile GSO library
CONFIG_RTE_LIBRTE_GSO=y
# Compile librte_meter
CONFIG_RTE_LIBRTE_METER=y
# Compile librte_classify
CONFIG_RTE_LIBRTE_FLOW_CLASSIFY=n
# Compile librte_sched
CONFIG_RTE_LIBRTE_SCHED=n
CONFIG_RTE_SCHED_DEBUG=n
CONFIG_RTE_SCHED_RED=n
CONFIG_RTE_SCHED_COLLECT_STATS=n
CONFIG_RTE_SCHED_SUBPORT_TC_OV=n
CONFIG_RTE_SCHED_PORT_N_GRINDERS=8
CONFIG_RTE_SCHED_VECTOR=n
# Compile architecture we compile for. distributor library
CONFIG_RTE_LIBRTE_DISTRIBUTOR=n
# Compile architecture we compile for. reorder library
CONFIG_RTE_LIBRTE_REORDER=n
# Compile librte_port
CONFIG_RTE_LIBRTE_PORT=n
CONFIG_RTE_PORT_STATS_COLLECT=n
CONFIG_RTE_PORT_PCAP=n
# Compile librte_table
CONFIG_RTE_LIBRTE_TABLE=n
CONFIG_RTE_TABLE_STATS_COLLECT=n
# Compile librte_pipeline
CONFIG_RTE_LIBRTE_PIPELINE=n
CONFIG_RTE_PIPELINE_STATS_COLLECT=n
# Compile librte_kni
CONFIG_RTE_LIBRTE_KNI=n
CONFIG_RTE_LIBRTE_PMD_KNI=n
CONFIG_RTE_KNI_KMOD=n
CONFIG_RTE_KNI_KMOD_ETHTOOL=n
CONFIG_RTE_KNI_PREEMPT_DEFAULT=y
# Compile architecture we compile for. pdump library
CONFIG_RTE_LIBRTE_PDUMP=y
# Compile vhost user library
CONFIG_RTE_LIBRTE_VHOST=y
CONFIG_RTE_LIBRTE_VHOST_NUMA=y
CONFIG_RTE_LIBRTE_VHOST_DEBUG=n
# Compile vhost PMD
# To compile, CONFIG_RTE_LIBRTE_VHOST should be enabled.
CONFIG_RTE_LIBRTE_PMD_VHOST=y
# Compile IFC driver
# To compile, CONFIG_RTE_LIBRTE_VHOST and CONFIG_RTE_EAL_VFIO
# should be enabled.
CONFIG_RTE_LIBRTE_IFC_PMD=n
# Compile librte_bpf
CONFIG_RTE_LIBRTE_BPF=n
# allow load BPF from ELF files (requires libelf)
CONFIG_RTE_LIBRTE_BPF_ELF=n
# Compile architecture we compile for. test application
CONFIG_RTE_APP_TEST=y
CONFIG_RTE_APP_TEST_RESOURCE_TAR=n
# Compile architecture we compile for. procinfo application
CONFIG_RTE_PROC_INFO=y
# Compile architecture we compile for. PMD test application
CONFIG_RTE_TEST_PMD=y
CONFIG_RTE_TEST_PMD_RECORD_CORE_CYCLES=n
CONFIG_RTE_TEST_PMD_RECORD_BURST_STATS=n
# Compile architecture we compile for. bbdev test application
CONFIG_RTE_TEST_BBDEV=n
# Compile architecture we compile for. crypto performance application
CONFIG_RTE_APP_CRYPTO_PERF=n
# Compile architecture we compile for. eventdev application
CONFIG_RTE_APP_EVENTDEV=n
CONFIG_RTE_EXEC_ENV_LINUXAPP=y
CONFIG_RTE_LIBRTE_VHOST_POSTCOPY=n
# Common libraries, before Bus/PMDs
# NXP DPAA BUS and drivers
# NXP FSLMC BUS and DPAA2 drivers
# NXP ENETC PMD Driver
CONFIG_RTE_ARCH_ARM64=y
CONFIG_RTE_ARCH_64=y
# Maximum available cache line size in arm64 implementations.
# Setting to maximum available cache line size in generic config
# to address minimum DMA alignment across all arm64 implementations.
# Accelerate rte_memcpy. Be sure to run unit test (memcpy_perf_autotest)
# to determine the best threshold in code. Refer to notes in source file
# (lib/librte_eal/common/include/arch/arm/rte_memcpy_64.h) for more info.
CONFIG_RTE_ARCH_ARM64_MEMCPY=n
#CONFIG_RTE_ARM64_MEMCPY_ALIGNED_THRESHOLD=2048
#CONFIG_RTE_ARM64_MEMCPY_UNALIGNED_THRESHOLD=512
# Leave below RTE_ARM64_MEMCPY_xxx options commented out, unless there're
# strong reasons.
#CONFIG_RTE_ARM64_MEMCPY_SKIP_GCC_VER_CHECK=n
#CONFIG_RTE_ARM64_MEMCPY_ALIGN_MASK=0xF
#CONFIG_RTE_ARM64_MEMCPY_STRICT_ALIGN=n
CONFIG_RTE_TOOLCHAIN_GCC=y
CONFIG_RTE_LIBRTE_PMD_XENVIRT=n

View File

@ -1,104 +0,0 @@
# Copyright (C) 2017, Red Hat, Inc.
#
# Core configuration file library.
# Configurations are determined by sha values. The way to determine is by
# the special text:
# $FILE_COMMENT_TYPE -*- cfg-sha: $SHA256 -*-
export LC_ALL=C
# check required binaries
# Verify that every external tool this library depends on is installed.
# Exits the shell with status 1 on the first missing binary.
__check_reqd_binaries() {
    local BIN
    local __binaries=("egrep" "sort" "sha256sum" "sed")
    # Iterate over ALL array elements: the original bare "$__binaries"
    # expansion yields only element 0, so only egrep was ever checked.
    for BIN in "${__binaries[@]}"; do
        if ! type -P "$BIN" >/dev/null 2>&1; then
            echo "Binary $BIN not found. Please install."
            exit 1
        fi
    done
}
# Calculates a sha from a file
# The algorithm for generating a sha from a config is thus:
#
# 1. Remove all comment lines and blank lines
# 2. Sort the content
# 3. generate the sha-256 sum
#
# From a script perspective, this means:
# egrep -v ^\# %file% | egrep -v ^$ | sort -u | sha256sum
#
# Params:
# $1 = output variable
# $2 = file to use to calculate the shasum
# $3 = file comment type (defaults to # if unspecified)
# calc_sha OUTVAR FILE [COMMENT_CHAR]
# Compute the canonical sha-256 of FILE: drop comment lines (lines starting
# with COMMENT_CHAR, default '#') and blank lines, sort uniquely, then hash.
# The hex digest is stored in the variable named by OUTVAR.
# Returns 0 on success; 1 if no OUTVAR was given or FILE does not exist.
# Side effect: sets the global __retval (kept for compatibility with callers).
calc_sha() {
    __check_reqd_binaries

    if [ "$1" == "" ]; then
        echo "Please pass in a storage variable."
        return 1
    fi
    local __resultvar=$1
    __retval=1
    shift
    local __file=$1
    local cmnt=${2:-#}
    if [ -f "$__file" ]; then
        # grep -E replaces the deprecated egrep; the normalization pipeline is
        # otherwise identical to the one used when a cfg-sha is recorded.
        local __shasum
        __shasum=$(grep -E -v ^"$cmnt" "$__file" | grep -E -v ^$ | sort -u | sha256sum -t | cut -d" " -f1)
        eval $__resultvar="'$__shasum'"
        __retval=0
    fi
    return $__retval
}
# Retrieves a sha stored in a file
# Param:
# $1 = output variable
# $2 = file to use to calculate the shasum
# $3 = file comment type (defaults to # if unspecified)
# retr_sha OUTVAR FILE [COMMENT_CHAR]
# Pull the sha-256 digest recorded in FILE's "-*- cfg-sha: ..." marker line
# and store it in the variable named by OUTVAR.
# Returns 0 when a marker was found, 1 otherwise (also mirrored in the
# global __retval, matching calc_sha's convention).
retr_sha() {
    __check_reqd_binaries

    if [ "$1" == "" ]; then
        echo "Please pass in a storage variable."
        return 1
    fi

    local __outvar=$1
    shift
    local __cfgfile=$1
    local cmnt=${2:-#}
    __retval=1

    if [ -f "$__cfgfile" ]; then
        if grep -q "$cmnt -\*- cfg-sha:" "$__cfgfile"; then
            # Strip everything up to and including the marker, keep the digest.
            local __sum=$(grep "$cmnt -\*- cfg-sha:" "$__cfgfile" | sed -e "s@$cmnt -\*- cfg-sha: @@" | cut -d" " -f1)
            eval $__outvar="'$__sum'"
            __retval=0
        fi
    fi
    return $__retval
}
# Set a config value
# set_conf dpdk_build_tree parameter value
# dpdk_build_tree is the directory where the .config lives
# parameter is the config parameter
# value is the value to set for the config parameter
# set_conf DPDK_BUILD_DIR PARAMETER VALUE
# Set PARAMETER=VALUE in DPDK_BUILD_DIR/.config, replacing an existing
# assignment in place or appending a new one.
# Note: `c` is deliberately left global, as in the original.
set_conf() {
    c="$1/.config"
    shift
    # Anchor the existence check to "^$1=". The unanchored grep matched any
    # line merely containing $1, so a parameter that is a prefix of another
    # key (e.g. ..._PMD_QAT vs ..._PMD_QAT_SYM) could suppress the append
    # branch while the sed below replaced nothing.
    if grep -q "^$1=" "$c"; then
        sed -i "s:^$1=.*$:$1=$2:g" "$c"
    else
        echo "$1=$2" >> "$c"
    fi
}

View File

@ -1,225 +0,0 @@
#!/bin/bash
source configlib.sh
# Generates arch configurations in the current directory based on
# 1. a dpdk.spec file
# 2. an expanded dpdk tree
#
# Usage: $0 <dpdk.spec> <dpdk_tree>
# Output: one "<arch>-<template>-linuxapp-gcc-config" file per target,
# written to the directory the script was invoked from.
if (( $# != 2 )); then
echo "$0: dpdk.spec dpdk_tree" >&2
exit 1
fi
DPDKSPEC="$1"
DPDKDIR="$2"
# accumulate all arch + name triples
# NOTE(review): the three %define lists parsed below are assumed to be the
# same length and in matching order in the spec file -- confirm when editing
# dpdk.spec.
DPDK_CONF_MACH_ARCH=()
for arch in $(grep %define\ machine_arch "$DPDKSPEC" | sed 's@%define machine_arch @@')
do
DPDK_CONF_MACH_ARCH+=($arch)
done
DPDK_CONF_MACH_TMPL=()
for tmpl in $(grep %define\ machine_tmpl "$DPDKSPEC" | sed 's@%define machine_tmpl @@')
do
DPDK_CONF_MACH_TMPL+=($tmpl)
done
DPDK_CONF_MACH=()
for mach in $(grep %define\ machine\ "$DPDKSPEC" | sed 's@%define machine @@')
do
DPDK_CONF_MACH+=($mach)
done
# Build the DPDK target triples, e.g. "arm64-armv8a-linuxapp-gcc".
DPDK_TARGETS=()
for ((i=0; i < ${#DPDK_CONF_MACH[@]}; i++));
do
DPDK_TARGETS+=("${DPDK_CONF_MACH_ARCH[$i]}-${DPDK_CONF_MACH_TMPL[$i]}-linuxapp-gcc")
echo "DPDK-target: ${DPDK_TARGETS[$i]}"
done
OUTPUT_DIR=$(pwd)
pushd "$DPDKDIR"
for ((i=0; i < ${#DPDK_TARGETS[@]}; i++));
do
echo "For ${DPDK_TARGETS[$i]}:"
echo " a. Generating initial config"
echo " make V=1 T=${DPDK_TARGETS[$i]} O=${DPDK_TARGETS[$i]}"
make V=1 T=${DPDK_TARGETS[$i]} O=${DPDK_TARGETS[$i]} -j8 config
ORIG_SHA=""
OUTDIR="${DPDK_TARGETS[$i]}"
echo " b. calculating and applying sha"
# Record the canonical sha of the pristine generated config as the first
# line of .config, so a later run can detect template drift via
# calc_sha/retr_sha (see configlib.sh). The untouched copy is kept as
# .config.orig.
calc_sha ORIG_SHA "${OUTDIR}/.config"
if [ "$ORIG_SHA" == "" ]; then
echo "ERROR: Unable to get sha for arch ${DPDK_TARGETS[$i]}"
exit 1
fi
echo "# -*- cfg-sha: ${ORIG_SHA}" > ${OUTDIR}/.config.new
cat "${OUTDIR}/.config" >> "${OUTDIR}/.config.new"
cp "${OUTDIR}/.config" "${OUTDIR}/.config.orig"
mv -f "${OUTDIR}/.config.new" "${OUTDIR}/.config"
echo " c. setting initial configurations"
# these are the original setconf values from dpdk.spec
set_conf "${OUTDIR}" CONFIG_RTE_MACHINE "\\\"${DPDK_CONF_MACH[$i]}\\\""
# Enable automatic driver loading from this path
set_conf "${OUTDIR}" CONFIG_RTE_EAL_PMD_PATH '"/usr/lib64/dpdk-pmds"'
# Disable DPDK libraries not needed
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_TIMER n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_CFGFILE n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_JOBSTATS n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_LPM n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_ACL n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_POWER n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_SCHED n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_DISTRIBUTOR n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_REORDER n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PORT n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_TABLE n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PIPELINE n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_KNI n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_CRYPTODEV n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_SECURITY n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_FLOW_CLASSIFY n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_BBDEV n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_COMPRESSDEV n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_BPF n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_DPAA_MEMPOOL n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_CFGFILE n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_EFD n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_FLOW_CLASSIFY n
# Disable all eventdevs
for eventdev in $(grep _EVENTDEV= "${OUTDIR}/.config" | sed 's@=\(y\|n\)@@g')
do
set_conf "${OUTDIR}" $eventdev n
done
# Disable all rawdevs
for rawdev in $(grep _RAWDEV= "${OUTDIR}/.config" | sed 's@=\(y\|n\)@@g')
do
set_conf "${OUTDIR}" $rawdev n
done
# Disable virtio user
set_conf "${OUTDIR}" CONFIG_RTE_VIRTIO_USER n
# Enable vhost numa as libnuma dep is ok
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_VHOST_NUMA y
# start by disabling ALL PMDs
for pmd in $(grep _PMD= "${OUTDIR}/.config" | sed 's@=\(y\|n\)@@g')
do
set_conf "${OUTDIR}" $pmd n
done
# PMDs which have their own naming scheme
# the default for this was 'n' at one point. Make sure we keep it
# as such
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_QAT n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_OCTEONTX_ZIPVF n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_VHOST n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_KNI n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_XENVIRT n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_NULL n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_PCAP n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_BOND n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_AF_PACKET n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_SOFTNIC n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_DPAA_SEC n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_COMMON_DPAAX n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_CAAM_JR n
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_CAAM_JR_BE n
# whitelist of enabled PMDs
# Soft PMDs to enable
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_RING y
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_VHOST y
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_VIRTIO_PMD y
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_TAP y
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PMD_FAILSAFE y
# start by disabling all buses
for bus in $(grep _BUS= "${OUTDIR}/.config" | sed 's@=\(y\|n\)@@g')
do
set_conf "${OUTDIR}" $bus n
done
# blacklist buses that don't conform to std naming
# May override VMBUS later in arch specific section
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_VMBUS n
# whitelist buses
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_PCI_BUS y
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_VDEV_BUS y
# Disable some other miscellaneous items related to test apps
set_conf "${OUTDIR}" CONFIG_RTE_TEST_BBDEV n
set_conf "${OUTDIR}" CONFIG_RTE_APP_CRYPTO_PERF n
# Disable kernel modules
set_conf "${OUTDIR}" CONFIG_RTE_EAL_IGB_UIO n
set_conf "${OUTDIR}" CONFIG_RTE_KNI_KMOD n
# Disable experimental stuff
set_conf "${OUTDIR}" CONFIG_RTE_NEXT_ABI n
# Build DPDK as shared library
set_conf "${OUTDIR}" CONFIG_RTE_BUILD_SHARED_LIB y
# Compile the PMD test application
set_conf "${OUTDIR}" CONFIG_RTE_TEST_PMD y
# Arch specific
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_I40E_PMD y
case "${DPDK_CONF_MACH_ARCH[i]}" in
x86_64)
# Hw PMD
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_BNXT_PMD y
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_ENIC_PMD y
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_MLX4_PMD y
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_MLX4_DLOPEN_DEPS y
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_MLX5_PMD y
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS y
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_NFP_PMD y
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_QEDE_PMD y
# Sw PMD
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_NETVSC_PMD y
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_VDEV_NETVSC_PMD y
# Bus
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_VMBUS y
# ";&" falls through (bash >= 4): x86_64 also gets the arm64 PMDs below.
;&
arm64)
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_IXGBE_PMD y
set_conf "${OUTDIR}" CONFIG_RTE_LIBRTE_IGB_PMD y
;;
esac
cp "${OUTDIR}/.config" "${OUTPUT_DIR}/${DPDK_TARGETS[$i]}-config"
done
popd >/dev/null
echo -n "For each arch ( "
for ((i=0; i < ${#DPDK_CONF_MACH_ARCH[@]}; i++));
do
echo -n "${DPDK_CONF_MACH_ARCH[i]} "
done
echo "):"
echo "1. ensure you enable the requisite hw"

View File

@ -1,550 +0,0 @@
# -*- cfg-sha: ac783e64ca20c977a7c1c42e72e6dce151b31aa9aecfbfa121b45e49e938f418
# BSD LICENSE
# Copyright (C) IBM Corporation 2014.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of IBM Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2010-2016 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2010-2017 Intel Corporation
# RTE_EXEC_ENV values are the directories in mk/exec-env/
CONFIG_RTE_EXEC_ENV="linuxapp"
# RTE_ARCH values are the directories in mk/arch/
CONFIG_RTE_ARCH="ppc_64"
# machine can define specific variables or action for a specific board
# RTE_MACHINE values are the directories in mk/machine/
CONFIG_RTE_MACHINE="power8"
# The compiler we use.
# RTE_TOOLCHAIN values are the directories in mk/toolchain/
CONFIG_RTE_TOOLCHAIN="gcc"
# Use intrinsics or assembly code for key routines
CONFIG_RTE_FORCE_INTRINSICS=n
# Machine forces strict alignment constraints.
CONFIG_RTE_ARCH_STRICT_ALIGN=n
# Compile to share library
CONFIG_RTE_BUILD_SHARED_LIB=y
# Use newest code breaking previous ABI
CONFIG_RTE_NEXT_ABI=n
# Major ABI to overwrite library specific LIBABIVER
CONFIG_RTE_MAJOR_ABI=
# Machine's cache line size
CONFIG_RTE_CACHE_LINE_SIZE=128
# Memory model
CONFIG_RTE_USE_C11_MEM_MODEL=n
# Compile Environment Abstraction Layer
CONFIG_RTE_LIBRTE_EAL=y
CONFIG_RTE_MAX_LCORE=256
CONFIG_RTE_MAX_NUMA_NODES=32
CONFIG_RTE_MAX_HEAPS=32
CONFIG_RTE_MAX_MEMSEG_LISTS=64
# each memseg list will be limited to either RTE_MAX_MEMSEG_PER_LIST pages
# or RTE_MAX_MEM_MB_PER_LIST megabytes worth of memory, whichever is smaller
CONFIG_RTE_MAX_MEMSEG_PER_LIST=8192
CONFIG_RTE_MAX_MEM_MB_PER_LIST=32768
# a "type" is a combination of page size and NUMA node. total number of memseg
# lists per type will be limited to either RTE_MAX_MEMSEG_PER_TYPE pages (split
# over multiple lists of RTE_MAX_MEMSEG_PER_LIST pages), or
# RTE_MAX_MEM_MB_PER_TYPE megabytes of memory (split over multiple lists of
# RTE_MAX_MEM_MB_PER_LIST), whichever is smaller
CONFIG_RTE_MAX_MEMSEG_PER_TYPE=32768
CONFIG_RTE_MAX_MEM_MB_PER_TYPE=131072
# global maximum usable amount of VA, in megabytes
CONFIG_RTE_MAX_MEM_MB=524288
CONFIG_RTE_MAX_MEMZONE=2560
CONFIG_RTE_MAX_TAILQ=32
CONFIG_RTE_ENABLE_ASSERT=n
CONFIG_RTE_LOG_DP_LEVEL=RTE_LOG_INFO
CONFIG_RTE_LOG_HISTORY=256
CONFIG_RTE_BACKTRACE=y
CONFIG_RTE_LIBEAL_USE_HPET=n
CONFIG_RTE_EAL_ALLOW_INV_SOCKET_ID=n
CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
CONFIG_RTE_EAL_IGB_UIO=n
CONFIG_RTE_EAL_VFIO=y
CONFIG_RTE_MAX_VFIO_GROUPS=64
CONFIG_RTE_MAX_VFIO_CONTAINERS=64
CONFIG_RTE_MALLOC_DEBUG=n
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
CONFIG_RTE_USE_LIBBSD=n
# Recognize/ignore the AVX/AVX512 CPU flags for performance/power testing.
# AVX512 is marked as experimental for now, will enable it after enough
# field test and possible optimization.
CONFIG_RTE_ENABLE_AVX=y
CONFIG_RTE_ENABLE_AVX512=n
# Default driver path (or "" to disable)
CONFIG_RTE_EAL_PMD_PATH="/usr/lib64/dpdk-pmds"
# Compile Environment Abstraction Layer to support Vmware TSC map
CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=n
# Compile the PCI library
CONFIG_RTE_LIBRTE_PCI=y
# Compile the argument parser library
CONFIG_RTE_LIBRTE_KVARGS=y
# Compile generic ethernet library
CONFIG_RTE_LIBRTE_ETHER=y
CONFIG_RTE_LIBRTE_ETHDEV_DEBUG=n
CONFIG_RTE_MAX_ETHPORTS=32
CONFIG_RTE_MAX_QUEUES_PER_PORT=1024
CONFIG_RTE_LIBRTE_IEEE1588=n
CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS=16
CONFIG_RTE_ETHDEV_RXTX_CALLBACKS=y
CONFIG_RTE_ETHDEV_PROFILE_WITH_VTUNE=n
# Turn off Tx preparation stage
# Warning: rte_eth_tx_prepare() can be safely disabled only if using a
# driver which do not implement any Tx preparation.
CONFIG_RTE_ETHDEV_TX_PREPARE_NOOP=n
# Common libraries, before Bus/PMDs
CONFIG_RTE_LIBRTE_COMMON_DPAAX=n
# Compile the Intel FPGA bus
CONFIG_RTE_LIBRTE_IFPGA_BUS=n
# Compile PCI bus driver
CONFIG_RTE_LIBRTE_PCI_BUS=y
# Compile the vdev bus
CONFIG_RTE_LIBRTE_VDEV_BUS=y
# Compile ARK PMD
CONFIG_RTE_LIBRTE_ARK_PMD=n
CONFIG_RTE_LIBRTE_ARK_PAD_TX=y
CONFIG_RTE_LIBRTE_ARK_DEBUG_RX=n
CONFIG_RTE_LIBRTE_ARK_DEBUG_TX=n
CONFIG_RTE_LIBRTE_ARK_DEBUG_STATS=n
CONFIG_RTE_LIBRTE_ARK_DEBUG_TRACE=n
# Compile Aquantia Atlantic PMD driver
CONFIG_RTE_LIBRTE_ATLANTIC_PMD=n
# Compile AMD PMD
CONFIG_RTE_LIBRTE_AXGBE_PMD=n
CONFIG_RTE_LIBRTE_AXGBE_PMD_DEBUG=n
# Compile burst-oriented Broadcom PMD driver
CONFIG_RTE_LIBRTE_BNX2X_PMD=n
CONFIG_RTE_LIBRTE_BNX2X_DEBUG_RX=n
CONFIG_RTE_LIBRTE_BNX2X_DEBUG_TX=n
CONFIG_RTE_LIBRTE_BNX2X_MF_SUPPORT=n
CONFIG_RTE_LIBRTE_BNX2X_DEBUG_PERIODIC=n
# Compile burst-oriented Broadcom BNXT PMD driver
CONFIG_RTE_LIBRTE_BNXT_PMD=n
# Compile burst-oriented Chelsio Terminator (CXGBE) PMD
CONFIG_RTE_LIBRTE_CXGBE_PMD=n
CONFIG_RTE_LIBRTE_CXGBE_DEBUG=n
CONFIG_RTE_LIBRTE_CXGBE_DEBUG_REG=n
CONFIG_RTE_LIBRTE_CXGBE_DEBUG_MBOX=n
CONFIG_RTE_LIBRTE_CXGBE_DEBUG_TX=n
CONFIG_RTE_LIBRTE_CXGBE_DEBUG_RX=n
CONFIG_RTE_LIBRTE_CXGBE_TPUT=y
# NXP DPAA Bus
CONFIG_RTE_LIBRTE_DPAA_BUS=n
CONFIG_RTE_LIBRTE_DPAA_MEMPOOL=n
CONFIG_RTE_LIBRTE_DPAA_PMD=n
CONFIG_RTE_LIBRTE_DPAA_HWDEBUG=n
# Compile NXP DPAA2 FSL-MC Bus
CONFIG_RTE_LIBRTE_FSLMC_BUS=n
# Compile Support Libraries for NXP DPAA2
CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL=n
CONFIG_RTE_LIBRTE_DPAA2_USE_PHYS_IOVA=y
# Compile burst-oriented NXP DPAA2 PMD driver
CONFIG_RTE_LIBRTE_DPAA2_PMD=n
CONFIG_RTE_LIBRTE_DPAA2_DEBUG_DRIVER=n
# Compile NXP ENETC PMD Driver
CONFIG_RTE_LIBRTE_ENETC_PMD=n
# Compile burst-oriented Amazon ENA PMD driver
CONFIG_RTE_LIBRTE_ENA_PMD=n
CONFIG_RTE_LIBRTE_ENA_DEBUG_RX=n
CONFIG_RTE_LIBRTE_ENA_DEBUG_TX=n
CONFIG_RTE_LIBRTE_ENA_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_ENA_COM_DEBUG=n
# Compile burst-oriented Cisco ENIC PMD driver
CONFIG_RTE_LIBRTE_ENIC_PMD=n
# Compile burst-oriented IGB & EM PMD drivers
CONFIG_RTE_LIBRTE_EM_PMD=n
CONFIG_RTE_LIBRTE_IGB_PMD=n
CONFIG_RTE_LIBRTE_E1000_DEBUG_RX=n
CONFIG_RTE_LIBRTE_E1000_DEBUG_TX=n
CONFIG_RTE_LIBRTE_E1000_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC=n
# Compile burst-oriented IXGBE PMD driver
CONFIG_RTE_LIBRTE_IXGBE_PMD=n
CONFIG_RTE_LIBRTE_IXGBE_DEBUG_RX=n
CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX=n
CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC=n
CONFIG_RTE_IXGBE_INC_VECTOR=y
CONFIG_RTE_LIBRTE_IXGBE_BYPASS=n
# Compile burst-oriented I40E PMD driver
CONFIG_RTE_LIBRTE_I40E_PMD=y
CONFIG_RTE_LIBRTE_I40E_DEBUG_RX=n
CONFIG_RTE_LIBRTE_I40E_DEBUG_TX=n
CONFIG_RTE_LIBRTE_I40E_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC=y
CONFIG_RTE_LIBRTE_I40E_INC_VECTOR=y
CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=n
CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF=64
CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM=4
# Compile burst-oriented FM10K PMD
CONFIG_RTE_LIBRTE_FM10K_PMD=n
CONFIG_RTE_LIBRTE_FM10K_DEBUG_RX=n
CONFIG_RTE_LIBRTE_FM10K_DEBUG_TX=n
CONFIG_RTE_LIBRTE_FM10K_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE=y
CONFIG_RTE_LIBRTE_FM10K_INC_VECTOR=y
# Compile burst-oriented AVF PMD driver
CONFIG_RTE_LIBRTE_AVF_PMD=n
CONFIG_RTE_LIBRTE_AVF_INC_VECTOR=y
CONFIG_RTE_LIBRTE_AVF_DEBUG_TX=n
CONFIG_RTE_LIBRTE_AVF_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_AVF_DEBUG_RX=n
CONFIG_RTE_LIBRTE_AVF_16BYTE_RX_DESC=n
# Compile burst-oriented Mellanox ConnectX-3 (MLX4) PMD
CONFIG_RTE_LIBRTE_MLX4_PMD=n
CONFIG_RTE_LIBRTE_MLX4_DEBUG=n
CONFIG_RTE_LIBRTE_MLX4_DLOPEN_DEPS=n
# Compile burst-oriented Mellanox ConnectX-4, ConnectX-5 & Bluefield
# (MLX5) PMD
CONFIG_RTE_LIBRTE_MLX5_PMD=n
CONFIG_RTE_LIBRTE_MLX5_DEBUG=n
CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS=n
# Compile burst-oriented Netronome NFP PMD driver
CONFIG_RTE_LIBRTE_NFP_PMD=n
CONFIG_RTE_LIBRTE_NFP_DEBUG_TX=n
CONFIG_RTE_LIBRTE_NFP_DEBUG_RX=n
# QLogic 10G/25G/40G/50G/100G PMD
CONFIG_RTE_LIBRTE_QEDE_PMD=n
CONFIG_RTE_LIBRTE_QEDE_DEBUG_TX=n
CONFIG_RTE_LIBRTE_QEDE_DEBUG_RX=n
#Provides abs path/name of the firmware file.
#Empty string denotes driver will use default firmware
CONFIG_RTE_LIBRTE_QEDE_FW=""
# Compile burst-oriented Solarflare libefx-based PMD
CONFIG_RTE_LIBRTE_SFC_EFX_PMD=n
CONFIG_RTE_LIBRTE_SFC_EFX_DEBUG=n
# Compile software PMD backed by SZEDATA2 device
CONFIG_RTE_LIBRTE_PMD_SZEDATA2=n
# Compile burst-oriented Cavium Thunderx NICVF PMD driver
CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD=n
CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_RX=n
CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_TX=n
# Compile burst-oriented Cavium LiquidIO PMD driver
CONFIG_RTE_LIBRTE_LIO_PMD=n
CONFIG_RTE_LIBRTE_LIO_DEBUG_RX=n
CONFIG_RTE_LIBRTE_LIO_DEBUG_TX=n
CONFIG_RTE_LIBRTE_LIO_DEBUG_MBOX=n
CONFIG_RTE_LIBRTE_LIO_DEBUG_REGS=n
# Compile burst-oriented Cavium OCTEONTX network PMD driver
CONFIG_RTE_LIBRTE_OCTEONTX_PMD=n
# Compile WRS accelerated virtual port (AVP) guest PMD driver
CONFIG_RTE_LIBRTE_AVP_PMD=n
CONFIG_RTE_LIBRTE_AVP_DEBUG_RX=n
CONFIG_RTE_LIBRTE_AVP_DEBUG_TX=n
CONFIG_RTE_LIBRTE_AVP_DEBUG_BUFFERS=n
# Compile burst-oriented VIRTIO PMD driver
CONFIG_RTE_LIBRTE_VIRTIO_PMD=y
CONFIG_RTE_LIBRTE_VIRTIO_DEBUG_RX=n
CONFIG_RTE_LIBRTE_VIRTIO_DEBUG_TX=n
CONFIG_RTE_LIBRTE_VIRTIO_DEBUG_DUMP=n
# Compile virtio device emulation inside virtio PMD driver
CONFIG_RTE_VIRTIO_USER=n
# Compile burst-oriented VMXNET3 PMD driver
CONFIG_RTE_LIBRTE_VMXNET3_PMD=n
CONFIG_RTE_LIBRTE_VMXNET3_DEBUG_RX=n
CONFIG_RTE_LIBRTE_VMXNET3_DEBUG_TX=n
CONFIG_RTE_LIBRTE_VMXNET3_DEBUG_TX_FREE=n
# Compile software PMD backed by AF_PACKET sockets (Linux only)
CONFIG_RTE_LIBRTE_PMD_AF_PACKET=n
# Compile link bonding PMD library
CONFIG_RTE_LIBRTE_PMD_BOND=n
CONFIG_RTE_LIBRTE_BOND_DEBUG_ALB=n
CONFIG_RTE_LIBRTE_BOND_DEBUG_ALB_L1=n
# Compile fail-safe PMD
CONFIG_RTE_LIBRTE_PMD_FAILSAFE=y
# Compile Marvell PMD driver
CONFIG_RTE_LIBRTE_MVPP2_PMD=n
# Compile Marvell MVNETA PMD driver
CONFIG_RTE_LIBRTE_MVNETA_PMD=n
# Compile support for VMBus library
CONFIG_RTE_LIBRTE_VMBUS=n
# Compile native PMD for Hyper-V/Azure
CONFIG_RTE_LIBRTE_NETVSC_PMD=n
CONFIG_RTE_LIBRTE_NETVSC_DEBUG_RX=n
CONFIG_RTE_LIBRTE_NETVSC_DEBUG_TX=n
CONFIG_RTE_LIBRTE_NETVSC_DEBUG_DUMP=n
# Compile virtual device driver for NetVSC on Hyper-V/Azure
CONFIG_RTE_LIBRTE_VDEV_NETVSC_PMD=n
# Compile null PMD
CONFIG_RTE_LIBRTE_PMD_NULL=n
# Compile software PMD backed by PCAP files
CONFIG_RTE_LIBRTE_PMD_PCAP=n
# Compile example software rings based PMD
CONFIG_RTE_LIBRTE_PMD_RING=y
CONFIG_RTE_PMD_RING_MAX_RX_RINGS=16
CONFIG_RTE_PMD_RING_MAX_TX_RINGS=16
# Compile SOFTNIC PMD
CONFIG_RTE_LIBRTE_PMD_SOFTNIC=n
# Compile the TAP PMD
# It is enabled by default for Linux only.
CONFIG_RTE_LIBRTE_PMD_TAP=y
# Do prefetch of packet data within PMD driver receive function
CONFIG_RTE_PMD_PACKET_PREFETCH=y
# Compile generic wireless base band device library
# EXPERIMENTAL: API may change without prior notice
CONFIG_RTE_LIBRTE_BBDEV=n
CONFIG_RTE_BBDEV_MAX_DEVS=128
CONFIG_RTE_BBDEV_OFFLOAD_COST=n
# Compile PMD for NULL bbdev device
CONFIG_RTE_LIBRTE_PMD_BBDEV_NULL=y
# Compile PMD for turbo software bbdev device
CONFIG_RTE_LIBRTE_PMD_BBDEV_TURBO_SW=n
# Compile generic crypto device library
CONFIG_RTE_LIBRTE_CRYPTODEV=n
CONFIG_RTE_CRYPTO_MAX_DEVS=64
# Compile PMD for ARMv8 Crypto device
CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO=n
CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO_DEBUG=n
# Compile NXP CAAM JR crypto Driver
CONFIG_RTE_LIBRTE_PMD_CAAM_JR=n
CONFIG_RTE_LIBRTE_PMD_CAAM_JR_BE=n
# Compile NXP DPAA2 crypto sec driver for CAAM HW
CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC=n
# NXP DPAA caam - crypto driver
CONFIG_RTE_LIBRTE_PMD_DPAA_SEC=n
CONFIG_RTE_LIBRTE_DPAA_MAX_CRYPTODEV=4
# Compile PMD for Cavium OCTEON TX crypto device
CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO=y
# Compile PMD for QuickAssist based devices - see docs for details
CONFIG_RTE_LIBRTE_PMD_QAT=n
CONFIG_RTE_LIBRTE_PMD_QAT_SYM=n
# Max. number of QuickAssist devices, which can be detected and attached
CONFIG_RTE_PMD_QAT_MAX_PCI_DEVICES=48
CONFIG_RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS=16
CONFIG_RTE_PMD_QAT_COMP_IM_BUFFER_SIZE=65536
# Compile PMD for virtio crypto devices
CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO=n
# Number of maximum virtio crypto devices
CONFIG_RTE_MAX_VIRTIO_CRYPTO=32
# Compile PMD for AESNI backed device
CONFIG_RTE_LIBRTE_PMD_AESNI_MB=n
# Compile PMD for Software backed device
CONFIG_RTE_LIBRTE_PMD_OPENSSL=n
# Compile PMD for AESNI GCM device
CONFIG_RTE_LIBRTE_PMD_AESNI_GCM=n
# Compile PMD for SNOW 3G device
CONFIG_RTE_LIBRTE_PMD_SNOW3G=n
CONFIG_RTE_LIBRTE_PMD_SNOW3G_DEBUG=n
# Compile PMD for KASUMI device
CONFIG_RTE_LIBRTE_PMD_KASUMI=n
# Compile PMD for ZUC device
CONFIG_RTE_LIBRTE_PMD_ZUC=n
# Compile PMD for Crypto Scheduler device
CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER=n
# Compile PMD for NULL Crypto device
CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=n
# Compile PMD for AMD CCP crypto device
CONFIG_RTE_LIBRTE_PMD_CCP=n
# Compile PMD for Marvell Crypto device
CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO=n
# Compile generic security library
CONFIG_RTE_LIBRTE_SECURITY=n
# Compile generic compression device library
CONFIG_RTE_LIBRTE_COMPRESSDEV=n
CONFIG_RTE_COMPRESS_MAX_DEVS=64
# Compile compressdev unit test
CONFIG_RTE_COMPRESSDEV_TEST=n
# Compile PMD for Octeontx ZIPVF compression device
CONFIG_RTE_LIBRTE_PMD_OCTEONTX_ZIPVF=n
# Compile PMD for ISA-L compression device
CONFIG_RTE_LIBRTE_PMD_ISAL=n
# Compile PMD for ZLIB compression device
CONFIG_RTE_LIBRTE_PMD_ZLIB=n
# Compile generic event device library
CONFIG_RTE_LIBRTE_EVENTDEV=n
CONFIG_RTE_LIBRTE_EVENTDEV_DEBUG=n
CONFIG_RTE_EVENT_MAX_DEVS=16
CONFIG_RTE_EVENT_MAX_QUEUES_PER_DEV=64
CONFIG_RTE_EVENT_TIMER_ADAPTER_NUM_MAX=32
CONFIG_RTE_EVENT_ETH_INTR_RING_SIZE=1024
CONFIG_RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE=32
CONFIG_RTE_EVENT_ETH_TX_ADAPTER_MAX_INSTANCE=32
# Compile PMD for skeleton event device
CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV=n
CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV_DEBUG=n
# Compile PMD for software event device
CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV=n
# Compile PMD for distributed software event device
CONFIG_RTE_LIBRTE_PMD_DSW_EVENTDEV=n
# Compile PMD for octeontx sso event device
CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF=n
# Compile PMD for OPDL event device
CONFIG_RTE_LIBRTE_PMD_OPDL_EVENTDEV=n
# Compile PMD for NXP DPAA event device
CONFIG_RTE_LIBRTE_PMD_DPAA_EVENTDEV=n
# Compile PMD for NXP DPAA2 event device
CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV=n
# Compile raw device support
# EXPERIMENTAL: API may change without prior notice
CONFIG_RTE_LIBRTE_RAWDEV=n
CONFIG_RTE_RAWDEV_MAX_DEVS=10
CONFIG_RTE_LIBRTE_PMD_SKELETON_RAWDEV=n
# Compile PMD for NXP DPAA2 CMDIF raw device
CONFIG_RTE_LIBRTE_PMD_DPAA2_CMDIF_RAWDEV=n
# Compile PMD for NXP DPAA2 QDMA raw device
CONFIG_RTE_LIBRTE_PMD_DPAA2_QDMA_RAWDEV=n
# Compile PMD for Intel FPGA raw device
CONFIG_RTE_LIBRTE_PMD_IFPGA_RAWDEV=n
# Compile librte_ring
CONFIG_RTE_LIBRTE_RING=y
# Compile librte_mempool
CONFIG_RTE_LIBRTE_MEMPOOL=y
CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE=512
CONFIG_RTE_LIBRTE_MEMPOOL_DEBUG=n
# Compile Mempool drivers
CONFIG_RTE_DRIVER_MEMPOOL_BUCKET=y
CONFIG_RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB=64
CONFIG_RTE_DRIVER_MEMPOOL_RING=y
CONFIG_RTE_DRIVER_MEMPOOL_STACK=y
# Compile PMD for octeontx fpa mempool device
CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL=n
# Compile librte_mbuf
CONFIG_RTE_LIBRTE_MBUF=y
CONFIG_RTE_LIBRTE_MBUF_DEBUG=n
CONFIG_RTE_MBUF_DEFAULT_MEMPOOL_OPS="ring_mp_mc"
CONFIG_RTE_MBUF_REFCNT_ATOMIC=y
CONFIG_RTE_PKTMBUF_HEADROOM=128
# Compile librte_timer
CONFIG_RTE_LIBRTE_TIMER=n
CONFIG_RTE_LIBRTE_TIMER_DEBUG=n
# Compile librte_cfgfile
CONFIG_RTE_LIBRTE_CFGFILE=n
# Compile librte_cmdline
CONFIG_RTE_LIBRTE_CMDLINE=y
CONFIG_RTE_LIBRTE_CMDLINE_DEBUG=n
# Compile librte_hash
CONFIG_RTE_LIBRTE_HASH=y
CONFIG_RTE_LIBRTE_HASH_DEBUG=n
# Compile librte_efd
CONFIG_RTE_LIBRTE_EFD=n
# Compile librte_member
CONFIG_RTE_LIBRTE_MEMBER=y
# Compile librte_jobstats
CONFIG_RTE_LIBRTE_JOBSTATS=n
# Compile the device metrics library
CONFIG_RTE_LIBRTE_METRICS=y
# Compile the bitrate statistics library
CONFIG_RTE_LIBRTE_BITRATE=y
# Compile the latency statistics library
CONFIG_RTE_LIBRTE_LATENCY_STATS=y
# Compile librte_telemetry
CONFIG_RTE_LIBRTE_TELEMETRY=n
# Compile librte_lpm
CONFIG_RTE_LIBRTE_LPM=n
CONFIG_RTE_LIBRTE_LPM_DEBUG=n
# Compile librte_acl
CONFIG_RTE_LIBRTE_ACL=n
CONFIG_RTE_LIBRTE_ACL_DEBUG=n
# Compile librte_power
CONFIG_RTE_LIBRTE_POWER=n
CONFIG_RTE_LIBRTE_POWER_DEBUG=n
CONFIG_RTE_MAX_LCORE_FREQS=64
# Compile librte_net
CONFIG_RTE_LIBRTE_NET=y
# Compile librte_ip_frag
CONFIG_RTE_LIBRTE_IP_FRAG=y
CONFIG_RTE_LIBRTE_IP_FRAG_DEBUG=n
CONFIG_RTE_LIBRTE_IP_FRAG_MAX_FRAG=4
CONFIG_RTE_LIBRTE_IP_FRAG_TBL_STAT=n
# Compile GRO library
CONFIG_RTE_LIBRTE_GRO=y
# Compile GSO library
CONFIG_RTE_LIBRTE_GSO=y
# Compile librte_meter
CONFIG_RTE_LIBRTE_METER=y
# Compile librte_classify
CONFIG_RTE_LIBRTE_FLOW_CLASSIFY=n
# Compile librte_sched
CONFIG_RTE_LIBRTE_SCHED=n
CONFIG_RTE_SCHED_DEBUG=n
CONFIG_RTE_SCHED_RED=n
CONFIG_RTE_SCHED_COLLECT_STATS=n
CONFIG_RTE_SCHED_SUBPORT_TC_OV=n
CONFIG_RTE_SCHED_PORT_N_GRINDERS=8
CONFIG_RTE_SCHED_VECTOR=n
# Compile the distributor library
CONFIG_RTE_LIBRTE_DISTRIBUTOR=n
# Compile the reorder library
CONFIG_RTE_LIBRTE_REORDER=n
# Compile librte_port
CONFIG_RTE_LIBRTE_PORT=n
CONFIG_RTE_PORT_STATS_COLLECT=n
CONFIG_RTE_PORT_PCAP=n
# Compile librte_table
CONFIG_RTE_LIBRTE_TABLE=n
CONFIG_RTE_TABLE_STATS_COLLECT=n
# Compile librte_pipeline
CONFIG_RTE_LIBRTE_PIPELINE=n
CONFIG_RTE_PIPELINE_STATS_COLLECT=n
# Compile librte_kni
CONFIG_RTE_LIBRTE_KNI=n
CONFIG_RTE_LIBRTE_PMD_KNI=n
CONFIG_RTE_KNI_KMOD=n
CONFIG_RTE_KNI_KMOD_ETHTOOL=n
CONFIG_RTE_KNI_PREEMPT_DEFAULT=y
# Compile the pdump library
CONFIG_RTE_LIBRTE_PDUMP=y
# Compile vhost user library
CONFIG_RTE_LIBRTE_VHOST=y
CONFIG_RTE_LIBRTE_VHOST_NUMA=y
CONFIG_RTE_LIBRTE_VHOST_DEBUG=n
# Compile vhost PMD
# To compile, CONFIG_RTE_LIBRTE_VHOST should be enabled.
CONFIG_RTE_LIBRTE_PMD_VHOST=y
# Compile IFC driver
# To compile, CONFIG_RTE_LIBRTE_VHOST and CONFIG_RTE_EAL_VFIO
# should be enabled.
CONFIG_RTE_LIBRTE_IFC_PMD=n
# Compile librte_bpf
CONFIG_RTE_LIBRTE_BPF=n
# allow load BPF from ELF files (requires libelf)
CONFIG_RTE_LIBRTE_BPF_ELF=n
# Compile the test application
CONFIG_RTE_APP_TEST=y
CONFIG_RTE_APP_TEST_RESOURCE_TAR=n
# Compile the procinfo application
CONFIG_RTE_PROC_INFO=y
# Compile the PMD test application
CONFIG_RTE_TEST_PMD=y
CONFIG_RTE_TEST_PMD_RECORD_CORE_CYCLES=n
CONFIG_RTE_TEST_PMD_RECORD_BURST_STATS=n
# Compile the bbdev test application
CONFIG_RTE_TEST_BBDEV=n
# Compile the crypto performance application
CONFIG_RTE_APP_CRYPTO_PERF=n
# Compile the eventdev application
CONFIG_RTE_APP_EVENTDEV=n
CONFIG_RTE_EXEC_ENV_LINUXAPP=y
CONFIG_RTE_LIBRTE_VHOST_POSTCOPY=n
# Common libraries, before Bus/PMDs
# NXP DPAA BUS and drivers
# NXP FSLMC BUS and DPAA2 drivers
# NXP ENETC PMD Driver
CONFIG_RTE_ARCH_PPC_64=y
CONFIG_RTE_ARCH_64=y
CONFIG_RTE_TOOLCHAIN_GCC=y
# Note: Power doesn't have this support
# Note: Initially, all of the PMD drivers compilation are turned off on Power
# Will turn on them only after the successful testing on Power
CONFIG_RTE_LIBRTE_PMD_XENVIRT=n

View File

@ -1,48 +0,0 @@
#!/bin/bash
# Copyright (C) 2017, Red Hat, Inc.
#
# set_config.sh will copy a configuration from $1 to $2, in the process
# checking that the sha header for $1 matches the header in $2
#
# Usage: set_config.sh source dest [comment-marker]
#   source          config file carrying the expected sha in its header
#   dest            config file to be replaced (previous copy kept as dest.OLD)
#   comment-marker  comment character used by the sha header (default '#')
#
# Exits 0 on success, 1 on any error (bad usage, missing file, sha
# lookup failure, sha mismatch, or failed backup/copy).

# calc_sha and retr_sha are provided by configlib.sh
source configlib.sh

if (( $# < 2 )); then
    echo "$0: source dest [comment-marker]"
    exit 1
fi

if [ ! -f "$1" ]; then
    echo "Source file $1 must exist."
    exit 1
fi
src_file=$1
shift

if [ ! -f "$1" ]; then
    echo "Dest file $1 must exist."
    exit 1
fi
dst_file=$1
shift

comment_sep=${1:-#}

export LANG=en_US.utf8

DEST_FILE_SHA=""
SRC_FILE_SHA=""

# A sha lookup failure must be fatal: previously it only printed a warning,
# so if BOTH lookups failed the two (empty) shas compared equal and the
# destination config was silently overwritten.
if ! calc_sha DEST_FILE_SHA "$dst_file" "$comment_sep"; then
    echo "Failed to calc sha"
    exit 1
fi
if ! retr_sha SRC_FILE_SHA "$src_file" "$comment_sep"; then
    echo "Failed to retrieve sha"
    exit 1
fi

if [ "$DEST_FILE_SHA" != "$SRC_FILE_SHA" ]; then
    echo "ERROR: The requisite starting sha from $dst_file does not match the"
    echo " specified sha in $src_file."
    echo "[ $DEST_FILE_SHA ] vs [ $SRC_FILE_SHA ]"
    exit 1
fi

# Keep a backup of the previous config, then install the new one;
# propagate any mv/cp failure instead of claiming success.
mv "$dst_file" "$dst_file".OLD || exit 1
cp "$src_file" "$dst_file" || exit 1
echo "copied 1 config file."
exit 0

View File

@ -1,525 +0,0 @@
# -*- cfg-sha: 2ba93102021dc5d38494cf5090c3ecaca37db13153dd558b1511a56f2a3d9b10
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2010-2014 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2010-2016 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2010-2017 Intel Corporation
# RTE_EXEC_ENV values are the directories in mk/exec-env/
CONFIG_RTE_EXEC_ENV="linuxapp"
# RTE_ARCH values are the directories in mk/arch/
CONFIG_RTE_ARCH="x86_64"
# machine can define specific variables or action for a specific board
# RTE_MACHINE values are the directories in mk/machine/
CONFIG_RTE_MACHINE="default"
# The compiler we use.
# RTE_TOOLCHAIN values are the directories in mk/toolchain/
CONFIG_RTE_TOOLCHAIN="gcc"
# Use intrinsics or assembly code for key routines
CONFIG_RTE_FORCE_INTRINSICS=n
# Machine forces strict alignment constraints.
CONFIG_RTE_ARCH_STRICT_ALIGN=n
# Compile to share library
CONFIG_RTE_BUILD_SHARED_LIB=y
# Use newest code breaking previous ABI
CONFIG_RTE_NEXT_ABI=n
# Major ABI to overwrite library specific LIBABIVER
CONFIG_RTE_MAJOR_ABI=
# Machine's cache line size
CONFIG_RTE_CACHE_LINE_SIZE=64
# Memory model
CONFIG_RTE_USE_C11_MEM_MODEL=n
# Compile Environment Abstraction Layer
CONFIG_RTE_LIBRTE_EAL=y
CONFIG_RTE_MAX_LCORE=128
CONFIG_RTE_MAX_NUMA_NODES=8
CONFIG_RTE_MAX_HEAPS=32
CONFIG_RTE_MAX_MEMSEG_LISTS=64
# each memseg list will be limited to either RTE_MAX_MEMSEG_PER_LIST pages
# or RTE_MAX_MEM_MB_PER_LIST megabytes worth of memory, whichever is smaller
CONFIG_RTE_MAX_MEMSEG_PER_LIST=8192
CONFIG_RTE_MAX_MEM_MB_PER_LIST=32768
# a "type" is a combination of page size and NUMA node. total number of memseg
# lists per type will be limited to either RTE_MAX_MEMSEG_PER_TYPE pages (split
# over multiple lists of RTE_MAX_MEMSEG_PER_LIST pages), or
# RTE_MAX_MEM_MB_PER_TYPE megabytes of memory (split over multiple lists of
# RTE_MAX_MEM_MB_PER_LIST), whichever is smaller
CONFIG_RTE_MAX_MEMSEG_PER_TYPE=32768
CONFIG_RTE_MAX_MEM_MB_PER_TYPE=131072
# global maximum usable amount of VA, in megabytes
CONFIG_RTE_MAX_MEM_MB=524288
CONFIG_RTE_MAX_MEMZONE=2560
CONFIG_RTE_MAX_TAILQ=32
CONFIG_RTE_ENABLE_ASSERT=n
CONFIG_RTE_LOG_DP_LEVEL=RTE_LOG_INFO
CONFIG_RTE_LOG_HISTORY=256
CONFIG_RTE_BACKTRACE=y
CONFIG_RTE_LIBEAL_USE_HPET=n
CONFIG_RTE_EAL_ALLOW_INV_SOCKET_ID=n
CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
CONFIG_RTE_EAL_IGB_UIO=n
CONFIG_RTE_EAL_VFIO=y
CONFIG_RTE_MAX_VFIO_GROUPS=64
CONFIG_RTE_MAX_VFIO_CONTAINERS=64
CONFIG_RTE_MALLOC_DEBUG=n
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=y
CONFIG_RTE_USE_LIBBSD=n
# Recognize/ignore the AVX/AVX512 CPU flags for performance/power testing.
# AVX512 is marked as experimental for now, will enable it after enough
# field test and possible optimization.
CONFIG_RTE_ENABLE_AVX=y
CONFIG_RTE_ENABLE_AVX512=n
# Default driver path (or "" to disable)
CONFIG_RTE_EAL_PMD_PATH="/usr/lib64/dpdk-pmds"
# Compile Environment Abstraction Layer to support Vmware TSC map
CONFIG_RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT=y
# Compile the PCI library
CONFIG_RTE_LIBRTE_PCI=y
# Compile the argument parser library
CONFIG_RTE_LIBRTE_KVARGS=y
# Compile generic ethernet library
CONFIG_RTE_LIBRTE_ETHER=y
CONFIG_RTE_LIBRTE_ETHDEV_DEBUG=n
CONFIG_RTE_MAX_ETHPORTS=32
CONFIG_RTE_MAX_QUEUES_PER_PORT=1024
CONFIG_RTE_LIBRTE_IEEE1588=n
CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS=16
CONFIG_RTE_ETHDEV_RXTX_CALLBACKS=y
CONFIG_RTE_ETHDEV_PROFILE_WITH_VTUNE=n
# Turn off Tx preparation stage
# Warning: rte_eth_tx_prepare() can be safely disabled only if using a
# driver which do not implement any Tx preparation.
CONFIG_RTE_ETHDEV_TX_PREPARE_NOOP=n
# Common libraries, before Bus/PMDs
CONFIG_RTE_LIBRTE_COMMON_DPAAX=n
# Compile the Intel FPGA bus
CONFIG_RTE_LIBRTE_IFPGA_BUS=n
# Compile PCI bus driver
CONFIG_RTE_LIBRTE_PCI_BUS=y
# Compile the vdev bus
CONFIG_RTE_LIBRTE_VDEV_BUS=y
# Compile ARK PMD
CONFIG_RTE_LIBRTE_ARK_PMD=n
CONFIG_RTE_LIBRTE_ARK_PAD_TX=y
CONFIG_RTE_LIBRTE_ARK_DEBUG_RX=n
CONFIG_RTE_LIBRTE_ARK_DEBUG_TX=n
CONFIG_RTE_LIBRTE_ARK_DEBUG_STATS=n
CONFIG_RTE_LIBRTE_ARK_DEBUG_TRACE=n
# Compile Aquantia Atlantic PMD driver
CONFIG_RTE_LIBRTE_ATLANTIC_PMD=n
# Compile AMD PMD
CONFIG_RTE_LIBRTE_AXGBE_PMD=n
CONFIG_RTE_LIBRTE_AXGBE_PMD_DEBUG=n
# Compile burst-oriented Broadcom PMD driver
CONFIG_RTE_LIBRTE_BNX2X_PMD=n
CONFIG_RTE_LIBRTE_BNX2X_DEBUG_RX=n
CONFIG_RTE_LIBRTE_BNX2X_DEBUG_TX=n
CONFIG_RTE_LIBRTE_BNX2X_MF_SUPPORT=n
CONFIG_RTE_LIBRTE_BNX2X_DEBUG_PERIODIC=n
# Compile burst-oriented Broadcom BNXT PMD driver
CONFIG_RTE_LIBRTE_BNXT_PMD=y
# Compile burst-oriented Chelsio Terminator (CXGBE) PMD
CONFIG_RTE_LIBRTE_CXGBE_PMD=n
CONFIG_RTE_LIBRTE_CXGBE_DEBUG=n
CONFIG_RTE_LIBRTE_CXGBE_DEBUG_REG=n
CONFIG_RTE_LIBRTE_CXGBE_DEBUG_MBOX=n
CONFIG_RTE_LIBRTE_CXGBE_DEBUG_TX=n
CONFIG_RTE_LIBRTE_CXGBE_DEBUG_RX=n
CONFIG_RTE_LIBRTE_CXGBE_TPUT=y
# NXP DPAA Bus
CONFIG_RTE_LIBRTE_DPAA_BUS=n
CONFIG_RTE_LIBRTE_DPAA_MEMPOOL=n
CONFIG_RTE_LIBRTE_DPAA_PMD=n
CONFIG_RTE_LIBRTE_DPAA_HWDEBUG=n
# Compile NXP DPAA2 FSL-MC Bus
CONFIG_RTE_LIBRTE_FSLMC_BUS=n
# Compile Support Libraries for NXP DPAA2
CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL=n
CONFIG_RTE_LIBRTE_DPAA2_USE_PHYS_IOVA=y
# Compile burst-oriented NXP DPAA2 PMD driver
CONFIG_RTE_LIBRTE_DPAA2_PMD=n
CONFIG_RTE_LIBRTE_DPAA2_DEBUG_DRIVER=n
# Compile NXP ENETC PMD Driver
CONFIG_RTE_LIBRTE_ENETC_PMD=n
# Compile burst-oriented Amazon ENA PMD driver
CONFIG_RTE_LIBRTE_ENA_PMD=n
CONFIG_RTE_LIBRTE_ENA_DEBUG_RX=n
CONFIG_RTE_LIBRTE_ENA_DEBUG_TX=n
CONFIG_RTE_LIBRTE_ENA_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_ENA_COM_DEBUG=n
# Compile burst-oriented Cisco ENIC PMD driver
CONFIG_RTE_LIBRTE_ENIC_PMD=y
# Compile burst-oriented IGB & EM PMD drivers
CONFIG_RTE_LIBRTE_EM_PMD=n
CONFIG_RTE_LIBRTE_IGB_PMD=y
CONFIG_RTE_LIBRTE_E1000_DEBUG_RX=n
CONFIG_RTE_LIBRTE_E1000_DEBUG_TX=n
CONFIG_RTE_LIBRTE_E1000_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC=n
# Compile burst-oriented IXGBE PMD driver
CONFIG_RTE_LIBRTE_IXGBE_PMD=y
CONFIG_RTE_LIBRTE_IXGBE_DEBUG_RX=n
CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX=n
CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC=n
CONFIG_RTE_IXGBE_INC_VECTOR=y
CONFIG_RTE_LIBRTE_IXGBE_BYPASS=n
# Compile burst-oriented I40E PMD driver
CONFIG_RTE_LIBRTE_I40E_PMD=y
CONFIG_RTE_LIBRTE_I40E_DEBUG_RX=n
CONFIG_RTE_LIBRTE_I40E_DEBUG_TX=n
CONFIG_RTE_LIBRTE_I40E_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC=y
CONFIG_RTE_LIBRTE_I40E_INC_VECTOR=y
CONFIG_RTE_LIBRTE_I40E_16BYTE_RX_DESC=n
CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF=64
CONFIG_RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM=4
# Compile burst-oriented FM10K PMD
CONFIG_RTE_LIBRTE_FM10K_PMD=n
CONFIG_RTE_LIBRTE_FM10K_DEBUG_RX=n
CONFIG_RTE_LIBRTE_FM10K_DEBUG_TX=n
CONFIG_RTE_LIBRTE_FM10K_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE=y
CONFIG_RTE_LIBRTE_FM10K_INC_VECTOR=y
# Compile burst-oriented AVF PMD driver
CONFIG_RTE_LIBRTE_AVF_PMD=n
CONFIG_RTE_LIBRTE_AVF_INC_VECTOR=y
CONFIG_RTE_LIBRTE_AVF_DEBUG_TX=n
CONFIG_RTE_LIBRTE_AVF_DEBUG_TX_FREE=n
CONFIG_RTE_LIBRTE_AVF_DEBUG_RX=n
CONFIG_RTE_LIBRTE_AVF_16BYTE_RX_DESC=n
# Compile burst-oriented Mellanox ConnectX-3 (MLX4) PMD
CONFIG_RTE_LIBRTE_MLX4_PMD=y
CONFIG_RTE_LIBRTE_MLX4_DEBUG=n
CONFIG_RTE_LIBRTE_MLX4_DLOPEN_DEPS=y
# Compile burst-oriented Mellanox ConnectX-4, ConnectX-5 & Bluefield
# (MLX5) PMD
CONFIG_RTE_LIBRTE_MLX5_PMD=y
CONFIG_RTE_LIBRTE_MLX5_DEBUG=n
CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS=y
# Compile burst-oriented Netronome NFP PMD driver
CONFIG_RTE_LIBRTE_NFP_PMD=y
CONFIG_RTE_LIBRTE_NFP_DEBUG_TX=n
CONFIG_RTE_LIBRTE_NFP_DEBUG_RX=n
# QLogic 10G/25G/40G/50G/100G PMD
CONFIG_RTE_LIBRTE_QEDE_PMD=y
CONFIG_RTE_LIBRTE_QEDE_DEBUG_TX=n
CONFIG_RTE_LIBRTE_QEDE_DEBUG_RX=n
#Provides abs path/name of the firmware file.
#Empty string denotes driver will use default firmware
CONFIG_RTE_LIBRTE_QEDE_FW=""
# Compile burst-oriented Solarflare libefx-based PMD
CONFIG_RTE_LIBRTE_SFC_EFX_PMD=n
CONFIG_RTE_LIBRTE_SFC_EFX_DEBUG=n
# Compile software PMD backed by SZEDATA2 device
CONFIG_RTE_LIBRTE_PMD_SZEDATA2=n
# Compile burst-oriented Cavium Thunderx NICVF PMD driver
CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD=n
CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_RX=n
CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_TX=n
# Compile burst-oriented Cavium LiquidIO PMD driver
CONFIG_RTE_LIBRTE_LIO_PMD=n
CONFIG_RTE_LIBRTE_LIO_DEBUG_RX=n
CONFIG_RTE_LIBRTE_LIO_DEBUG_TX=n
CONFIG_RTE_LIBRTE_LIO_DEBUG_MBOX=n
CONFIG_RTE_LIBRTE_LIO_DEBUG_REGS=n
# Compile burst-oriented Cavium OCTEONTX network PMD driver
CONFIG_RTE_LIBRTE_OCTEONTX_PMD=n
# Compile WRS accelerated virtual port (AVP) guest PMD driver
CONFIG_RTE_LIBRTE_AVP_PMD=n
CONFIG_RTE_LIBRTE_AVP_DEBUG_RX=n
CONFIG_RTE_LIBRTE_AVP_DEBUG_TX=n
CONFIG_RTE_LIBRTE_AVP_DEBUG_BUFFERS=n
# Compile burst-oriented VIRTIO PMD driver
CONFIG_RTE_LIBRTE_VIRTIO_PMD=y
CONFIG_RTE_LIBRTE_VIRTIO_DEBUG_RX=n
CONFIG_RTE_LIBRTE_VIRTIO_DEBUG_TX=n
CONFIG_RTE_LIBRTE_VIRTIO_DEBUG_DUMP=n
# Compile virtio device emulation inside virtio PMD driver
CONFIG_RTE_VIRTIO_USER=n
# Compile burst-oriented VMXNET3 PMD driver
CONFIG_RTE_LIBRTE_VMXNET3_PMD=n
CONFIG_RTE_LIBRTE_VMXNET3_DEBUG_RX=n
CONFIG_RTE_LIBRTE_VMXNET3_DEBUG_TX=n
CONFIG_RTE_LIBRTE_VMXNET3_DEBUG_TX_FREE=n
# Compile software PMD backed by AF_PACKET sockets (Linux only)
CONFIG_RTE_LIBRTE_PMD_AF_PACKET=n
# Compile link bonding PMD library
CONFIG_RTE_LIBRTE_PMD_BOND=n
CONFIG_RTE_LIBRTE_BOND_DEBUG_ALB=n
CONFIG_RTE_LIBRTE_BOND_DEBUG_ALB_L1=n
# Compile fail-safe PMD
CONFIG_RTE_LIBRTE_PMD_FAILSAFE=y
# Compile Marvell PMD driver
CONFIG_RTE_LIBRTE_MVPP2_PMD=n
# Compile Marvell MVNETA PMD driver
CONFIG_RTE_LIBRTE_MVNETA_PMD=n
# Compile support for VMBus library
CONFIG_RTE_LIBRTE_VMBUS=y
# Compile native PMD for Hyper-V/Azure
CONFIG_RTE_LIBRTE_NETVSC_PMD=y
CONFIG_RTE_LIBRTE_NETVSC_DEBUG_RX=n
CONFIG_RTE_LIBRTE_NETVSC_DEBUG_TX=n
CONFIG_RTE_LIBRTE_NETVSC_DEBUG_DUMP=n
# Compile virtual device driver for NetVSC on Hyper-V/Azure
CONFIG_RTE_LIBRTE_VDEV_NETVSC_PMD=y
# Compile null PMD
CONFIG_RTE_LIBRTE_PMD_NULL=n
# Compile software PMD backed by PCAP files
CONFIG_RTE_LIBRTE_PMD_PCAP=n
# Compile example software rings based PMD
CONFIG_RTE_LIBRTE_PMD_RING=y
CONFIG_RTE_PMD_RING_MAX_RX_RINGS=16
CONFIG_RTE_PMD_RING_MAX_TX_RINGS=16
# Compile SOFTNIC PMD
CONFIG_RTE_LIBRTE_PMD_SOFTNIC=n
# Compile the TAP PMD
# It is enabled by default for Linux only.
CONFIG_RTE_LIBRTE_PMD_TAP=y
# Do prefetch of packet data within PMD driver receive function
CONFIG_RTE_PMD_PACKET_PREFETCH=y
# Compile generic wireless base band device library
# EXPERIMENTAL: API may change without prior notice
CONFIG_RTE_LIBRTE_BBDEV=n
CONFIG_RTE_BBDEV_MAX_DEVS=128
CONFIG_RTE_BBDEV_OFFLOAD_COST=n
# Compile PMD for NULL bbdev device
CONFIG_RTE_LIBRTE_PMD_BBDEV_NULL=y
# Compile PMD for turbo software bbdev device
CONFIG_RTE_LIBRTE_PMD_BBDEV_TURBO_SW=n
# Compile generic crypto device library
CONFIG_RTE_LIBRTE_CRYPTODEV=n
CONFIG_RTE_CRYPTO_MAX_DEVS=64
# Compile PMD for ARMv8 Crypto device
CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO=n
CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO_DEBUG=n
# Compile NXP CAAM JR crypto Driver
CONFIG_RTE_LIBRTE_PMD_CAAM_JR=n
CONFIG_RTE_LIBRTE_PMD_CAAM_JR_BE=n
# Compile NXP DPAA2 crypto sec driver for CAAM HW
CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC=n
# NXP DPAA caam - crypto driver
CONFIG_RTE_LIBRTE_PMD_DPAA_SEC=n
CONFIG_RTE_LIBRTE_DPAA_MAX_CRYPTODEV=4
# Compile PMD for Cavium OCTEON TX crypto device
CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO=y
# Compile PMD for QuickAssist based devices - see docs for details
CONFIG_RTE_LIBRTE_PMD_QAT=n
CONFIG_RTE_LIBRTE_PMD_QAT_SYM=n
# Max. number of QuickAssist devices, which can be detected and attached
CONFIG_RTE_PMD_QAT_MAX_PCI_DEVICES=48
CONFIG_RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS=16
CONFIG_RTE_PMD_QAT_COMP_IM_BUFFER_SIZE=65536
# Compile PMD for virtio crypto devices
CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO=n
# Number of maximum virtio crypto devices
CONFIG_RTE_MAX_VIRTIO_CRYPTO=32
# Compile PMD for AESNI backed device
CONFIG_RTE_LIBRTE_PMD_AESNI_MB=n
# Compile PMD for Software backed device
CONFIG_RTE_LIBRTE_PMD_OPENSSL=n
# Compile PMD for AESNI GCM device
CONFIG_RTE_LIBRTE_PMD_AESNI_GCM=n
# Compile PMD for SNOW 3G device
CONFIG_RTE_LIBRTE_PMD_SNOW3G=n
CONFIG_RTE_LIBRTE_PMD_SNOW3G_DEBUG=n
# Compile PMD for KASUMI device
CONFIG_RTE_LIBRTE_PMD_KASUMI=n
# Compile PMD for ZUC device
CONFIG_RTE_LIBRTE_PMD_ZUC=n
# Compile PMD for Crypto Scheduler device
CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER=n
# Compile PMD for NULL Crypto device
CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=n
# Compile PMD for AMD CCP crypto device
CONFIG_RTE_LIBRTE_PMD_CCP=n
# Compile PMD for Marvell Crypto device
CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO=n
# Compile generic security library
CONFIG_RTE_LIBRTE_SECURITY=n
# Compile generic compression device library
CONFIG_RTE_LIBRTE_COMPRESSDEV=n
CONFIG_RTE_COMPRESS_MAX_DEVS=64
# Compile compressdev unit test
CONFIG_RTE_COMPRESSDEV_TEST=n
# Compile PMD for Octeontx ZIPVF compression device
CONFIG_RTE_LIBRTE_PMD_OCTEONTX_ZIPVF=n
# Compile PMD for ISA-L compression device
CONFIG_RTE_LIBRTE_PMD_ISAL=n
# Compile PMD for ZLIB compression device
CONFIG_RTE_LIBRTE_PMD_ZLIB=n
# Compile generic event device library
CONFIG_RTE_LIBRTE_EVENTDEV=n
CONFIG_RTE_LIBRTE_EVENTDEV_DEBUG=n
CONFIG_RTE_EVENT_MAX_DEVS=16
CONFIG_RTE_EVENT_MAX_QUEUES_PER_DEV=64
CONFIG_RTE_EVENT_TIMER_ADAPTER_NUM_MAX=32
CONFIG_RTE_EVENT_ETH_INTR_RING_SIZE=1024
CONFIG_RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE=32
CONFIG_RTE_EVENT_ETH_TX_ADAPTER_MAX_INSTANCE=32
# Compile PMD for skeleton event device
CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV=n
CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV_DEBUG=n
# Compile PMD for software event device
CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV=n
# Compile PMD for distributed software event device
CONFIG_RTE_LIBRTE_PMD_DSW_EVENTDEV=n
# Compile PMD for octeontx sso event device
CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF=n
# Compile PMD for OPDL event device
CONFIG_RTE_LIBRTE_PMD_OPDL_EVENTDEV=n
# Compile PMD for NXP DPAA event device
CONFIG_RTE_LIBRTE_PMD_DPAA_EVENTDEV=n
# Compile PMD for NXP DPAA2 event device
CONFIG_RTE_LIBRTE_PMD_DPAA2_EVENTDEV=n
# Compile raw device support
# EXPERIMENTAL: API may change without prior notice
CONFIG_RTE_LIBRTE_RAWDEV=n
CONFIG_RTE_RAWDEV_MAX_DEVS=10
CONFIG_RTE_LIBRTE_PMD_SKELETON_RAWDEV=n
# Compile PMD for NXP DPAA2 CMDIF raw device
CONFIG_RTE_LIBRTE_PMD_DPAA2_CMDIF_RAWDEV=n
# Compile PMD for NXP DPAA2 QDMA raw device
CONFIG_RTE_LIBRTE_PMD_DPAA2_QDMA_RAWDEV=n
# Compile PMD for Intel FPGA raw device
CONFIG_RTE_LIBRTE_PMD_IFPGA_RAWDEV=n
# Compile librte_ring
CONFIG_RTE_LIBRTE_RING=y
# Compile librte_mempool
CONFIG_RTE_LIBRTE_MEMPOOL=y
CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE=512
CONFIG_RTE_LIBRTE_MEMPOOL_DEBUG=n
# Compile Mempool drivers
CONFIG_RTE_DRIVER_MEMPOOL_BUCKET=y
CONFIG_RTE_DRIVER_MEMPOOL_BUCKET_SIZE_KB=64
CONFIG_RTE_DRIVER_MEMPOOL_RING=y
CONFIG_RTE_DRIVER_MEMPOOL_STACK=y
# Compile PMD for octeontx fpa mempool device
CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL=n
# Compile librte_mbuf
CONFIG_RTE_LIBRTE_MBUF=y
CONFIG_RTE_LIBRTE_MBUF_DEBUG=n
CONFIG_RTE_MBUF_DEFAULT_MEMPOOL_OPS="ring_mp_mc"
CONFIG_RTE_MBUF_REFCNT_ATOMIC=y
CONFIG_RTE_PKTMBUF_HEADROOM=128
# Compile librte_timer
CONFIG_RTE_LIBRTE_TIMER=n
CONFIG_RTE_LIBRTE_TIMER_DEBUG=n
# Compile librte_cfgfile
CONFIG_RTE_LIBRTE_CFGFILE=n
# Compile librte_cmdline
CONFIG_RTE_LIBRTE_CMDLINE=y
CONFIG_RTE_LIBRTE_CMDLINE_DEBUG=n
# Compile librte_hash
CONFIG_RTE_LIBRTE_HASH=y
CONFIG_RTE_LIBRTE_HASH_DEBUG=n
# Compile librte_efd
CONFIG_RTE_LIBRTE_EFD=n
# Compile librte_member
CONFIG_RTE_LIBRTE_MEMBER=y
# Compile librte_jobstats
CONFIG_RTE_LIBRTE_JOBSTATS=n
# Compile the device metrics library
CONFIG_RTE_LIBRTE_METRICS=y
# Compile the bitrate statistics library
CONFIG_RTE_LIBRTE_BITRATE=y
# Compile the latency statistics library
CONFIG_RTE_LIBRTE_LATENCY_STATS=y
# Compile librte_telemetry
CONFIG_RTE_LIBRTE_TELEMETRY=n
# Compile librte_lpm
CONFIG_RTE_LIBRTE_LPM=n
CONFIG_RTE_LIBRTE_LPM_DEBUG=n
# Compile librte_acl
CONFIG_RTE_LIBRTE_ACL=n
CONFIG_RTE_LIBRTE_ACL_DEBUG=n
# Compile librte_power
CONFIG_RTE_LIBRTE_POWER=n
CONFIG_RTE_LIBRTE_POWER_DEBUG=n
CONFIG_RTE_MAX_LCORE_FREQS=64
# Compile librte_net
CONFIG_RTE_LIBRTE_NET=y
# Compile librte_ip_frag
CONFIG_RTE_LIBRTE_IP_FRAG=y
CONFIG_RTE_LIBRTE_IP_FRAG_DEBUG=n
CONFIG_RTE_LIBRTE_IP_FRAG_MAX_FRAG=4
CONFIG_RTE_LIBRTE_IP_FRAG_TBL_STAT=n
# Compile GRO library
CONFIG_RTE_LIBRTE_GRO=y
# Compile GSO library
CONFIG_RTE_LIBRTE_GSO=y
# Compile librte_meter
CONFIG_RTE_LIBRTE_METER=y
# Compile librte_classify
CONFIG_RTE_LIBRTE_FLOW_CLASSIFY=n
# Compile librte_sched
CONFIG_RTE_LIBRTE_SCHED=n
CONFIG_RTE_SCHED_DEBUG=n
CONFIG_RTE_SCHED_RED=n
CONFIG_RTE_SCHED_COLLECT_STATS=n
CONFIG_RTE_SCHED_SUBPORT_TC_OV=n
CONFIG_RTE_SCHED_PORT_N_GRINDERS=8
CONFIG_RTE_SCHED_VECTOR=n
# Compile the distributor library
CONFIG_RTE_LIBRTE_DISTRIBUTOR=n
# Compile the reorder library
CONFIG_RTE_LIBRTE_REORDER=n
# Compile librte_port
CONFIG_RTE_LIBRTE_PORT=n
CONFIG_RTE_PORT_STATS_COLLECT=n
CONFIG_RTE_PORT_PCAP=n
# Compile librte_table
CONFIG_RTE_LIBRTE_TABLE=n
CONFIG_RTE_TABLE_STATS_COLLECT=n
# Compile librte_pipeline
CONFIG_RTE_LIBRTE_PIPELINE=n
CONFIG_RTE_PIPELINE_STATS_COLLECT=n
# Compile librte_kni
CONFIG_RTE_LIBRTE_KNI=n
CONFIG_RTE_LIBRTE_PMD_KNI=n
CONFIG_RTE_KNI_KMOD=n
CONFIG_RTE_KNI_KMOD_ETHTOOL=n
CONFIG_RTE_KNI_PREEMPT_DEFAULT=y
# Compile the pdump library
CONFIG_RTE_LIBRTE_PDUMP=y
# Compile vhost user library
CONFIG_RTE_LIBRTE_VHOST=y
CONFIG_RTE_LIBRTE_VHOST_NUMA=y
CONFIG_RTE_LIBRTE_VHOST_DEBUG=n
# Compile vhost PMD
# To compile, CONFIG_RTE_LIBRTE_VHOST should be enabled.
CONFIG_RTE_LIBRTE_PMD_VHOST=y
# Compile IFC driver
# To compile, CONFIG_RTE_LIBRTE_VHOST and CONFIG_RTE_EAL_VFIO
# should be enabled.
CONFIG_RTE_LIBRTE_IFC_PMD=n
# Compile librte_bpf
CONFIG_RTE_LIBRTE_BPF=n
# allow load BPF from ELF files (requires libelf)
CONFIG_RTE_LIBRTE_BPF_ELF=n
# Compile the test application
CONFIG_RTE_APP_TEST=y
CONFIG_RTE_APP_TEST_RESOURCE_TAR=n
# Compile the procinfo application
CONFIG_RTE_PROC_INFO=y
# Compile the PMD test application
CONFIG_RTE_TEST_PMD=y
CONFIG_RTE_TEST_PMD_RECORD_CORE_CYCLES=n
CONFIG_RTE_TEST_PMD_RECORD_BURST_STATS=n
# Compile the bbdev test application
CONFIG_RTE_TEST_BBDEV=n
# Compile the crypto performance application
CONFIG_RTE_APP_CRYPTO_PERF=n
# Compile the eventdev application
CONFIG_RTE_APP_EVENTDEV=n
CONFIG_RTE_EXEC_ENV_LINUXAPP=y
CONFIG_RTE_LIBRTE_VHOST_POSTCOPY=n
# Common libraries, before Bus/PMDs
# NXP DPAA BUS and drivers
# NXP FSLMC BUS and DPAA2 drivers
# NXP ENETC PMD Driver
CONFIG_RTE_ARCH_X86_64=y
CONFIG_RTE_ARCH_X86=y
CONFIG_RTE_ARCH_64=y
CONFIG_RTE_TOOLCHAIN_GCC=y
CONFIG_RTE_LIBRTE_PMD_XENVIRT=n

View File

@ -4,66 +4,33 @@
%bcond_without tools
# Don't edit Version: and Release: directly, only these:
#% define commit0 0da7f445df445630c794897347ee360d6fe6348b
#% define date 20181127
#% define commit0 7001c8fdb27357c67147c0a13cb3826e48c0f2bf
#% define date 20191128
#% define shortcommit0 %(c=%{commit0}; echo ${c:0:7})
%define ver 18.11.2
%define rel 3
%define ver 23.11
%define rel 1
%define srcname dpdk-stable
%define srcname dpdk%(awk -F. '{ if (NF > 2) print "-stable" }' <<<%{version})
%define pyelftoolsver 0.27
Name: dpdk
Version: %{ver}
Release: %{rel}%{?commit0:.%{date}git%{shortcommit0}}%{?dist}
%if 0%{?fedora} || 0%{?rhel} > 8
Epoch: 2
%endif
URL: http://dpdk.org
%if 0%{?commit0:1}
Source: http://dpdk.org/browse/dpdk/snapshot/dpdk-%{commit0}.tar.xz
Source: https://dpdk.org/browse/dpdk/snapshot/dpdk-%{commit0}.tar.xz
%else
Source: http://fast.dpdk.org/rel/dpdk-%{ver}.tar.xz
Source: https://fast.dpdk.org/rel/dpdk-%{ver}.tar.xz
%endif
# Only needed for creating snapshot tarballs, not used in build itself
Source100: dpdk-snapshot.sh
Source500: configlib.sh
Source501: gen_config_group.sh
Source502: set_config.sh
# Important: source503 is used as the actual copy file
# @TODO: this causes a warning - fix it?
Source504: arm64-armv8a-linuxapp-gcc-config
Source505: ppc_64-power8-linuxapp-gcc-config
Source506: x86_64-native-linuxapp-gcc-config
# Patches only in dpdk package
# Bug 1525039
Patch10: 0001-net-virtio-allocate-vrings-on-device-NUMA-node.patch
# Bug 1700373
Patch11: 0001-net-virtio-add-packed-virtqueue-defines.patch
Patch12: 0002-net-virtio-add-packed-virtqueue-helpers.patch
Patch13: 0003-net-virtio-vring-init-for-packed-queues.patch
Patch14: 0004-net-virtio-dump-packed-virtqueue-data.patch
Patch15: 0005-net-virtio-implement-Tx-path-for-packed-queues.patch
Patch16: 0006-net-virtio-implement-Rx-path-for-packed-queues.patch
Patch17: 0007-net-virtio-support-packed-queue-in-send-command.patch
Patch18: 0008-net-virtio-user-add-option-to-use-packed-queues.patch
Patch19: 0009-net-virtio-user-fail-if-cq-used-with-packed-vq.patch
Patch20: 0010-net-virtio-enable-packed-virtqueues-by-default.patch
Patch21: 0011-net-virtio-avoid-double-accounting-of-bytes.patch
Patch22: 0012-net-virtio-user-fix-packed-vq-option-parsing.patch
Patch23: 0013-net-virtio-user-fix-supported-features-list.patch
Patch24: 0014-net-virtio-check-head-desc-with-correct-wrap-counter.patch
Patch25: 0015-net-virtio-user-support-control-VQ-for-packed.patch
Patch26: 0016-net-virtio-fix-control-VQ.patch
Patch27: 0017-net-virtio-user-fix-control-VQ.patch
Patch28: 0018-vhost-batch-used-descs-chains-write-back-with-packed.patch
Patch29: 0019-net-virtio-fix-interrupt-helper-for-packed-ring.patch
Patch30: 0020-net-virtio-fix-calculation-of-device_event-ptr.patch
Summary: Set of libraries and drivers for fast packet processing
#
@ -79,50 +46,26 @@ License: BSD and LGPLv2 and GPLv2
# needs extensive work to port it to other architectures.
ExclusiveArch: x86_64 aarch64 ppc64le
# machine_arch maps between rpm and dpdk arch name, often same as _target_cpu
# machine_tmpl is the config template machine name, often "native"
# machine is the actual machine name used in the dpdk make system
%ifarch x86_64
%define machine_arch x86_64
%define machine_tmpl native
%define machine default
%endif
%ifarch aarch64
%define machine_arch arm64
%define machine_tmpl armv8a
%define machine armv8a
%endif
%ifarch ppc64le
%define machine_arch ppc_64
%define machine_tmpl power8
%define machine power8
%endif
%define target %{machine_arch}-%{machine_tmpl}-linuxapp-gcc
%define sdkdir %{_datadir}/%{name}
%define docdir %{_docdir}/%{name}
%define incdir %{_includedir}/%{name}
%define pmddir %{_libdir}/%{name}-pmds
%define pmddir %{_libdir}/%{name}-pmds
%if 0%{?rhel} > 7 || 0%{?fedora}
%define _py python3
%define _py_exec %{?__python3}
%else
%define _py python
%define _py_exec %{?__python2}
%endif
%if 0%{?rhel} > 7
# Fix conflicts with README and MAINTAINERS (included in dpdk-doc < 18.11-2)
%if 0%{?rhel} && 0%{?rhel} < 9
# Fix conflicts with README and MAINTAINERS (included in dpdk-doc < 18.11-2),
# this affects only RHEL8.
Conflicts: dpdk-doc < 18.11-2
%endif
BuildRequires: gcc, kernel-headers, zlib-devel, numactl-devel
BuildRequires: doxygen, %{_py}-devel, %{_py}-sphinx
BuildRequires: meson
Source1: https://github.com/eliben/pyelftools/archive/refs/tags/v%{pyelftoolsver}.tar.gz#/pyelftools-%{pyelftoolsver}.tar.gz
%if 0%{?rhel} > 8 || 0%{?fedora}
BuildRequires: python3-pyelftools
%endif
BuildRequires: gcc, zlib-devel, numactl-devel, libarchive-devel
BuildRequires: doxygen, python3-sphinx
%ifarch x86_64
BuildRequires: rdma-core-devel >= 15 libmnl-devel
%global __requires_exclude_from ^%{_libdir}/librte_pmd_mlx[45]_glue\.so.*$
BuildRequires: rdma-core-devel >= 15
%endif
%description
@ -131,7 +74,10 @@ fast packet processing in the user space.
%package devel
Summary: Data Plane Development Kit development files
Requires: %{name}%{?_isa} = %{version}-%{release}
Requires: %{name}%{?_isa} = %{?epoch:%{epoch}:}%{version}-%{release}
%ifarch x86_64
Requires: rdma-core-devel
%endif
%description devel
This package contains the headers and other files needed for developing
@ -147,8 +93,8 @@ API programming documentation for the Data Plane Development Kit.
%if %{with tools}
%package tools
Summary: Tools for setting up Data Plane Development Kit environment
Requires: %{name} = %{version}-%{release}
Requires: kmod pciutils findutils iproute %{_py_exec}
Requires: %{name} = %{?epoch:%{epoch}:}%{version}-%{release}
Requires: kmod pciutils findutils iproute python3
%description tools
%{summary}
@ -165,130 +111,146 @@ as L2 and L3 forwarding.
%endif
%prep
%autosetup -n %{srcname}-%{?commit0:%{commit0}}%{!?commit0:%{ver}} -p1
%if 0%{?rhel} && 0%{?rhel} < 9
%setup -q -a 1 -n %{srcname}-%{?commit0:%{commit0}}%{!?commit0:%{ver}}
%else
%setup -q -n %{srcname}-%{?commit0:%{commit0}}%{!?commit0:%{ver}}
%endif
%autopatch -p1
%build
# In case dpdk-devel is installed
unset RTE_SDK RTE_INCLUDE RTE_TARGET
# Avoid appending second -Wall to everything, it breaks upstream warning
# disablers in makefiles. Strip expclit -march= from optflags since they
# will only guarantee build failures, DPDK is picky with that.
# Note: _hardening_ldflags has to go on the extra cflags line because dpdk is
# astoundingly convoluted in how it processes its linker flags. Fixing it in
# dpdk is the preferred solution, but adjusting to allow a gcc option in the
# ldflags, even when gcc is used as the linker, requires large tree-wide changes
touch obj.o
gcc -### obj.o 2>&1 | awk '/.*collect2.*/ { print $0}' | sed -e 's/\S*\.res\S*//g' -e 's/-z \S*//g' -e 's/[^ ]*\.o//g' -e 's/ /\n/g' | sort -u > ./noopts.txt
gcc -### $RPM_LD_FLAGS obj.o 2>&1 | awk '/.*collect2.*/ {print $0}' | sed -e 's/\S*\.res\S*//g' -e 's/-z \S*//g' -e 's/[^ ]*\.o//g' -e 's/ /\n/g' | sort -u > ./opts.txt
EXTRA_RPM_LDFLAGS=$(comm -13 ./noopts.txt ./opts.txt)
rm -f obj.o
export EXTRA_CFLAGS="$(echo %{optflags} | sed -e 's:-Wall::g' -e 's:-march=[[:alnum:]]* ::g') -Wformat -fPIC %{_hardening_ldflags}"
export EXTRA_LDFLAGS=$(echo %{__global_ldflags} | sed -e's/-Wl,//g' -e's/-spec.*//')
export HOST_EXTRA_CFLAGS="$EXTRA_CFLAGS $EXTRA_RPM_LDFLAGS"
export EXTRA_HOST_LDFLAGS=$(echo %{__global_ldflags} | sed -e's/-spec.*//')
# DPDK defaults to using builder-specific compiler flags. However,
# the config has been changed by specifying CONFIG_RTE_MACHINE=default
# in order to build for a more generic host. NOTE: It is possible that
# the compiler flags used still won't work for all Fedora-supported
# machines, but runtime checks in DPDK will catch those situations.
make V=1 O=%{target} T=%{target} %{?_smp_mflags} config
cp -f %{SOURCE500} %{SOURCE502} "%{_sourcedir}/%{target}-config" .
%{SOURCE502} %{target}-config "%{target}/.config"
make V=1 O=%{target} %{?_smp_mflags}
# Creating PDF's has excessive build-requirements, html docs suffice fine
make V=1 O=%{target} %{?_smp_mflags} doc-api-html doc-guides-html
%if %{with examples}
make V=1 O=%{target}/examples T=%{target} %{?_smp_mflags} examples
%if 0%{?rhel} && 0%{?rhel} < 9
export PYTHONPATH=$(pwd)/pyelftools-%{pyelftoolsver}
%endif
ENABLED_APPS=(
test-pmd
test-bbdev
)
for app in "${ENABLED_APPS[@]}"; do
enable_apps="${enable_apps:+$enable_apps,}"$app
done
ENABLED_DRIVERS=(
bus/pci
bus/vdev
mempool/ring
net/failsafe
net/i40e
net/ring
net/vhost
net/virtio
net/tap
)
%ifarch x86_64
ENABLED_DRIVERS+=(
baseband/acc
bus/auxiliary
bus/vmbus
common/iavf
common/mlx5
common/nfp
net/bnxt
net/enic
net/iavf
net/ice
net/mlx5
net/netvsc
net/nfp
net/qede
net/vdev_netvsc
)
%endif
%ifarch aarch64 x86_64
ENABLED_DRIVERS+=(
net/e1000
net/ixgbe
)
%endif
for driver in "${ENABLED_DRIVERS[@]}"; do
enable_drivers="${enable_drivers:+$enable_drivers,}"$driver
done
# If doing any updates, this must be aligned with:
# https://access.redhat.com/articles/3538141
ENABLED_LIBS=(
bbdev
bitratestats
bpf
cmdline
cryptodev
dmadev
gro
gso
hash
ip_frag
latencystats
member
meter
metrics
pcapng
pdump
security
stack
vhost
)
for lib in "${ENABLED_LIBS[@]}"; do
enable_libs="${enable_libs:+$enable_libs,}"$lib
done
ln -s /usr/bin/true mandb
export PATH=$(pwd):$PATH
%meson --includedir=include/dpdk \
--default-library=shared \
-Ddeveloper_mode=disabled \
-Denable_libs="$enable_libs" \
-Ddrivers_install_subdir=dpdk-pmds \
-Denable_apps="$enable_apps" \
-Denable_docs=true \
-Denable_drivers="$enable_drivers" \
-Dplatform=generic \
-Dmax_ethports=32 \
-Dmax_numa_nodes=8 \
-Dtests=false
# Check drivers and libraries
for driver in "${ENABLED_DRIVERS[@]}"; do
config_token="RTE_$(echo "$driver" | tr [a-z/] [A-Z_])"
! grep -Fqw "$config_token" */rte_build_config.h || continue
echo "!!! Could not find $driver in rte_build_config.h, please check dependencies. !!!"
false
done
for lib in "${ENABLED_LIBS[@]}"; do
config_token="RTE_LIB_$(echo "$lib" | tr [a-z/] [A-Z_])"
! grep -Fqw "$config_token" */rte_build_config.h || continue
echo "!!! Could not find $lib in rte_build_config.h, please check dependencies. !!!"
false
done
%meson_build
%install
# In case dpdk-devel is installed
unset RTE_SDK RTE_INCLUDE RTE_TARGET
%meson_install
%make_install O=%{target} prefix=%{_usr} libdir=%{_libdir}
# Replace /usr/bin/env python with the correct python binary
find %{buildroot}%{sdkdir}/ -name "*.py" -exec \
sed -i -e 's|#!\s*/usr/bin/env python|#!%{_py_exec}|' {} +
# Create a driver directory with symlinks to all pmds
mkdir -p %{buildroot}/%{pmddir}
for f in %{buildroot}/%{_libdir}/*_pmd_*.so.*; do
bn=$(basename ${f})
%ifarch x86_64
case $bn in
librte_pmd_mlx[45]_glue.so.*)
mkdir -p %{buildroot}/%{pmddir}-glue
ln -s ../${bn} %{buildroot}%{pmddir}-glue/${bn}
continue
;;
esac
%endif
ln -s ../${bn} %{buildroot}%{pmddir}/${bn}
done
%if ! %{with tools}
rm -rf %{buildroot}%{sdkdir}/usertools
rm -rf %{buildroot}%{_sbindir}/dpdk-devbind
%endif
rm -f %{buildroot}%{sdkdir}/usertools/dpdk-setup.sh
rm -f %{buildroot}%{sdkdir}/usertools/meson.build
rm -f %{buildroot}%{_bindir}/dpdk-pmdinfo
rm -f %{buildroot}%{_bindir}/dpdk-test-crypto-perf
rm -f %{buildroot}%{_bindir}/dpdk-test-eventdev
%if %{with examples}
find %{target}/examples/ -name "*.map" | xargs rm -f
for f in %{target}/examples/*/%{target}/app/*; do
bn=`basename ${f}`
cp -p ${f} %{buildroot}%{_bindir}/dpdk-${bn}
done
%else
rm -rf %{buildroot}%{sdkdir}/examples
%endif
# Setup RTE_SDK environment as expected by apps etc
mkdir -p %{buildroot}/%{_sysconfdir}/profile.d
cat << EOF > %{buildroot}/%{_sysconfdir}/profile.d/dpdk-sdk-%{_arch}.sh
if [ -z "\${RTE_SDK}" ]; then
export RTE_SDK="%{sdkdir}"
export RTE_TARGET="%{target}"
export RTE_INCLUDE="%{incdir}"
fi
EOF
cat << EOF > %{buildroot}/%{_sysconfdir}/profile.d/dpdk-sdk-%{_arch}.csh
if ( ! \$RTE_SDK ) then
setenv RTE_SDK "%{sdkdir}"
setenv RTE_TARGET "%{target}"
setenv RTE_INCLUDE "%{incdir}"
endif
EOF
# Fixup target machine mismatch
sed -i -e 's:-%{machine_tmpl}-:-%{machine}-:g' %{buildroot}/%{_sysconfdir}/profile.d/dpdk-sdk*
rm -f %{buildroot}%{_libdir}/*.a
# Taken from debian/rules
rm -f %{buildroot}%{docdir}/html/.buildinfo
rm -f %{buildroot}%{docdir}/html/objects.inv
rm -rf %{buildroot}%{docdir}/html/.doctrees
find %{buildroot}%{_datadir}/man/ -type f -a ! -iname "*rte_*" -exec rm {} \;
%files
# BSD
%doc README MAINTAINERS
%{_bindir}/testpmd
%{_bindir}/dpdk-procinfo
%{_bindir}/dpdk-pdump
%{_bindir}/dpdk-testpmd
%{_bindir}/dpdk-test-bbdev
%dir %{pmddir}
%{_libdir}/*.so.*
%{pmddir}/*.so.*
%ifarch x86_64
%dir %{pmddir}-glue
%{pmddir}-glue/*.so.*
%endif
%files doc
#BSD
@ -301,28 +263,77 @@ sed -i -e 's:-%{machine_tmpl}-:-%{machine}-:g' %{buildroot}/%{_sysconfdir}/profi
%{incdir}/
%{sdkdir}/
%if %{with tools}
%exclude %{sdkdir}/usertools/
%exclude %{_bindir}/dpdk-*.py
%endif
%if %{with examples}
%exclude %{sdkdir}/examples/
%endif
%{_sysconfdir}/profile.d/dpdk-sdk-*.*
%{_libdir}/*.so
%{pmddir}/*.so
%{_libdir}/pkgconfig/libdpdk.pc
%{_libdir}/pkgconfig/libdpdk-libs.pc
%{_datadir}/man
%if %{with examples}
%files examples
%exclude %{_bindir}/dpdk-procinfo
%exclude %{_bindir}/dpdk-pdump
%{_bindir}/dpdk-*
%doc %{sdkdir}/examples/
%endif
%if %{with tools}
%files tools
%{sdkdir}/usertools/
%{_sbindir}/dpdk-devbind
%{_bindir}/dpdk-*.py
%endif
%changelog
* Fri Dec 15 2023 David Marchand <david.marchand@redhat.com> - 23.11-1
- Rebase to 23.11 (RHEL-19584)
* Fri Dec 23 2022 Timothy Redaelli <tredaelli@redhat.com> - 21.11-3
- Version bump just to be sure it's updated from dpdk-21.11-2.el8_7
* Wed Oct 26 2022 Timothy Redaelli <tredaelli@redhat.com> - 21.11-2
- Backport fixes for CVE-2022-2132 (#2107171)
* Tue Nov 23 2021 David Marchand <david.marchand@redhat.com> - 21.11-1
- Rebase to 21.11 (#2029497)
* Tue Feb 16 2021 Timothy Redaelli <tredaelli@redhat.com> - 20.11-3
- Fix gating since on DPDK 20.11 testpmd is called dpdk-testpmd
* Wed Feb 10 2021 Timothy Redaelli <tredaelli@redhat.com> - 20.11-2
- Enable ice PMD for x86_64 (#1927179)
* Tue Dec 01 2020 Timothy Redaelli <tredaelli@redhat.com> - 20.11-1
- Rebase DPDK to 20.11 using meson build system (#1908446)
* Thu Aug 13 2020 Timothy Redaelli <tredaelli@redhat.com> - 19.11.3-1
- Rebase DPDK to 19.11.3 (#1868708)
* Wed May 20 2020 Timothy Redaelli <tredaelli@redhat.com> - 19.11.2-1
- Rebase DPDK to 19.11.2 (#1836830, #1837024, #1837030, #1837022)
* Fri Apr 17 2020 Timothy Redaelli <tredaelli@redhat.com> - 19.11.1-1
- Rebase DPDK to 19.11.1 (#1824905)
- Remove dpdk-pmdinfo.py (#1801361)
- Add Requires: rdma-core-devel libmnl-devel on x86_64 for dpdk-devel (#1813252)
* Thu Feb 20 2020 Timothy Redaelli <tredaelli@redhat.com> - 19.11-4
- Remove MLX{4,5} glue libraries since RHEL 8 ships the correct libibverbs
library. (#1805140)
* Mon Feb 17 2020 Timothy Redaelli <tredaelli@redhat.com> - 19.11-3
- Remove /usr/share/dpdk/mk/exec-env/{bsd,linux}app symlinks (#1773889)
* Thu Feb 13 2020 Timothy Redaelli <tredaelli@redhat.com> - 19.11-2
- Add pretrans to handle /usr/share/dpdk/mk/exec-env/{bsd,linux}app (#1773889)
* Thu Nov 28 2019 David Marchand <david.marchand@redhat.com> - 19.11-1
- Rebase to 19.11 (#1773889)
- Remove dpdk-pdump (#1779229)
* Mon Nov 04 2019 Timothy Redaelli <tredaelli@redhat.com> - 18.11.2-4
- Pass the correct LDFLAGS to host apps (dpdk-pmdinfogen) too (#1755538)
* Mon Sep 16 2019 Jens Freimann <jfreimann@redhat.com> - 18.11.2-3
- Add fix for wrong pointer calculation to fix Covscan issue
- https://cov01.lab.eng.brq.redhat.com/covscanhub/task/135452/log/added.html
@ -538,7 +549,7 @@ sed -i -e 's:-%{machine_tmpl}-:-%{machine}-:g' %{buildroot}/%{_sysconfdir}/profi
- New snapshot
- Add spec option for enabling vhost-user instead of vhost-cuse
- Build requires fuse-devel only with vhost-cuse
- Add virtual provide for vhost user/cuse tracking
- Add virtual provide for vhost user/cuse tracking
* Fri Mar 27 2015 Panu Matilainen <pmatilai@redhat.com> - 2.0.0-0.2038.git91a8743e.3
- Disable vhost-user for now to get vhost-cuse support, argh.
@ -695,7 +706,7 @@ sed -i -e 's:-%{machine_tmpl}-:-%{machine}-:g' %{buildroot}/%{_sysconfdir}/profi
- Remove ix86 from ExclusiveArch -- it does not build with above changes
* Thu Jul 10 2014 - Neil Horman <nhorman@tuxdriver.com> - 1.7.0-1.0
- Update source to official 1.7.0 release
- Update source to official 1.7.0 release
* Thu Jul 03 2014 - Neil Horman <nhorman@tuxdriver.com>
- Fixing up release numbering