From 2c0d7bdaea1e7b93512e686a5de8ff5eb3025970 Mon Sep 17 00:00:00 2001 From: CentOS Sources Date: Tue, 5 Nov 2019 14:41:50 -0500 Subject: [PATCH] import dpdk-18.11.2-3.el8 --- .dpdk.metadata | 2 +- .gitignore | 2 +- ...mbus-fix-race-in-subchannel-creation.patch | 57 -- ...-virtio-add-packed-virtqueue-defines.patch | 102 +++ ...-allocate-vrings-on-device-NUMA-node.patch | 78 +++ SOURCES/0002-net-netvsc-enable-SR-IOV.patch | 38 -- ...-virtio-add-packed-virtqueue-helpers.patch | 141 ++++ ...disable-multi-queue-on-older-servers.patch | 46 -- ...-virtio-vring-init-for-packed-queues.patch | 175 +++++ ...et-virtio-dump-packed-virtqueue-data.patch | 41 ++ ...-implement-Tx-path-for-packed-queues.patch | 448 +++++++++++++ ...-implement-Rx-path-for-packed-queues.patch | 613 ++++++++++++++++++ ...support-packed-queue-in-send-command.patch | 142 ++++ ...user-add-option-to-use-packed-queues.patch | 139 ++++ ...-user-fail-if-cq-used-with-packed-vq.patch | 44 ++ ...-enable-packed-virtqueues-by-default.patch | 45 ++ ...tio-avoid-double-accounting-of-bytes.patch | 33 + ...io-user-fix-packed-vq-option-parsing.patch | 85 +++ ...tio-user-fix-supported-features-list.patch | 36 + ...-head-desc-with-correct-wrap-counter.patch | 98 +++ ...o-user-support-control-VQ-for-packed.patch | 277 ++++++++ SOURCES/0016-net-virtio-fix-control-VQ.patch | 197 ++++++ .../0017-net-virtio-user-fix-control-VQ.patch | 146 +++++ ...-descs-chains-write-back-with-packed.patch | 97 +++ ...fix-interrupt-helper-for-packed-ring.patch | 42 ++ ...-fix-calculation-of-device_event-ptr.patch | 30 + SPECS/dpdk.spec | 74 ++- 27 files changed, 3079 insertions(+), 149 deletions(-) delete mode 100644 SOURCES/0001-bus-vmbus-fix-race-in-subchannel-creation.patch create mode 100644 SOURCES/0001-net-virtio-add-packed-virtqueue-defines.patch create mode 100644 SOURCES/0001-net-virtio-allocate-vrings-on-device-NUMA-node.patch delete mode 100644 SOURCES/0002-net-netvsc-enable-SR-IOV.patch create mode 100644 SOURCES/0002-net-virtio-add-packed-virtqueue-helpers.patch delete mode 100644 SOURCES/0003-net-netvsc-disable-multi-queue-on-older-servers.patch create mode 100644 SOURCES/0003-net-virtio-vring-init-for-packed-queues.patch create mode 100644 SOURCES/0004-net-virtio-dump-packed-virtqueue-data.patch create mode 100644 SOURCES/0005-net-virtio-implement-Tx-path-for-packed-queues.patch create mode 100644 SOURCES/0006-net-virtio-implement-Rx-path-for-packed-queues.patch create mode 100644 SOURCES/0007-net-virtio-support-packed-queue-in-send-command.patch create mode 100644 SOURCES/0008-net-virtio-user-add-option-to-use-packed-queues.patch create mode 100644 SOURCES/0009-net-virtio-user-fail-if-cq-used-with-packed-vq.patch create mode 100644 SOURCES/0010-net-virtio-enable-packed-virtqueues-by-default.patch create mode 100644 SOURCES/0011-net-virtio-avoid-double-accounting-of-bytes.patch create mode 100644 SOURCES/0012-net-virtio-user-fix-packed-vq-option-parsing.patch create mode 100644 SOURCES/0013-net-virtio-user-fix-supported-features-list.patch create mode 100644 SOURCES/0014-net-virtio-check-head-desc-with-correct-wrap-counter.patch create mode 100644 SOURCES/0015-net-virtio-user-support-control-VQ-for-packed.patch create mode 100644 SOURCES/0016-net-virtio-fix-control-VQ.patch create mode 100644 SOURCES/0017-net-virtio-user-fix-control-VQ.patch create mode 100644 SOURCES/0018-vhost-batch-used-descs-chains-write-back-with-packed.patch create mode 100644 SOURCES/0019-net-virtio-fix-interrupt-helper-for-packed-ring.patch create mode 100644 
SOURCES/0020-net-virtio-fix-calculation-of-device_event-ptr.patch diff --git a/.dpdk.metadata b/.dpdk.metadata index 2f634f6..8677a15 100644 --- a/.dpdk.metadata +++ b/.dpdk.metadata @@ -1 +1 @@ -9f538fb3449205bccca93073d79176636134705b SOURCES/dpdk-18.11.tar.xz +6e04c3e3a82f91ebe0360b8067df59e2b774924d SOURCES/dpdk-18.11.2.tar.xz diff --git a/.gitignore b/.gitignore index be3a994..d90f2aa 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1 @@ -SOURCES/dpdk-18.11.tar.xz +SOURCES/dpdk-18.11.2.tar.xz diff --git a/SOURCES/0001-bus-vmbus-fix-race-in-subchannel-creation.patch b/SOURCES/0001-bus-vmbus-fix-race-in-subchannel-creation.patch deleted file mode 100644 index b8517cd..0000000 --- a/SOURCES/0001-bus-vmbus-fix-race-in-subchannel-creation.patch +++ /dev/null @@ -1,57 +0,0 @@ -From 25363eb99bc43443bec354abea1e40db61280b30 Mon Sep 17 00:00:00 2001 -From: Stephen Hemminger -Date: Wed, 5 Dec 2018 14:11:56 -0800 -Subject: [PATCH 1/3] bus/vmbus: fix race in subchannel creation - -[ upstream commit 4970103e89f4f828669acf3b465e984fdc891e1e ] - -When using multiple queues, there was a race with the kernel -in setting up the second channel. This regression is due to a kernel change -which does not allow accessing sysfs files for Hyper-V channels that are not opened. - -The fix is simple, just move the logic to detect not ready -sub channels earlier in the existing loop. - -Fixes: 831dba47bd36 ("bus/vmbus: add Hyper-V virtual bus support") - -Reported-by: Mohammed Gamal -Signed-off-by: Stephen Hemminger -(cherry picked from commit ca17e6624251b05cf188997cffc3e1ab2e50561a) -Signed-off-by: Maxime Coquelin ---- - drivers/bus/vmbus/linux/vmbus_uio.c | 12 ++++++------ - 1 file changed, 6 insertions(+), 6 deletions(-) - -diff --git a/drivers/bus/vmbus/linux/vmbus_uio.c b/drivers/bus/vmbus/linux/vmbus_uio.c -index 12e97e3a4..38df4d724 100644 ---- a/drivers/bus/vmbus/linux/vmbus_uio.c -+++ b/drivers/bus/vmbus/linux/vmbus_uio.c -@@ -357,6 +357,12 @@ int vmbus_uio_get_subchan(struct vmbus_channel *primary, - continue; - } - -+ if (!vmbus_isnew_subchannel(primary, relid)) -+ continue; /* Already know about you */ -+ -+ if (!vmbus_uio_ring_present(dev, relid)) -+ continue; /* Ring may not be ready yet */ -+ - snprintf(subchan_path, sizeof(subchan_path), "%s/%lu", - chan_path, relid); - err = vmbus_uio_sysfs_read(subchan_path, "subchannel_id", -@@ -370,12 +376,6 @@ int vmbus_uio_get_subchan(struct vmbus_channel *primary, - if (subid == 0) - continue; /* skip primary channel */ - -- if (!vmbus_isnew_subchannel(primary, relid)) -- continue; -- -- if (!vmbus_uio_ring_present(dev, relid)) -- continue; /* Ring may not be ready yet */ -- - err = vmbus_uio_sysfs_read(subchan_path, "monitor_id", - &monid, UINT8_MAX); - if (err) { --- -2.20.1 - diff --git a/SOURCES/0001-net-virtio-add-packed-virtqueue-defines.patch b/SOURCES/0001-net-virtio-add-packed-virtqueue-defines.patch new file mode 100644 index 0000000..e1d943b --- /dev/null +++ b/SOURCES/0001-net-virtio-add-packed-virtqueue-defines.patch @@ -0,0 +1,102 @@ +From 93f21370ca38ae61dc2d938adf569f6668381c32 Mon Sep 17 00:00:00 2001 +From: Jens Freimann +Date: Mon, 17 Dec 2018 22:31:30 +0100 +Subject: [PATCH 01/18] net/virtio: add packed virtqueue defines + +[ upstream commit 4c3f5822eb21476fbbd807a7c40584c1090695e5 ] + +Signed-off-by: Jens Freimann +Reviewed-by: Maxime Coquelin +(cherry picked from commit 4c3f5822eb21476fbbd807a7c40584c1090695e5) +Signed-off-by: Jens Freimann +--- + drivers/net/virtio/virtio_pci.h | 1 + + drivers/net/virtio/virtio_ring.h | 30 
++++++++++++++++++++++++++++++ + drivers/net/virtio/virtqueue.h | 6 ++++++ + 3 files changed, 37 insertions(+) + +diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h +index e961a58ca..4c975a531 100644 +--- a/drivers/net/virtio/virtio_pci.h ++++ b/drivers/net/virtio/virtio_pci.h +@@ -113,6 +113,7 @@ struct virtnet_ctl; + + #define VIRTIO_F_VERSION_1 32 + #define VIRTIO_F_IOMMU_PLATFORM 33 ++#define VIRTIO_F_RING_PACKED 34 + + /* + * Some VirtIO feature bits (currently bits 28 through 31) are +diff --git a/drivers/net/virtio/virtio_ring.h b/drivers/net/virtio/virtio_ring.h +index 9e3c2a015..464449074 100644 +--- a/drivers/net/virtio/virtio_ring.h ++++ b/drivers/net/virtio/virtio_ring.h +@@ -15,6 +15,10 @@ + #define VRING_DESC_F_WRITE 2 + /* This means the buffer contains a list of buffer descriptors. */ + #define VRING_DESC_F_INDIRECT 4 ++/* This flag means the descriptor was made available by the driver */ ++#define VRING_DESC_F_AVAIL(b) ((uint16_t)(b) << 7) ++/* This flag means the descriptor was used by the device */ ++#define VRING_DESC_F_USED(b) ((uint16_t)(b) << 15) + + /* The Host uses this in used->flags to advise the Guest: don't kick me + * when you add a buffer. It's unreliable, so it's simply an +@@ -54,6 +58,32 @@ struct vring_used { + struct vring_used_elem ring[0]; + }; + ++/* For support of packed virtqueues in Virtio 1.1 the format of descriptors ++ * looks like this. ++ */ ++struct vring_packed_desc { ++ uint64_t addr; ++ uint32_t len; ++ uint16_t id; ++ uint16_t flags; ++}; ++ ++#define RING_EVENT_FLAGS_ENABLE 0x0 ++#define RING_EVENT_FLAGS_DISABLE 0x1 ++#define RING_EVENT_FLAGS_DESC 0x2 ++struct vring_packed_desc_event { ++ uint16_t desc_event_off_wrap; ++ uint16_t desc_event_flags; ++}; ++ ++struct vring_packed { ++ unsigned int num; ++ struct vring_packed_desc *desc_packed; ++ struct vring_packed_desc_event *driver_event; ++ struct vring_packed_desc_event *device_event; ++ ++}; ++ + struct vring { + unsigned int num; + struct vring_desc *desc; +diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h +index 2e2abf15b..1525c7d10 100644 +--- a/drivers/net/virtio/virtqueue.h ++++ b/drivers/net/virtio/virtqueue.h +@@ -161,11 +161,17 @@ struct virtio_pmd_ctrl { + struct vq_desc_extra { + void *cookie; + uint16_t ndescs; ++ uint16_t next; + }; + + struct virtqueue { + struct virtio_hw *hw; /**< virtio_hw structure pointer. */ + struct vring vq_ring; /**< vring keeping desc, used and avail */ ++ struct vring_packed ring_packed; /**< vring keeping descs */ ++ bool avail_wrap_counter; ++ bool used_wrap_counter; ++ uint16_t event_flags_shadow; ++ uint16_t avail_used_flags; + /** + * Last consumed descriptor in the used table, + * trails vq_ring.used->idx. +-- +2.21.0 + diff --git a/SOURCES/0001-net-virtio-allocate-vrings-on-device-NUMA-node.patch b/SOURCES/0001-net-virtio-allocate-vrings-on-device-NUMA-node.patch new file mode 100644 index 0000000..fb88a05 --- /dev/null +++ b/SOURCES/0001-net-virtio-allocate-vrings-on-device-NUMA-node.patch @@ -0,0 +1,78 @@ +From 8093f82b3e52efe012e46c429b7af4e82492f71c Mon Sep 17 00:00:00 2001 +From: Maxime Coquelin +Date: Tue, 27 Nov 2018 11:54:27 +0100 +Subject: [PATCH] net/virtio: allocate vrings on device NUMA node + +[ upstream commit 4a5140ab17d29e77eefa47b5cb514238e8e0c132 ] + +When a guest is spanned on multiple NUMA nodes and +multiple Virtio devices are spanned onto these nodes, +we expect that their ring memory is allocated in the +right memory node. 
+ +Otherwise, vCPUs from node A may be polling Virtio rings +allocated on node B, which would increase QPI bandwidth +and impact performance. + +Signed-off-by: Maxime Coquelin +Reviewed-by: David Marchand +Signed-off-by: Maxime Coquelin +--- + drivers/net/virtio/virtio_ethdev.c | 12 +++++++----- + 1 file changed, 7 insertions(+), 5 deletions(-) + +diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c +index 2ba66d291..cb2b2e0bf 100644 +--- a/drivers/net/virtio/virtio_ethdev.c ++++ b/drivers/net/virtio/virtio_ethdev.c +@@ -335,8 +335,10 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx) + void *sw_ring = NULL; + int queue_type = virtio_get_queue_type(hw, vtpci_queue_idx); + int ret; ++ int numa_node = dev->device->numa_node; + +- PMD_INIT_LOG(DEBUG, "setting up queue: %u", vtpci_queue_idx); ++ PMD_INIT_LOG(INFO, "setting up queue: %u on NUMA node %d", ++ vtpci_queue_idx, numa_node); + + /* + * Read the virtqueue size from the Queue Size field +@@ -372,7 +374,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx) + } + + vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE, +- SOCKET_ID_ANY); ++ numa_node); + if (vq == NULL) { + PMD_INIT_LOG(ERR, "can not allocate vq"); + return -ENOMEM; +@@ -392,7 +394,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx) + size, vq->vq_ring_size); + + mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size, +- SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG, ++ numa_node, RTE_MEMZONE_IOVA_CONTIG, + VIRTIO_PCI_VRING_ALIGN); + if (mz == NULL) { + if (rte_errno == EEXIST) +@@ -418,7 +420,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx) + snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_vq%d_hdr", + dev->data->port_id, vtpci_queue_idx); + hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz, +- SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG, ++ numa_node, RTE_MEMZONE_IOVA_CONTIG, + RTE_CACHE_LINE_SIZE); + if (hdr_mz == NULL) { + if (rte_errno == EEXIST) +@@ -435,7 +437,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx) + sizeof(vq->sw_ring[0]); + + sw_ring = rte_zmalloc_socket("sw_ring", sz_sw, +- RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY); ++ RTE_CACHE_LINE_SIZE, numa_node); + if (!sw_ring) { + PMD_INIT_LOG(ERR, "can not allocate RX soft ring"); + ret = -ENOMEM; +-- +2.20.1 + diff --git a/SOURCES/0002-net-netvsc-enable-SR-IOV.patch b/SOURCES/0002-net-netvsc-enable-SR-IOV.patch deleted file mode 100644 index 393733c..0000000 --- a/SOURCES/0002-net-netvsc-enable-SR-IOV.patch +++ /dev/null @@ -1,38 +0,0 @@ -From da9c7a3059fb4cffef8d1101a247fafabd9be7bd Mon Sep 17 00:00:00 2001 -From: Stephen Hemminger -Date: Wed, 5 Dec 2018 14:11:57 -0800 -Subject: [PATCH 2/3] net/netvsc: enable SR-IOV - -[ upstream commit 825ab257b5ce8235ab0cdc260e5b7b757e102875 ] - -Make DPDK enable SRIOV flag in same way as Linux and FreeBSD. 
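
A side note on the vring NUMA patch imported just above (0001-net-virtio-allocate-vrings-on-device-NUMA-node.patch): the change amounts to passing the device's NUMA node instead of SOCKET_ID_ANY to every allocation tied to a queue. Below is a minimal sketch of that pattern, assuming DPDK's rte_zmalloc_socket() API and dev->device->numa_node as used in the patch; the helper name is hypothetical and not part of the driver. Note that rte_zmalloc_socket() accepts SOCKET_ID_ANY (-1), so the same call still works when the bus cannot report a node.

#include <rte_ethdev.h>
#include <rte_malloc.h>

/*
 * Illustrative helper, not part of the patch: allocate a queue-private
 * structure on the NUMA node of the device that owns the queue, as the
 * patch above does for the vq, its ring memzone and the Rx SW ring.
 */
static void *
vq_alloc_on_dev_node(struct rte_eth_dev *dev, size_t size)
{
	int node = dev->device->numa_node; /* may be SOCKET_ID_ANY (-1) */

	return rte_zmalloc_socket("vq", size, RTE_CACHE_LINE_SIZE, node);
}
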
- -Fixes: dc7680e8597c ("net/netvsc: support integrated VF") - -Signed-off-by: Stephen Hemminger -(cherry picked from commit 21dc946c2b5524c7e6ec1fe4079864f3322dd483) -Signed-off-by: Maxime Coquelin ---- - drivers/net/netvsc/hn_nvs.c | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/drivers/net/netvsc/hn_nvs.c b/drivers/net/netvsc/hn_nvs.c -index 9690c5f8a..d58770e04 100644 ---- a/drivers/net/netvsc/hn_nvs.c -+++ b/drivers/net/netvsc/hn_nvs.c -@@ -326,9 +326,9 @@ hn_nvs_conf_ndis(struct hn_data *hv, unsigned int mtu) - conf.mtu = mtu + ETHER_HDR_LEN; - conf.caps = NVS_NDIS_CONF_VLAN; - -- /* TODO enable SRIOV */ -- //if (hv->nvs_ver >= NVS_VERSION_5) -- // conf.caps |= NVS_NDIS_CONF_SRIOV; -+ /* enable SRIOV */ -+ if (hv->nvs_ver >= NVS_VERSION_5) -+ conf.caps |= NVS_NDIS_CONF_SRIOV; - - /* NOTE: No response. */ - error = hn_nvs_req_send(hv, &conf, sizeof(conf)); --- -2.20.1 - diff --git a/SOURCES/0002-net-virtio-add-packed-virtqueue-helpers.patch b/SOURCES/0002-net-virtio-add-packed-virtqueue-helpers.patch new file mode 100644 index 0000000..260a415 --- /dev/null +++ b/SOURCES/0002-net-virtio-add-packed-virtqueue-helpers.patch @@ -0,0 +1,141 @@ +From 652a2e3a1ba0db81ae1814e8c3cb989e9e89c4e0 Mon Sep 17 00:00:00 2001 +From: Jens Freimann +Date: Mon, 17 Dec 2018 22:31:31 +0100 +Subject: [PATCH 02/18] net/virtio: add packed virtqueue helpers + +[ upstream commit e9f4feb7e6225f671b59375aff44b9d576121577 ] + +Add helper functions to set/clear and check descriptor flags. + +Signed-off-by: Jens Freimann +Reviewed-by: Maxime Coquelin +(cherry picked from commit e9f4feb7e6225f671b59375aff44b9d576121577) +Signed-off-by: Jens Freimann +--- + drivers/net/virtio/virtio_pci.h | 6 +++ + drivers/net/virtio/virtqueue.h | 72 ++++++++++++++++++++++++++++++++- + 2 files changed, 76 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h +index 4c975a531..b22b62dad 100644 +--- a/drivers/net/virtio/virtio_pci.h ++++ b/drivers/net/virtio/virtio_pci.h +@@ -315,6 +315,12 @@ vtpci_with_feature(struct virtio_hw *hw, uint64_t bit) + return (hw->guest_features & (1ULL << bit)) != 0; + } + ++static inline int ++vtpci_packed_queue(struct virtio_hw *hw) ++{ ++ return vtpci_with_feature(hw, VIRTIO_F_RING_PACKED); ++} ++ + /* + * Function declaration from virtio_pci.c + */ +diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h +index 1525c7d10..c32812427 100644 +--- a/drivers/net/virtio/virtqueue.h ++++ b/drivers/net/virtio/virtqueue.h +@@ -251,6 +251,31 @@ struct virtio_tx_region { + __attribute__((__aligned__(16))); + }; + ++static inline int ++desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq) ++{ ++ uint16_t used, avail, flags; ++ ++ flags = desc->flags; ++ used = !!(flags & VRING_DESC_F_USED(1)); ++ avail = !!(flags & VRING_DESC_F_AVAIL(1)); ++ ++ return avail == used && used == vq->used_wrap_counter; ++} ++ ++ ++static inline void ++vring_desc_init_packed(struct virtqueue *vq, int n) ++{ ++ int i; ++ for (i = 0; i < n - 1; i++) { ++ vq->ring_packed.desc_packed[i].id = i; ++ vq->vq_descx[i].next = i + 1; ++ } ++ vq->ring_packed.desc_packed[i].id = i; ++ vq->vq_descx[i].next = VQ_RING_DESC_CHAIN_END; ++} ++ + /* Chain all the descriptors in the ring with an END */ + static inline void + vring_desc_init(struct vring_desc *dp, uint16_t n) +@@ -262,13 +287,53 @@ vring_desc_init(struct vring_desc *dp, uint16_t n) + dp[i].next = VQ_RING_DESC_CHAIN_END; + } + ++/** ++ * Tell the backend not to interrupt 
us. ++ */ ++static inline void ++virtqueue_disable_intr_packed(struct virtqueue *vq) ++{ ++ uint16_t *event_flags = &vq->ring_packed.driver_event->desc_event_flags; ++ ++ *event_flags = RING_EVENT_FLAGS_DISABLE; ++} ++ ++ + /** + * Tell the backend not to interrupt us. + */ + static inline void + virtqueue_disable_intr(struct virtqueue *vq) + { +- vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; ++ if (vtpci_packed_queue(vq->hw)) ++ virtqueue_disable_intr_packed(vq); ++ else ++ vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; ++} ++ ++/** ++ * Tell the backend to interrupt. Implementation for packed virtqueues. ++ */ ++static inline void ++virtqueue_enable_intr_packed(struct virtqueue *vq) ++{ ++ uint16_t *event_flags = &vq->ring_packed.driver_event->desc_event_flags; ++ ++ ++ if (vq->event_flags_shadow == RING_EVENT_FLAGS_DISABLE) { ++ virtio_wmb(); ++ vq->event_flags_shadow = RING_EVENT_FLAGS_ENABLE; ++ *event_flags = vq->event_flags_shadow; ++ } ++} ++ ++/** ++ * Tell the backend to interrupt. Implementation for split virtqueues. ++ */ ++static inline void ++virtqueue_enable_intr_split(struct virtqueue *vq) ++{ ++ vq->vq_ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT); + } + + /** +@@ -277,7 +342,10 @@ virtqueue_disable_intr(struct virtqueue *vq) + static inline void + virtqueue_enable_intr(struct virtqueue *vq) + { +- vq->vq_ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT); ++ if (vtpci_packed_queue(vq->hw)) ++ virtqueue_enable_intr_packed(vq); ++ else ++ virtqueue_enable_intr_split(vq); + } + + /** +-- +2.21.0 + diff --git a/SOURCES/0003-net-netvsc-disable-multi-queue-on-older-servers.patch b/SOURCES/0003-net-netvsc-disable-multi-queue-on-older-servers.patch deleted file mode 100644 index 6d055d4..0000000 --- a/SOURCES/0003-net-netvsc-disable-multi-queue-on-older-servers.patch +++ /dev/null @@ -1,46 +0,0 @@ -From 0598625d2e17374b7d5693972f5acb59fef25f63 Mon Sep 17 00:00:00 2001 -From: Stephen Hemminger -Date: Wed, 5 Dec 2018 14:11:58 -0800 -Subject: [PATCH 3/3] net/netvsc: disable multi-queue on older servers - -[ upstream commit afbc22bf51ab98b9b61b11eb6d38278a9d577111 ] - -NDIS multi-queue support is only in WS2012 or later. Check the NDIS -version to limit to single queue on older versions. Similar code -exists in Linux driver. 
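
Returning to the packed virtqueue defines and helpers imported above (patches 0001 and 0002): in a packed ring a descriptor's ownership is encoded in two flag bits, AVAIL (bit 7) and USED (bit 15), interpreted against a per-ring wrap counter rather than against a separate used ring. The standalone sketch below, with hypothetical names, mirrors what desc_is_used() and the avail_used_flags computation in those patches do.

#include <stdbool.h>
#include <stdint.h>

/* Same bit positions as VRING_DESC_F_AVAIL/USED in patch 0001. */
#define DESC_F_AVAIL(b) ((uint16_t)(b) << 7)
#define DESC_F_USED(b)  ((uint16_t)(b) << 15)

/*
 * The device marks a descriptor as used by writing AVAIL == USED ==
 * the wrap counter the driver used when making it available.
 */
static bool
packed_desc_is_used(uint16_t flags, bool used_wrap_counter)
{
	bool avail = !!(flags & DESC_F_AVAIL(1));
	bool used  = !!(flags & DESC_F_USED(1));

	return avail == used && used == used_wrap_counter;
}

/* Flags the driver writes when posting a buffer to the device. */
static uint16_t
packed_desc_avail_flags(bool avail_wrap_counter)
{
	return DESC_F_AVAIL(avail_wrap_counter) |
	       DESC_F_USED(!avail_wrap_counter);
}
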
- -Fixes: 4e9c73e96e83 ("net/netvsc: add Hyper-V network device") - -Signed-off-by: Stephen Hemminger -(cherry picked from commit d387b7ae45a520970ff55ea6ce75b48d5e69c4d9) -Signed-off-by: Maxime Coquelin ---- - drivers/net/netvsc/hn_ethdev.c | 5 +++++ - 1 file changed, 5 insertions(+) - -diff --git a/drivers/net/netvsc/hn_ethdev.c b/drivers/net/netvsc/hn_ethdev.c -index b330bf3d7..1256fa399 100644 ---- a/drivers/net/netvsc/hn_ethdev.c -+++ b/drivers/net/netvsc/hn_ethdev.c -@@ -732,6 +732,7 @@ eth_hn_dev_init(struct rte_eth_dev *eth_dev) - hv->chim_res = &vmbus->resource[HV_SEND_BUF_MAP]; - hv->port_id = eth_dev->data->port_id; - hv->latency = HN_CHAN_LATENCY_NS; -+ hv->max_queues = 1; - - err = hn_parse_args(eth_dev); - if (err) -@@ -770,6 +771,10 @@ eth_hn_dev_init(struct rte_eth_dev *eth_dev) - if (err) - goto failed; - -+ /* Multi queue requires later versions of windows server */ -+ if (hv->nvs_ver < NVS_VERSION_5) -+ return 0; -+ - max_chan = rte_vmbus_max_channels(vmbus); - PMD_INIT_LOG(DEBUG, "VMBus max channels %d", max_chan); - if (max_chan <= 0) --- -2.20.1 - diff --git a/SOURCES/0003-net-virtio-vring-init-for-packed-queues.patch b/SOURCES/0003-net-virtio-vring-init-for-packed-queues.patch new file mode 100644 index 0000000..b8a05a5 --- /dev/null +++ b/SOURCES/0003-net-virtio-vring-init-for-packed-queues.patch @@ -0,0 +1,175 @@ +From 4e832cad1879f87a694e2f78b8718f986f7c76e2 Mon Sep 17 00:00:00 2001 +From: Jens Freimann +Date: Mon, 17 Dec 2018 22:31:32 +0100 +Subject: [PATCH 03/18] net/virtio: vring init for packed queues + +[ upstream commit f803734b0f2e6c556d9bf7fe8f11638429e3a00f ] + +Add and initialize descriptor data structures. + +Signed-off-by: Jens Freimann +Signed-off-by: Tiwei Bie +Reviewed-by: Maxime Coquelin +(cherry picked from commit f803734b0f2e6c556d9bf7fe8f11638429e3a00f) +Signed-off-by: Jens Freimann +--- + drivers/net/virtio/virtio_ethdev.c | 32 ++++++++++++++++++++---------- + drivers/net/virtio/virtio_ring.h | 28 ++++++++++++++++++++++---- + drivers/net/virtio/virtqueue.h | 2 +- + 3 files changed, 46 insertions(+), 16 deletions(-) + +diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c +index 2ba66d291..ee52e3cdb 100644 +--- a/drivers/net/virtio/virtio_ethdev.c ++++ b/drivers/net/virtio/virtio_ethdev.c +@@ -299,20 +299,22 @@ virtio_init_vring(struct virtqueue *vq) + + PMD_INIT_FUNC_TRACE(); + +- /* +- * Reinitialise since virtio port might have been stopped and restarted +- */ + memset(ring_mem, 0, vq->vq_ring_size); +- vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN); ++ + vq->vq_used_cons_idx = 0; + vq->vq_desc_head_idx = 0; + vq->vq_avail_idx = 0; + vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1); + vq->vq_free_cnt = vq->vq_nentries; + memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries); +- +- vring_desc_init(vr->desc, size); +- ++ if (vtpci_packed_queue(vq->hw)) { ++ vring_init_packed(&vq->ring_packed, ring_mem, ++ VIRTIO_PCI_VRING_ALIGN, size); ++ vring_desc_init_packed(vq, size); ++ } else { ++ vring_init_split(vr, ring_mem, VIRTIO_PCI_VRING_ALIGN, size); ++ vring_desc_init_split(vr->desc, size); ++ } + /* + * Disable device(host) interrupting guest + */ +@@ -382,11 +384,16 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx) + vq->hw = hw; + vq->vq_queue_index = vtpci_queue_idx; + vq->vq_nentries = vq_size; ++ vq->event_flags_shadow = 0; ++ if (vtpci_packed_queue(hw)) { ++ vq->avail_wrap_counter = 1; ++ vq->used_wrap_counter = 1; ++ } + + /* + * Reserve a memzone for 
vring elements + */ +- size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN); ++ size = vring_size(hw, vq_size, VIRTIO_PCI_VRING_ALIGN); + vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN); + PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d", + size, vq->vq_ring_size); +@@ -489,7 +496,8 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx) + for (i = 0; i < vq_size; i++) { + struct vring_desc *start_dp = txr[i].tx_indir; + +- vring_desc_init(start_dp, RTE_DIM(txr[i].tx_indir)); ++ vring_desc_init_split(start_dp, ++ RTE_DIM(txr[i].tx_indir)); + + /* first indirect descriptor is always the tx header */ + start_dp->addr = txvq->virtio_net_hdr_mem +@@ -1486,7 +1494,8 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features) + + /* Setting up rx_header size for the device */ + if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) || +- vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) ++ vtpci_with_feature(hw, VIRTIO_F_VERSION_1) || ++ vtpci_with_feature(hw, VIRTIO_F_RING_PACKED)) + hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf); + else + hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr); +@@ -1906,7 +1915,8 @@ virtio_dev_configure(struct rte_eth_dev *dev) + + if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) { + hw->use_inorder_tx = 1; +- if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) { ++ if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) && ++ !vtpci_packed_queue(hw)) { + hw->use_inorder_rx = 1; + hw->use_simple_rx = 0; + } else { +diff --git a/drivers/net/virtio/virtio_ring.h b/drivers/net/virtio/virtio_ring.h +index 464449074..1760823c6 100644 +--- a/drivers/net/virtio/virtio_ring.h ++++ b/drivers/net/virtio/virtio_ring.h +@@ -125,10 +125,18 @@ struct vring { + #define vring_avail_event(vr) (*(uint16_t *)&(vr)->used->ring[(vr)->num]) + + static inline size_t +-vring_size(unsigned int num, unsigned long align) ++vring_size(struct virtio_hw *hw, unsigned int num, unsigned long align) + { + size_t size; + ++ if (vtpci_packed_queue(hw)) { ++ size = num * sizeof(struct vring_packed_desc); ++ size += sizeof(struct vring_packed_desc_event); ++ size = RTE_ALIGN_CEIL(size, align); ++ size += sizeof(struct vring_packed_desc_event); ++ return size; ++ } ++ + size = num * sizeof(struct vring_desc); + size += sizeof(struct vring_avail) + (num * sizeof(uint16_t)); + size = RTE_ALIGN_CEIL(size, align); +@@ -136,10 +144,9 @@ vring_size(unsigned int num, unsigned long align) + (num * sizeof(struct vring_used_elem)); + return size; + } +- + static inline void +-vring_init(struct vring *vr, unsigned int num, uint8_t *p, +- unsigned long align) ++vring_init_split(struct vring *vr, uint8_t *p, unsigned long align, ++ unsigned int num) + { + vr->num = num; + vr->desc = (struct vring_desc *) p; +@@ -149,6 +156,19 @@ vring_init(struct vring *vr, unsigned int num, uint8_t *p, + RTE_ALIGN_CEIL((uintptr_t)(&vr->avail->ring[num]), align); + } + ++static inline void ++vring_init_packed(struct vring_packed *vr, uint8_t *p, unsigned long align, ++ unsigned int num) ++{ ++ vr->num = num; ++ vr->desc_packed = (struct vring_packed_desc *)p; ++ vr->driver_event = (struct vring_packed_desc_event *)(p + ++ vr->num * sizeof(struct vring_packed_desc)); ++ vr->device_event = (struct vring_packed_desc_event *) ++ RTE_ALIGN_CEIL((uintptr_t)(vr->driver_event + ++ sizeof(struct vring_packed_desc_event)), align); ++} ++ + /* + * The following is used with VIRTIO_RING_F_EVENT_IDX. 
+ * Assuming a given event_idx value from the other size, if we have +diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h +index c32812427..d08ef9112 100644 +--- a/drivers/net/virtio/virtqueue.h ++++ b/drivers/net/virtio/virtqueue.h +@@ -278,7 +278,7 @@ vring_desc_init_packed(struct virtqueue *vq, int n) + + /* Chain all the descriptors in the ring with an END */ + static inline void +-vring_desc_init(struct vring_desc *dp, uint16_t n) ++vring_desc_init_split(struct vring_desc *dp, uint16_t n) + { + uint16_t i; + +-- +2.21.0 + diff --git a/SOURCES/0004-net-virtio-dump-packed-virtqueue-data.patch b/SOURCES/0004-net-virtio-dump-packed-virtqueue-data.patch new file mode 100644 index 0000000..9ac14ab --- /dev/null +++ b/SOURCES/0004-net-virtio-dump-packed-virtqueue-data.patch @@ -0,0 +1,41 @@ +From 2dc70f1db67091cc3a9131d2093da464738b31d8 Mon Sep 17 00:00:00 2001 +From: Jens Freimann +Date: Mon, 17 Dec 2018 22:31:33 +0100 +Subject: [PATCH 04/18] net/virtio: dump packed virtqueue data + +[ upstream commit 56785a2d6fad987c025278909307db776df59bd9 ] + +Add support to dump packed virtqueue data to the +VIRTQUEUE_DUMP() macro. + +Signed-off-by: Jens Freimann +Reviewed-by: Maxime Coquelin +(cherry picked from commit 56785a2d6fad987c025278909307db776df59bd9) +Signed-off-by: Jens Freimann +--- + drivers/net/virtio/virtqueue.h | 9 +++++++++ + 1 file changed, 9 insertions(+) + +diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h +index d08ef9112..e9c35a553 100644 +--- a/drivers/net/virtio/virtqueue.h ++++ b/drivers/net/virtio/virtqueue.h +@@ -434,6 +434,15 @@ virtqueue_notify(struct virtqueue *vq) + uint16_t used_idx, nused; \ + used_idx = (vq)->vq_ring.used->idx; \ + nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \ ++ if (vtpci_packed_queue((vq)->hw)) { \ ++ PMD_INIT_LOG(DEBUG, \ ++ "VQ: - size=%d; free=%d; used_cons_idx=%d; avail_idx=%d;" \ ++ "VQ: - avail_wrap_counter=%d; used_wrap_counter=%d", \ ++ (vq)->vq_nentries, (vq)->vq_free_cnt, (vq)->vq_used_cons_idx, \ ++ (vq)->vq_avail_idx, (vq)->avail_wrap_counter, \ ++ (vq)->used_wrap_counter); \ ++ break; \ ++ } \ + PMD_INIT_LOG(DEBUG, \ + "VQ: - size=%d; free=%d; used=%d; desc_head_idx=%d;" \ + " avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \ +-- +2.21.0 + diff --git a/SOURCES/0005-net-virtio-implement-Tx-path-for-packed-queues.patch b/SOURCES/0005-net-virtio-implement-Tx-path-for-packed-queues.patch new file mode 100644 index 0000000..9d4149f --- /dev/null +++ b/SOURCES/0005-net-virtio-implement-Tx-path-for-packed-queues.patch @@ -0,0 +1,448 @@ +From 97ee69c836bfb08e674fd0f28d1fc7a14f2d4de0 Mon Sep 17 00:00:00 2001 +From: Jens Freimann +Date: Mon, 17 Dec 2018 22:31:34 +0100 +Subject: [PATCH 05/18] net/virtio: implement Tx path for packed queues + +[ upstream commit 892dc798fa9c24e6172b8bcecc9586f2f9a7a49e ] + +This implements the transmit path for devices with +support for packed virtqueues. 
+ +Signed-off-by: Jens Freimann +Signed-off-by: Tiwei Bie +Reviewed-by: Maxime Coquelin +(cherry picked from commit 892dc798fa9c24e6172b8bcecc9586f2f9a7a49e) +Signed-off-by: Jens Freimann +--- + drivers/net/virtio/virtio_ethdev.c | 56 ++++--- + drivers/net/virtio/virtio_ethdev.h | 2 + + drivers/net/virtio/virtio_rxtx.c | 236 ++++++++++++++++++++++++++++- + drivers/net/virtio/virtqueue.h | 20 ++- + 4 files changed, 292 insertions(+), 22 deletions(-) + +diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c +index ee52e3cdb..6023d6f2c 100644 +--- a/drivers/net/virtio/virtio_ethdev.c ++++ b/drivers/net/virtio/virtio_ethdev.c +@@ -388,6 +388,9 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx) + if (vtpci_packed_queue(hw)) { + vq->avail_wrap_counter = 1; + vq->used_wrap_counter = 1; ++ vq->avail_used_flags = ++ VRING_DESC_F_AVAIL(vq->avail_wrap_counter) | ++ VRING_DESC_F_USED(!vq->avail_wrap_counter); + } + + /* +@@ -495,17 +498,26 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx) + memset(txr, 0, vq_size * sizeof(*txr)); + for (i = 0; i < vq_size; i++) { + struct vring_desc *start_dp = txr[i].tx_indir; +- +- vring_desc_init_split(start_dp, +- RTE_DIM(txr[i].tx_indir)); ++ struct vring_packed_desc *start_dp_packed = ++ txr[i].tx_indir_pq; + + /* first indirect descriptor is always the tx header */ +- start_dp->addr = txvq->virtio_net_hdr_mem +- + i * sizeof(*txr) +- + offsetof(struct virtio_tx_region, tx_hdr); +- +- start_dp->len = hw->vtnet_hdr_size; +- start_dp->flags = VRING_DESC_F_NEXT; ++ if (vtpci_packed_queue(hw)) { ++ start_dp_packed->addr = txvq->virtio_net_hdr_mem ++ + i * sizeof(*txr) ++ + offsetof(struct virtio_tx_region, ++ tx_hdr); ++ start_dp_packed->len = hw->vtnet_hdr_size; ++ } else { ++ vring_desc_init_split(start_dp, ++ RTE_DIM(txr[i].tx_indir)); ++ start_dp->addr = txvq->virtio_net_hdr_mem ++ + i * sizeof(*txr) ++ + offsetof(struct virtio_tx_region, ++ tx_hdr); ++ start_dp->len = hw->vtnet_hdr_size; ++ start_dp->flags = VRING_DESC_F_NEXT; ++ } + } + } + +@@ -1334,6 +1346,23 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev) + { + struct virtio_hw *hw = eth_dev->data->dev_private; + ++ if (vtpci_packed_queue(hw)) { ++ PMD_INIT_LOG(INFO, ++ "virtio: using packed ring standard Tx path on port %u", ++ eth_dev->data->port_id); ++ eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed; ++ } else { ++ if (hw->use_inorder_tx) { ++ PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u", ++ eth_dev->data->port_id); ++ eth_dev->tx_pkt_burst = virtio_xmit_pkts_inorder; ++ } else { ++ PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u", ++ eth_dev->data->port_id); ++ eth_dev->tx_pkt_burst = virtio_xmit_pkts; ++ } ++ } ++ + if (hw->use_simple_rx) { + PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u", + eth_dev->data->port_id); +@@ -1354,15 +1383,6 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev) + eth_dev->rx_pkt_burst = &virtio_recv_pkts; + } + +- if (hw->use_inorder_tx) { +- PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u", +- eth_dev->data->port_id); +- eth_dev->tx_pkt_burst = virtio_xmit_pkts_inorder; +- } else { +- PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u", +- eth_dev->data->port_id); +- eth_dev->tx_pkt_burst = virtio_xmit_pkts; +- } + } + + /* Only support 1:1 queue/interrupt mapping so far. 
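
Before the remaining hunks of the Tx patch: the enqueue path below advances vq_avail_idx descriptor by descriptor and, whenever the index passes the ring size, subtracts the ring size, flips avail_wrap_counter and recomputes avail_used_flags so that later descriptors carry the new polarity. A condensed, self-contained sketch of that wrap step follows, using hypothetical names rather than the driver's structures.

#include <stdbool.h>
#include <stdint.h>

#define DESC_F_AVAIL(b) ((uint16_t)(b) << 7)
#define DESC_F_USED(b)  ((uint16_t)(b) << 15)

struct packed_producer {
	uint16_t avail_idx;        /* next descriptor slot to fill   */
	uint16_t nentries;         /* ring size                      */
	bool     avail_wrap;       /* toggled on every wrap-around   */
	uint16_t avail_used_flags; /* flags written into descriptors */
};

/* Advance the producer by one slot, handling wrap-around. */
static void
packed_producer_advance(struct packed_producer *p)
{
	if (++p->avail_idx >= p->nentries) {
		p->avail_idx -= p->nentries;
		p->avail_wrap = !p->avail_wrap;
		p->avail_used_flags = DESC_F_AVAIL(p->avail_wrap) |
				      DESC_F_USED(!p->avail_wrap);
	}
}
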
+diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h +index e0f80e5a4..05d355180 100644 +--- a/drivers/net/virtio/virtio_ethdev.h ++++ b/drivers/net/virtio/virtio_ethdev.h +@@ -82,6 +82,8 @@ uint16_t virtio_recv_mergeable_pkts_inorder(void *rx_queue, + + uint16_t virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); ++uint16_t virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts, ++ uint16_t nb_pkts); + + uint16_t virtio_xmit_pkts_inorder(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c +index eb891433e..ab74917a8 100644 +--- a/drivers/net/virtio/virtio_rxtx.c ++++ b/drivers/net/virtio/virtio_rxtx.c +@@ -88,6 +88,23 @@ vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx) + dp->next = VQ_RING_DESC_CHAIN_END; + } + ++static void ++vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id) ++{ ++ struct vq_desc_extra *dxp; ++ ++ dxp = &vq->vq_descx[id]; ++ vq->vq_free_cnt += dxp->ndescs; ++ ++ if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) ++ vq->vq_desc_head_idx = id; ++ else ++ vq->vq_descx[vq->vq_desc_tail_idx].next = id; ++ ++ vq->vq_desc_tail_idx = id; ++ dxp->next = VQ_RING_DESC_CHAIN_END; ++} ++ + static uint16_t + virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts, + uint32_t *len, uint16_t num) +@@ -165,6 +182,33 @@ virtqueue_dequeue_rx_inorder(struct virtqueue *vq, + #endif + + /* Cleanup from completed transmits. */ ++static void ++virtio_xmit_cleanup_packed(struct virtqueue *vq, int num) ++{ ++ uint16_t used_idx, id; ++ uint16_t size = vq->vq_nentries; ++ struct vring_packed_desc *desc = vq->ring_packed.desc_packed; ++ struct vq_desc_extra *dxp; ++ ++ used_idx = vq->vq_used_cons_idx; ++ while (num-- && desc_is_used(&desc[used_idx], vq)) { ++ used_idx = vq->vq_used_cons_idx; ++ id = desc[used_idx].id; ++ dxp = &vq->vq_descx[id]; ++ vq->vq_used_cons_idx += dxp->ndescs; ++ if (vq->vq_used_cons_idx >= size) { ++ vq->vq_used_cons_idx -= size; ++ vq->used_wrap_counter ^= 1; ++ } ++ vq_ring_free_id_packed(vq, id); ++ if (dxp->cookie != NULL) { ++ rte_pktmbuf_free(dxp->cookie); ++ dxp->cookie = NULL; ++ } ++ used_idx = vq->vq_used_cons_idx; ++ } ++} ++ + static void + virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num) + { +@@ -456,6 +500,107 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq, + vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1); + } + ++static inline void ++virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie, ++ uint16_t needed, int can_push) ++{ ++ struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr; ++ struct vq_desc_extra *dxp; ++ struct virtqueue *vq = txvq->vq; ++ struct vring_packed_desc *start_dp, *head_dp; ++ uint16_t idx, id, head_idx, head_flags; ++ uint16_t head_size = vq->hw->vtnet_hdr_size; ++ struct virtio_net_hdr *hdr; ++ uint16_t prev; ++ ++ id = vq->vq_desc_head_idx; ++ ++ dxp = &vq->vq_descx[id]; ++ dxp->ndescs = needed; ++ dxp->cookie = cookie; ++ ++ head_idx = vq->vq_avail_idx; ++ idx = head_idx; ++ prev = head_idx; ++ start_dp = vq->ring_packed.desc_packed; ++ ++ head_dp = &vq->ring_packed.desc_packed[idx]; ++ head_flags = cookie->next ? 
VRING_DESC_F_NEXT : 0; ++ head_flags |= vq->avail_used_flags; ++ ++ if (can_push) { ++ /* prepend cannot fail, checked by caller */ ++ hdr = (struct virtio_net_hdr *) ++ rte_pktmbuf_prepend(cookie, head_size); ++ /* rte_pktmbuf_prepend() counts the hdr size to the pkt length, ++ * which is wrong. Below subtract restores correct pkt size. ++ */ ++ cookie->pkt_len -= head_size; ++ ++ /* if offload disabled, it is not zeroed below, do it now */ ++ if (!vq->hw->has_tx_offload) { ++ ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0); ++ ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0); ++ ASSIGN_UNLESS_EQUAL(hdr->flags, 0); ++ ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0); ++ ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0); ++ ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0); ++ } ++ } else { ++ /* setup first tx ring slot to point to header ++ * stored in reserved region. ++ */ ++ start_dp[idx].addr = txvq->virtio_net_hdr_mem + ++ RTE_PTR_DIFF(&txr[idx].tx_hdr, txr); ++ start_dp[idx].len = vq->hw->vtnet_hdr_size; ++ hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr; ++ idx++; ++ if (idx >= vq->vq_nentries) { ++ idx -= vq->vq_nentries; ++ vq->avail_wrap_counter ^= 1; ++ vq->avail_used_flags = ++ VRING_DESC_F_AVAIL(vq->avail_wrap_counter) | ++ VRING_DESC_F_USED(!vq->avail_wrap_counter); ++ } ++ } ++ ++ virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload); ++ ++ do { ++ uint16_t flags; ++ ++ start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq); ++ start_dp[idx].len = cookie->data_len; ++ if (likely(idx != head_idx)) { ++ flags = cookie->next ? VRING_DESC_F_NEXT : 0; ++ flags |= vq->avail_used_flags; ++ start_dp[idx].flags = flags; ++ } ++ prev = idx; ++ idx++; ++ if (idx >= vq->vq_nentries) { ++ idx -= vq->vq_nentries; ++ vq->avail_wrap_counter ^= 1; ++ vq->avail_used_flags = ++ VRING_DESC_F_AVAIL(vq->avail_wrap_counter) | ++ VRING_DESC_F_USED(!vq->avail_wrap_counter); ++ } ++ } while ((cookie = cookie->next) != NULL); ++ ++ start_dp[prev].id = id; ++ ++ vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed); ++ ++ vq->vq_desc_head_idx = dxp->next; ++ if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) ++ vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END; ++ ++ vq->vq_avail_idx = idx; ++ ++ rte_smp_wmb(); ++ head_dp->flags = head_flags; ++} ++ + static inline void + virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie, + uint16_t needed, int use_indirect, int can_push, +@@ -733,8 +878,10 @@ virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev, + + PMD_INIT_FUNC_TRACE(); + +- if (hw->use_inorder_tx) +- vq->vq_ring.desc[vq->vq_nentries - 1].next = 0; ++ if (!vtpci_packed_queue(hw)) { ++ if (hw->use_inorder_tx) ++ vq->vq_ring.desc[vq->vq_nentries - 1].next = 0; ++ } + + VIRTQUEUE_DUMP(vq); + +@@ -1346,6 +1493,91 @@ virtio_recv_mergeable_pkts(void *rx_queue, + return nb_rx; + } + ++uint16_t ++virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts, ++ uint16_t nb_pkts) ++{ ++ struct virtnet_tx *txvq = tx_queue; ++ struct virtqueue *vq = txvq->vq; ++ struct virtio_hw *hw = vq->hw; ++ uint16_t hdr_size = hw->vtnet_hdr_size; ++ uint16_t nb_tx = 0; ++ int error; ++ ++ if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts)) ++ return nb_tx; ++ ++ if (unlikely(nb_pkts < 1)) ++ return nb_pkts; ++ ++ PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts); ++ ++ if (nb_pkts > vq->vq_free_cnt) ++ virtio_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt); ++ ++ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { ++ struct rte_mbuf *txm = tx_pkts[nb_tx]; ++ int can_push = 0, slots, need; ++ ++ /* Do VLAN tag insertion */ ++ if 
(unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) { ++ error = rte_vlan_insert(&txm); ++ if (unlikely(error)) { ++ rte_pktmbuf_free(txm); ++ continue; ++ } ++ } ++ ++ /* optimize ring usage */ ++ if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) || ++ vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) && ++ rte_mbuf_refcnt_read(txm) == 1 && ++ RTE_MBUF_DIRECT(txm) && ++ txm->nb_segs == 1 && ++ rte_pktmbuf_headroom(txm) >= hdr_size && ++ rte_is_aligned(rte_pktmbuf_mtod(txm, char *), ++ __alignof__(struct virtio_net_hdr_mrg_rxbuf))) ++ can_push = 1; ++ ++ /* How many main ring entries are needed to this Tx? ++ * any_layout => number of segments ++ * default => number of segments + 1 ++ */ ++ slots = txm->nb_segs + !can_push; ++ need = slots - vq->vq_free_cnt; ++ ++ /* Positive value indicates it need free vring descriptors */ ++ if (unlikely(need > 0)) { ++ virtio_rmb(); ++ need = RTE_MIN(need, (int)nb_pkts); ++ virtio_xmit_cleanup_packed(vq, need); ++ need = slots - vq->vq_free_cnt; ++ if (unlikely(need > 0)) { ++ PMD_TX_LOG(ERR, ++ "No free tx descriptors to transmit"); ++ break; ++ } ++ } ++ ++ /* Enqueue Packet buffers */ ++ virtqueue_enqueue_xmit_packed(txvq, txm, slots, can_push); ++ ++ txvq->stats.bytes += txm->pkt_len; ++ virtio_update_packet_stats(&txvq->stats, txm); ++ } ++ ++ txvq->stats.packets += nb_tx; ++ ++ if (likely(nb_tx)) { ++ if (unlikely(virtqueue_kick_prepare_packed(vq))) { ++ virtqueue_notify(vq); ++ PMD_TX_LOG(DEBUG, "Notified backend after xmit"); ++ } ++ } ++ ++ return nb_tx; ++} ++ + uint16_t + virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) + { +diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h +index e9c35a553..b142fd488 100644 +--- a/drivers/net/virtio/virtqueue.h ++++ b/drivers/net/virtio/virtqueue.h +@@ -247,8 +247,12 @@ struct virtio_net_hdr_mrg_rxbuf { + #define VIRTIO_MAX_TX_INDIRECT 8 + struct virtio_tx_region { + struct virtio_net_hdr_mrg_rxbuf tx_hdr; +- struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT] +- __attribute__((__aligned__(16))); ++ union { ++ struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT] ++ __attribute__((__aligned__(16))); ++ struct vring_packed_desc tx_indir_pq[VIRTIO_MAX_TX_INDIRECT] ++ __attribute__((__aligned__(16))); ++ }; + }; + + static inline int +@@ -380,6 +384,7 @@ virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx) + #define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx)) + + void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx); ++void vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t used_idx); + void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, + uint16_t num); + +@@ -418,6 +423,17 @@ virtqueue_kick_prepare(struct virtqueue *vq) + return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY); + } + ++static inline int ++virtqueue_kick_prepare_packed(struct virtqueue *vq) ++{ ++ uint16_t flags; ++ ++ virtio_mb(); ++ flags = vq->ring_packed.device_event->desc_event_flags; ++ ++ return flags != RING_EVENT_FLAGS_DISABLE; ++} ++ + static inline void + virtqueue_notify(struct virtqueue *vq) + { +-- +2.21.0 + diff --git a/SOURCES/0006-net-virtio-implement-Rx-path-for-packed-queues.patch b/SOURCES/0006-net-virtio-implement-Rx-path-for-packed-queues.patch new file mode 100644 index 0000000..9a77b7d --- /dev/null +++ b/SOURCES/0006-net-virtio-implement-Rx-path-for-packed-queues.patch @@ -0,0 +1,613 @@ +From a1168f29a051eba2344407d72267b5d5f648d80c Mon Sep 17 00:00:00 2001 +From: Jens Freimann +Date: Mon, 17 
Dec 2018 22:31:35 +0100 +Subject: [PATCH 06/18] net/virtio: implement Rx path for packed queues + +[ upstream commit a76290c8f1cf9c4774c23592921302a04a90bded ] + +Implement the receive part. + +Signed-off-by: Jens Freimann +Signed-off-by: Tiwei Bie +Reviewed-by: Maxime Coquelin +(cherry picked from commit a76290c8f1cf9c4774c23592921302a04a90bded) +Signed-off-by: Jens Freimann +--- + drivers/net/virtio/virtio_ethdev.c | 56 +++-- + drivers/net/virtio/virtio_ethdev.h | 5 + + drivers/net/virtio/virtio_rxtx.c | 375 ++++++++++++++++++++++++++++- + drivers/net/virtio/virtqueue.c | 43 +++- + 4 files changed, 457 insertions(+), 22 deletions(-) + +diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c +index 6023d6f2c..4ef1da393 100644 +--- a/drivers/net/virtio/virtio_ethdev.c ++++ b/drivers/net/virtio/virtio_ethdev.c +@@ -1363,24 +1363,40 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev) + } + } + +- if (hw->use_simple_rx) { +- PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u", +- eth_dev->data->port_id); +- eth_dev->rx_pkt_burst = virtio_recv_pkts_vec; +- } else if (hw->use_inorder_rx) { +- PMD_INIT_LOG(INFO, +- "virtio: using inorder mergeable buffer Rx path on port %u", +- eth_dev->data->port_id); +- eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts_inorder; +- } else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) { +- PMD_INIT_LOG(INFO, +- "virtio: using mergeable buffer Rx path on port %u", +- eth_dev->data->port_id); +- eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts; ++ if (vtpci_packed_queue(hw)) { ++ if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) { ++ PMD_INIT_LOG(INFO, ++ "virtio: using packed ring mergeable buffer Rx path on port %u", ++ eth_dev->data->port_id); ++ eth_dev->rx_pkt_burst = ++ &virtio_recv_mergeable_pkts_packed; ++ } else { ++ PMD_INIT_LOG(INFO, ++ "virtio: using packed ring standard Rx path on port %u", ++ eth_dev->data->port_id); ++ eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed; ++ } + } else { +- PMD_INIT_LOG(INFO, "virtio: using standard Rx path on port %u", +- eth_dev->data->port_id); +- eth_dev->rx_pkt_burst = &virtio_recv_pkts; ++ if (hw->use_simple_rx) { ++ PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u", ++ eth_dev->data->port_id); ++ eth_dev->rx_pkt_burst = virtio_recv_pkts_vec; ++ } else if (hw->use_inorder_rx) { ++ PMD_INIT_LOG(INFO, ++ "virtio: using inorder mergeable buffer Rx path on port %u", ++ eth_dev->data->port_id); ++ eth_dev->rx_pkt_burst = ++ &virtio_recv_mergeable_pkts_inorder; ++ } else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) { ++ PMD_INIT_LOG(INFO, ++ "virtio: using mergeable buffer Rx path on port %u", ++ eth_dev->data->port_id); ++ eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts; ++ } else { ++ PMD_INIT_LOG(INFO, "virtio: using standard Rx path on port %u", ++ eth_dev->data->port_id); ++ eth_dev->rx_pkt_burst = &virtio_recv_pkts; ++ } + } + + } +@@ -1944,6 +1960,12 @@ virtio_dev_configure(struct rte_eth_dev *dev) + } + } + ++ if (vtpci_packed_queue(hw)) { ++ hw->use_simple_rx = 0; ++ hw->use_inorder_rx = 0; ++ hw->use_inorder_tx = 0; ++ } ++ + #if defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM + if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) { + hw->use_simple_rx = 0; +diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h +index 05d355180..88b8c42a3 100644 +--- a/drivers/net/virtio/virtio_ethdev.h ++++ b/drivers/net/virtio/virtio_ethdev.h +@@ -73,10 +73,15 @@ int virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev, + + 
uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); ++uint16_t virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts, ++ uint16_t nb_pkts); + + uint16_t virtio_recv_mergeable_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); + ++uint16_t virtio_recv_mergeable_pkts_packed(void *rx_queue, ++ struct rte_mbuf **rx_pkts, uint16_t nb_pkts); ++ + uint16_t virtio_recv_mergeable_pkts_inorder(void *rx_queue, + struct rte_mbuf **rx_pkts, uint16_t nb_pkts); + +diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c +index ab74917a8..0bcf3b08a 100644 +--- a/drivers/net/virtio/virtio_rxtx.c ++++ b/drivers/net/virtio/virtio_rxtx.c +@@ -31,6 +31,7 @@ + #include "virtqueue.h" + #include "virtio_rxtx.h" + #include "virtio_rxtx_simple.h" ++#include "virtio_ring.h" + + #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP + #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len) +@@ -105,6 +106,47 @@ vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id) + dxp->next = VQ_RING_DESC_CHAIN_END; + } + ++static uint16_t ++virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq, ++ struct rte_mbuf **rx_pkts, ++ uint32_t *len, ++ uint16_t num) ++{ ++ struct rte_mbuf *cookie; ++ uint16_t used_idx; ++ uint16_t id; ++ struct vring_packed_desc *desc; ++ uint16_t i; ++ ++ desc = vq->ring_packed.desc_packed; ++ ++ for (i = 0; i < num; i++) { ++ used_idx = vq->vq_used_cons_idx; ++ if (!desc_is_used(&desc[used_idx], vq)) ++ return i; ++ len[i] = desc[used_idx].len; ++ id = desc[used_idx].id; ++ cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie; ++ if (unlikely(cookie == NULL)) { ++ PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u", ++ vq->vq_used_cons_idx); ++ break; ++ } ++ rte_prefetch0(cookie); ++ rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *)); ++ rx_pkts[i] = cookie; ++ ++ vq->vq_free_cnt++; ++ vq->vq_used_cons_idx++; ++ if (vq->vq_used_cons_idx >= vq->vq_nentries) { ++ vq->vq_used_cons_idx -= vq->vq_nentries; ++ vq->used_wrap_counter ^= 1; ++ } ++ } ++ ++ return i; ++} ++ + static uint16_t + virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts, + uint32_t *len, uint16_t num) +@@ -350,6 +392,51 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie) + return 0; + } + ++static inline int ++virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq, ++ struct rte_mbuf **cookie, uint16_t num) ++{ ++ struct vring_packed_desc *start_dp = vq->ring_packed.desc_packed; ++ uint16_t flags = VRING_DESC_F_WRITE | vq->avail_used_flags; ++ struct virtio_hw *hw = vq->hw; ++ struct vq_desc_extra *dxp; ++ uint16_t idx; ++ int i; ++ ++ if (unlikely(vq->vq_free_cnt == 0)) ++ return -ENOSPC; ++ if (unlikely(vq->vq_free_cnt < num)) ++ return -EMSGSIZE; ++ ++ for (i = 0; i < num; i++) { ++ idx = vq->vq_avail_idx; ++ dxp = &vq->vq_descx[idx]; ++ dxp->cookie = (void *)cookie[i]; ++ dxp->ndescs = 1; ++ ++ start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookie[i], vq) + ++ RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size; ++ start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM ++ + hw->vtnet_hdr_size; ++ ++ vq->vq_desc_head_idx = dxp->next; ++ if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) ++ vq->vq_desc_tail_idx = vq->vq_desc_head_idx; ++ rte_smp_wmb(); ++ start_dp[idx].flags = flags; ++ if (++vq->vq_avail_idx >= vq->vq_nentries) { ++ vq->vq_avail_idx -= vq->vq_nentries; ++ vq->avail_wrap_counter ^= 1; ++ vq->avail_used_flags = ++ VRING_DESC_F_AVAIL(vq->avail_wrap_counter) | ++ 
VRING_DESC_F_USED(!vq->avail_wrap_counter); ++ flags = VRING_DESC_F_WRITE | vq->avail_used_flags; ++ } ++ } ++ vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num); ++ return 0; ++} ++ + /* When doing TSO, the IP length is not included in the pseudo header + * checksum of the packet given to the PMD, but for virtio it is + * expected. +@@ -801,7 +888,11 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx) + break; + + /* Enqueue allocated buffers */ +- error = virtqueue_enqueue_recv_refill(vq, m); ++ if (vtpci_packed_queue(vq->hw)) ++ error = virtqueue_enqueue_recv_refill_packed(vq, ++ &m, 1); ++ else ++ error = virtqueue_enqueue_recv_refill(vq, m); + if (error) { + rte_pktmbuf_free(m); + break; +@@ -809,7 +900,8 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx) + nbufs++; + } + +- vq_update_avail_idx(vq); ++ if (!vtpci_packed_queue(vq->hw)) ++ vq_update_avail_idx(vq); + } + + PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs); +@@ -896,7 +988,10 @@ virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m) + * Requeue the discarded mbuf. This should always be + * successful since it was just dequeued. + */ +- error = virtqueue_enqueue_recv_refill(vq, m); ++ if (vtpci_packed_queue(vq->hw)) ++ error = virtqueue_enqueue_recv_refill_packed(vq, &m, 1); ++ else ++ error = virtqueue_enqueue_recv_refill(vq, m); + + if (unlikely(error)) { + RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf"); +@@ -1135,6 +1230,104 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) + return nb_rx; + } + ++uint16_t ++virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts, ++ uint16_t nb_pkts) ++{ ++ struct virtnet_rx *rxvq = rx_queue; ++ struct virtqueue *vq = rxvq->vq; ++ struct virtio_hw *hw = vq->hw; ++ struct rte_mbuf *rxm, *new_mbuf; ++ uint16_t num, nb_rx; ++ uint32_t len[VIRTIO_MBUF_BURST_SZ]; ++ struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ]; ++ int error; ++ uint32_t i, nb_enqueued; ++ uint32_t hdr_size; ++ struct virtio_net_hdr *hdr; ++ ++ nb_rx = 0; ++ if (unlikely(hw->started == 0)) ++ return nb_rx; ++ ++ num = RTE_MIN(VIRTIO_MBUF_BURST_SZ, nb_pkts); ++ if (likely(num > DESC_PER_CACHELINE)) ++ num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE); ++ ++ num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num); ++ PMD_RX_LOG(DEBUG, "dequeue:%d", num); ++ ++ nb_enqueued = 0; ++ hdr_size = hw->vtnet_hdr_size; ++ ++ for (i = 0; i < num; i++) { ++ rxm = rcv_pkts[i]; ++ ++ PMD_RX_LOG(DEBUG, "packet len:%d", len[i]); ++ ++ if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) { ++ PMD_RX_LOG(ERR, "Packet drop"); ++ nb_enqueued++; ++ virtio_discard_rxbuf(vq, rxm); ++ rxvq->stats.errors++; ++ continue; ++ } ++ ++ rxm->port = rxvq->port_id; ++ rxm->data_off = RTE_PKTMBUF_HEADROOM; ++ rxm->ol_flags = 0; ++ rxm->vlan_tci = 0; ++ ++ rxm->pkt_len = (uint32_t)(len[i] - hdr_size); ++ rxm->data_len = (uint16_t)(len[i] - hdr_size); ++ ++ hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr + ++ RTE_PKTMBUF_HEADROOM - hdr_size); ++ ++ if (hw->vlan_strip) ++ rte_vlan_strip(rxm); ++ ++ if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) { ++ virtio_discard_rxbuf(vq, rxm); ++ rxvq->stats.errors++; ++ continue; ++ } ++ ++ virtio_rx_stats_updated(rxvq, rxm); ++ ++ rx_pkts[nb_rx++] = rxm; ++ } ++ ++ rxvq->stats.packets += nb_rx; ++ ++ /* Allocate new mbuf for the used descriptor */ ++ while (likely(!virtqueue_full(vq))) { ++ new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool); ++ if (unlikely(new_mbuf == NULL)) { ++ 
struct rte_eth_dev *dev = ++ &rte_eth_devices[rxvq->port_id]; ++ dev->data->rx_mbuf_alloc_failed++; ++ break; ++ } ++ error = virtqueue_enqueue_recv_refill_packed(vq, &new_mbuf, 1); ++ if (unlikely(error)) { ++ rte_pktmbuf_free(new_mbuf); ++ break; ++ } ++ nb_enqueued++; ++ } ++ ++ if (likely(nb_enqueued)) { ++ if (unlikely(virtqueue_kick_prepare_packed(vq))) { ++ virtqueue_notify(vq); ++ PMD_RX_LOG(DEBUG, "Notified"); ++ } ++ } ++ ++ return nb_rx; ++} ++ ++ + uint16_t + virtio_recv_mergeable_pkts_inorder(void *rx_queue, + struct rte_mbuf **rx_pkts, +@@ -1493,6 +1686,182 @@ virtio_recv_mergeable_pkts(void *rx_queue, + return nb_rx; + } + ++uint16_t ++virtio_recv_mergeable_pkts_packed(void *rx_queue, ++ struct rte_mbuf **rx_pkts, ++ uint16_t nb_pkts) ++{ ++ struct virtnet_rx *rxvq = rx_queue; ++ struct virtqueue *vq = rxvq->vq; ++ struct virtio_hw *hw = vq->hw; ++ struct rte_mbuf *rxm; ++ struct rte_mbuf *prev = NULL; ++ uint16_t num, nb_rx = 0; ++ uint32_t len[VIRTIO_MBUF_BURST_SZ]; ++ struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ]; ++ uint32_t nb_enqueued = 0; ++ uint32_t seg_num = 0; ++ uint32_t seg_res = 0; ++ uint32_t hdr_size = hw->vtnet_hdr_size; ++ int32_t i; ++ int error; ++ ++ if (unlikely(hw->started == 0)) ++ return nb_rx; ++ ++ ++ num = nb_pkts; ++ if (unlikely(num > VIRTIO_MBUF_BURST_SZ)) ++ num = VIRTIO_MBUF_BURST_SZ; ++ if (likely(num > DESC_PER_CACHELINE)) ++ num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE); ++ ++ num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num); ++ ++ for (i = 0; i < num; i++) { ++ struct virtio_net_hdr_mrg_rxbuf *header; ++ ++ PMD_RX_LOG(DEBUG, "dequeue:%d", num); ++ PMD_RX_LOG(DEBUG, "packet len:%d", len[i]); ++ ++ rxm = rcv_pkts[i]; ++ ++ if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) { ++ PMD_RX_LOG(ERR, "Packet drop"); ++ nb_enqueued++; ++ virtio_discard_rxbuf(vq, rxm); ++ rxvq->stats.errors++; ++ continue; ++ } ++ ++ header = (struct virtio_net_hdr_mrg_rxbuf *)((char *) ++ rxm->buf_addr + RTE_PKTMBUF_HEADROOM - hdr_size); ++ seg_num = header->num_buffers; ++ ++ if (seg_num == 0) ++ seg_num = 1; ++ ++ rxm->data_off = RTE_PKTMBUF_HEADROOM; ++ rxm->nb_segs = seg_num; ++ rxm->ol_flags = 0; ++ rxm->vlan_tci = 0; ++ rxm->pkt_len = (uint32_t)(len[i] - hdr_size); ++ rxm->data_len = (uint16_t)(len[i] - hdr_size); ++ ++ rxm->port = rxvq->port_id; ++ rx_pkts[nb_rx] = rxm; ++ prev = rxm; ++ ++ if (hw->has_rx_offload && ++ virtio_rx_offload(rxm, &header->hdr) < 0) { ++ virtio_discard_rxbuf(vq, rxm); ++ rxvq->stats.errors++; ++ continue; ++ } ++ ++ if (hw->vlan_strip) ++ rte_vlan_strip(rx_pkts[nb_rx]); ++ ++ seg_res = seg_num - 1; ++ ++ /* Merge remaining segments */ ++ while (seg_res != 0 && i < (num - 1)) { ++ i++; ++ ++ rxm = rcv_pkts[i]; ++ rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size; ++ rxm->pkt_len = (uint32_t)(len[i]); ++ rxm->data_len = (uint16_t)(len[i]); ++ ++ rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]); ++ rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]); ++ ++ if (prev) ++ prev->next = rxm; ++ ++ prev = rxm; ++ seg_res -= 1; ++ } ++ ++ if (!seg_res) { ++ virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]); ++ nb_rx++; ++ } ++ } ++ ++ /* Last packet still need merge segments */ ++ while (seg_res != 0) { ++ uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res, ++ VIRTIO_MBUF_BURST_SZ); ++ if (likely(vq->vq_free_cnt >= rcv_cnt)) { ++ num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, ++ len, rcv_cnt); ++ uint16_t extra_idx = 0; ++ ++ rcv_cnt = num; ++ ++ while (extra_idx < rcv_cnt) { ++ rxm = rcv_pkts[extra_idx]; ++ ++ 
rxm->data_off = ++ RTE_PKTMBUF_HEADROOM - hdr_size; ++ rxm->pkt_len = (uint32_t)(len[extra_idx]); ++ rxm->data_len = (uint16_t)(len[extra_idx]); ++ ++ prev->next = rxm; ++ prev = rxm; ++ rx_pkts[nb_rx]->pkt_len += len[extra_idx]; ++ rx_pkts[nb_rx]->data_len += len[extra_idx]; ++ extra_idx += 1; ++ } ++ seg_res -= rcv_cnt; ++ if (!seg_res) { ++ virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]); ++ nb_rx++; ++ } ++ } else { ++ PMD_RX_LOG(ERR, ++ "No enough segments for packet."); ++ if (prev) ++ virtio_discard_rxbuf(vq, prev); ++ rxvq->stats.errors++; ++ break; ++ } ++ } ++ ++ rxvq->stats.packets += nb_rx; ++ ++ /* Allocate new mbuf for the used descriptor */ ++ if (likely(!virtqueue_full(vq))) { ++ /* free_cnt may include mrg descs */ ++ uint16_t free_cnt = vq->vq_free_cnt; ++ struct rte_mbuf *new_pkts[free_cnt]; ++ ++ if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) { ++ error = virtqueue_enqueue_recv_refill_packed(vq, ++ new_pkts, free_cnt); ++ if (unlikely(error)) { ++ for (i = 0; i < free_cnt; i++) ++ rte_pktmbuf_free(new_pkts[i]); ++ } ++ nb_enqueued += free_cnt; ++ } else { ++ struct rte_eth_dev *dev = ++ &rte_eth_devices[rxvq->port_id]; ++ dev->data->rx_mbuf_alloc_failed += free_cnt; ++ } ++ } ++ ++ if (likely(nb_enqueued)) { ++ if (unlikely(virtqueue_kick_prepare_packed(vq))) { ++ virtqueue_notify(vq); ++ PMD_RX_LOG(DEBUG, "Notified"); ++ } ++ } ++ ++ return nb_rx; ++} ++ + uint16_t + virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c +index 56a77cc71..5b03f7a27 100644 +--- a/drivers/net/virtio/virtqueue.c ++++ b/drivers/net/virtio/virtqueue.c +@@ -54,9 +54,36 @@ virtqueue_detach_unused(struct virtqueue *vq) + return NULL; + } + ++/* Flush used descs */ ++static void ++virtqueue_rxvq_flush_packed(struct virtqueue *vq) ++{ ++ struct vq_desc_extra *dxp; ++ uint16_t i; ++ ++ struct vring_packed_desc *descs = vq->ring_packed.desc_packed; ++ int cnt = 0; ++ ++ i = vq->vq_used_cons_idx; ++ while (desc_is_used(&descs[i], vq) && cnt++ < vq->vq_nentries) { ++ dxp = &vq->vq_descx[descs[i].id]; ++ if (dxp->cookie != NULL) { ++ rte_pktmbuf_free(dxp->cookie); ++ dxp->cookie = NULL; ++ } ++ vq->vq_free_cnt++; ++ vq->vq_used_cons_idx++; ++ if (vq->vq_used_cons_idx >= vq->vq_nentries) { ++ vq->vq_used_cons_idx -= vq->vq_nentries; ++ vq->used_wrap_counter ^= 1; ++ } ++ i = vq->vq_used_cons_idx; ++ } ++} ++ + /* Flush the elements in the used ring. */ +-void +-virtqueue_rxvq_flush(struct virtqueue *vq) ++static void ++virtqueue_rxvq_flush_split(struct virtqueue *vq) + { + struct virtnet_rx *rxq = &vq->rxq; + struct virtio_hw *hw = vq->hw; +@@ -102,3 +129,15 @@ virtqueue_rxvq_flush(struct virtqueue *vq) + } + } + } ++ ++/* Flush the elements in the used ring. 
*/ ++void ++virtqueue_rxvq_flush(struct virtqueue *vq) ++{ ++ struct virtio_hw *hw = vq->hw; ++ ++ if (vtpci_packed_queue(hw)) ++ virtqueue_rxvq_flush_packed(vq); ++ else ++ virtqueue_rxvq_flush_split(vq); ++} +-- +2.21.0 + diff --git a/SOURCES/0007-net-virtio-support-packed-queue-in-send-command.patch b/SOURCES/0007-net-virtio-support-packed-queue-in-send-command.patch new file mode 100644 index 0000000..2075aae --- /dev/null +++ b/SOURCES/0007-net-virtio-support-packed-queue-in-send-command.patch @@ -0,0 +1,142 @@ +From d8d854a2f1814e10cf51ce88bf00b020167c772e Mon Sep 17 00:00:00 2001 +From: Jens Freimann +Date: Mon, 17 Dec 2018 22:31:36 +0100 +Subject: [PATCH 07/18] net/virtio: support packed queue in send command + +[ upstream commit ec194c2f189525b2fb4be5604422a28ea5f08acd ] + +Use packed virtqueue format when reading and writing descriptors +to/from the ring. + +Signed-off-by: Jens Freimann +Reviewed-by: Maxime Coquelin +(cherry picked from commit ec194c2f189525b2fb4be5604422a28ea5f08acd) +Signed-off-by: Jens Freimann +--- + drivers/net/virtio/virtio_ethdev.c | 96 ++++++++++++++++++++++++++++++ + 1 file changed, 96 insertions(+) + +diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c +index 4ef1da393..53773445b 100644 +--- a/drivers/net/virtio/virtio_ethdev.c ++++ b/drivers/net/virtio/virtio_ethdev.c +@@ -141,6 +141,96 @@ static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = { + + struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS]; + ++static struct virtio_pmd_ctrl * ++virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl, ++ int *dlen, int pkt_num) ++{ ++ struct virtqueue *vq = cvq->vq; ++ int head; ++ struct vring_packed_desc *desc = vq->ring_packed.desc_packed; ++ struct virtio_pmd_ctrl *result; ++ int wrap_counter; ++ uint16_t flags; ++ int sum = 0; ++ int k; ++ ++ /* ++ * Format is enforced in qemu code: ++ * One TX packet for header; ++ * At least one TX packet per argument; ++ * One RX packet for ACK. 
++ */ ++ head = vq->vq_avail_idx; ++ wrap_counter = vq->avail_wrap_counter; ++ desc[head].flags = VRING_DESC_F_NEXT; ++ desc[head].addr = cvq->virtio_net_hdr_mem; ++ desc[head].len = sizeof(struct virtio_net_ctrl_hdr); ++ vq->vq_free_cnt--; ++ if (++vq->vq_avail_idx >= vq->vq_nentries) { ++ vq->vq_avail_idx -= vq->vq_nentries; ++ vq->avail_wrap_counter ^= 1; ++ } ++ ++ for (k = 0; k < pkt_num; k++) { ++ desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem ++ + sizeof(struct virtio_net_ctrl_hdr) ++ + sizeof(ctrl->status) + sizeof(uint8_t) * sum; ++ desc[vq->vq_avail_idx].len = dlen[k]; ++ flags = VRING_DESC_F_NEXT; ++ sum += dlen[k]; ++ vq->vq_free_cnt--; ++ flags |= VRING_DESC_F_AVAIL(vq->avail_wrap_counter) | ++ VRING_DESC_F_USED(!vq->avail_wrap_counter); ++ desc[vq->vq_avail_idx].flags = flags; ++ rte_smp_wmb(); ++ vq->vq_free_cnt--; ++ if (++vq->vq_avail_idx >= vq->vq_nentries) { ++ vq->vq_avail_idx -= vq->vq_nentries; ++ vq->avail_wrap_counter ^= 1; ++ } ++ } ++ ++ ++ desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem ++ + sizeof(struct virtio_net_ctrl_hdr); ++ desc[vq->vq_avail_idx].len = sizeof(ctrl->status); ++ flags = VRING_DESC_F_WRITE; ++ flags |= VRING_DESC_F_AVAIL(vq->avail_wrap_counter) | ++ VRING_DESC_F_USED(!vq->avail_wrap_counter); ++ desc[vq->vq_avail_idx].flags = flags; ++ flags = VRING_DESC_F_NEXT; ++ flags |= VRING_DESC_F_AVAIL(wrap_counter) | ++ VRING_DESC_F_USED(!wrap_counter); ++ desc[head].flags = flags; ++ rte_smp_wmb(); ++ ++ vq->vq_free_cnt--; ++ if (++vq->vq_avail_idx >= vq->vq_nentries) { ++ vq->vq_avail_idx -= vq->vq_nentries; ++ vq->avail_wrap_counter ^= 1; ++ } ++ ++ virtqueue_notify(vq); ++ ++ /* wait for used descriptors in virtqueue */ ++ do { ++ rte_rmb(); ++ usleep(100); ++ } while (!desc_is_used(&desc[head], vq)); ++ ++ /* now get used descriptors */ ++ while (desc_is_used(&desc[vq->vq_used_cons_idx], vq)) { ++ vq->vq_free_cnt++; ++ if (++vq->vq_used_cons_idx >= vq->vq_nentries) { ++ vq->vq_used_cons_idx -= vq->vq_nentries; ++ vq->used_wrap_counter ^= 1; ++ } ++ } ++ ++ result = cvq->virtio_net_hdr_mz->addr; ++ return result; ++} ++ + static int + virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl, + int *dlen, int pkt_num) +@@ -174,6 +264,11 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl, + memcpy(cvq->virtio_net_hdr_mz->addr, ctrl, + sizeof(struct virtio_pmd_ctrl)); + ++ if (vtpci_packed_queue(vq->hw)) { ++ result = virtio_pq_send_command(cvq, ctrl, dlen, pkt_num); ++ goto out_unlock; ++ } ++ + /* + * Format is enforced in qemu code: + * One TX packet for header; +@@ -245,6 +340,7 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl, + + result = cvq->virtio_net_hdr_mz->addr; + ++out_unlock: + rte_spinlock_unlock(&cvq->lock); + return result->status; + } +-- +2.21.0 + diff --git a/SOURCES/0008-net-virtio-user-add-option-to-use-packed-queues.patch b/SOURCES/0008-net-virtio-user-add-option-to-use-packed-queues.patch new file mode 100644 index 0000000..c80a058 --- /dev/null +++ b/SOURCES/0008-net-virtio-user-add-option-to-use-packed-queues.patch @@ -0,0 +1,139 @@ +From 0cdcdd50e4cbb88737abfee1e545019500f11e38 Mon Sep 17 00:00:00 2001 +From: Yuanhan Liu +Date: Mon, 17 Dec 2018 22:31:37 +0100 +Subject: [PATCH] net/virtio-user: add option to use packed queues + +[ upstream commit 34f3966c7f81f947e9eebb347dec6a9f68eec4e6 ] + +From: Yuanhan Liu + +Add option to enable packed queue support for virtio-user +devices. 
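+
+A minimal usage sketch (core list, vdev index and socket path below are only
+placeholders): the new devarg can be passed on a virtio-user vdev, e.g. with
+testpmd:
+
+  testpmd -l 0-1 --vdev=net_virtio_user0,path=/tmp/vhost-user0.sock,packed_vq=1 -- -i
+
+With packed_vq=0, which remains the default, the split virtqueue layout is
+used as before.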
+ +Signed-off-by: Yuanhan Liu +Reviewed-by: Maxime Coquelin +(cherry picked from commit 34f3966c7f81f947e9eebb347dec6a9f68eec4e6) +Signed-off-by: Jens Freimann +--- + .../net/virtio/virtio_user/virtio_user_dev.c | 20 ++++++++++++++----- + .../net/virtio/virtio_user/virtio_user_dev.h | 2 +- + drivers/net/virtio/virtio_user_ethdev.c | 14 ++++++++++++- + 3 files changed, 29 insertions(+), 7 deletions(-) + +diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c +index f0051f887..7d0acaeb7 100644 +--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c ++++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c +@@ -1,4 +1,4 @@ +-/* SPDX-License-Identifier: BSD-3-Clause ++/* SPDX-License-Identifier: BSD-1-Clause + * Copyright(c) 2010-2016 Intel Corporation + */ + +@@ -58,6 +58,8 @@ virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel) + + state.index = queue_sel; + state.num = 0; /* no reservation */ ++ if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) ++ state.num |= (1 << 15); + dev->ops->send_request(dev, VHOST_USER_SET_VRING_BASE, &state); + + dev->ops->send_request(dev, VHOST_USER_SET_VRING_ADDR, &addr); +@@ -407,12 +409,13 @@ virtio_user_dev_setup(struct virtio_user_dev *dev) + 1ULL << VIRTIO_NET_F_GUEST_TSO4 | \ + 1ULL << VIRTIO_NET_F_GUEST_TSO6 | \ + 1ULL << VIRTIO_F_IN_ORDER | \ +- 1ULL << VIRTIO_F_VERSION_1) ++ 1ULL << VIRTIO_F_VERSION_1 | \ ++ 1ULL << VIRTIO_F_RING_PACKED) + + int + virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues, + int cq, int queue_size, const char *mac, char **ifname, +- int server, int mrg_rxbuf, int in_order) ++ int server, int mrg_rxbuf, int in_order, int packed_vq) + { + pthread_mutex_init(&dev->mutex, NULL); + snprintf(dev->path, PATH_MAX, "%s", path); +@@ -465,10 +468,17 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues, + if (!in_order) + dev->unsupported_features |= (1ull << VIRTIO_F_IN_ORDER); + +- if (dev->mac_specified) +- dev->frontend_features |= (1ull << VIRTIO_NET_F_MAC); ++ if (packed_vq) ++ dev->device_features |= (1ull << VIRTIO_F_RING_PACKED); + else ++ dev->device_features &= ~(1ull << VIRTIO_F_RING_PACKED); ++ ++ if (dev->mac_specified) { ++ dev->device_features |= (1ull << VIRTIO_NET_F_MAC); ++ } else { ++ dev->device_features &= ~(1ull << VIRTIO_NET_F_MAC); + dev->unsupported_features |= (1ull << VIRTIO_NET_F_MAC); ++ } + + if (cq) { + /* device does not really need to know anything about CQ, +diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.h b/drivers/net/virtio/virtio_user/virtio_user_dev.h +index 3e3a7b787..67a9c01ac 100644 +--- a/drivers/net/virtio/virtio_user/virtio_user_dev.h ++++ b/drivers/net/virtio/virtio_user/virtio_user_dev.h +@@ -50,7 +50,7 @@ int virtio_user_start_device(struct virtio_user_dev *dev); + int virtio_user_stop_device(struct virtio_user_dev *dev); + int virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues, + int cq, int queue_size, const char *mac, char **ifname, +- int server, int mrg_rxbuf, int in_order); ++ int server, int mrg_rxbuf, int in_order, int packed_vq); + void virtio_user_dev_uninit(struct virtio_user_dev *dev); + void virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx); + uint8_t virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs); +diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c +index 5781c0948..daad8f452 100644 +--- a/drivers/net/virtio/virtio_user_ethdev.c ++++ 
b/drivers/net/virtio/virtio_user_ethdev.c +@@ -361,6 +361,8 @@ static const char *valid_args[] = { + VIRTIO_USER_ARG_MRG_RXBUF, + #define VIRTIO_USER_ARG_IN_ORDER "in_order" + VIRTIO_USER_ARG_IN_ORDER, ++#define VIRTIO_USER_ARG_PACKED_VQ "packed_vq" ++ VIRTIO_USER_ARG_PACKED_VQ, + NULL + }; + +@@ -468,6 +470,7 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev) + char *ifname = NULL; + char *mac_addr = NULL; + int ret = -1; ++ uint64_t packed_vq = 0; + + if (rte_eal_process_type() == RTE_PROC_SECONDARY) { + const char *name = rte_vdev_device_name(dev); +@@ -571,6 +574,15 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev) + cq = 1; + } + ++ if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PACKED_VQ) == 1) { ++ if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PACKED_VQ, ++ &get_integer_arg, &packed_vq) < 0) { ++ PMD_INIT_LOG(ERR, "error to parse %s", ++ VIRTIO_USER_ARG_PACKED_VQ); ++ goto end; ++ } ++ } ++ + if (queues > 1 && cq == 0) { + PMD_INIT_LOG(ERR, "multi-q requires ctrl-q"); + goto end; +@@ -610,7 +622,7 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev) + hw = eth_dev->data->dev_private; + if (virtio_user_dev_init(hw->virtio_user_dev, path, queues, cq, + queue_size, mac_addr, &ifname, server_mode, +- mrg_rxbuf, in_order) < 0) { ++ mrg_rxbuf, in_order, packed_vq) < 0) { + PMD_INIT_LOG(ERR, "virtio_user_dev_init fails"); + virtio_user_eth_dev_free(eth_dev); + goto end; +-- +2.21.0 + diff --git a/SOURCES/0009-net-virtio-user-fail-if-cq-used-with-packed-vq.patch b/SOURCES/0009-net-virtio-user-fail-if-cq-used-with-packed-vq.patch new file mode 100644 index 0000000..5aa09e3 --- /dev/null +++ b/SOURCES/0009-net-virtio-user-fail-if-cq-used-with-packed-vq.patch @@ -0,0 +1,44 @@ +From f5302062cbc98b3b8b1002cc48e7125a48ead96c Mon Sep 17 00:00:00 2001 +From: Jens Freimann +Date: Mon, 17 Dec 2018 22:31:38 +0100 +Subject: [PATCH 09/18] net/virtio-user: fail if cq used with packed vq + +[ upstream commit 07dd7e250d0128bf1edfd73e9d83bde09cdb11e9 ] + +Until we have support for control virtqueues let's disable it and +fail device initalization if specified as a parameter. 
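+
+For example (a sketch only, with a placeholder socket path), explicitly
+requesting both features on one vdev now fails at device init:
+
+  testpmd -l 0-1 --vdev=net_virtio_user0,path=/tmp/vhost-user0.sock,cq=1,packed_vq=1 -- -i
+
+The same applies when a control queue is implied by queues > 1.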
+ +Signed-off-by: Jens Freimann +Reviewed-by: Maxime Coquelin +(cherry picked from commit 07dd7e250d0128bf1edfd73e9d83bde09cdb11e9) +Signed-off-by: Jens Freimann +--- + drivers/net/virtio/virtio_user/virtio_user_dev.c | 10 ++++++++-- + 1 file changed, 8 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c +index 77cec1d3c..2f75091d5 100644 +--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c ++++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c +@@ -467,10 +467,16 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues, + if (!in_order) + dev->unsupported_features |= (1ull << VIRTIO_F_IN_ORDER); + +- if (packed_vq) ++ if (packed_vq) { ++ if (cq) { ++ PMD_INIT_LOG(ERR, "control vq not supported yet with " ++ "packed virtqueues\n"); ++ return -1; ++ } + dev->device_features |= (1ull << VIRTIO_F_RING_PACKED); +- else ++ } else { + dev->device_features &= ~(1ull << VIRTIO_F_RING_PACKED); ++ } + + if (dev->mac_specified) { + dev->device_features |= (1ull << VIRTIO_NET_F_MAC); +-- +2.21.0 + diff --git a/SOURCES/0010-net-virtio-enable-packed-virtqueues-by-default.patch b/SOURCES/0010-net-virtio-enable-packed-virtqueues-by-default.patch new file mode 100644 index 0000000..83df7c5 --- /dev/null +++ b/SOURCES/0010-net-virtio-enable-packed-virtqueues-by-default.patch @@ -0,0 +1,45 @@ +From d1b8c268219498c865511b375b0c0c89244046f9 Mon Sep 17 00:00:00 2001 +From: Jens Freimann +Date: Mon, 17 Dec 2018 22:31:39 +0100 +Subject: [PATCH 10/18] net/virtio: enable packed virtqueues by default + +[ upstream commit aea29aa5d37b40080cfc1f9a1acba239bf03922f ] + +Signed-off-by: Jens Freimann +Reviewed-by: Maxime Coquelin +(cherry picked from commit aea29aa5d37b40080cfc1f9a1acba239bf03922f) +Signed-off-by: Jens Freimann +--- + drivers/net/virtio/virtio_ethdev.h | 1 + + drivers/net/virtio/virtio_user/virtio_user_dev.c | 3 ++- + 2 files changed, 3 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h +index 88b8c42a3..364ecbb50 100644 +--- a/drivers/net/virtio/virtio_ethdev.h ++++ b/drivers/net/virtio/virtio_ethdev.h +@@ -34,6 +34,7 @@ + 1u << VIRTIO_RING_F_INDIRECT_DESC | \ + 1ULL << VIRTIO_F_VERSION_1 | \ + 1ULL << VIRTIO_F_IN_ORDER | \ ++ 1ULL << VIRTIO_F_RING_PACKED | \ + 1ULL << VIRTIO_F_IOMMU_PLATFORM) + + #define VIRTIO_PMD_SUPPORTED_GUEST_FEATURES \ +diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c +index 2f75091d5..5999b7d9d 100644 +--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c ++++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c +@@ -410,7 +410,8 @@ virtio_user_dev_setup(struct virtio_user_dev *dev) + 1ULL << VIRTIO_NET_F_GUEST_TSO6 | \ + 1ULL << VIRTIO_F_IN_ORDER | \ + 1ULL << VIRTIO_F_VERSION_1 | \ +- 1ULL << VIRTIO_F_RING_PACKED) ++ 1ULL << VIRTIO_F_RING_PACKED | \ ++ 1ULL << VIRTIO_RING_F_EVENT_IDX) + + int + virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues, +-- +2.21.0 + diff --git a/SOURCES/0011-net-virtio-avoid-double-accounting-of-bytes.patch b/SOURCES/0011-net-virtio-avoid-double-accounting-of-bytes.patch new file mode 100644 index 0000000..02d1f54 --- /dev/null +++ b/SOURCES/0011-net-virtio-avoid-double-accounting-of-bytes.patch @@ -0,0 +1,33 @@ +From 440731f30a1257c3318badfcf17f5ab9e5085317 Mon Sep 17 00:00:00 2001 +From: Jens Freimann +Date: Thu, 20 Dec 2018 11:56:24 +0100 +Subject: [PATCH 11/18] net/virtio: avoid 
double accounting of bytes + +[ upstream commit 517ad3e018e31ab2596d1ece5369894703c850c2 ] + +Accounting of bytes was moved to a common function, so at the moment we do +it twice. This patches fixes it for sending packets with packed virtqueues. + +Signed-off-by: Jens Freimann +Reviewed-by: Maxime Coquelin +(cherry picked from commit 517ad3e018e31ab2596d1ece5369894703c850c2) +Signed-off-by: Jens Freimann +--- + drivers/net/virtio/virtio_rxtx.c | 1 - + 1 file changed, 1 deletion(-) + +diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c +index 0bcf3b08a..50eb4c694 100644 +--- a/drivers/net/virtio/virtio_rxtx.c ++++ b/drivers/net/virtio/virtio_rxtx.c +@@ -1931,7 +1931,6 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts, + /* Enqueue Packet buffers */ + virtqueue_enqueue_xmit_packed(txvq, txm, slots, can_push); + +- txvq->stats.bytes += txm->pkt_len; + virtio_update_packet_stats(&txvq->stats, txm); + } + +-- +2.21.0 + diff --git a/SOURCES/0012-net-virtio-user-fix-packed-vq-option-parsing.patch b/SOURCES/0012-net-virtio-user-fix-packed-vq-option-parsing.patch new file mode 100644 index 0000000..e6b879f --- /dev/null +++ b/SOURCES/0012-net-virtio-user-fix-packed-vq-option-parsing.patch @@ -0,0 +1,85 @@ +From ec53a1992df973607cbb10db6a0816ed2ef498dd Mon Sep 17 00:00:00 2001 +From: Tiwei Bie +Date: Thu, 3 Jan 2019 10:40:06 +0800 +Subject: [PATCH] net/virtio-user: fix packed vq option parsing + +[ upstream commit 9070f88b81dab42739fb169265e3ea727e47dfa2 ] + +Add the RING_PACKED feature to dev->unsupported_features +when it's disabled, and add the missing packed vq param +string. And also revert the unexpected change to MAC option +introduced when adding packed vq option. + +Fixes: 34f3966c7f81 ("net/virtio-user: add option to use packed queues") + +Signed-off-by: Tiwei Bie +Reviewed-by: Maxime Coquelin +(cherry picked from commit 9070f88b81dab42739fb169265e3ea727e47dfa2) +Signed-off-by: Jens Freimann +--- + drivers/net/virtio/virtio_user/virtio_user_dev.c | 11 ++++------- + drivers/net/virtio/virtio_user_ethdev.c | 7 ++++--- + 2 files changed, 8 insertions(+), 10 deletions(-) + +diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c +index 811b95c45..426682c93 100644 +--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c ++++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c +@@ -475,17 +475,14 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues, + "packed virtqueues\n"); + return -1; + } +- dev->device_features |= (1ull << VIRTIO_F_RING_PACKED); + } else { +- dev->device_features &= ~(1ull << VIRTIO_F_RING_PACKED); ++ dev->unsupported_features |= (1ull << VIRTIO_F_RING_PACKED); + } + +- if (dev->mac_specified) { +- dev->device_features |= (1ull << VIRTIO_NET_F_MAC); +- } else { +- dev->device_features &= ~(1ull << VIRTIO_NET_F_MAC); ++ if (dev->mac_specified) ++ dev->frontend_features |= (1ull << VIRTIO_NET_F_MAC); ++ else + dev->unsupported_features |= (1ull << VIRTIO_NET_F_MAC); +- } + + if (cq) { + /* device does not really need to know anything about CQ, +diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c +index daad8f452..a2911febf 100644 +--- a/drivers/net/virtio/virtio_user_ethdev.c ++++ b/drivers/net/virtio/virtio_user_ethdev.c +@@ -361,7 +361,7 @@ static const char *valid_args[] = { + VIRTIO_USER_ARG_MRG_RXBUF, + #define VIRTIO_USER_ARG_IN_ORDER "in_order" + VIRTIO_USER_ARG_IN_ORDER, +-#define 
VIRTIO_USER_ARG_PACKED_VQ "packed_vq" ++#define VIRTIO_USER_ARG_PACKED_VQ "packed_vq" + VIRTIO_USER_ARG_PACKED_VQ, + NULL + }; +@@ -466,11 +466,11 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev) + uint64_t server_mode = VIRTIO_USER_DEF_SERVER_MODE; + uint64_t mrg_rxbuf = 1; + uint64_t in_order = 1; ++ uint64_t packed_vq = 0; + char *path = NULL; + char *ifname = NULL; + char *mac_addr = NULL; + int ret = -1; +- uint64_t packed_vq = 0; + + if (rte_eal_process_type() == RTE_PROC_SECONDARY) { + const char *name = rte_vdev_device_name(dev); +@@ -698,4 +698,5 @@ RTE_PMD_REGISTER_PARAM_STRING(net_virtio_user, + "iface= " + "server=<0|1> " + "mrg_rxbuf=<0|1> " +- "in_order=<0|1>"); ++ "in_order=<0|1> " ++ "packed_vq=<0|1>"); +-- +2.21.0 + diff --git a/SOURCES/0013-net-virtio-user-fix-supported-features-list.patch b/SOURCES/0013-net-virtio-user-fix-supported-features-list.patch new file mode 100644 index 0000000..1dcd272 --- /dev/null +++ b/SOURCES/0013-net-virtio-user-fix-supported-features-list.patch @@ -0,0 +1,36 @@ +From b6da125960fb1fb017427af5910b43ac81586850 Mon Sep 17 00:00:00 2001 +From: Tiwei Bie +Date: Thu, 3 Jan 2019 10:40:07 +0800 +Subject: [PATCH 13/18] net/virtio-user: fix supported features list + +[ upstream commit 8532a0fcd8f2cf3a5d3189b453bd90a69991b1b1 ] + +Currently virtio-user doesn't support event idx. + +Fixes: aea29aa5d37b ("net/virtio: enable packed virtqueues by default") + +Signed-off-by: Tiwei Bie +Reviewed-by: Maxime Coquelin +(cherry picked from commit 8532a0fcd8f2cf3a5d3189b453bd90a69991b1b1) +Signed-off-by: Jens Freimann +--- + drivers/net/virtio/virtio_user/virtio_user_dev.c | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c +index c4e026096..77341f895 100644 +--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c ++++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c +@@ -410,8 +410,7 @@ virtio_user_dev_setup(struct virtio_user_dev *dev) + 1ULL << VIRTIO_NET_F_GUEST_TSO6 | \ + 1ULL << VIRTIO_F_IN_ORDER | \ + 1ULL << VIRTIO_F_VERSION_1 | \ +- 1ULL << VIRTIO_F_RING_PACKED | \ +- 1ULL << VIRTIO_RING_F_EVENT_IDX) ++ 1ULL << VIRTIO_F_RING_PACKED) + + int + virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues, +-- +2.21.0 + diff --git a/SOURCES/0014-net-virtio-check-head-desc-with-correct-wrap-counter.patch b/SOURCES/0014-net-virtio-check-head-desc-with-correct-wrap-counter.patch new file mode 100644 index 0000000..acdfc63 --- /dev/null +++ b/SOURCES/0014-net-virtio-check-head-desc-with-correct-wrap-counter.patch @@ -0,0 +1,98 @@ +From 82b43dd199d5492527b73002d4c3b009a98ca7a0 Mon Sep 17 00:00:00 2001 +From: Jens Freimann +Date: Fri, 11 Jan 2019 10:39:28 +0100 +Subject: [PATCH 14/18] net/virtio: check head desc with correct wrap counter + +[ upstream commit a4270ea4ff79b46280dd542f4ab3eb45f8c9685a ] + +In virtio_pq_send_command() we check for a used descriptor +and wait in an idle loop until it becomes used. We can't use +vq->used_wrap_counter here to check for the first descriptor +we made available because the ring could have wrapped. Let's use +the used_wrap_counter that matches the state of the head descriptor. 
+ +Fixes: ec194c2f1895 ("net/virtio: support packed queue in send command") + +Signed-off-by: Jens Freimann +Reviewed-by: Maxime Coquelin +(cherry picked from commit a4270ea4ff79b46280dd542f4ab3eb45f8c9685a) +Signed-off-by: Jens Freimann +--- + drivers/net/virtio/virtio_ethdev.c | 11 ++++++----- + drivers/net/virtio/virtqueue.h | 10 ++++++++-- + 2 files changed, 14 insertions(+), 7 deletions(-) + +diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c +index 53773445b..7bd38a292 100644 +--- a/drivers/net/virtio/virtio_ethdev.c ++++ b/drivers/net/virtio/virtio_ethdev.c +@@ -149,7 +149,7 @@ virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl, + int head; + struct vring_packed_desc *desc = vq->ring_packed.desc_packed; + struct virtio_pmd_ctrl *result; +- int wrap_counter; ++ bool avail_wrap_counter, used_wrap_counter; + uint16_t flags; + int sum = 0; + int k; +@@ -161,7 +161,8 @@ virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl, + * One RX packet for ACK. + */ + head = vq->vq_avail_idx; +- wrap_counter = vq->avail_wrap_counter; ++ avail_wrap_counter = vq->avail_wrap_counter; ++ used_wrap_counter = vq->used_wrap_counter; + desc[head].flags = VRING_DESC_F_NEXT; + desc[head].addr = cvq->virtio_net_hdr_mem; + desc[head].len = sizeof(struct virtio_net_ctrl_hdr); +@@ -199,8 +200,8 @@ virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl, + VRING_DESC_F_USED(!vq->avail_wrap_counter); + desc[vq->vq_avail_idx].flags = flags; + flags = VRING_DESC_F_NEXT; +- flags |= VRING_DESC_F_AVAIL(wrap_counter) | +- VRING_DESC_F_USED(!wrap_counter); ++ flags |= VRING_DESC_F_AVAIL(avail_wrap_counter) | ++ VRING_DESC_F_USED(!avail_wrap_counter); + desc[head].flags = flags; + rte_smp_wmb(); + +@@ -216,7 +217,7 @@ virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl, + do { + rte_rmb(); + usleep(100); +- } while (!desc_is_used(&desc[head], vq)); ++ } while (!__desc_is_used(&desc[head], used_wrap_counter)); + + /* now get used descriptors */ + while (desc_is_used(&desc[vq->vq_used_cons_idx], vq)) { +diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h +index b142fd488..75f5782bc 100644 +--- a/drivers/net/virtio/virtqueue.h ++++ b/drivers/net/virtio/virtqueue.h +@@ -256,7 +256,7 @@ struct virtio_tx_region { + }; + + static inline int +-desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq) ++__desc_is_used(struct vring_packed_desc *desc, bool wrap_counter) + { + uint16_t used, avail, flags; + +@@ -264,7 +264,13 @@ desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq) + used = !!(flags & VRING_DESC_F_USED(1)); + avail = !!(flags & VRING_DESC_F_AVAIL(1)); + +- return avail == used && used == vq->used_wrap_counter; ++ return avail == used && used == wrap_counter; ++} ++ ++static inline int ++desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq) ++{ ++ return __desc_is_used(desc, vq->used_wrap_counter); + } + + +-- +2.21.0 + diff --git a/SOURCES/0015-net-virtio-user-support-control-VQ-for-packed.patch b/SOURCES/0015-net-virtio-user-support-control-VQ-for-packed.patch new file mode 100644 index 0000000..ce41b03 --- /dev/null +++ b/SOURCES/0015-net-virtio-user-support-control-VQ-for-packed.patch @@ -0,0 +1,277 @@ +From 74bbcd238093edc81b1a1f0b9b6e0d3c3fe32584 Mon Sep 17 00:00:00 2001 +From: Jens Freimann +Date: Fri, 11 Jan 2019 10:39:29 +0100 +Subject: [PATCH] net/virtio-user: support control VQ for packed + +[ upstream commit 
48a4464029a7f76dfb2c1f09146a391917b075e5 ] + +Add support to virtio-user for control virtqueues. + +Signed-off-by: Jens Freimann +Reviewed-by: Maxime Coquelin +(cherry picked from commit 48a4464029a7f76dfb2c1f09146a391917b075e5) +Signed-off-by: Jens Freimann +--- + .../net/virtio/virtio_user/virtio_user_dev.c | 102 ++++++++++++++++-- + .../net/virtio/virtio_user/virtio_user_dev.h | 15 ++- + drivers/net/virtio/virtio_user_ethdev.c | 56 +++++++++- + 3 files changed, 157 insertions(+), 16 deletions(-) + +diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c +index 2caaaad5f..83d3fb531 100644 +--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c ++++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c +@@ -43,15 +43,26 @@ virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel) + struct vhost_vring_file file; + struct vhost_vring_state state; + struct vring *vring = &dev->vrings[queue_sel]; ++ struct vring_packed *pq_vring = &dev->packed_vrings[queue_sel]; + struct vhost_vring_addr addr = { + .index = queue_sel, +- .desc_user_addr = (uint64_t)(uintptr_t)vring->desc, +- .avail_user_addr = (uint64_t)(uintptr_t)vring->avail, +- .used_user_addr = (uint64_t)(uintptr_t)vring->used, + .log_guest_addr = 0, + .flags = 0, /* disable log */ + }; + ++ if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) { ++ addr.desc_user_addr = ++ (uint64_t)(uintptr_t)pq_vring->desc_packed; ++ addr.avail_user_addr = ++ (uint64_t)(uintptr_t)pq_vring->driver_event; ++ addr.used_user_addr = ++ (uint64_t)(uintptr_t)pq_vring->device_event; ++ } else { ++ addr.desc_user_addr = (uint64_t)(uintptr_t)vring->desc; ++ addr.avail_user_addr = (uint64_t)(uintptr_t)vring->avail; ++ addr.used_user_addr = (uint64_t)(uintptr_t)vring->used; ++ } ++ + state.index = queue_sel; + state.num = vring->num; + dev->ops->send_request(dev, VHOST_USER_SET_VRING_NUM, &state); +@@ -468,15 +479,8 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues, + if (!in_order) + dev->unsupported_features |= (1ull << VIRTIO_F_IN_ORDER); + +- if (packed_vq) { +- if (cq) { +- PMD_INIT_LOG(ERR, "control vq not supported yet with " +- "packed virtqueues\n"); +- return -1; +- } +- } else { ++ if (!packed_vq) + dev->unsupported_features |= (1ull << VIRTIO_F_RING_PACKED); +- } + + if (dev->mac_specified) + dev->frontend_features |= (1ull << VIRTIO_NET_F_MAC); +@@ -621,6 +625,82 @@ virtio_user_handle_ctrl_msg(struct virtio_user_dev *dev, struct vring *vring, + return n_descs; + } + ++static inline int ++desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter) ++{ ++ return wrap_counter == !!(desc->flags & VRING_DESC_F_AVAIL(1)) && ++ wrap_counter != !!(desc->flags & VRING_DESC_F_USED(1)); ++} ++ ++static uint32_t ++virtio_user_handle_ctrl_msg_pq(struct virtio_user_dev *dev, ++ struct vring_packed *vring, ++ uint16_t idx_hdr) ++{ ++ struct virtio_net_ctrl_hdr *hdr; ++ virtio_net_ctrl_ack status = ~0; ++ uint16_t idx_data, idx_status; ++ /* initialize to one, header is first */ ++ uint32_t n_descs = 1; ++ ++ /* locate desc for header, data, and status */ ++ idx_data = idx_hdr + 1; ++ if (idx_data >= dev->queue_size) ++ idx_data -= dev->queue_size; ++ ++ n_descs++; ++ ++ idx_status = idx_data; ++ while (vring->desc_packed[idx_status].flags & VRING_DESC_F_NEXT) { ++ idx_status++; ++ if (idx_status >= dev->queue_size) ++ idx_status -= dev->queue_size; ++ n_descs++; ++ } ++ ++ hdr = (void *)(uintptr_t)vring->desc_packed[idx_hdr].addr; ++ if (hdr->class == VIRTIO_NET_CTRL_MQ 
&& ++ hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) { ++ uint16_t queues; ++ ++ queues = *(uint16_t *)(uintptr_t) ++ vring->desc_packed[idx_data].addr; ++ status = virtio_user_handle_mq(dev, queues); ++ } ++ ++ /* Update status */ ++ *(virtio_net_ctrl_ack *)(uintptr_t) ++ vring->desc_packed[idx_status].addr = status; ++ ++ return n_descs; ++} ++ ++void ++virtio_user_handle_cq_packed(struct virtio_user_dev *dev, uint16_t queue_idx) ++{ ++ struct virtio_user_queue *vq = &dev->packed_queues[queue_idx]; ++ struct vring_packed *vring = &dev->packed_vrings[queue_idx]; ++ uint16_t id, n_descs; ++ ++ while (desc_is_avail(&vring->desc_packed[vq->used_idx], ++ vq->used_wrap_counter)) { ++ id = vring->desc_packed[vq->used_idx].id; ++ ++ n_descs = virtio_user_handle_ctrl_msg_pq(dev, vring, id); ++ ++ do { ++ vring->desc_packed[vq->used_idx].flags = ++ VRING_DESC_F_AVAIL(vq->used_wrap_counter) | ++ VRING_DESC_F_USED(vq->used_wrap_counter); ++ if (++vq->used_idx >= dev->queue_size) { ++ vq->used_idx -= dev->queue_size; ++ vq->used_wrap_counter ^= 1; ++ } ++ n_descs--; ++ } while (n_descs); ++ } ++} ++ + void + virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx) + { +diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.h b/drivers/net/virtio/virtio_user/virtio_user_dev.h +index 67a9c01ac..c6c2f7d6e 100644 +--- a/drivers/net/virtio/virtio_user/virtio_user_dev.h ++++ b/drivers/net/virtio/virtio_user/virtio_user_dev.h +@@ -11,6 +11,12 @@ + #include "../virtio_ring.h" + #include "vhost.h" + ++struct virtio_user_queue { ++ uint16_t used_idx; ++ bool avail_wrap_counter; ++ bool used_wrap_counter; ++}; ++ + struct virtio_user_dev { + /* for vhost_user backend */ + int vhostfd; +@@ -39,7 +45,12 @@ struct virtio_user_dev { + uint16_t port_id; + uint8_t mac_addr[ETHER_ADDR_LEN]; + char path[PATH_MAX]; +- struct vring vrings[VIRTIO_MAX_VIRTQUEUES]; ++ union { ++ struct vring vrings[VIRTIO_MAX_VIRTQUEUES]; ++ struct vring_packed packed_vrings[VIRTIO_MAX_VIRTQUEUES]; ++ }; ++ struct virtio_user_queue packed_queues[VIRTIO_MAX_VIRTQUEUES]; ++ + struct virtio_user_backend_ops *ops; + pthread_mutex_t mutex; + bool started; +@@ -53,5 +64,7 @@ int virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues, + int server, int mrg_rxbuf, int in_order, int packed_vq); + void virtio_user_dev_uninit(struct virtio_user_dev *dev); + void virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx); ++void virtio_user_handle_cq_packed(struct virtio_user_dev *dev, ++ uint16_t queue_idx); + uint8_t virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs); + #endif +diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c +index a2911febf..dddb7dd23 100644 +--- a/drivers/net/virtio/virtio_user_ethdev.c ++++ b/drivers/net/virtio/virtio_user_ethdev.c +@@ -271,10 +271,44 @@ virtio_user_get_queue_num(struct virtio_hw *hw, uint16_t queue_id __rte_unused) + return dev->queue_size; + } + +-static int +-virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq) ++static void ++virtio_user_setup_queue_packed(struct virtqueue *vq, ++ struct virtio_user_dev *dev) ++ ++{ ++ uint16_t queue_idx = vq->vq_queue_index; ++ struct vring_packed *vring; ++ uint64_t desc_addr; ++ uint64_t avail_addr; ++ uint64_t used_addr; ++ uint16_t i; ++ ++ vring = &dev->packed_vrings[queue_idx]; ++ desc_addr = (uintptr_t)vq->vq_ring_virt_mem; ++ avail_addr = desc_addr + vq->vq_nentries * ++ sizeof(struct vring_packed_desc); ++ used_addr = 
RTE_ALIGN_CEIL(avail_addr + ++ sizeof(struct vring_packed_desc_event), ++ VIRTIO_PCI_VRING_ALIGN); ++ vring->num = vq->vq_nentries; ++ vring->desc_packed = ++ (void *)(uintptr_t)desc_addr; ++ vring->driver_event = ++ (void *)(uintptr_t)avail_addr; ++ vring->device_event = ++ (void *)(uintptr_t)used_addr; ++ dev->packed_queues[queue_idx].avail_wrap_counter = true; ++ dev->packed_queues[queue_idx].used_wrap_counter = true; ++ ++ for (i = 0; i < vring->num; i++) { ++ vring->desc_packed[i].flags = VRING_DESC_F_USED(1) | ++ VRING_DESC_F_AVAIL(1); ++ } ++} ++ ++static void ++virtio_user_setup_queue_split(struct virtqueue *vq, struct virtio_user_dev *dev) + { +- struct virtio_user_dev *dev = virtio_user_get_dev(hw); + uint16_t queue_idx = vq->vq_queue_index; + uint64_t desc_addr, avail_addr, used_addr; + +@@ -288,6 +322,17 @@ virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq) + dev->vrings[queue_idx].desc = (void *)(uintptr_t)desc_addr; + dev->vrings[queue_idx].avail = (void *)(uintptr_t)avail_addr; + dev->vrings[queue_idx].used = (void *)(uintptr_t)used_addr; ++} ++ ++static int ++virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq) ++{ ++ struct virtio_user_dev *dev = virtio_user_get_dev(hw); ++ ++ if (vtpci_packed_queue(hw)) ++ virtio_user_setup_queue_packed(vq, dev); ++ else ++ virtio_user_setup_queue_split(vq, dev); + + return 0; + } +@@ -317,7 +362,10 @@ virtio_user_notify_queue(struct virtio_hw *hw, struct virtqueue *vq) + struct virtio_user_dev *dev = virtio_user_get_dev(hw); + + if (hw->cvq && (hw->cvq->vq == vq)) { +- virtio_user_handle_cq(dev, vq->vq_queue_index); ++ if (vtpci_packed_queue(vq->hw)) ++ virtio_user_handle_cq_packed(dev, vq->vq_queue_index); ++ else ++ virtio_user_handle_cq(dev, vq->vq_queue_index); + return; + } + +-- +2.21.0 + diff --git a/SOURCES/0016-net-virtio-fix-control-VQ.patch b/SOURCES/0016-net-virtio-fix-control-VQ.patch new file mode 100644 index 0000000..34bf1cf --- /dev/null +++ b/SOURCES/0016-net-virtio-fix-control-VQ.patch @@ -0,0 +1,197 @@ +From c276398e43bec444eb207c3184f667b3d97361f8 Mon Sep 17 00:00:00 2001 +From: Tiwei Bie +Date: Wed, 23 Jan 2019 01:01:40 +0800 +Subject: [PATCH 16/18] net/virtio: fix control VQ + +[ upstream commit 2923b8f9c41da37d63bd196ba2f037c154a6ebd5 ] + +This patch mainly fixed below issues in the packed ring based +control vq support in virtio driver: + +1. When parsing the used descriptors, we have to track the + number of descs that we need to skip; +2. vq->vq_free_cnt was decreased twice for a same desc; + +Meanwhile, make the function name consistent with other parts. 
+ +Fixes: ec194c2f1895 ("net/virtio: support packed queue in send command") +Fixes: a4270ea4ff79 ("net/virtio: check head desc with correct wrap counter") + +Signed-off-by: Tiwei Bie +Reviewed-by: Maxime Coquelin +[changed parameters to virtio_rmb/_wmb()] +(cherry picked from commit 2923b8f9c41da37d63bd196ba2f037c154a6ebd5) +Signed-off-by: Jens Freimann +--- + drivers/net/virtio/virtio_ethdev.c | 62 ++++++++++++++---------------- + drivers/net/virtio/virtqueue.h | 12 +----- + 2 files changed, 31 insertions(+), 43 deletions(-) + +diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c +index 7bd38a292..c12fb157e 100644 +--- a/drivers/net/virtio/virtio_ethdev.c ++++ b/drivers/net/virtio/virtio_ethdev.c +@@ -142,16 +142,17 @@ static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = { + struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS]; + + static struct virtio_pmd_ctrl * +-virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl, +- int *dlen, int pkt_num) ++virtio_send_command_packed(struct virtnet_ctl *cvq, ++ struct virtio_pmd_ctrl *ctrl, ++ int *dlen, int pkt_num) + { + struct virtqueue *vq = cvq->vq; + int head; + struct vring_packed_desc *desc = vq->ring_packed.desc_packed; + struct virtio_pmd_ctrl *result; +- bool avail_wrap_counter, used_wrap_counter; +- uint16_t flags; ++ bool avail_wrap_counter; + int sum = 0; ++ int nb_descs = 0; + int k; + + /* +@@ -162,11 +163,10 @@ virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl, + */ + head = vq->vq_avail_idx; + avail_wrap_counter = vq->avail_wrap_counter; +- used_wrap_counter = vq->used_wrap_counter; +- desc[head].flags = VRING_DESC_F_NEXT; + desc[head].addr = cvq->virtio_net_hdr_mem; + desc[head].len = sizeof(struct virtio_net_ctrl_hdr); + vq->vq_free_cnt--; ++ nb_descs++; + if (++vq->vq_avail_idx >= vq->vq_nentries) { + vq->vq_avail_idx -= vq->vq_nentries; + vq->avail_wrap_counter ^= 1; +@@ -177,55 +177,51 @@ virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl, + + sizeof(struct virtio_net_ctrl_hdr) + + sizeof(ctrl->status) + sizeof(uint8_t) * sum; + desc[vq->vq_avail_idx].len = dlen[k]; +- flags = VRING_DESC_F_NEXT; ++ desc[vq->vq_avail_idx].flags = VRING_DESC_F_NEXT | ++ VRING_DESC_F_AVAIL(vq->avail_wrap_counter) | ++ VRING_DESC_F_USED(!vq->avail_wrap_counter); + sum += dlen[k]; + vq->vq_free_cnt--; +- flags |= VRING_DESC_F_AVAIL(vq->avail_wrap_counter) | +- VRING_DESC_F_USED(!vq->avail_wrap_counter); +- desc[vq->vq_avail_idx].flags = flags; +- rte_smp_wmb(); +- vq->vq_free_cnt--; ++ nb_descs++; + if (++vq->vq_avail_idx >= vq->vq_nentries) { + vq->vq_avail_idx -= vq->vq_nentries; + vq->avail_wrap_counter ^= 1; + } + } + +- + desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem + + sizeof(struct virtio_net_ctrl_hdr); + desc[vq->vq_avail_idx].len = sizeof(ctrl->status); +- flags = VRING_DESC_F_WRITE; +- flags |= VRING_DESC_F_AVAIL(vq->avail_wrap_counter) | +- VRING_DESC_F_USED(!vq->avail_wrap_counter); +- desc[vq->vq_avail_idx].flags = flags; +- flags = VRING_DESC_F_NEXT; +- flags |= VRING_DESC_F_AVAIL(avail_wrap_counter) | +- VRING_DESC_F_USED(!avail_wrap_counter); +- desc[head].flags = flags; +- rte_smp_wmb(); +- ++ desc[vq->vq_avail_idx].flags = VRING_DESC_F_WRITE | ++ VRING_DESC_F_AVAIL(vq->avail_wrap_counter) | ++ VRING_DESC_F_USED(!vq->avail_wrap_counter); + vq->vq_free_cnt--; ++ nb_descs++; + if (++vq->vq_avail_idx >= vq->vq_nentries) { + vq->vq_avail_idx -= vq->vq_nentries; + 
vq->avail_wrap_counter ^= 1; + } + ++ virtio_wmb(); ++ desc[head].flags = VRING_DESC_F_NEXT | ++ VRING_DESC_F_AVAIL(avail_wrap_counter) | ++ VRING_DESC_F_USED(!avail_wrap_counter); ++ ++ virtio_wmb(); + virtqueue_notify(vq); + + /* wait for used descriptors in virtqueue */ +- do { +- rte_rmb(); ++ while (!desc_is_used(&desc[head], vq)) + usleep(100); +- } while (!__desc_is_used(&desc[head], used_wrap_counter)); ++ ++ virtio_rmb(); + + /* now get used descriptors */ +- while (desc_is_used(&desc[vq->vq_used_cons_idx], vq)) { +- vq->vq_free_cnt++; +- if (++vq->vq_used_cons_idx >= vq->vq_nentries) { +- vq->vq_used_cons_idx -= vq->vq_nentries; +- vq->used_wrap_counter ^= 1; +- } ++ vq->vq_free_cnt += nb_descs; ++ vq->vq_used_cons_idx += nb_descs; ++ if (vq->vq_used_cons_idx >= vq->vq_nentries) { ++ vq->vq_used_cons_idx -= vq->vq_nentries; ++ vq->used_wrap_counter ^= 1; + } + + result = cvq->virtio_net_hdr_mz->addr; +@@ -266,7 +262,7 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl, + sizeof(struct virtio_pmd_ctrl)); + + if (vtpci_packed_queue(vq->hw)) { +- result = virtio_pq_send_command(cvq, ctrl, dlen, pkt_num); ++ result = virtio_send_command_packed(cvq, ctrl, dlen, pkt_num); + goto out_unlock; + } + +diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h +index 75f5782bc..9e74b7bd0 100644 +--- a/drivers/net/virtio/virtqueue.h ++++ b/drivers/net/virtio/virtqueue.h +@@ -256,7 +256,7 @@ struct virtio_tx_region { + }; + + static inline int +-__desc_is_used(struct vring_packed_desc *desc, bool wrap_counter) ++desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq) + { + uint16_t used, avail, flags; + +@@ -264,16 +264,9 @@ __desc_is_used(struct vring_packed_desc *desc, bool wrap_counter) + used = !!(flags & VRING_DESC_F_USED(1)); + avail = !!(flags & VRING_DESC_F_AVAIL(1)); + +- return avail == used && used == wrap_counter; +-} +- +-static inline int +-desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq) +-{ +- return __desc_is_used(desc, vq->used_wrap_counter); ++ return avail == used && used == vq->used_wrap_counter; + } + +- + static inline void + vring_desc_init_packed(struct virtqueue *vq, int n) + { +@@ -329,7 +322,6 @@ virtqueue_enable_intr_packed(struct virtqueue *vq) + { + uint16_t *event_flags = &vq->ring_packed.driver_event->desc_event_flags; + +- + if (vq->event_flags_shadow == RING_EVENT_FLAGS_DISABLE) { + virtio_wmb(); + vq->event_flags_shadow = RING_EVENT_FLAGS_ENABLE; +-- +2.21.0 + diff --git a/SOURCES/0017-net-virtio-user-fix-control-VQ.patch b/SOURCES/0017-net-virtio-user-fix-control-VQ.patch new file mode 100644 index 0000000..4022a78 --- /dev/null +++ b/SOURCES/0017-net-virtio-user-fix-control-VQ.patch @@ -0,0 +1,146 @@ +From e5ee642672921b9e83aaa558067b6b685a7af0a3 Mon Sep 17 00:00:00 2001 +From: Tiwei Bie +Date: Wed, 23 Jan 2019 01:01:41 +0800 +Subject: [PATCH 17/18] net/virtio-user: fix control VQ + +[ upstream commit 45c224e73a3057bf62cb04f83fc1e97457a21ffa ] + +This patch fixed below issues in the packed ring based control +vq support in virtio user: + +1. The idx_hdr should be used_idx instead of the id in the desc; +2. We just need to write out a single used descriptor for each + descriptor list; +3. The avail/used bits should be initialized to 0; + +Meanwhile, make the function name consistent with other parts. 
+ +Fixes: 48a4464029a7 ("net/virtio-user: support control VQ for packed") + +Signed-off-by: Tiwei Bie +Reviewed-by: Maxime Coquelin +(cherry picked from commit 45c224e73a3057bf62cb04f83fc1e97457a21ffa) +Signed-off-by: Jens Freimann +--- + drivers/net/virtio/virtio_ethdev.c | 11 ++++++ + .../net/virtio/virtio_user/virtio_user_dev.c | 37 +++++++++++-------- + drivers/net/virtio/virtio_user_ethdev.c | 7 +--- + 3 files changed, 34 insertions(+), 21 deletions(-) + +diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c +index c12fb157e..a31129484 100644 +--- a/drivers/net/virtio/virtio_ethdev.c ++++ b/drivers/net/virtio/virtio_ethdev.c +@@ -224,6 +224,17 @@ virtio_send_command_packed(struct virtnet_ctl *cvq, + vq->used_wrap_counter ^= 1; + } + ++ PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\n" ++ "vq->vq_avail_idx=%d\n" ++ "vq->vq_used_cons_idx=%d\n" ++ "vq->avail_wrap_counter=%d\n" ++ "vq->used_wrap_counter=%d\n", ++ vq->vq_free_cnt, ++ vq->vq_avail_idx, ++ vq->vq_used_cons_idx, ++ vq->avail_wrap_counter, ++ vq->used_wrap_counter); ++ + result = cvq->virtio_net_hdr_mz->addr; + return result; + } +diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c +index ea5149929..d1157378d 100644 +--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c ++++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c +@@ -632,9 +632,9 @@ desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter) + } + + static uint32_t +-virtio_user_handle_ctrl_msg_pq(struct virtio_user_dev *dev, +- struct vring_packed *vring, +- uint16_t idx_hdr) ++virtio_user_handle_ctrl_msg_packed(struct virtio_user_dev *dev, ++ struct vring_packed *vring, ++ uint16_t idx_hdr) + { + struct virtio_net_ctrl_hdr *hdr; + virtio_net_ctrl_ack status = ~0; +@@ -671,6 +671,10 @@ virtio_user_handle_ctrl_msg_pq(struct virtio_user_dev *dev, + *(virtio_net_ctrl_ack *)(uintptr_t) + vring->desc_packed[idx_status].addr = status; + ++ /* Update used descriptor */ ++ vring->desc_packed[idx_hdr].id = vring->desc_packed[idx_status].id; ++ vring->desc_packed[idx_hdr].len = sizeof(status); ++ + return n_descs; + } + +@@ -679,24 +683,25 @@ virtio_user_handle_cq_packed(struct virtio_user_dev *dev, uint16_t queue_idx) + { + struct virtio_user_queue *vq = &dev->packed_queues[queue_idx]; + struct vring_packed *vring = &dev->packed_vrings[queue_idx]; +- uint16_t id, n_descs; ++ uint16_t n_descs; + + while (desc_is_avail(&vring->desc_packed[vq->used_idx], + vq->used_wrap_counter)) { +- id = vring->desc_packed[vq->used_idx].id; + +- n_descs = virtio_user_handle_ctrl_msg_pq(dev, vring, id); ++ n_descs = virtio_user_handle_ctrl_msg_packed(dev, vring, ++ vq->used_idx); + +- do { +- vring->desc_packed[vq->used_idx].flags = +- VRING_DESC_F_AVAIL(vq->used_wrap_counter) | +- VRING_DESC_F_USED(vq->used_wrap_counter); +- if (++vq->used_idx >= dev->queue_size) { +- vq->used_idx -= dev->queue_size; +- vq->used_wrap_counter ^= 1; +- } +- n_descs--; +- } while (n_descs); ++ rte_smp_wmb(); ++ vring->desc_packed[vq->used_idx].flags = ++ VRING_DESC_F_WRITE | ++ VRING_DESC_F_AVAIL(vq->used_wrap_counter) | ++ VRING_DESC_F_USED(vq->used_wrap_counter); ++ ++ vq->used_idx += n_descs; ++ if (vq->used_idx >= dev->queue_size) { ++ vq->used_idx -= dev->queue_size; ++ vq->used_wrap_counter ^= 1; ++ } + } + } + +diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c +index c01f45cab..6423e1f61 100644 +--- a/drivers/net/virtio/virtio_user_ethdev.c ++++ 
b/drivers/net/virtio/virtio_user_ethdev.c +@@ -274,7 +274,6 @@ virtio_user_get_queue_num(struct virtio_hw *hw, uint16_t queue_id __rte_unused) + static void + virtio_user_setup_queue_packed(struct virtqueue *vq, + struct virtio_user_dev *dev) +- + { + uint16_t queue_idx = vq->vq_queue_index; + struct vring_packed *vring; +@@ -300,10 +299,8 @@ virtio_user_setup_queue_packed(struct virtqueue *vq, + dev->packed_queues[queue_idx].avail_wrap_counter = true; + dev->packed_queues[queue_idx].used_wrap_counter = true; + +- for (i = 0; i < vring->num; i++) { +- vring->desc_packed[i].flags = VRING_DESC_F_USED(1) | +- VRING_DESC_F_AVAIL(1); +- } ++ for (i = 0; i < vring->num; i++) ++ vring->desc_packed[i].flags = 0; + } + + static void +-- +2.21.0 + diff --git a/SOURCES/0018-vhost-batch-used-descs-chains-write-back-with-packed.patch b/SOURCES/0018-vhost-batch-used-descs-chains-write-back-with-packed.patch new file mode 100644 index 0000000..3551eed --- /dev/null +++ b/SOURCES/0018-vhost-batch-used-descs-chains-write-back-with-packed.patch @@ -0,0 +1,97 @@ +From f3bf9a1a9b1ad3419b436855306ad8b5d8efab2f Mon Sep 17 00:00:00 2001 +From: Maxime Coquelin +Date: Thu, 20 Dec 2018 17:47:55 +0100 +Subject: [PATCH 18/18] vhost: batch used descs chains write-back with packed + ring + +[ upstream commit b473ec1131ee44ee25e0536a04be65246b93f4f3 ] + +Instead of writing back descriptors chains in order, let's +write the first chain flags last in order to improve batching. + +Also, move the write barrier in logging cache sync, so that it +is done only when logging is enabled. It means there is now +one more barrier for split ring when logging is enabled. + +With Kernel's pktgen benchmark, ~3% performance gain is measured. + +Signed-off-by: Maxime Coquelin +Acked-by: Michael S. Tsirkin +Reviewed-by: Tiwei Bie +(cherry picked from commit b473ec1131ee44ee25e0536a04be65246b93f4f3) +Signed-off-by: Jens Freimann +--- + lib/librte_vhost/vhost.h | 7 ++----- + lib/librte_vhost/virtio_net.c | 19 ++++++++++++++++--- + 2 files changed, 18 insertions(+), 8 deletions(-) + +diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h +index 552b9298d..adc2fb78e 100644 +--- a/lib/librte_vhost/vhost.h ++++ b/lib/librte_vhost/vhost.h +@@ -456,12 +456,9 @@ vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq) + !dev->log_base)) + return; + +- log_base = (unsigned long *)(uintptr_t)dev->log_base; ++ rte_smp_wmb(); + +- /* +- * It is expected a write memory barrier has been issued +- * before this function is called. 
+- */ ++ log_base = (unsigned long *)(uintptr_t)dev->log_base; + + for (i = 0; i < vq->log_cache_nb_elem; i++) { + struct log_cache_entry *elem = vq->log_cache + i; +diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c +index 15d682c3c..ec70ef947 100644 +--- a/lib/librte_vhost/virtio_net.c ++++ b/lib/librte_vhost/virtio_net.c +@@ -136,6 +136,8 @@ flush_shadow_used_ring_packed(struct virtio_net *dev, + { + int i; + uint16_t used_idx = vq->last_used_idx; ++ uint16_t head_idx = vq->last_used_idx; ++ uint16_t head_flags = 0; + + /* Split loop in two to save memory barriers */ + for (i = 0; i < vq->shadow_used_idx; i++) { +@@ -165,12 +167,17 @@ flush_shadow_used_ring_packed(struct virtio_net *dev, + flags &= ~VRING_DESC_F_AVAIL; + } + +- vq->desc_packed[vq->last_used_idx].flags = flags; ++ if (i > 0) { ++ vq->desc_packed[vq->last_used_idx].flags = flags; + +- vhost_log_cache_used_vring(dev, vq, ++ vhost_log_cache_used_vring(dev, vq, + vq->last_used_idx * + sizeof(struct vring_packed_desc), + sizeof(struct vring_packed_desc)); ++ } else { ++ head_idx = vq->last_used_idx; ++ head_flags = flags; ++ } + + vq->last_used_idx += vq->shadow_used_packed[i].count; + if (vq->last_used_idx >= vq->size) { +@@ -179,7 +186,13 @@ flush_shadow_used_ring_packed(struct virtio_net *dev, + } + } + +- rte_smp_wmb(); ++ vq->desc_packed[head_idx].flags = head_flags; ++ ++ vhost_log_cache_used_vring(dev, vq, ++ head_idx * ++ sizeof(struct vring_packed_desc), ++ sizeof(struct vring_packed_desc)); ++ + vq->shadow_used_idx = 0; + vhost_log_cache_sync(dev, vq); + } +-- +2.21.0 + diff --git a/SOURCES/0019-net-virtio-fix-interrupt-helper-for-packed-ring.patch b/SOURCES/0019-net-virtio-fix-interrupt-helper-for-packed-ring.patch new file mode 100644 index 0000000..44f900c --- /dev/null +++ b/SOURCES/0019-net-virtio-fix-interrupt-helper-for-packed-ring.patch @@ -0,0 +1,42 @@ +From daa23dec25e8e418cd4e921531c82b5aae39b362 Mon Sep 17 00:00:00 2001 +From: Tiwei Bie +Date: Tue, 19 Mar 2019 14:43:04 +0800 +Subject: [PATCH] net/virtio: fix interrupt helper for packed ring + +When disabling interrupt, the shadow event flags should also be +updated accordingly. The unnecessary wmb is also dropped. + +Fixes: e9f4feb7e622 ("net/virtio: add packed virtqueue helpers") +Cc: stable@dpdk.org + +Signed-off-by: Tiwei Bie +Reviewed-by: Maxime Coquelin +--- + drivers/net/virtio/virtqueue.h | 9 +++++---- + 1 file changed, 5 insertions(+), 4 deletions(-) + +diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h +index 9e74b7bd0..c9f1c0afa 100644 +--- a/drivers/net/virtio/virtqueue.h ++++ b/drivers/net/virtio/virtqueue.h +@@ -296,12 +296,13 @@ vring_desc_init_split(struct vring_desc *dp, uint16_t n) + static inline void + virtqueue_disable_intr_packed(struct virtqueue *vq) + { +- uint16_t *event_flags = &vq->ring_packed.driver_event->desc_event_flags; +- +- *event_flags = RING_EVENT_FLAGS_DISABLE; ++ if (vq->event_flags_shadow != RING_EVENT_FLAGS_DISABLE) { ++ vq->event_flags_shadow = RING_EVENT_FLAGS_DISABLE; ++ vq->ring_packed.driver_event->desc_event_flags = ++ vq->event_flags_shadow; ++ } + } + +- + /** + * Tell the backend not to interrupt us. 
+ */ +-- +2.21.0 + diff --git a/SOURCES/0020-net-virtio-fix-calculation-of-device_event-ptr.patch b/SOURCES/0020-net-virtio-fix-calculation-of-device_event-ptr.patch new file mode 100644 index 0000000..1b1c985 --- /dev/null +++ b/SOURCES/0020-net-virtio-fix-calculation-of-device_event-ptr.patch @@ -0,0 +1,30 @@ +From f2e20b51ac6432390ea545e2b6247419dfcaab40 Mon Sep 17 00:00:00 2001 +From: Jens Freimann +Date: Mon, 16 Sep 2019 17:26:16 +0200 +Subject: [PATCH] net/virtio: fix calculation of device_event ptr + +Fix wrong pointer arithmetic. We only need to increment by 1 if we want +to advance it by the size of the driver event area. + +Signed-off-by: Jens Freimann +--- + drivers/net/virtio/virtio_ring.h | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +diff --git a/drivers/net/virtio/virtio_ring.h b/drivers/net/virtio/virtio_ring.h +index 1760823c6..fdc62194e 100644 +--- a/drivers/net/virtio/virtio_ring.h ++++ b/drivers/net/virtio/virtio_ring.h +@@ -165,8 +165,7 @@ vring_init_packed(struct vring_packed *vr, uint8_t *p, unsigned long align, + vr->driver_event = (struct vring_packed_desc_event *)(p + + vr->num * sizeof(struct vring_packed_desc)); + vr->device_event = (struct vring_packed_desc_event *) +- RTE_ALIGN_CEIL((uintptr_t)(vr->driver_event + +- sizeof(struct vring_packed_desc_event)), align); ++ RTE_ALIGN_CEIL((uintptr_t)(vr->driver_event + 1), align); + } + + /* +-- +2.21.0 + diff --git a/SPECS/dpdk.spec b/SPECS/dpdk.spec index 8cdd3b8..c4e53f9 100644 --- a/SPECS/dpdk.spec +++ b/SPECS/dpdk.spec @@ -8,10 +8,10 @@ #% define date 20181127 #% define shortcommit0 %(c=%{commit0}; echo ${c:0:7}) -%define ver 18.11 +%define ver 18.11.2 %define rel 3 -%define srcname dpdk +%define srcname dpdk-stable Name: dpdk Version: %{ver} @@ -37,9 +37,32 @@ Source505: ppc_64-power8-linuxapp-gcc-config Source506: x86_64-native-linuxapp-gcc-config # Patches only in dpdk package -Patch0: 0001-bus-vmbus-fix-race-in-subchannel-creation.patch -Patch1: 0002-net-netvsc-enable-SR-IOV.patch -Patch2: 0003-net-netvsc-disable-multi-queue-on-older-servers.patch + + +# Bug 1525039 +Patch10: 0001-net-virtio-allocate-vrings-on-device-NUMA-node.patch + +# Bug 1700373 +Patch11: 0001-net-virtio-add-packed-virtqueue-defines.patch +Patch12: 0002-net-virtio-add-packed-virtqueue-helpers.patch +Patch13: 0003-net-virtio-vring-init-for-packed-queues.patch +Patch14: 0004-net-virtio-dump-packed-virtqueue-data.patch +Patch15: 0005-net-virtio-implement-Tx-path-for-packed-queues.patch +Patch16: 0006-net-virtio-implement-Rx-path-for-packed-queues.patch +Patch17: 0007-net-virtio-support-packed-queue-in-send-command.patch +Patch18: 0008-net-virtio-user-add-option-to-use-packed-queues.patch +Patch19: 0009-net-virtio-user-fail-if-cq-used-with-packed-vq.patch +Patch20: 0010-net-virtio-enable-packed-virtqueues-by-default.patch +Patch21: 0011-net-virtio-avoid-double-accounting-of-bytes.patch +Patch22: 0012-net-virtio-user-fix-packed-vq-option-parsing.patch +Patch23: 0013-net-virtio-user-fix-supported-features-list.patch +Patch24: 0014-net-virtio-check-head-desc-with-correct-wrap-counter.patch +Patch25: 0015-net-virtio-user-support-control-VQ-for-packed.patch +Patch26: 0016-net-virtio-fix-control-VQ.patch +Patch27: 0017-net-virtio-user-fix-control-VQ.patch +Patch28: 0018-vhost-batch-used-descs-chains-write-back-with-packed.patch +Patch29: 0019-net-virtio-fix-interrupt-helper-for-packed-ring.patch +Patch30: 0020-net-virtio-fix-calculation-of-device_event-ptr.patch Summary: Set of libraries and drivers for fast packet processing @@ 
-151,7 +174,20 @@ unset RTE_SDK RTE_INCLUDE RTE_TARGET # Avoid appending second -Wall to everything, it breaks upstream warning # disablers in makefiles. Strip expclit -march= from optflags since they # will only guarantee build failures, DPDK is picky with that. -export EXTRA_CFLAGS="$(echo %{optflags} | sed -e 's:-Wall::g' -e 's:-march=[[:alnum:]]* ::g') -Wformat -fPIC" +# Note: _hardening_ldflags has to go on the extra cflags line because dpdk is +# astoundingly convoluted in how it processes its linker flags. Fixing it in +# dpdk is the preferred solution, but adjusting to allow a gcc option in the +# ldflags, even when gcc is used as the linker, requires large tree-wide changes +touch obj.o +gcc -### obj.o 2>&1 | awk '/.*collect2.*/ { print $0}' | sed -e 's/\S*\.res\S*//g' -e 's/-z \S*//g' -e 's/[^ ]*\.o//g' -e 's/ /\n/g' | sort -u > ./noopts.txt +gcc -### $RPM_LD_FLAGS obj.o 2>&1 | awk '/.*collect2.*/ {print $0}' | sed -e 's/\S*\.res\S*//g' -e 's/-z \S*//g' -e 's/[^ ]*\.o//g' -e 's/ /\n/g' | sort -u > ./opts.txt +EXTRA_RPM_LDFLAGS=$(comm -13 ./noopts.txt ./opts.txt) +rm -f obj.o + +export EXTRA_CFLAGS="$(echo %{optflags} | sed -e 's:-Wall::g' -e 's:-march=[[:alnum:]]* ::g') -Wformat -fPIC %{_hardening_ldflags}" +export EXTRA_LDFLAGS=$(echo %{__global_ldflags} | sed -e's/-Wl,//g' -e's/-spec.*//') +export HOST_EXTRA_CFLAGS="$EXTRA_CFLAGS $EXTRA_RPM_LDFLAGS" +export EXTRA_HOST_LDFLAGS=$(echo %{__global_ldflags} | sed -e's/-spec.*//') # DPDK defaults to using builder-specific compiler flags. However, # the config has been changed by specifying CONFIG_RTE_MACHINE=default @@ -287,6 +323,32 @@ sed -i -e 's:-%{machine_tmpl}-:-%{machine}-:g' %{buildroot}/%{_sysconfdir}/profi %endif %changelog +* Mon Sep 16 2019 Jens Freimann - 18.11.2-3 +- Add fix for wrong pointer calculation to fix Covscan issue +- https://cov01.lab.eng.brq.redhat.com/covscanhub/task/135452/log/added.html + +* Wed Aug 14 2019 Jens Freimann - 18.11.2-2 +- Backport "net/virtio: allocate vrings on device NUMA node" (#1700373) + +* Thu Jun 27 2019 Timothy Redaelli - 18.11.2-1 +- Updated to DPDK 18.11.2 (#1713704) + +* Fri May 24 2019 Maxime Coquelin - 18.11.8 +- Backport "net/virtio: allocate vrings on device NUMA node" (#1525039) + +* Thu May 23 2019 Timothy Redaelli - 18.11-7 +- Really use the security cflags (copied from Fedora RPM) (#1703985) + +* Fri May 17 2019 Maxime Coquelin - 18.11-6 +- Fix basic CI gating test (#1682308) +- Add manual gating test (#1682308) + +* Tue Mar 26 2019 Maxime Coquelin - 18.11-5 +- Add basic CI gating test (#1682308) + +* Mon Feb 18 2019 Jens Freimann - 18.11-4 +- Set correct offload flags for virtio and allow jumbo frames (#1676646) + * Mon Feb 18 2019 Maxime Coquelin - 18.11.3 - Backport NETVSC pmd fixes (#1676534)