Import of kernel-6.12.0-55.37.1.el10_0

This commit is contained in:
eabdullin 2025-10-07 08:13:55 +00:00
parent 867e2c28af
commit 25c106a7ce
25 changed files with 852 additions and 421 deletions

View File

@ -12,7 +12,7 @@ RHEL_MINOR = 0
#
# Use this spot to avoid future merge conflicts.
# Do not trim this comment.
RHEL_RELEASE = 55.34.1
RHEL_RELEASE = 55.37.1
#
# RHEL_REBASE_NUM

View File

@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright Red Hat
#include <linux/bitfield.h>
#include <linux/cleanup.h>
#include <linux/mutex.h>
#include <linux/pci.h>
@ -14,32 +13,45 @@
static DEFINE_XARRAY(ice_adapters);
static DEFINE_MUTEX(ice_adapters_mutex);
/* PCI bus number is 8 bits. Slot is 5 bits. Domain can have the rest. */
#define INDEX_FIELD_DOMAIN GENMASK(BITS_PER_LONG - 1, 13)
#define INDEX_FIELD_DEV GENMASK(31, 16)
#define INDEX_FIELD_BUS GENMASK(12, 5)
#define INDEX_FIELD_SLOT GENMASK(4, 0)
#define ICE_ADAPTER_FIXED_INDEX BIT_ULL(63)
static unsigned long ice_adapter_index(const struct pci_dev *pdev)
#define ICE_ADAPTER_INDEX_E825C \
(ICE_DEV_ID_E825C_BACKPLANE | ICE_ADAPTER_FIXED_INDEX)
static u64 ice_adapter_index(struct pci_dev *pdev)
{
unsigned int domain = pci_domain_nr(pdev->bus);
WARN_ON(domain > FIELD_MAX(INDEX_FIELD_DOMAIN));
switch (pdev->device) {
case ICE_DEV_ID_E825C_BACKPLANE:
case ICE_DEV_ID_E825C_QSFP:
case ICE_DEV_ID_E825C_SFP:
case ICE_DEV_ID_E825C_SGMII:
return FIELD_PREP(INDEX_FIELD_DEV, pdev->device);
/* E825C devices have multiple NACs which are connected to the
* same clock source, and which must share the same
* ice_adapter structure. We can't use the serial number since
* each NAC has its own NVM generated with its own unique
* Device Serial Number. Instead, rely on the embedded nature
* of the E825C devices, and use a fixed index. This relies on
* the fact that all E825C physical functions in a given
* system are part of the same overall device.
*/
return ICE_ADAPTER_INDEX_E825C;
default:
return FIELD_PREP(INDEX_FIELD_DOMAIN, domain) |
FIELD_PREP(INDEX_FIELD_BUS, pdev->bus->number) |
FIELD_PREP(INDEX_FIELD_SLOT, PCI_SLOT(pdev->devfn));
return pci_get_dsn(pdev) & ~ICE_ADAPTER_FIXED_INDEX;
}
}
static struct ice_adapter *ice_adapter_new(void)
static unsigned long ice_adapter_xa_index(struct pci_dev *pdev)
{
u64 index = ice_adapter_index(pdev);
#if BITS_PER_LONG == 64
return index;
#else
return (u32)index ^ (u32)(index >> 32);
#endif
}
static struct ice_adapter *ice_adapter_new(struct pci_dev *pdev)
{
struct ice_adapter *adapter;
@ -47,6 +59,7 @@ static struct ice_adapter *ice_adapter_new(void)
if (!adapter)
return NULL;
adapter->index = ice_adapter_index(pdev);
spin_lock_init(&adapter->ptp_gltsyn_time_lock);
refcount_set(&adapter->refcount, 1);
@ -77,23 +90,25 @@ static void ice_adapter_free(struct ice_adapter *adapter)
* Return: Pointer to ice_adapter on success.
* ERR_PTR() on error. -ENOMEM is the only possible error.
*/
struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev)
struct ice_adapter *ice_adapter_get(struct pci_dev *pdev)
{
unsigned long index = ice_adapter_index(pdev);
struct ice_adapter *adapter;
unsigned long index;
int err;
index = ice_adapter_xa_index(pdev);
scoped_guard(mutex, &ice_adapters_mutex) {
err = xa_insert(&ice_adapters, index, NULL, GFP_KERNEL);
if (err == -EBUSY) {
adapter = xa_load(&ice_adapters, index);
refcount_inc(&adapter->refcount);
WARN_ON_ONCE(adapter->index != ice_adapter_index(pdev));
return adapter;
}
if (err)
return ERR_PTR(err);
adapter = ice_adapter_new();
adapter = ice_adapter_new(pdev);
if (!adapter)
return ERR_PTR(-ENOMEM);
xa_store(&ice_adapters, index, adapter, GFP_KERNEL);
@ -110,11 +125,12 @@ struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev)
*
* Context: Process, may sleep.
*/
void ice_adapter_put(const struct pci_dev *pdev)
void ice_adapter_put(struct pci_dev *pdev)
{
unsigned long index = ice_adapter_index(pdev);
struct ice_adapter *adapter;
unsigned long index;
index = ice_adapter_xa_index(pdev);
scoped_guard(mutex, &ice_adapters_mutex) {
adapter = xa_load(&ice_adapters, index);
if (WARN_ON(!adapter))

View File

@ -32,6 +32,7 @@ struct ice_port_list {
* @refcount: Reference count. struct ice_pf objects hold the references.
* @ctrl_pf: Control PF of the adapter
* @ports: Ports list
* @index: 64-bit index cached for collision detection on 32-bit systems
*/
struct ice_adapter {
refcount_t refcount;
@ -40,9 +41,10 @@ struct ice_adapter {
struct ice_pf *ctrl_pf;
struct ice_port_list ports;
u64 index;
};
struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev);
void ice_adapter_put(const struct pci_dev *pdev);
struct ice_adapter *ice_adapter_get(struct pci_dev *pdev);
void ice_adapter_put(struct pci_dev *pdev);
#endif /* _ICE_ADAPTER_H */

View File

@ -4217,8 +4217,8 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
struct stmmac_txq_stats *txq_stats;
struct stmmac_tx_queue *tx_q;
u32 pay_len, mss, queue;
dma_addr_t tso_des, des;
u8 proto_hdr_len, hdr;
dma_addr_t des;
bool set_ic;
int i;
@ -4314,14 +4314,15 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
/* If needed take extra descriptors to fill the remaining payload */
tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
tso_des = des;
} else {
stmmac_set_desc_addr(priv, first, des);
tmp_pay_len = pay_len;
des += proto_hdr_len;
tso_des = des + proto_hdr_len;
pay_len = 0;
}
stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
stmmac_tso_allocator(priv, tso_des, tmp_pay_len, (nfrags == 0), queue);
/* In case two or more DMA transmit descriptors are allocated for this
* non-paged SKB data, the DMA buffer address should be saved to

View File

@ -76,6 +76,13 @@ unsigned long sclp_console_full;
/* The currently active SCLP command word. */
static sclp_cmdw_t active_cmd;
/* Convert the 32-bit SCCB address carried in an interrupt parameter to a
 * virtual-address pointer; a zero value means "no SCCB" and yields NULL.
 */
static inline struct sccb_header *sclpint_to_sccb(u32 sccb_int)
{
	return sccb_int ? (struct sccb_header *)__va(sccb_int) : NULL;
}
static inline void sclp_trace(int prio, char *id, u32 a, u64 b, bool err)
{
struct sclp_trace_entry e;
@ -620,7 +627,7 @@ __sclp_find_req(u32 sccb)
static bool ok_response(u32 sccb_int, sclp_cmdw_t cmd)
{
struct sccb_header *sccb = (struct sccb_header *)__va(sccb_int);
struct sccb_header *sccb = sclpint_to_sccb(sccb_int);
struct evbuf_header *evbuf;
u16 response;
@ -659,7 +666,7 @@ static void sclp_interrupt_handler(struct ext_code ext_code,
/* INT: Interrupt received (a=intparm, b=cmd) */
sclp_trace_sccb(0, "INT", param32, active_cmd, active_cmd,
(struct sccb_header *)__va(finished_sccb),
sclpint_to_sccb(finished_sccb),
!ok_response(finished_sccb, active_cmd));
if (finished_sccb) {

View File

@ -55,15 +55,26 @@ static struct file_system_type anon_inode_fs_type = {
.kill_sb = kill_anon_super,
};
static struct inode *anon_inode_make_secure_inode(
const char *name,
const struct inode *context_inode)
/**
* anon_inode_make_secure_inode - allocate an anonymous inode with security context
* @sb: [in] Superblock to allocate from
* @name: [in] Name of the class of the new file (e.g., "secretmem")
* @context_inode:
* [in] Optional parent inode for security inheritance
*
* The function ensures proper security initialization through the LSM hook
* security_inode_init_security_anon().
*
* Return: Pointer to new inode on success, ERR_PTR on failure.
*/
struct inode *anon_inode_make_secure_inode(struct super_block *sb, const char *name,
const struct inode *context_inode)
{
struct inode *inode;
const struct qstr qname = QSTR_INIT(name, strlen(name));
int error;
inode = alloc_anon_inode(anon_inode_mnt->mnt_sb);
inode = alloc_anon_inode(sb);
if (IS_ERR(inode))
return inode;
inode->i_flags &= ~S_PRIVATE;
@ -74,6 +85,7 @@ static struct inode *anon_inode_make_secure_inode(
}
return inode;
}
EXPORT_SYMBOL_GPL(anon_inode_make_secure_inode);
static struct file *__anon_inode_getfile(const char *name,
const struct file_operations *fops,
@ -88,7 +100,8 @@ static struct file *__anon_inode_getfile(const char *name,
return ERR_PTR(-ENOENT);
if (make_inode) {
inode = anon_inode_make_secure_inode(name, context_inode);
inode = anon_inode_make_secure_inode(anon_inode_mnt->mnt_sb,
name, context_inode);
if (IS_ERR(inode)) {
file = ERR_CAST(inode);
goto err;

View File

@ -362,6 +362,10 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
c = 0;
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
#ifdef CONFIG_CIFS_SMB_DIRECT
struct smbdirect_socket_parameters *sp;
#endif
/* channel info will be printed as a part of sessions below */
if (SERVER_IS_CHAN(server))
continue;
@ -383,25 +387,26 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
seq_printf(m, "\nSMBDirect transport not available");
goto skip_rdma;
}
sp = &server->smbd_conn->socket.parameters;
seq_printf(m, "\nSMBDirect (in hex) protocol version: %x "
"transport status: %x",
server->smbd_conn->protocol,
server->smbd_conn->transport_status);
server->smbd_conn->socket.status);
seq_printf(m, "\nConn receive_credit_max: %x "
"send_credit_target: %x max_send_size: %x",
server->smbd_conn->receive_credit_max,
server->smbd_conn->send_credit_target,
server->smbd_conn->max_send_size);
sp->recv_credit_max,
sp->send_credit_target,
sp->max_send_size);
seq_printf(m, "\nConn max_fragmented_recv_size: %x "
"max_fragmented_send_size: %x max_receive_size:%x",
server->smbd_conn->max_fragmented_recv_size,
server->smbd_conn->max_fragmented_send_size,
server->smbd_conn->max_receive_size);
sp->max_fragmented_recv_size,
sp->max_fragmented_send_size,
sp->max_recv_size);
seq_printf(m, "\nConn keep_alive_interval: %x "
"max_readwrite_size: %x rdma_readwrite_threshold: %x",
server->smbd_conn->keep_alive_interval,
server->smbd_conn->max_readwrite_size,
sp->keepalive_interval_msec * 1000,
sp->max_read_write_size,
server->smbd_conn->rdma_readwrite_threshold);
seq_printf(m, "\nDebug count_get_receive_buffer: %x "
"count_put_receive_buffer: %x count_send_empty: %x",

View File

@ -3051,7 +3051,8 @@ void cifs_oplock_break(struct work_struct *work)
struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
oplock_break);
struct inode *inode = d_inode(cfile->dentry);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct super_block *sb = inode->i_sb;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct cifsInodeInfo *cinode = CIFS_I(inode);
struct cifs_tcon *tcon;
struct TCP_Server_Info *server;
@ -3061,6 +3062,12 @@ void cifs_oplock_break(struct work_struct *work)
__u64 persistent_fid, volatile_fid;
__u16 net_fid;
/*
* Hold a reference to the superblock to prevent it and its inodes from
* being freed while we are accessing cinode. Otherwise, _cifsFileInfo_put()
* may release the last reference to the sb and trigger inode eviction.
*/
cifs_sb_active(sb);
wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
TASK_UNINTERRUPTIBLE);
@ -3133,6 +3140,7 @@ oplock_break_ack:
cifs_put_tlink(tlink);
out:
cifs_done_oplock_break(cinode);
cifs_sb_deactive(sb);
}
static int cifs_swap_activate(struct swap_info_struct *sis,

View File

@ -496,6 +496,9 @@ smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
wsize = min_t(unsigned int, wsize, server->max_write);
#ifdef CONFIG_CIFS_SMB_DIRECT
if (server->rdma) {
struct smbdirect_socket_parameters *sp =
&server->smbd_conn->socket.parameters;
if (server->sign)
/*
* Account for SMB2 data transfer packet header and
@ -503,12 +506,12 @@ smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
*/
wsize = min_t(unsigned int,
wsize,
server->smbd_conn->max_fragmented_send_size -
sp->max_fragmented_send_size -
SMB2_READWRITE_PDU_HEADER_SIZE -
sizeof(struct smb2_transform_hdr));
else
wsize = min_t(unsigned int,
wsize, server->smbd_conn->max_readwrite_size);
wsize, sp->max_read_write_size);
}
#endif
if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
@ -544,6 +547,9 @@ smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
rsize = min_t(unsigned int, rsize, server->max_read);
#ifdef CONFIG_CIFS_SMB_DIRECT
if (server->rdma) {
struct smbdirect_socket_parameters *sp =
&server->smbd_conn->socket.parameters;
if (server->sign)
/*
* Account for SMB2 data transfer packet header and
@ -551,12 +557,12 @@ smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
*/
rsize = min_t(unsigned int,
rsize,
server->smbd_conn->max_fragmented_recv_size -
sp->max_fragmented_recv_size -
SMB2_READWRITE_PDU_HEADER_SIZE -
sizeof(struct smb2_transform_hdr));
else
rsize = min_t(unsigned int,
rsize, server->smbd_conn->max_readwrite_size);
rsize, sp->max_read_write_size);
}
#endif

View File

@ -36,6 +36,7 @@
#include "smb2glob.h"
#include "cifspdu.h"
#include "cifs_spnego.h"
#include "../common/smbdirect/smbdirect.h"
#include "smbdirect.h"
#include "trace.h"
#ifdef CONFIG_CIFS_DFS_UPCALL
@ -4427,10 +4428,10 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
#ifdef CONFIG_CIFS_SMB_DIRECT
/*
* If we want to do a RDMA write, fill in and append
* smbd_buffer_descriptor_v1 to the end of read request
* smbdirect_buffer_descriptor_v1 to the end of read request
*/
if (rdata && smb3_use_rdma_offload(io_parms)) {
struct smbd_buffer_descriptor_v1 *v1;
struct smbdirect_buffer_descriptor_v1 *v1;
bool need_invalidate = server->dialect == SMB30_PROT_ID;
rdata->mr = smbd_register_mr(server->smbd_conn, &rdata->subreq.io_iter,
@ -4444,8 +4445,8 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
req->ReadChannelInfoOffset =
cpu_to_le16(offsetof(struct smb2_read_req, Buffer));
req->ReadChannelInfoLength =
cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1));
v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0];
cpu_to_le16(sizeof(struct smbdirect_buffer_descriptor_v1));
v1 = (struct smbdirect_buffer_descriptor_v1 *) &req->Buffer[0];
v1->offset = cpu_to_le64(rdata->mr->mr->iova);
v1->token = cpu_to_le32(rdata->mr->mr->rkey);
v1->length = cpu_to_le32(rdata->mr->mr->length);
@ -4957,10 +4958,10 @@ smb2_async_writev(struct cifs_io_subrequest *wdata)
#ifdef CONFIG_CIFS_SMB_DIRECT
/*
* If we want to do a server RDMA read, fill in and append
* smbd_buffer_descriptor_v1 to the end of write request
* smbdirect_buffer_descriptor_v1 to the end of write request
*/
if (smb3_use_rdma_offload(io_parms)) {
struct smbd_buffer_descriptor_v1 *v1;
struct smbdirect_buffer_descriptor_v1 *v1;
bool need_invalidate = server->dialect == SMB30_PROT_ID;
wdata->mr = smbd_register_mr(server->smbd_conn, &wdata->subreq.io_iter,
@ -4979,8 +4980,8 @@ smb2_async_writev(struct cifs_io_subrequest *wdata)
req->WriteChannelInfoOffset =
cpu_to_le16(offsetof(struct smb2_write_req, Buffer));
req->WriteChannelInfoLength =
cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1));
v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0];
cpu_to_le16(sizeof(struct smbdirect_buffer_descriptor_v1));
v1 = (struct smbdirect_buffer_descriptor_v1 *) &req->Buffer[0];
v1->offset = cpu_to_le64(wdata->mr->mr->iova);
v1->token = cpu_to_le32(wdata->mr->mr->rkey);
v1->length = cpu_to_le32(wdata->mr->mr->length);

File diff suppressed because it is too large Load Diff

View File

@ -15,6 +15,9 @@
#include <rdma/rdma_cm.h>
#include <linux/mempool.h>
#include "../common/smbdirect/smbdirect.h"
#include "../common/smbdirect/smbdirect_socket.h"
extern int rdma_readwrite_threshold;
extern int smbd_max_frmr_depth;
extern int smbd_keep_alive_interval;
@ -50,14 +53,8 @@ enum smbd_connection_status {
* 5. mempools for allocating packets
*/
struct smbd_connection {
enum smbd_connection_status transport_status;
struct smbdirect_socket socket;
/* RDMA related */
struct rdma_cm_id *id;
struct ib_qp_init_attr qp_attr;
struct ib_pd *pd;
struct ib_cq *send_cq, *recv_cq;
struct ib_device_attr dev_attr;
int ri_rc;
struct completion ri_done;
wait_queue_head_t conn_wait;
@ -72,15 +69,7 @@ struct smbd_connection {
spinlock_t lock_new_credits_offered;
int new_credits_offered;
/* Connection parameters defined in [MS-SMBD] 3.1.1.1 */
int receive_credit_max;
int send_credit_target;
int max_send_size;
int max_fragmented_recv_size;
int max_fragmented_send_size;
int max_receive_size;
int keep_alive_interval;
int max_readwrite_size;
/* dynamic connection parameters defined in [MS-SMBD] 3.1.1.1 */
enum keep_alive_status keep_alive_requested;
int protocol;
atomic_t send_credits;
@ -177,54 +166,6 @@ enum smbd_message_type {
SMBD_TRANSFER_DATA,
};
#define SMB_DIRECT_RESPONSE_REQUESTED 0x0001
/* SMBD negotiation request packet [MS-SMBD] 2.2.1 */
struct smbd_negotiate_req {
__le16 min_version;
__le16 max_version;
__le16 reserved;
__le16 credits_requested;
__le32 preferred_send_size;
__le32 max_receive_size;
__le32 max_fragmented_size;
} __packed;
/* SMBD negotiation response packet [MS-SMBD] 2.2.2 */
struct smbd_negotiate_resp {
__le16 min_version;
__le16 max_version;
__le16 negotiated_version;
__le16 reserved;
__le16 credits_requested;
__le16 credits_granted;
__le32 status;
__le32 max_readwrite_size;
__le32 preferred_send_size;
__le32 max_receive_size;
__le32 max_fragmented_size;
} __packed;
/* SMBD data transfer packet with payload [MS-SMBD] 2.2.3 */
struct smbd_data_transfer {
__le16 credits_requested;
__le16 credits_granted;
__le16 flags;
__le16 reserved;
__le32 remaining_data_length;
__le32 data_offset;
__le32 data_length;
__le32 padding;
__u8 buffer[];
} __packed;
/* The packet fields for a registered RDMA buffer */
struct smbd_buffer_descriptor_v1 {
__le64 offset;
__le32 token;
__le32 length;
} __packed;
/* Maximum number of SGEs used by smbdirect.c in any send work request */
#define SMBDIRECT_MAX_SEND_SGE 6

View File

@ -0,0 +1,37 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (C) 2017, Microsoft Corporation.
* Copyright (C) 2018, LG Electronics.
*/
#ifndef __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_H__
#define __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_H__
/* SMB-DIRECT buffer descriptor V1 structure [MS-SMBD] 2.2.3.1 */
struct smbdirect_buffer_descriptor_v1 {
	__le64 offset;	/* remote address of the registered buffer (mr->iova) */
	__le32 token;	/* RDMA steering tag (mr->rkey) */
	__le32 length;	/* length in bytes of the registered region */
} __packed;
/*
* Connection parameters mostly from [MS-SMBD] 3.1.1.1
*
* These are setup and negotiated at the beginning of a
* connection and remain constant unless explicitly changed.
*
* Some values are important for the upper layer.
*/
struct smbdirect_socket_parameters {
	/* maximum receive credits granted to the peer */
	__u16 recv_credit_max;
	/* number of send credits we aim to keep available */
	__u16 send_credit_target;
	/* maximum size in bytes of a single SMBDirect send */
	__u32 max_send_size;
	/* maximum size of a fragmented message we may send */
	__u32 max_fragmented_send_size;
	/* maximum size in bytes of a single SMBDirect receive */
	__u32 max_recv_size;
	/* maximum size of a fragmented message we can reassemble */
	__u32 max_fragmented_recv_size;
	/* maximum RDMA read/write payload size in bytes */
	__u32 max_read_write_size;
	/* keepalive interval, in milliseconds */
	__u32 keepalive_interval_msec;
	/* keepalive timeout, in milliseconds */
	__u32 keepalive_timeout_msec;
} __packed;
#endif /* __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_H__ */

View File

@ -0,0 +1,55 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (c) 2017 Stefan Metzmacher
*/
#ifndef __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_PDU_H__
#define __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_PDU_H__
#define SMBDIRECT_V1 0x0100
/* SMBD negotiation request packet [MS-SMBD] 2.2.1 */
struct smbdirect_negotiate_req {
	__le16 min_version;		/* lowest protocol version supported */
	__le16 max_version;		/* highest protocol version supported */
	__le16 reserved;
	__le16 credits_requested;	/* send credits requested from the peer */
	__le32 preferred_send_size;
	__le32 max_receive_size;
	__le32 max_fragmented_size;
} __packed;
/* SMBD negotiation response packet [MS-SMBD] 2.2.2 */
struct smbdirect_negotiate_resp {
	__le16 min_version;		/* lowest protocol version supported */
	__le16 max_version;		/* highest protocol version supported */
	__le16 negotiated_version;	/* version selected by the peer */
	__le16 reserved;
	__le16 credits_requested;
	__le16 credits_granted;
	__le32 status;			/* negotiation status code */
	__le32 max_readwrite_size;
	__le32 preferred_send_size;
	__le32 max_receive_size;
	__le32 max_fragmented_size;
} __packed;
#define SMBDIRECT_DATA_MIN_HDR_SIZE 0x14
#define SMBDIRECT_DATA_OFFSET 0x18
#define SMBDIRECT_FLAG_RESPONSE_REQUESTED 0x0001
/* SMBD data transfer packet with payload [MS-SMBD] 2.2.3 */
struct smbdirect_data_transfer {
	__le16 credits_requested;
	__le16 credits_granted;
	__le16 flags;			/* e.g. SMBDIRECT_FLAG_RESPONSE_REQUESTED */
	__le16 reserved;
	/* bytes still outstanding in a fragmented message (per spec) */
	__le32 remaining_data_length;
	__le32 data_offset;		/* payload offset from start of packet */
	__le32 data_length;		/* payload bytes in this packet */
	__le32 padding;
	__u8 buffer[];			/* payload (flexible array member) */
} __packed;
#endif /* __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_PDU_H__ */

View File

@ -0,0 +1,43 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Copyright (c) 2025 Stefan Metzmacher
*/
#ifndef __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_SOCKET_H__
#define __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_SOCKET_H__
/* Lifecycle states of an smbdirect_socket.  Presumably ordered
 * CREATED -> CONNECTING -> CONNECTED, ending in NEGOTIATE_FAILED or
 * DISCONNECTING -> DISCONNECTED -> DESTROYED; the exact transitions
 * are driven by the users of this header — confirm against callers.
 */
enum smbdirect_socket_status {
	SMBDIRECT_SOCKET_CREATED,
	SMBDIRECT_SOCKET_CONNECTING,
	SMBDIRECT_SOCKET_CONNECTED,
	SMBDIRECT_SOCKET_NEGOTIATE_FAILED,
	SMBDIRECT_SOCKET_DISCONNECTING,
	SMBDIRECT_SOCKET_DISCONNECTED,
	SMBDIRECT_SOCKET_DESTROYED
};
/* Common state of an SMBDirect connection: the RDMA CM id, the IB verbs
 * objects derived from it, and the negotiated connection parameters.
 */
struct smbdirect_socket {
	enum smbdirect_socket_status status;

	/* RDMA related */
	struct {
		struct rdma_cm_id *cm_id;
	} rdma;

	/* IB verbs related */
	struct {
		struct ib_pd *pd;
		struct ib_cq *send_cq;
		struct ib_cq *recv_cq;

		/*
		 * shortcuts for rdma.cm_id->{qp,device};
		 */
		struct ib_qp *qp;
		struct ib_device *dev;
	} ib;

	/* connection parameters, see [MS-SMBD] 3.1.1.1 */
	struct smbdirect_socket_parameters parameters;
};
#endif /* __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_SOCKET_H__ */

View File

@ -3405,6 +3405,8 @@ extern int simple_write_begin(struct file *file, struct address_space *mapping,
extern const struct address_space_operations ram_aops;
extern int always_delete_dentry(const struct dentry *);
extern struct inode *alloc_anon_inode(struct super_block *);
struct inode *anon_inode_make_secure_inode(struct super_block *sb, const char *name,
const struct inode *context_inode);
extern int simple_nosetlease(struct file *, int, struct file_lease **, void **);
extern const struct dentry_operations simple_dentry_operations;

View File

@ -41,6 +41,7 @@
#include <net/inet_ecn.h>
#include <net/dst.h>
#include <net/mptcp.h>
#include RH_KABI_HIDE_INCLUDE(<net/xfrm.h>)
#include <linux/seq_file.h>
#include <linux/memcontrol.h>
@ -683,6 +684,19 @@ void tcp_fin(struct sock *sk);
void tcp_check_space(struct sock *sk);
void tcp_sack_compress_send_ack(struct sock *sk);
/* Drop per-skb state that must not be kept once the skb is handed to the
 * receive path: the dst entry and the xfrm secure path.
 */
static inline void tcp_cleanup_skb(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	secpath_reset(skb);
}
/* Append @skb to the socket receive queue.  The skb must already be
 * stripped of its dst and secpath (warned on in debug builds).
 */
static inline void tcp_add_receive_queue(struct sock *sk, struct sk_buff *skb)
{
	DEBUG_NET_WARN_ON_ONCE(skb_dst(skb));
	DEBUG_NET_WARN_ON_ONCE(secpath_exists(skb));
	__skb_queue_tail(&sk->sk_receive_queue, skb);
}
/* tcp_timer.c */
void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)

View File

@ -337,6 +337,7 @@ int io_futex_wait(struct io_kiocb *req, unsigned int issue_flags)
goto done_unlock;
}
req->flags |= REQ_F_ASYNC_DATA;
req->async_data = ifd;
ifd->q = futex_q_init;
ifd->q.bitset = iof->futex_mask;
@ -359,6 +360,8 @@ done:
if (ret < 0)
req_set_fail(req);
io_req_set_res(req, ret, 0);
req->async_data = NULL;
req->flags &= ~REQ_F_ASYNC_DATA;
kfree(ifd);
return IOU_OK;
}

View File

@ -195,19 +195,11 @@ static struct file *secretmem_file_create(unsigned long flags)
struct file *file;
struct inode *inode;
const char *anon_name = "[secretmem]";
const struct qstr qname = QSTR_INIT(anon_name, strlen(anon_name));
int err;
inode = alloc_anon_inode(secretmem_mnt->mnt_sb);
inode = anon_inode_make_secure_inode(secretmem_mnt->mnt_sb, anon_name, NULL);
if (IS_ERR(inode))
return ERR_CAST(inode);
err = security_inode_init_security_anon(inode, &qname, NULL);
if (err) {
file = ERR_PTR(err);
goto err_free_inode;
}
file = alloc_file_pseudo(inode, secretmem_mnt, "secretmem",
O_RDWR, &secretmem_fops);
if (IS_ERR(file))

View File

@ -178,7 +178,7 @@ void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
if (!skb)
return;
skb_dst_drop(skb);
tcp_cleanup_skb(skb);
/* segs_in has been initialized to 1 in tcp_create_openreq_child().
* Hence, reset segs_in to 0 before calling tcp_segs_in()
* to avoid double counting. Also, tcp_segs_in() expects
@ -195,7 +195,7 @@ void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;
tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
__skb_queue_tail(&sk->sk_receive_queue, skb);
tcp_add_receive_queue(sk, skb);
tp->syn_data_acked = 1;
/* u64_stats_update_begin(&tp->syncp) not needed here,

View File

@ -4970,7 +4970,7 @@ static void tcp_ofo_queue(struct sock *sk)
tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
if (!eaten)
__skb_queue_tail(&sk->sk_receive_queue, skb);
tcp_add_receive_queue(sk, skb);
else
kfree_skb_partial(skb, fragstolen);
@ -5162,7 +5162,7 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb,
skb, fragstolen)) ? 1 : 0;
tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq);
if (!eaten) {
__skb_queue_tail(&sk->sk_receive_queue, skb);
tcp_add_receive_queue(sk, skb);
skb_set_owner_r(skb, sk);
}
return eaten;
@ -5245,7 +5245,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
__kfree_skb(skb);
return;
}
skb_dst_drop(skb);
tcp_cleanup_skb(skb);
__skb_pull(skb, tcp_hdr(skb)->doff * 4);
reason = SKB_DROP_REASON_NOT_SPECIFIED;
@ -6214,7 +6214,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS);
/* Bulk data transfer: receiver */
skb_dst_drop(skb);
tcp_cleanup_skb(skb);
__skb_pull(skb, tcp_header_len);
eaten = tcp_queue_rcv(sk, skb, &fragstolen);

View File

@ -2024,7 +2024,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
*/
skb_condense(skb);
skb_dst_drop(skb);
tcp_cleanup_skb(skb);
if (unlikely(tcp_checksum_complete(skb))) {
bh_unlock_sock(sk);

View File

@ -1749,6 +1749,9 @@ int decrypt_skb(struct sock *sk, struct scatterlist *sgout)
return tls_decrypt_sg(sk, NULL, sgout, &darg);
}
/* All records returned from a recvmsg() call must have the same type.
* 0 is not a valid content type. Use it as "no type reported, yet".
*/
static int tls_record_content_type(struct msghdr *msg, struct tls_msg *tlm,
u8 *control)
{
@ -1992,8 +1995,10 @@ int tls_sw_recvmsg(struct sock *sk,
if (err < 0)
goto end;
/* process_rx_list() will set @control if it processed any records */
copied = err;
if (len <= copied || (copied && control != TLS_RECORD_TYPE_DATA) || rx_more)
if (len <= copied || rx_more ||
(control && control != TLS_RECORD_TYPE_DATA))
goto end;
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

View File

@ -179,13 +179,12 @@ static int tls_send_cmsg(int fd, unsigned char record_type,
return sendmsg(fd, &msg, flags);
}
static int tls_recv_cmsg(struct __test_metadata *_metadata,
int fd, unsigned char record_type,
void *data, size_t len, int flags)
static int __tls_recv_cmsg(struct __test_metadata *_metadata,
int fd, unsigned char *ctype,
void *data, size_t len, int flags)
{
char cbuf[CMSG_SPACE(sizeof(char))];
struct cmsghdr *cmsg;
unsigned char ctype;
struct msghdr msg;
struct iovec vec;
int n;
@ -204,7 +203,20 @@ static int tls_recv_cmsg(struct __test_metadata *_metadata,
EXPECT_NE(cmsg, NULL);
EXPECT_EQ(cmsg->cmsg_level, SOL_TLS);
EXPECT_EQ(cmsg->cmsg_type, TLS_GET_RECORD_TYPE);
ctype = *((unsigned char *)CMSG_DATA(cmsg));
if (ctype)
*ctype = *((unsigned char *)CMSG_DATA(cmsg));
return n;
}
/* Receive one TLS record and check that the cmsg-reported record type
 * equals @record_type.  Returns the recvmsg() result (payload length or
 * -1 on error).
 */
static int tls_recv_cmsg(struct __test_metadata *_metadata,
			 int fd, unsigned char record_type,
			 void *data, size_t len, int flags)
{
	/* Zero-init: __tls_recv_cmsg() only stores through the pointer on
	 * its success path, so avoid comparing an indeterminate value when
	 * the receive fails.
	 */
	unsigned char ctype = 0;
	int n;

	n = __tls_recv_cmsg(_metadata, fd, &ctype, data, len, flags);
	EXPECT_EQ(ctype, record_type);
	return n;
}
@ -1668,6 +1680,283 @@ TEST_F(tls, recv_efault)
EXPECT_EQ(memcmp(rec2, recv_mem + 9, ret - 9), 0);
}
struct raw_rec {
unsigned int plain_len;
unsigned char plain_data[100];
unsigned int cipher_len;
unsigned char cipher_data[128];
};
/* TLS 1.2, AES_CCM, data, seqno:0, plaintext: 'Hello world' */
static const struct raw_rec id0_data_l11 = {
.plain_len = 11,
.plain_data = {
0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f,
0x72, 0x6c, 0x64,
},
.cipher_len = 40,
.cipher_data = {
0x17, 0x03, 0x03, 0x00, 0x23, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x26, 0xa2, 0x33,
0xde, 0x8d, 0x94, 0xf0, 0x29, 0x6c, 0xb1, 0xaf,
0x6a, 0x75, 0xb2, 0x93, 0xad, 0x45, 0xd5, 0xfd,
0x03, 0x51, 0x57, 0x8f, 0xf9, 0xcc, 0x3b, 0x42,
},
};
/* TLS 1.2, AES_CCM, ctrl, seqno:0, plaintext: '' */
static const struct raw_rec id0_ctrl_l0 = {
.plain_len = 0,
.plain_data = {
},
.cipher_len = 29,
.cipher_data = {
0x16, 0x03, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x38, 0x7b,
0xa6, 0x1c, 0xdd, 0xa7, 0x19, 0x33, 0xab, 0xae,
0x88, 0xe1, 0xd2, 0x08, 0x4f,
},
};
/* TLS 1.2, AES_CCM, data, seqno:0, plaintext: '' */
static const struct raw_rec id0_data_l0 = {
.plain_len = 0,
.plain_data = {
},
.cipher_len = 29,
.cipher_data = {
0x17, 0x03, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xc5, 0x37, 0x90,
0x70, 0x45, 0x89, 0xfb, 0x5c, 0xc7, 0x89, 0x03,
0x68, 0x80, 0xd3, 0xd8, 0xcc,
},
};
/* TLS 1.2, AES_CCM, data, seqno:1, plaintext: 'Hello world' */
static const struct raw_rec id1_data_l11 = {
.plain_len = 11,
.plain_data = {
0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f,
0x72, 0x6c, 0x64,
},
.cipher_len = 40,
.cipher_data = {
0x17, 0x03, 0x03, 0x00, 0x23, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x01, 0x3a, 0x1a, 0x9c,
0xd0, 0xa8, 0x9a, 0xd6, 0x69, 0xd6, 0x1a, 0xe3,
0xb5, 0x1f, 0x0d, 0x2c, 0xe2, 0x97, 0x46, 0xff,
0x2b, 0xcc, 0x5a, 0xc4, 0xa3, 0xb9, 0xef, 0xba,
},
};
/* TLS 1.2, AES_CCM, ctrl, seqno:1, plaintext: '' */
static const struct raw_rec id1_ctrl_l0 = {
.plain_len = 0,
.plain_data = {
},
.cipher_len = 29,
.cipher_data = {
0x16, 0x03, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x01, 0x3e, 0xf0, 0xfe,
0xee, 0xd9, 0xe2, 0x5d, 0xc7, 0x11, 0x4c, 0xe6,
0xb4, 0x7e, 0xef, 0x40, 0x2b,
},
};
/* TLS 1.2, AES_CCM, data, seqno:1, plaintext: '' */
static const struct raw_rec id1_data_l0 = {
.plain_len = 0,
.plain_data = {
},
.cipher_len = 29,
.cipher_data = {
0x17, 0x03, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x01, 0xce, 0xfc, 0x86,
0xc8, 0xf0, 0x55, 0xf9, 0x47, 0x3f, 0x74, 0xdc,
0xc9, 0xbf, 0xfe, 0x5b, 0xb1,
},
};
/* TLS 1.2, AES_CCM, ctrl, seqno:2, plaintext: 'Hello world' */
static const struct raw_rec id2_ctrl_l11 = {
.plain_len = 11,
.plain_data = {
0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f,
0x72, 0x6c, 0x64,
},
.cipher_len = 40,
.cipher_data = {
0x16, 0x03, 0x03, 0x00, 0x23, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x02, 0xe5, 0x3d, 0x19,
0x3d, 0xca, 0xb8, 0x16, 0xb6, 0xff, 0x79, 0x87,
0x2a, 0x04, 0x11, 0x3d, 0xf8, 0x64, 0x5f, 0x36,
0x8b, 0xa8, 0xee, 0x4c, 0x6d, 0x62, 0xa5, 0x00,
},
};
/* TLS 1.2, AES_CCM, data, seqno:2, plaintext: 'Hello world' */
static const struct raw_rec id2_data_l11 = {
.plain_len = 11,
.plain_data = {
0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f,
0x72, 0x6c, 0x64,
},
.cipher_len = 40,
.cipher_data = {
0x17, 0x03, 0x03, 0x00, 0x23, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x02, 0xe5, 0x3d, 0x19,
0x3d, 0xca, 0xb8, 0x16, 0xb6, 0xff, 0x79, 0x87,
0x8e, 0xa1, 0xd0, 0xcd, 0x33, 0xb5, 0x86, 0x2b,
0x17, 0xf1, 0x52, 0x2a, 0x55, 0x62, 0x65, 0x11,
},
};
/* TLS 1.2, AES_CCM, ctrl, seqno:2, plaintext: '' */
static const struct raw_rec id2_ctrl_l0 = {
.plain_len = 0,
.plain_data = {
},
.cipher_len = 29,
.cipher_data = {
0x16, 0x03, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x02, 0xdc, 0x5c, 0x0e,
0x41, 0xdd, 0xba, 0xd3, 0xcc, 0xcf, 0x6d, 0xd9,
0x06, 0xdb, 0x79, 0xe5, 0x5d,
},
};
/* TLS 1.2, AES_CCM, data, seqno:2, plaintext: '' */
static const struct raw_rec id2_data_l0 = {
.plain_len = 0,
.plain_data = {
},
.cipher_len = 29,
.cipher_data = {
0x17, 0x03, 0x03, 0x00, 0x18, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x02, 0xc3, 0xca, 0x26,
0x22, 0xe4, 0x25, 0xfb, 0x5f, 0x6d, 0xbf, 0x83,
0x30, 0x48, 0x69, 0x1a, 0x47,
},
};
FIXTURE(zero_len)
{
int fd, cfd;
bool notls;
};
FIXTURE_VARIANT(zero_len)
{
const struct raw_rec *recs[4];
ssize_t recv_ret[4];
};
FIXTURE_VARIANT_ADD(zero_len, data_data_data)
{
.recs = { &id0_data_l11, &id1_data_l11, &id2_data_l11, },
.recv_ret = { 33, -EAGAIN, },
};
FIXTURE_VARIANT_ADD(zero_len, data_0ctrl_data)
{
.recs = { &id0_data_l11, &id1_ctrl_l0, &id2_data_l11, },
.recv_ret = { 11, 0, 11, -EAGAIN, },
};
FIXTURE_VARIANT_ADD(zero_len, 0data_0data_0data)
{
.recs = { &id0_data_l0, &id1_data_l0, &id2_data_l0, },
.recv_ret = { -EAGAIN, },
};
FIXTURE_VARIANT_ADD(zero_len, 0data_0data_ctrl)
{
.recs = { &id0_data_l0, &id1_data_l0, &id2_ctrl_l11, },
.recv_ret = { 0, 11, -EAGAIN, },
};
FIXTURE_VARIANT_ADD(zero_len, 0data_0data_0ctrl)
{
.recs = { &id0_data_l0, &id1_data_l0, &id2_ctrl_l0, },
.recv_ret = { 0, 0, -EAGAIN, },
};
FIXTURE_VARIANT_ADD(zero_len, 0ctrl_0ctrl_0ctrl)
{
.recs = { &id0_ctrl_l0, &id1_ctrl_l0, &id2_ctrl_l0, },
.recv_ret = { 0, 0, 0, -EAGAIN, },
};
FIXTURE_VARIANT_ADD(zero_len, 0data_0data_data)
{
.recs = { &id0_data_l0, &id1_data_l0, &id2_data_l11, },
.recv_ret = { 11, -EAGAIN, },
};
FIXTURE_VARIANT_ADD(zero_len, data_0data_0data)
{
.recs = { &id0_data_l11, &id1_data_l0, &id2_data_l0, },
.recv_ret = { 11, -EAGAIN, },
};
FIXTURE_SETUP(zero_len)
{
	struct tls_crypto_info_keys tls12;
	int ret;

	/* TLS 1.2 + AES-CCM-128 matches the pre-computed raw records above */
	tls_crypto_info_init(TLS_1_2_VERSION, TLS_CIPHER_AES_CCM_128, &tls12);

	ulp_sock_pair(_metadata, &self->fd, &self->cfd, &self->notls);
	if (self->notls)
		return;

	/* Don't install keys on fd, we'll send raw records */
	ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12, tls12.len);
	ASSERT_EQ(ret, 0);
}
FIXTURE_TEARDOWN(zero_len)
{
close(self->fd);
close(self->cfd);
}
/* Feed the variant's raw ciphertext records into the kernel TLS socket,
 * then read them back and check both the per-recvmsg() return values and
 * that the received bytes match the concatenated plaintexts.
 */
TEST_F(zero_len, test)
{
	const struct raw_rec *const *rec;
	unsigned char buf[128];
	int rec_off;
	int i;

	/* Inject the raw records; fd has no TLS keys installed. */
	for (i = 0; i < 4 && variant->recs[i]; i++)
		EXPECT_EQ(send(self->fd, variant->recs[i]->cipher_data,
			       variant->recs[i]->cipher_len, 0),
			  variant->recs[i]->cipher_len);

	rec = &variant->recs[0];
	rec_off = 0;
	for (i = 0; i < 4; i++) {
		int j, ret;

		/* Negative recv_ret entries encode an expected errno. */
		ret = variant->recv_ret[i] >= 0 ? variant->recv_ret[i] : -1;
		EXPECT_EQ(__tls_recv_cmsg(_metadata, self->cfd, NULL,
					  buf, sizeof(buf), MSG_DONTWAIT), ret);
		if (ret == -1)
			EXPECT_EQ(errno, -variant->recv_ret[i]);
		if (variant->recv_ret[i] == -EAGAIN)
			break;

		/* Walk the expected plaintext across record boundaries;
		 * zero-length records contribute no bytes and are skipped.
		 */
		for (j = 0; j < ret; j++) {
			while (rec_off == (*rec)->plain_len) {
				rec++;
				rec_off = 0;
			}
			EXPECT_EQ(buf[j], (*rec)->plain_data[rec_off]);
			rec_off++;
		}
	}
};
FIXTURE(tls_err)
{
int fd, cfd;