Import of kernel-4.18.0-553.82.1.el8_10

This commit is contained in:
almalinux-bot-kernel 2025-11-08 04:06:22 +00:00
parent b0aa308873
commit baad5c6bff
55 changed files with 2879 additions and 1309 deletions

View File

@ -4757,6 +4757,20 @@
rhash_entries= [KNL,NET] rhash_entries= [KNL,NET]
Set number of hash buckets for route cache Set number of hash buckets for route cache
rh_waived=
Enable waived items in RHEL.
Some specific features, or security mitigations, can be
waived (toggled on/off) on demand in RHEL. However,
waiving any of these items should be used judiciously,
as it generally means the system might end up being
considered insecure or even out-of-scope for support.
Format: <item-1>,<item-2>...<item-n>
Use 'rh_waived' to enable all waived features listed at
Documentation/admin-guide/rh-waived-features.rst
ring3mwait=disable ring3mwait=disable
[KNL] Disable ring 3 MONITOR/MWAIT feature on supported [KNL] Disable ring 3 MONITOR/MWAIT feature on supported
CPUs. CPUs.

View File

@ -0,0 +1,35 @@
.. _rh_waived_items:
====================
Red Hat Waived Items
====================
Waived Items is a mechanism offered by Red Hat which allows customers to "waive"
and utilize features that are not enabled by default as these are considered
unmaintained, insecure, rudimentary, or deprecated, but are shipped with the
RHEL kernel for the customer's convenience only.
Waived Items can range from features that can be enabled on demand to specific
security mitigations that can be disabled on demand.
To explicitly "waive" any of these items, RHEL offers the ``rh_waived``
kernel boot parameter. To allow a set of waived items, append
``rh_waived=<item name>,...,<item name>`` to the kernel
cmdline.
Appending ``rh_waived=features`` will waive all features listed below,
and appending ``rh_waived=cves`` will waive all security mitigations
listed below.
The waived items listed in the next section follow the pattern below:
- item name
item description
List of Red Hat Waived Items
============================
- CVE-2025-38085
Waiving this mitigation can help with addressing perceived performance
degradation on some workloads utilizing huge-pages [1] at the expense
of re-introducing conditions to allow for the data race that leads to
the enumerated common vulnerability.
[1] https://access.redhat.com/solutions/7132440

View File

@ -12,7 +12,7 @@ RHEL_MINOR = 10
# #
# Use this spot to avoid future merge conflicts. # Use this spot to avoid future merge conflicts.
# Do not trim this comment. # Do not trim this comment.
RHEL_RELEASE = 553.81.1 RHEL_RELEASE = 553.82.1
# #
# ZSTREAM # ZSTREAM

View File

@ -1339,7 +1339,8 @@ static int veth_alloc_queues(struct net_device *dev)
struct veth_priv *priv = netdev_priv(dev); struct veth_priv *priv = netdev_priv(dev);
int i; int i;
priv->rq = kcalloc(dev->num_rx_queues, sizeof(*priv->rq), GFP_KERNEL); priv->rq = kvcalloc(dev->num_rx_queues, sizeof(*priv->rq),
GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
if (!priv->rq) if (!priv->rq)
return -ENOMEM; return -ENOMEM;
@ -1355,7 +1356,7 @@ static void veth_free_queues(struct net_device *dev)
{ {
struct veth_priv *priv = netdev_priv(dev); struct veth_priv *priv = netdev_priv(dev);
kfree(priv->rq); kvfree(priv->rq);
} }
static int veth_dev_init(struct net_device *dev) static int veth_dev_init(struct net_device *dev)

View File

@ -86,12 +86,23 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
rx_pkt_len = le16_to_cpu(local_rx_pd->rx_pkt_length); rx_pkt_len = le16_to_cpu(local_rx_pd->rx_pkt_length);
rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_off; rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_off;
if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header, if (sizeof(rx_pkt_hdr->eth803_hdr) + sizeof(rfc1042_header) +
rx_pkt_off > skb->len) {
mwifiex_dbg(priv->adapter, ERROR,
"wrong rx packet offset: len=%d, rx_pkt_off=%d\n",
skb->len, rx_pkt_off);
priv->stats.rx_dropped++;
dev_kfree_skb_any(skb);
return -1;
}
if (sizeof(*rx_pkt_hdr) + rx_pkt_off <= skb->len &&
((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
sizeof(bridge_tunnel_header))) || sizeof(bridge_tunnel_header))) ||
(!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header, (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
sizeof(rfc1042_header)) && sizeof(rfc1042_header)) &&
ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP && ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX)) { ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX))) {
/* /*
* Replace the 803 header and rfc1042 header (llc/snap) with an * Replace the 803 header and rfc1042 header (llc/snap) with an
* EthernetII header, keep the src/dst and snap_type * EthernetII header, keep the src/dst and snap_type
@ -194,7 +205,8 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_offset; rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_offset;
if ((rx_pkt_offset + rx_pkt_length) > (u16) skb->len) { if ((rx_pkt_offset + rx_pkt_length) > skb->len ||
sizeof(rx_pkt_hdr->eth803_hdr) + rx_pkt_offset > skb->len) {
mwifiex_dbg(adapter, ERROR, mwifiex_dbg(adapter, ERROR,
"wrong rx packet: len=%d, rx_pkt_offset=%d, rx_pkt_length=%d\n", "wrong rx packet: len=%d, rx_pkt_offset=%d, rx_pkt_length=%d\n",
skb->len, rx_pkt_offset, rx_pkt_length); skb->len, rx_pkt_offset, rx_pkt_length);

View File

@ -103,6 +103,16 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
return; return;
} }
if (sizeof(*rx_pkt_hdr) +
le16_to_cpu(uap_rx_pd->rx_pkt_offset) > skb->len) {
mwifiex_dbg(adapter, ERROR,
"wrong rx packet offset: len=%d,rx_pkt_offset=%d\n",
skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset));
priv->stats.rx_dropped++;
dev_kfree_skb_any(skb);
return;
}
if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header, if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
sizeof(bridge_tunnel_header))) || sizeof(bridge_tunnel_header))) ||
(!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header, (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
@ -367,6 +377,16 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
rx_pkt_type = le16_to_cpu(uap_rx_pd->rx_pkt_type); rx_pkt_type = le16_to_cpu(uap_rx_pd->rx_pkt_type);
rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset); rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
if (le16_to_cpu(uap_rx_pd->rx_pkt_offset) +
sizeof(rx_pkt_hdr->eth803_hdr) > skb->len) {
mwifiex_dbg(adapter, ERROR,
"wrong rx packet for struct ethhdr: len=%d, offset=%d\n",
skb->len, le16_to_cpu(uap_rx_pd->rx_pkt_offset));
priv->stats.rx_dropped++;
dev_kfree_skb_any(skb);
return 0;
}
ether_addr_copy(ta, rx_pkt_hdr->eth803_hdr.h_source); ether_addr_copy(ta, rx_pkt_hdr->eth803_hdr.h_source);
if ((le16_to_cpu(uap_rx_pd->rx_pkt_offset) + if ((le16_to_cpu(uap_rx_pd->rx_pkt_offset) +

View File

@ -393,11 +393,15 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
} }
rx_pd = (struct rxpd *)skb->data; rx_pd = (struct rxpd *)skb->data;
pkt_len = le16_to_cpu(rx_pd->rx_pkt_length);
if (pkt_len < sizeof(struct ieee80211_hdr) + sizeof(pkt_len)) {
mwifiex_dbg(priv->adapter, ERROR, "invalid rx_pkt_length");
return -1;
}
skb_pull(skb, le16_to_cpu(rx_pd->rx_pkt_offset)); skb_pull(skb, le16_to_cpu(rx_pd->rx_pkt_offset));
skb_pull(skb, sizeof(pkt_len)); skb_pull(skb, sizeof(pkt_len));
pkt_len -= sizeof(pkt_len);
pkt_len = le16_to_cpu(rx_pd->rx_pkt_length);
ieee_hdr = (void *)skb->data; ieee_hdr = (void *)skb->data;
if (ieee80211_is_mgmt(ieee_hdr->frame_control)) { if (ieee80211_is_mgmt(ieee_hdr->frame_control)) {
@ -410,7 +414,7 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
skb->data + sizeof(struct ieee80211_hdr), skb->data + sizeof(struct ieee80211_hdr),
pkt_len - sizeof(struct ieee80211_hdr)); pkt_len - sizeof(struct ieee80211_hdr));
pkt_len -= ETH_ALEN + sizeof(pkt_len); pkt_len -= ETH_ALEN;
rx_pd->rx_pkt_length = cpu_to_le16(pkt_len); rx_pd->rx_pkt_length = cpu_to_le16(pkt_len);
cfg80211_rx_mgmt(&priv->wdev, priv->roc_cfg.chan.center_freq, cfg80211_rx_mgmt(&priv->wdev, priv->roc_cfg.chan.center_freq,

View File

@ -49,14 +49,14 @@ cifs_dump_mem(char *label, void *data, int length)
void cifs_dump_detail(void *buf, struct TCP_Server_Info *server) void cifs_dump_detail(void *buf, struct TCP_Server_Info *server)
{ {
#ifdef CONFIG_CIFS_DEBUG2 #ifdef CONFIG_CIFS_DEBUG2
struct smb_hdr *smb = (struct smb_hdr *)buf; struct smb_hdr *smb = buf;
cifs_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d Wct: %d\n", cifs_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d Wct: %d\n",
smb->Command, smb->Status.CifsError, smb->Flags, smb->Command, smb->Status.CifsError, smb->Flags,
smb->Flags2, smb->Mid, smb->Pid, smb->WordCount); smb->Flags2, smb->Mid, smb->Pid, smb->WordCount);
if (!server->ops->check_message(buf, server->total_read, server)) { if (!server->ops->check_message(buf, server->total_read, server)) {
cifs_dbg(VFS, "smb buf %p len %u\n", smb, cifs_dbg(VFS, "smb buf %p len %u\n", smb,
server->ops->calc_smb_size(smb, server)); server->ops->calc_smb_size(smb));
} }
#endif /* CONFIG_CIFS_DEBUG2 */ #endif /* CONFIG_CIFS_DEBUG2 */
} }
@ -70,7 +70,7 @@ void cifs_dump_mids(struct TCP_Server_Info *server)
return; return;
cifs_dbg(VFS, "Dump pending requests:\n"); cifs_dbg(VFS, "Dump pending requests:\n");
spin_lock(&GlobalMid_Lock); spin_lock(&server->mid_lock);
list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) { list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
cifs_dbg(VFS, "State: %d Cmd: %d Pid: %d Cbdata: %p Mid %llu\n", cifs_dbg(VFS, "State: %d Cmd: %d Pid: %d Cbdata: %p Mid %llu\n",
mid_entry->mid_state, mid_entry->mid_state,
@ -93,7 +93,7 @@ void cifs_dump_mids(struct TCP_Server_Info *server)
mid_entry->resp_buf, 62); mid_entry->resp_buf, 62);
} }
} }
spin_unlock(&GlobalMid_Lock); spin_unlock(&server->mid_lock);
#endif /* CONFIG_CIFS_DEBUG2 */ #endif /* CONFIG_CIFS_DEBUG2 */
} }
@ -109,7 +109,7 @@ static void cifs_debug_tcon(struct seq_file *m, struct cifs_tcon *tcon)
le32_to_cpu(tcon->fsDevInfo.DeviceCharacteristics), le32_to_cpu(tcon->fsDevInfo.DeviceCharacteristics),
le32_to_cpu(tcon->fsAttrInfo.Attributes), le32_to_cpu(tcon->fsAttrInfo.Attributes),
le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength), le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength),
tcon->tidStatus); tcon->status);
if (dev_type == FILE_DEVICE_DISK) if (dev_type == FILE_DEVICE_DISK)
seq_puts(m, " type: DISK "); seq_puts(m, " type: DISK ");
else if (dev_type == FILE_DEVICE_CD_ROM) else if (dev_type == FILE_DEVICE_CD_ROM)
@ -142,6 +142,11 @@ cifs_dump_channel(struct seq_file *m, int i, struct cifs_chan *chan)
{ {
struct TCP_Server_Info *server = chan->server; struct TCP_Server_Info *server = chan->server;
if (!server) {
seq_printf(m, "\n\n\t\tChannel: %d DISABLED", i+1);
return;
}
seq_printf(m, "\n\n\t\tChannel: %d ConnectionId: 0x%llx" seq_printf(m, "\n\n\t\tChannel: %d ConnectionId: 0x%llx"
"\n\t\tNumber of credits: %d Dialect 0x%x" "\n\t\tNumber of credits: %d Dialect 0x%x"
"\n\t\tTCP status: %d Instance: %d" "\n\t\tTCP status: %d Instance: %d"
@ -176,6 +181,8 @@ cifs_dump_iface(struct seq_file *m, struct cifs_server_iface *iface)
seq_printf(m, "\t\tIPv4: %pI4\n", &ipv4->sin_addr); seq_printf(m, "\t\tIPv4: %pI4\n", &ipv4->sin_addr);
else if (iface->sockaddr.ss_family == AF_INET6) else if (iface->sockaddr.ss_family == AF_INET6)
seq_printf(m, "\t\tIPv6: %pI6\n", &ipv6->sin6_addr); seq_printf(m, "\t\tIPv6: %pI6\n", &ipv6->sin6_addr);
if (!iface->is_active)
seq_puts(m, "\t\t[for-cleanup]\n");
} }
static int cifs_debug_files_proc_show(struct seq_file *m, void *v) static int cifs_debug_files_proc_show(struct seq_file *m, void *v)
@ -229,6 +236,9 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
struct TCP_Server_Info *server; struct TCP_Server_Info *server;
struct cifs_ses *ses; struct cifs_ses *ses;
struct cifs_tcon *tcon; struct cifs_tcon *tcon;
struct cifs_server_iface *iface;
size_t iface_weight = 0, iface_min_speed = 0;
struct cifs_server_iface *last_iface = NULL;
int c, i, j; int c, i, j;
seq_puts(m, seq_puts(m,
@ -291,8 +301,10 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
seq_printf(m, "\n%d) ConnectionId: 0x%llx ", seq_printf(m, "\n%d) ConnectionId: 0x%llx ",
c, server->conn_id); c, server->conn_id);
spin_lock(&server->srv_lock);
if (server->hostname) if (server->hostname)
seq_printf(m, "Hostname: %s ", server->hostname); seq_printf(m, "Hostname: %s ", server->hostname);
spin_unlock(&server->srv_lock);
#ifdef CONFIG_CIFS_SMB_DIRECT #ifdef CONFIG_CIFS_SMB_DIRECT
if (!server->rdma) if (!server->rdma)
goto skip_rdma; goto skip_rdma;
@ -390,13 +402,18 @@ skip_rdma:
seq_printf(m, "\n\n\tSessions: "); seq_printf(m, "\n\n\tSessions: ");
i = 0; i = 0;
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
spin_lock(&ses->ses_lock);
if (ses->ses_status == SES_EXITING) {
spin_unlock(&ses->ses_lock);
continue;
}
i++; i++;
if ((ses->serverDomain == NULL) || if ((ses->serverDomain == NULL) ||
(ses->serverOS == NULL) || (ses->serverOS == NULL) ||
(ses->serverNOS == NULL)) { (ses->serverNOS == NULL)) {
seq_printf(m, "\n\t%d) Address: %s Uses: %d Capability: 0x%x\tSession Status: %d ", seq_printf(m, "\n\t%d) Address: %s Uses: %d Capability: 0x%x\tSession Status: %d ",
i, ses->ip_addr, ses->ses_count, i, ses->ip_addr, ses->ses_count,
ses->capabilities, ses->status); ses->capabilities, ses->ses_status);
if (ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST) if (ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST)
seq_printf(m, "Guest "); seq_printf(m, "Guest ");
else if (ses->session_flags & SMB2_SESSION_FLAG_IS_NULL) else if (ses->session_flags & SMB2_SESSION_FLAG_IS_NULL)
@ -408,8 +425,9 @@ skip_rdma:
"\n\tSMB session status: %d ", "\n\tSMB session status: %d ",
i, ses->ip_addr, ses->serverDomain, i, ses->ip_addr, ses->serverDomain,
ses->ses_count, ses->serverOS, ses->serverNOS, ses->ses_count, ses->serverOS, ses->serverNOS,
ses->capabilities, ses->status); ses->capabilities, ses->ses_status);
} }
spin_unlock(&ses->ses_lock);
seq_printf(m, "\n\tSecurity type: %s ", seq_printf(m, "\n\tSecurity type: %s ",
get_security_type_str(server->ops->select_sectype(server, ses->sectype))); get_security_type_str(server->ops->select_sectype(server, ses->sectype)));
@ -426,11 +444,21 @@ skip_rdma:
from_kuid(&init_user_ns, ses->cred_uid)); from_kuid(&init_user_ns, ses->cred_uid));
spin_lock(&ses->chan_lock); spin_lock(&ses->chan_lock);
if (CIFS_CHAN_NEEDS_RECONNECT(ses, 0))
seq_puts(m, "\tPrimary channel: DISCONNECTED ");
if (CIFS_CHAN_IN_RECONNECT(ses, 0))
seq_puts(m, "\t[RECONNECTING] ");
if (ses->chan_count > 1) { if (ses->chan_count > 1) {
seq_printf(m, "\n\n\tExtra Channels: %zu ", seq_printf(m, "\n\n\tExtra Channels: %zu ",
ses->chan_count-1); ses->chan_count-1);
for (j = 1; j < ses->chan_count; j++) for (j = 1; j < ses->chan_count; j++) {
cifs_dump_channel(m, j, &ses->chans[j]); cifs_dump_channel(m, j, &ses->chans[j]);
if (CIFS_CHAN_NEEDS_RECONNECT(ses, j))
seq_puts(m, "\tDISCONNECTED ");
if (CIFS_CHAN_IN_RECONNECT(ses, j))
seq_puts(m, "\t[RECONNECTING] ");
}
} }
spin_unlock(&ses->chan_lock); spin_unlock(&ses->chan_lock);
@ -451,14 +479,29 @@ skip_rdma:
spin_lock(&ses->iface_lock); spin_lock(&ses->iface_lock);
if (ses->iface_count) if (ses->iface_count)
seq_printf(m, "\n\n\tServer interfaces: %zu", seq_printf(m, "\n\n\tServer interfaces: %zu"
ses->iface_count); "\tLast updated: %lu seconds ago",
for (j = 0; j < ses->iface_count; j++) { ses->iface_count,
struct cifs_server_iface *iface; (jiffies - ses->iface_last_update) / HZ);
iface = &ses->iface_list[j]; last_iface = list_last_entry(&ses->iface_list,
seq_printf(m, "\n\t%d)", j+1); struct cifs_server_iface,
iface_head);
iface_min_speed = last_iface->speed;
j = 0;
list_for_each_entry(iface, &ses->iface_list,
iface_head) {
seq_printf(m, "\n\t%d)", ++j);
cifs_dump_iface(m, iface); cifs_dump_iface(m, iface);
iface_weight = iface->speed / iface_min_speed;
seq_printf(m, "\t\tWeight (cur,total): (%zu,%zu)"
"\n\t\tAllocated channels: %u\n",
iface->weight_fulfilled,
iface_weight,
iface->num_channels);
if (is_ses_using_iface(ses, iface)) if (is_ses_using_iface(ses, iface))
seq_puts(m, "\t\t[CONNECTED]\n"); seq_puts(m, "\t\t[CONNECTED]\n");
} }
@ -468,7 +511,7 @@ skip_rdma:
seq_printf(m, "\n\t\t[NONE]"); seq_printf(m, "\n\t\t[NONE]");
seq_puts(m, "\n\n\tMIDs: "); seq_puts(m, "\n\n\tMIDs: ");
spin_lock(&GlobalMid_Lock); spin_lock(&server->mid_lock);
list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) { list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
seq_printf(m, "\n\tState: %d com: %d pid:" seq_printf(m, "\n\tState: %d com: %d pid:"
" %d cbdata: %p mid %llu\n", " %d cbdata: %p mid %llu\n",
@ -478,7 +521,7 @@ skip_rdma:
mid_entry->callback_data, mid_entry->callback_data,
mid_entry->mid); mid_entry->mid);
} }
spin_unlock(&GlobalMid_Lock); spin_unlock(&server->mid_lock);
seq_printf(m, "\n--\n"); seq_printf(m, "\n--\n");
} }
if (c == 0) if (c == 0)
@ -506,8 +549,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
#ifdef CONFIG_CIFS_STATS2 #ifdef CONFIG_CIFS_STATS2
int i; int i;
atomic_set(&totBufAllocCount, 0); atomic_set(&total_buf_alloc_count, 0);
atomic_set(&totSmBufAllocCount, 0); atomic_set(&total_small_buf_alloc_count, 0);
#endif /* CONFIG_CIFS_STATS2 */ #endif /* CONFIG_CIFS_STATS2 */
atomic_set(&tcpSesReconnectCount, 0); atomic_set(&tcpSesReconnectCount, 0);
atomic_set(&tconInfoReconnectCount, 0); atomic_set(&tconInfoReconnectCount, 0);
@ -563,17 +606,17 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
seq_printf(m, "Share (unique mount targets): %d\n", seq_printf(m, "Share (unique mount targets): %d\n",
tconInfoAllocCount.counter); tconInfoAllocCount.counter);
seq_printf(m, "SMB Request/Response Buffer: %d Pool size: %d\n", seq_printf(m, "SMB Request/Response Buffer: %d Pool size: %d\n",
bufAllocCount.counter, buf_alloc_count.counter,
cifs_min_rcv + tcpSesAllocCount.counter); cifs_min_rcv + tcpSesAllocCount.counter);
seq_printf(m, "SMB Small Req/Resp Buffer: %d Pool size: %d\n", seq_printf(m, "SMB Small Req/Resp Buffer: %d Pool size: %d\n",
smBufAllocCount.counter, cifs_min_small); small_buf_alloc_count.counter, cifs_min_small);
#ifdef CONFIG_CIFS_STATS2 #ifdef CONFIG_CIFS_STATS2
seq_printf(m, "Total Large %d Small %d Allocations\n", seq_printf(m, "Total Large %d Small %d Allocations\n",
atomic_read(&totBufAllocCount), atomic_read(&total_buf_alloc_count),
atomic_read(&totSmBufAllocCount)); atomic_read(&total_small_buf_alloc_count));
#endif /* CONFIG_CIFS_STATS2 */ #endif /* CONFIG_CIFS_STATS2 */
seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount)); seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&mid_count));
seq_printf(m, seq_printf(m,
"\n%d session %d share reconnects\n", "\n%d session %d share reconnects\n",
tcpSesReconnectCount.counter, tconInfoReconnectCount.counter); tcpSesReconnectCount.counter, tconInfoReconnectCount.counter);
@ -598,10 +641,13 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
server->fastest_cmd[j], server->fastest_cmd[j],
server->slowest_cmd[j]); server->slowest_cmd[j]);
for (j = 0; j < NUMBER_OF_SMB2_COMMANDS; j++) for (j = 0; j < NUMBER_OF_SMB2_COMMANDS; j++)
if (atomic_read(&server->smb2slowcmd[j])) if (atomic_read(&server->smb2slowcmd[j])) {
spin_lock(&server->srv_lock);
seq_printf(m, " %d slow responses from %s for command %d\n", seq_printf(m, " %d slow responses from %s for command %d\n",
atomic_read(&server->smb2slowcmd[j]), atomic_read(&server->smb2slowcmd[j]),
server->hostname, j); server->hostname, j);
spin_unlock(&server->srv_lock);
}
#endif /* STATS2 */ #endif /* STATS2 */
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {

View File

@ -95,19 +95,19 @@ do { \
#define cifs_server_dbg_func(ratefunc, type, fmt, ...) \ #define cifs_server_dbg_func(ratefunc, type, fmt, ...) \
do { \ do { \
const char *sn = ""; \ spin_lock(&server->srv_lock); \
if (server && server->hostname) \
sn = server->hostname; \
if ((type) & FYI && cifsFYI & CIFS_INFO) { \ if ((type) & FYI && cifsFYI & CIFS_INFO) { \
pr_debug_ ## ratefunc("%s: \\\\%s " fmt, \ pr_debug_ ## ratefunc("%s: \\\\%s " fmt, \
__FILE__, sn, ##__VA_ARGS__); \ __FILE__, server->hostname, \
##__VA_ARGS__); \
} else if ((type) & VFS) { \ } else if ((type) & VFS) { \
pr_err_ ## ratefunc("VFS: \\\\%s " fmt, \ pr_err_ ## ratefunc("VFS: \\\\%s " fmt, \
sn, ##__VA_ARGS__); \ server->hostname, ##__VA_ARGS__); \
} else if ((type) & NOISY && (NOISY != 0)) { \ } else if ((type) & NOISY && (NOISY != 0)) { \
pr_debug_ ## ratefunc("\\\\%s " fmt, \ pr_debug_ ## ratefunc("\\\\%s " fmt, \
sn, ##__VA_ARGS__); \ server->hostname, ##__VA_ARGS__); \
} \ } \
spin_unlock(&server->srv_lock); \
} while (0) } while (0)
#define cifs_server_dbg(type, fmt, ...) \ #define cifs_server_dbg(type, fmt, ...) \

View File

@ -84,9 +84,9 @@ struct key_type cifs_spnego_key_type = {
/* get a key struct with a SPNEGO security blob, suitable for session setup */ /* get a key struct with a SPNEGO security blob, suitable for session setup */
struct key * struct key *
cifs_get_spnego_key(struct cifs_ses *sesInfo) cifs_get_spnego_key(struct cifs_ses *sesInfo,
struct TCP_Server_Info *server)
{ {
struct TCP_Server_Info *server = cifs_ses_server(sesInfo);
struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr; struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr; struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
char *description, *dp; char *description, *dp;

View File

@ -29,7 +29,8 @@ struct cifs_spnego_msg {
#ifdef __KERNEL__ #ifdef __KERNEL__
extern struct key_type cifs_spnego_key_type; extern struct key_type cifs_spnego_key_type;
extern struct key *cifs_get_spnego_key(struct cifs_ses *sesInfo); extern struct key *cifs_get_spnego_key(struct cifs_ses *sesInfo,
struct TCP_Server_Info *server);
#endif /* KERNEL */ #endif /* KERNEL */
#endif /* _CIFS_SPNEGO_H */ #endif /* _CIFS_SPNEGO_H */

View File

@ -398,11 +398,11 @@ static int cifs_swn_resource_state_changed(struct cifs_swn_reg *swnreg, const ch
switch (state) { switch (state) {
case CIFS_SWN_RESOURCE_STATE_UNAVAILABLE: case CIFS_SWN_RESOURCE_STATE_UNAVAILABLE:
cifs_dbg(FYI, "%s: resource name '%s' become unavailable\n", __func__, name); cifs_dbg(FYI, "%s: resource name '%s' become unavailable\n", __func__, name);
cifs_ses_mark_for_reconnect(swnreg->tcon->ses); cifs_signal_cifsd_for_reconnect(swnreg->tcon->ses->server, true);
break; break;
case CIFS_SWN_RESOURCE_STATE_AVAILABLE: case CIFS_SWN_RESOURCE_STATE_AVAILABLE:
cifs_dbg(FYI, "%s: resource name '%s' become available\n", __func__, name); cifs_dbg(FYI, "%s: resource name '%s' become available\n", __func__, name);
cifs_ses_mark_for_reconnect(swnreg->tcon->ses); cifs_signal_cifsd_for_reconnect(swnreg->tcon->ses->server, true);
break; break;
case CIFS_SWN_RESOURCE_STATE_UNKNOWN: case CIFS_SWN_RESOURCE_STATE_UNKNOWN:
cifs_dbg(FYI, "%s: resource name '%s' changed to unknown state\n", __func__, name); cifs_dbg(FYI, "%s: resource name '%s' changed to unknown state\n", __func__, name);
@ -467,7 +467,7 @@ static int cifs_swn_reconnect(struct cifs_tcon *tcon, struct sockaddr_storage *a
int ret = 0; int ret = 0;
/* Store the reconnect address */ /* Store the reconnect address */
mutex_lock(&tcon->ses->server->srv_mutex); cifs_server_lock(tcon->ses->server);
if (cifs_sockaddr_equal(&tcon->ses->server->dstaddr, addr)) if (cifs_sockaddr_equal(&tcon->ses->server->dstaddr, addr))
goto unlock; goto unlock;
@ -500,13 +500,10 @@ static int cifs_swn_reconnect(struct cifs_tcon *tcon, struct sockaddr_storage *a
goto unlock; goto unlock;
} }
spin_lock(&GlobalMid_Lock); cifs_signal_cifsd_for_reconnect(tcon->ses->server, false);
if (tcon->ses->server->tcpStatus != CifsExiting)
tcon->ses->server->tcpStatus = CifsNeedReconnect;
spin_unlock(&GlobalMid_Lock);
unlock: unlock:
mutex_unlock(&tcon->ses->server->srv_mutex); cifs_server_unlock(tcon->ses->server);
return ret; return ret;
} }

View File

@ -141,9 +141,13 @@ int cifs_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server,
if ((cifs_pdu == NULL) || (server == NULL)) if ((cifs_pdu == NULL) || (server == NULL))
return -EINVAL; return -EINVAL;
spin_lock(&server->srv_lock);
if (!(cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) || if (!(cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) ||
server->tcpStatus == CifsNeedNegotiate) server->tcpStatus == CifsNeedNegotiate) {
spin_unlock(&server->srv_lock);
return rc; return rc;
}
spin_unlock(&server->srv_lock);
if (!server->session_estab) { if (!server->session_estab) {
memcpy(cifs_pdu->Signature.SecuritySignature, "BSRSPYL", 8); memcpy(cifs_pdu->Signature.SecuritySignature, "BSRSPYL", 8);
@ -232,9 +236,9 @@ int cifs_verify_signature(struct smb_rqst *rqst,
cpu_to_le32(expected_sequence_number); cpu_to_le32(expected_sequence_number);
cifs_pdu->Signature.Sequence.Reserved = 0; cifs_pdu->Signature.Sequence.Reserved = 0;
mutex_lock(&server->srv_mutex); cifs_server_lock(server);
rc = cifs_calc_signature(rqst, server, what_we_think_sig_should_be); rc = cifs_calc_signature(rqst, server, what_we_think_sig_should_be);
mutex_unlock(&server->srv_mutex); cifs_server_unlock(server);
if (rc) if (rc)
return rc; return rc;
@ -703,7 +707,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
memcpy(ses->auth_key.response + baselen, tiblob, tilen); memcpy(ses->auth_key.response + baselen, tiblob, tilen);
mutex_lock(&ses->server->srv_mutex); cifs_server_lock(ses->server);
rc = cifs_alloc_hash("hmac(md5)", rc = cifs_alloc_hash("hmac(md5)",
&ses->server->secmech.hmacmd5, &ses->server->secmech.hmacmd5,
@ -755,7 +759,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__); cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);
unlock: unlock:
mutex_unlock(&ses->server->srv_mutex); cifs_server_unlock(ses->server);
setup_ntlmv2_rsp_ret: setup_ntlmv2_rsp_ret:
kfree(tiblob); kfree(tiblob);

View File

@ -67,6 +67,34 @@ bool enable_negotiate_signing; /* false by default */
unsigned int global_secflags = CIFSSEC_DEF; unsigned int global_secflags = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */ /* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1; unsigned int sign_CIFS_PDUs = 1;
/*
* Global transaction id (XID) information
*/
unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */
unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */
spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
/*
* Global counters, updated atomically
*/
atomic_t sesInfoAllocCount;
atomic_t tconInfoAllocCount;
atomic_t tcpSesNextId;
atomic_t tcpSesAllocCount;
atomic_t tcpSesReconnectCount;
atomic_t tconInfoReconnectCount;
atomic_t mid_count;
atomic_t buf_alloc_count;
atomic_t small_buf_alloc_count;
#ifdef CONFIG_CIFS_STATS2
atomic_t total_buf_alloc_count;
atomic_t total_small_buf_alloc_count;
#endif/* STATS2 */
struct list_head cifs_tcp_ses_list;
spinlock_t cifs_tcp_ses_lock;
static const struct super_operations cifs_super_ops; static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE; unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, uint, 0444); module_param(CIFSMaxBufSize, uint, 0444);
@ -682,14 +710,17 @@ static void cifs_umount_begin(struct super_block *sb)
tcon = cifs_sb_master_tcon(cifs_sb); tcon = cifs_sb_master_tcon(cifs_sb);
spin_lock(&cifs_tcp_ses_lock); spin_lock(&cifs_tcp_ses_lock);
if ((tcon->tc_count > 1) || (tcon->tidStatus == CifsExiting)) { spin_lock(&tcon->tc_lock);
if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
/* we have other mounts to same share or we have /* we have other mounts to same share or we have
already tried to force umount this and woken up already tried to force umount this and woken up
all waiting network requests, nothing to do */ all waiting network requests, nothing to do */
spin_unlock(&tcon->tc_lock);
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&cifs_tcp_ses_lock);
return; return;
} else if (tcon->tc_count == 1) } else if (tcon->tc_count == 1)
tcon->tidStatus = CifsExiting; tcon->status = TID_EXITING;
spin_unlock(&tcon->tc_lock);
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&cifs_tcp_ses_lock);
/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */ /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
@ -815,14 +846,12 @@ cifs_smb3_do_mount(struct file_system_type *fs_type,
struct dentry *root; struct dentry *root;
int rc; int rc;
/* if (cifsFYI) {
* Prints in Kernel / CIFS log the attempted mount operation cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
* If CIFS_DEBUG && cifs_FYI old_ctx->source, flags);
*/ } else {
if (cifsFYI) cifs_info("Attempting to mount %s\n", old_ctx->source);
cifs_dbg(FYI, "Devname: %s flags: %d\n", old_ctx->UNC, flags); }
else
cifs_info("Attempting to mount %s\n", old_ctx->UNC);
cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL); cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
if (!cifs_sb) if (!cifs_sb)
@ -1498,8 +1527,7 @@ cifs_destroy_request_bufs(void)
kmem_cache_destroy(cifs_sm_req_cachep); kmem_cache_destroy(cifs_sm_req_cachep);
} }
static int static int init_mids(void)
cifs_init_mids(void)
{ {
cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids", cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
sizeof(struct mid_q_entry), 0, sizeof(struct mid_q_entry), 0,
@ -1517,8 +1545,7 @@ cifs_init_mids(void)
return 0; return 0;
} }
static void static void destroy_mids(void)
cifs_destroy_mids(void)
{ {
mempool_destroy(cifs_mid_poolp); mempool_destroy(cifs_mid_poolp);
kmem_cache_destroy(cifs_mid_cachep); kmem_cache_destroy(cifs_mid_cachep);
@ -1540,11 +1567,11 @@ init_cifs(void)
atomic_set(&tcpSesReconnectCount, 0); atomic_set(&tcpSesReconnectCount, 0);
atomic_set(&tconInfoReconnectCount, 0); atomic_set(&tconInfoReconnectCount, 0);
atomic_set(&bufAllocCount, 0); atomic_set(&buf_alloc_count, 0);
atomic_set(&smBufAllocCount, 0); atomic_set(&small_buf_alloc_count, 0);
#ifdef CONFIG_CIFS_STATS2 #ifdef CONFIG_CIFS_STATS2
atomic_set(&totBufAllocCount, 0); atomic_set(&total_buf_alloc_count, 0);
atomic_set(&totSmBufAllocCount, 0); atomic_set(&total_small_buf_alloc_count, 0);
if (slow_rsp_threshold < 1) if (slow_rsp_threshold < 1)
cifs_dbg(FYI, "slow_response_threshold msgs disabled\n"); cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
else if (slow_rsp_threshold > 32767) else if (slow_rsp_threshold > 32767)
@ -1552,7 +1579,7 @@ init_cifs(void)
"slow response threshold set higher than recommended (0 to 32767)\n"); "slow response threshold set higher than recommended (0 to 32767)\n");
#endif /* CONFIG_CIFS_STATS2 */ #endif /* CONFIG_CIFS_STATS2 */
atomic_set(&midCount, 0); atomic_set(&mid_count, 0);
GlobalCurrentXid = 0; GlobalCurrentXid = 0;
GlobalTotalActiveXid = 0; GlobalTotalActiveXid = 0;
GlobalMaxActiveXid = 0; GlobalMaxActiveXid = 0;
@ -1612,7 +1639,7 @@ init_cifs(void)
if (rc) if (rc)
goto out_unreg_fscache; goto out_unreg_fscache;
rc = cifs_init_mids(); rc = init_mids();
if (rc) if (rc)
goto out_destroy_inodecache; goto out_destroy_inodecache;
@ -1669,7 +1696,7 @@ out_destroy_request_bufs:
#endif #endif
cifs_destroy_request_bufs(); cifs_destroy_request_bufs();
out_destroy_mids: out_destroy_mids:
cifs_destroy_mids(); destroy_mids();
out_destroy_inodecache: out_destroy_inodecache:
cifs_destroy_inodecache(); cifs_destroy_inodecache();
out_unreg_fscache: out_unreg_fscache:
@ -1705,7 +1732,7 @@ exit_cifs(void)
dfs_cache_destroy(); dfs_cache_destroy();
#endif #endif
cifs_destroy_request_bufs(); cifs_destroy_request_bufs();
cifs_destroy_mids(); destroy_mids();
cifs_destroy_inodecache(); cifs_destroy_inodecache();
cifs_fscache_unregister(); cifs_fscache_unregister();
destroy_workqueue(cifsoplockd_wq); destroy_workqueue(cifsoplockd_wq);

View File

@ -18,6 +18,7 @@
#include <linux/mempool.h> #include <linux/mempool.h>
#include <linux/workqueue.h> #include <linux/workqueue.h>
#include <linux/utsname.h> #include <linux/utsname.h>
#include <linux/sched/mm.h>
#include "cifs_fs_sb.h" #include "cifs_fs_sb.h"
#include "cifsacl.h" #include "cifsacl.h"
#include <crypto/internal/hash.h> #include <crypto/internal/hash.h>
@ -78,9 +79,8 @@
#define SMB_ECHO_INTERVAL_MAX 600 #define SMB_ECHO_INTERVAL_MAX 600
#define SMB_ECHO_INTERVAL_DEFAULT 60 #define SMB_ECHO_INTERVAL_DEFAULT 60
/* dns resolution intervals in seconds */ /* smb multichannel query server interfaces interval in seconds */
#define SMB_DNS_RESOLVE_INTERVAL_MIN 120 #define SMB_INTERFACE_POLL_INTERVAL 600
#define SMB_DNS_RESOLVE_INTERVAL_DEFAULT 600
/* maximum number of PDUs in one compound */ /* maximum number of PDUs in one compound */
#define MAX_COMPOUND 5 #define MAX_COMPOUND 5
@ -109,13 +109,35 @@
* CIFS vfs client Status information (based on what we know.) * CIFS vfs client Status information (based on what we know.)
*/ */
/* associated with each tcp and smb session */ /* associated with each connection */
enum statusEnum { enum statusEnum {
CifsNew = 0, CifsNew = 0,
CifsGood, CifsGood,
CifsExiting, CifsExiting,
CifsNeedReconnect, CifsNeedReconnect,
CifsNeedNegotiate CifsNeedNegotiate,
CifsInNegotiate,
};
/* associated with each smb session */
enum ses_status_enum {
SES_NEW = 0,
SES_GOOD,
SES_EXITING,
SES_NEED_RECON,
SES_IN_SETUP
};
/* associated with each tree connection to the server */
enum tid_status_enum {
TID_NEW = 0,
TID_GOOD,
TID_EXITING,
TID_NEED_RECON,
TID_NEED_TCON,
TID_IN_TCON,
TID_NEED_FILES_INVALIDATE, /* currently unused */
TID_IN_FILES_INVALIDATE
}; };
enum securityEnum { enum securityEnum {
@ -267,13 +289,16 @@ struct smb_version_operations {
/* check if we need to negotiate */ /* check if we need to negotiate */
bool (*need_neg)(struct TCP_Server_Info *); bool (*need_neg)(struct TCP_Server_Info *);
/* negotiate to the server */ /* negotiate to the server */
int (*negotiate)(const unsigned int, struct cifs_ses *); int (*negotiate)(const unsigned int xid,
struct cifs_ses *ses,
struct TCP_Server_Info *server);
/* set negotiated write size */ /* set negotiated write size */
unsigned int (*negotiate_wsize)(struct cifs_tcon *tcon, struct smb3_fs_context *ctx); unsigned int (*negotiate_wsize)(struct cifs_tcon *tcon, struct smb3_fs_context *ctx);
/* set negotiated read size */ /* set negotiated read size */
unsigned int (*negotiate_rsize)(struct cifs_tcon *tcon, struct smb3_fs_context *ctx); unsigned int (*negotiate_rsize)(struct cifs_tcon *tcon, struct smb3_fs_context *ctx);
/* setup smb sessionn */ /* setup smb sessionn */
int (*sess_setup)(const unsigned int, struct cifs_ses *, int (*sess_setup)(const unsigned int, struct cifs_ses *,
struct TCP_Server_Info *server,
const struct nls_table *); const struct nls_table *);
/* close smb session */ /* close smb session */
int (*logoff)(const unsigned int, struct cifs_ses *); int (*logoff)(const unsigned int, struct cifs_ses *);
@ -393,7 +418,7 @@ struct smb_version_operations {
int (*close_dir)(const unsigned int, struct cifs_tcon *, int (*close_dir)(const unsigned int, struct cifs_tcon *,
struct cifs_fid *); struct cifs_fid *);
/* calculate a size of SMB message */ /* calculate a size of SMB message */
unsigned int (*calc_smb_size)(void *buf, struct TCP_Server_Info *ptcpi); unsigned int (*calc_smb_size)(void *buf);
/* check for STATUS_PENDING and process the response if yes */ /* check for STATUS_PENDING and process the response if yes */
bool (*is_status_pending)(char *buf, struct TCP_Server_Info *server); bool (*is_status_pending)(char *buf, struct TCP_Server_Info *server);
/* check for STATUS_NETWORK_SESSION_EXPIRED */ /* check for STATUS_NETWORK_SESSION_EXPIRED */
@ -418,7 +443,8 @@ struct smb_version_operations {
void (*set_lease_key)(struct inode *, struct cifs_fid *); void (*set_lease_key)(struct inode *, struct cifs_fid *);
/* generate new lease key */ /* generate new lease key */
void (*new_lease_key)(struct cifs_fid *); void (*new_lease_key)(struct cifs_fid *);
int (*generate_signingkey)(struct cifs_ses *); int (*generate_signingkey)(struct cifs_ses *ses,
struct TCP_Server_Info *server);
int (*calc_signature)(struct smb_rqst *, struct TCP_Server_Info *, int (*calc_signature)(struct smb_rqst *, struct TCP_Server_Info *,
bool allocate_crypto); bool allocate_crypto);
int (*set_integrity)(const unsigned int, struct cifs_tcon *tcon, int (*set_integrity)(const unsigned int, struct cifs_tcon *tcon,
@ -580,13 +606,15 @@ inc_rfc1001_len(void *buf, int count)
struct TCP_Server_Info { struct TCP_Server_Info {
struct list_head tcp_ses_list; struct list_head tcp_ses_list;
struct list_head smb_ses_list; struct list_head smb_ses_list;
struct list_head rlist; /* reconnect list */
spinlock_t srv_lock; /* protect anything here that is not protected */
__u64 conn_id; /* connection identifier (useful for debugging) */ __u64 conn_id; /* connection identifier (useful for debugging) */
int srv_count; /* reference counter */ int srv_count; /* reference counter */
/* 15 character server name + 0x20 16th byte indicating type = srv */ /* 15 character server name + 0x20 16th byte indicating type = srv */
char server_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL]; char server_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
struct smb_version_operations *ops; struct smb_version_operations *ops;
struct smb_version_values *vals; struct smb_version_values *vals;
/* updates to tcpStatus protected by GlobalMid_Lock */ /* updates to tcpStatus protected by cifs_tcp_ses_lock */
enum statusEnum tcpStatus; /* what we think the status is */ enum statusEnum tcpStatus; /* what we think the status is */
char *hostname; /* hostname portion of UNC string */ char *hostname; /* hostname portion of UNC string */
struct socket *ssocket; struct socket *ssocket;
@ -597,6 +625,7 @@ struct TCP_Server_Info {
#endif #endif
wait_queue_head_t response_q; wait_queue_head_t response_q;
wait_queue_head_t request_q; /* if more than maxmpx to srvr must block*/ wait_queue_head_t request_q; /* if more than maxmpx to srvr must block*/
spinlock_t mid_lock; /* protect mid queue and it's entries */
struct list_head pending_mid_q; struct list_head pending_mid_q;
bool noblocksnd; /* use blocking sendmsg */ bool noblocksnd; /* use blocking sendmsg */
bool noautotune; /* do not autotune send buf sizes */ bool noautotune; /* do not autotune send buf sizes */
@ -607,7 +636,8 @@ struct TCP_Server_Info {
unsigned int in_flight; /* number of requests on the wire to server */ unsigned int in_flight; /* number of requests on the wire to server */
unsigned int max_in_flight; /* max number of requests that were on wire */ unsigned int max_in_flight; /* max number of requests that were on wire */
spinlock_t req_lock; /* protect the two values above */ spinlock_t req_lock; /* protect the two values above */
struct mutex srv_mutex; struct mutex _srv_mutex;
unsigned int nofs_flag;
struct task_struct *tsk; struct task_struct *tsk;
char server_GUID[16]; char server_GUID[16];
__u16 sec_mode; __u16 sec_mode;
@ -656,7 +686,6 @@ struct TCP_Server_Info {
/* point to the SMBD connection if RDMA is used instead of socket */ /* point to the SMBD connection if RDMA is used instead of socket */
struct smbd_connection *smbd_conn; struct smbd_connection *smbd_conn;
struct delayed_work echo; /* echo ping workqueue job */ struct delayed_work echo; /* echo ping workqueue job */
struct delayed_work resolve; /* dns resolution workqueue job */
char *smallbuf; /* pointer to current "small" buffer */ char *smallbuf; /* pointer to current "small" buffer */
char *bigbuf; /* pointer to current "big" buffer */ char *bigbuf; /* pointer to current "big" buffer */
/* Total size of this PDU. Only valid from cifs_demultiplex_thread */ /* Total size of this PDU. Only valid from cifs_demultiplex_thread */
@ -726,6 +755,22 @@ struct TCP_Server_Info {
#endif #endif
}; };
static inline void cifs_server_lock(struct TCP_Server_Info *server)
{
unsigned int nofs_flag = memalloc_nofs_save();
mutex_lock(&server->_srv_mutex);
server->nofs_flag = nofs_flag;
}
static inline void cifs_server_unlock(struct TCP_Server_Info *server)
{
unsigned int nofs_flag = server->nofs_flag;
mutex_unlock(&server->_srv_mutex);
memalloc_nofs_restore(nofs_flag);
}
struct cifs_credits { struct cifs_credits {
unsigned int value; unsigned int value;
unsigned int instance; unsigned int instance;
@ -904,14 +949,68 @@ static inline void cifs_set_net_ns(struct TCP_Server_Info *srv, struct net *net)
#endif #endif
struct cifs_server_iface { struct cifs_server_iface {
struct list_head iface_head;
struct kref refcount;
size_t speed; size_t speed;
size_t weight_fulfilled;
unsigned int num_channels;
unsigned int rdma_capable : 1; unsigned int rdma_capable : 1;
unsigned int rss_capable : 1; unsigned int rss_capable : 1;
unsigned int is_active : 1; /* unset if non existent */
struct sockaddr_storage sockaddr; struct sockaddr_storage sockaddr;
}; };
/* release iface when last ref is dropped */
static inline void
release_iface(struct kref *ref)
{
struct cifs_server_iface *iface = container_of(ref,
struct cifs_server_iface,
refcount);
kfree(iface);
}
/*
* compare two interfaces a and b
* return 0 if everything matches.
* return 1 if a has higher link speed, or rdma capable, or rss capable
* return -1 otherwise.
*/
static inline int
iface_cmp(struct cifs_server_iface *a, struct cifs_server_iface *b)
{
int cmp_ret = 0;
WARN_ON(!a || !b);
if (a->speed == b->speed) {
if (a->rdma_capable == b->rdma_capable) {
if (a->rss_capable == b->rss_capable) {
cmp_ret = memcmp(&a->sockaddr, &b->sockaddr,
sizeof(a->sockaddr));
if (!cmp_ret)
return 0;
else if (cmp_ret > 0)
return 1;
else
return -1;
} else if (a->rss_capable > b->rss_capable)
return 1;
else
return -1;
} else if (a->rdma_capable > b->rdma_capable)
return 1;
else
return -1;
} else if (a->speed > b->speed)
return 1;
else
return -1;
}
struct cifs_chan { struct cifs_chan {
unsigned int in_reconnect : 1; /* if session setup in progress for this channel */
struct TCP_Server_Info *server; struct TCP_Server_Info *server;
struct cifs_server_iface *iface; /* interface in use */
__u8 signkey[SMB3_SIGN_KEY_SIZE]; __u8 signkey[SMB3_SIGN_KEY_SIZE];
}; };
@ -920,12 +1019,14 @@ struct cifs_chan {
*/ */
struct cifs_ses { struct cifs_ses {
struct list_head smb_ses_list; struct list_head smb_ses_list;
struct list_head rlist; /* reconnect list */
struct list_head tcon_list; struct list_head tcon_list;
struct cifs_tcon *tcon_ipc; struct cifs_tcon *tcon_ipc;
spinlock_t ses_lock; /* protect anything here that is not protected */
struct mutex session_mutex; struct mutex session_mutex;
struct TCP_Server_Info *server; /* pointer to server info */ struct TCP_Server_Info *server; /* pointer to server info */
int ses_count; /* reference counter */ int ses_count; /* reference counter */
enum statusEnum status; /* updates protected by GlobalMid_Lock */ enum ses_status_enum ses_status; /* updates protected by cifs_tcp_ses_lock */
unsigned overrideSecFlg; /* if non-zero override global sec flags */ unsigned overrideSecFlg; /* if non-zero override global sec flags */
char *serverOS; /* name of operating system underlying server */ char *serverOS; /* name of operating system underlying server */
char *serverNOS; /* name of network operating system of server */ char *serverNOS; /* name of network operating system of server */
@ -944,17 +1045,13 @@ struct cifs_ses {
struct ntlmssp_auth *ntlmssp; /* ciphertext, flags, server challenge */ struct ntlmssp_auth *ntlmssp; /* ciphertext, flags, server challenge */
enum securityEnum sectype; /* what security flavor was specified? */ enum securityEnum sectype; /* what security flavor was specified? */
bool sign; /* is signing required? */ bool sign; /* is signing required? */
bool need_reconnect:1; /* connection reset, uid now invalid */
bool domainAuto:1; bool domainAuto:1;
bool binding:1; /* are we binding the session? */
__u16 session_flags; __u16 session_flags;
__u8 smb3signingkey[SMB3_SIGN_KEY_SIZE]; __u8 smb3signingkey[SMB3_SIGN_KEY_SIZE];
__u8 smb3encryptionkey[SMB3_SIGN_KEY_SIZE]; __u8 smb3encryptionkey[SMB3_ENC_DEC_KEY_SIZE];
__u8 smb3decryptionkey[SMB3_SIGN_KEY_SIZE]; __u8 smb3decryptionkey[SMB3_ENC_DEC_KEY_SIZE];
__u8 preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE]; __u8 preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
__u8 binding_preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
/* /*
* Network interfaces available on the server this session is * Network interfaces available on the server this session is
* connected to. * connected to.
@ -966,7 +1063,7 @@ struct cifs_ses {
*/ */
spinlock_t iface_lock; spinlock_t iface_lock;
/* ========= begin: protected by iface_lock ======== */ /* ========= begin: protected by iface_lock ======== */
struct cifs_server_iface *iface_list; struct list_head iface_list;
size_t iface_count; size_t iface_count;
unsigned long iface_last_update; /* jiffies */ unsigned long iface_last_update; /* jiffies */
/* ========= end: protected by iface_lock ======== */ /* ========= end: protected by iface_lock ======== */
@ -974,45 +1071,39 @@ struct cifs_ses {
spinlock_t chan_lock; spinlock_t chan_lock;
/* ========= begin: protected by chan_lock ======== */ /* ========= begin: protected by chan_lock ======== */
#define CIFS_MAX_CHANNELS 16 #define CIFS_MAX_CHANNELS 16
#define CIFS_INVAL_CHAN_INDEX (-1)
#define CIFS_ALL_CHANNELS_SET(ses) \
((1UL << (ses)->chan_count) - 1)
#define CIFS_ALL_CHANS_GOOD(ses) \
(!(ses)->chans_need_reconnect)
#define CIFS_ALL_CHANS_NEED_RECONNECT(ses) \
((ses)->chans_need_reconnect == CIFS_ALL_CHANNELS_SET(ses))
#define CIFS_SET_ALL_CHANS_NEED_RECONNECT(ses) \
((ses)->chans_need_reconnect = CIFS_ALL_CHANNELS_SET(ses))
#define CIFS_CHAN_NEEDS_RECONNECT(ses, index) \
test_bit((index), &(ses)->chans_need_reconnect)
#define CIFS_CHAN_IN_RECONNECT(ses, index) \
((ses)->chans[(index)].in_reconnect)
struct cifs_chan chans[CIFS_MAX_CHANNELS]; struct cifs_chan chans[CIFS_MAX_CHANNELS];
struct cifs_chan *binding_chan;
size_t chan_count; size_t chan_count;
size_t chan_max; size_t chan_max;
atomic_t chan_seq; /* round robin state */ atomic_t chan_seq; /* round robin state */
/*
* chans_need_reconnect is a bitmap indicating which of the channels
* under this smb session needs to be reconnected.
* If not multichannel session, only one bit will be used.
*
* We will ask for sess and tcon reconnection only if all the
* channels are marked for needing reconnection. This will
* enable the sessions on top to continue to live till any
* of the channels below are active.
*/
unsigned long chans_need_reconnect;
/* ========= end: protected by chan_lock ======== */ /* ========= end: protected by chan_lock ======== */
}; };
/*
* When binding a new channel, we need to access the channel which isn't fully
* established yet.
*/
static inline
struct cifs_chan *cifs_ses_binding_channel(struct cifs_ses *ses)
{
if (ses->binding)
return ses->binding_chan;
else
return NULL;
}
/*
* Returns the server pointer of the session. When binding a new
* channel this returns the last channel which isn't fully established
* yet.
*
* This function should be use for negprot/sess.setup codepaths. For
* the other requests see cifs_pick_channel().
*/
static inline
struct TCP_Server_Info *cifs_ses_server(struct cifs_ses *ses)
{
if (ses->binding)
return ses->binding_chan->server;
else
return ses->server;
}
static inline bool static inline bool
cap_unix(struct cifs_ses *ses) cap_unix(struct cifs_ses *ses)
{ {
@ -1039,6 +1130,7 @@ struct cifs_tcon {
struct list_head tcon_list; struct list_head tcon_list;
int tc_count; int tc_count;
struct list_head rlist; /* reconnect list */ struct list_head rlist; /* reconnect list */
spinlock_t tc_lock; /* protect anything here that is not protected */
atomic_t num_local_opens; /* num of all opens including disconnected */ atomic_t num_local_opens; /* num of all opens including disconnected */
atomic_t num_remote_opens; /* num of all network opens on server */ atomic_t num_remote_opens; /* num of all network opens on server */
struct list_head openFileList; struct list_head openFileList;
@ -1049,7 +1141,7 @@ struct cifs_tcon {
char *password; /* for share-level security */ char *password; /* for share-level security */
__u32 tid; /* The 4 byte tree id */ __u32 tid; /* The 4 byte tree id */
__u16 Flags; /* optional support bits */ __u16 Flags; /* optional support bits */
enum statusEnum tidStatus; enum tid_status_enum status;
atomic_t num_smbs_sent; atomic_t num_smbs_sent;
union { union {
struct { struct {
@ -1128,6 +1220,7 @@ struct cifs_tcon {
#ifdef CONFIG_CIFS_DFS_UPCALL #ifdef CONFIG_CIFS_DFS_UPCALL
struct list_head ulist; /* cache update list */ struct list_head ulist; /* cache update list */
#endif #endif
struct delayed_work query_interfaces; /* query interfaces workqueue job */
}; };
/* /*
@ -1806,34 +1899,78 @@ require use of the stronger protocol */
*/ */
/**************************************************************************** /****************************************************************************
* Locking notes. All updates to global variables and lists should be * Here are all the locks (spinlock, mutex, semaphore) in cifs.ko, arranged according
* protected by spinlocks or semaphores. * to the locking order. i.e. if two locks are to be held together, the lock that
* appears higher in this list needs to be taken before the other.
* *
* Spinlocks * If you hold a lock that is lower in this list, and you need to take a higher lock
* --------- * (or if you think that one of the functions that you're calling may need to), first
* GlobalMid_Lock protects: * drop the lock you hold, pick up the higher lock, then the lower one. This will
* list operations on pending_mid_q and oplockQ * ensure that locks are picked up only in one direction in the below table
* updates to XID counters, multiplex id and SMB sequence numbers * (top to bottom).
* list operations on global DnotifyReqList
* updates to ses->status and TCP_Server_Info->tcpStatus
* updates to server->CurrentMid
* tcp_ses_lock protects:
* list operations on tcp and SMB session lists
* tcon->open_file_lock protects the list of open files hanging off the tcon
* inode->open_file_lock protects the openFileList hanging off the inode
* cfile->file_info_lock protects counters and fields in cifs file struct
* f_owner.lock protects certain per file struct operations
* mapping->page_lock protects certain per page operations
* *
* Note that the cifs_tcon.open_file_lock should be taken before * Also, if you expect a function to be called with a lock held, explicitly document
* not after the cifsInodeInfo.open_file_lock * this in the comments on top of your function definition.
* *
* Semaphores * And also, try to keep the critical sections (lock hold time) to be as minimal as
* ---------- * possible. Blocking / calling other functions with a lock held always increase
* sesSem operations on smb session * the risk of a possible deadlock.
* tconSem operations on tree connection
* fh_sem file handle reconnection operations
* *
* Following this rule will avoid unnecessary deadlocks, which can get really hard to
* debug. Also, any new lock that you introduce, please add to this list in the correct
* order.
*
* Please populate this list whenever you introduce new locks in your changes. Or in
* case I've missed some existing locks. Please ensure that it's added in the list
* based on the locking order expected.
*
* =====================================================================================
* Lock Protects Initialization fn
* =====================================================================================
* vol_list_lock
* vol_info->ctx_lock vol_info->ctx
* cifs_sb_info->tlink_tree_lock cifs_sb_info->tlink_tree cifs_setup_cifs_sb
* TCP_Server_Info-> TCP_Server_Info cifs_get_tcp_session
* reconnect_mutex
* TCP_Server_Info->srv_mutex TCP_Server_Info cifs_get_tcp_session
* cifs_ses->session_mutex cifs_ses sesInfoAlloc
* cifs_tcon
* cifs_tcon->open_file_lock cifs_tcon->openFileList tconInfoAlloc
* cifs_tcon->pending_opens
* cifs_tcon->stat_lock cifs_tcon->bytes_read tconInfoAlloc
* cifs_tcon->bytes_written
* cifs_tcp_ses_lock cifs_tcp_ses_list sesInfoAlloc
* GlobalMid_Lock GlobalMaxActiveXid init_cifs
* GlobalCurrentXid
* GlobalTotalActiveXid
* TCP_Server_Info->srv_lock (anything in struct not protected by another lock and can change)
* TCP_Server_Info->mid_lock TCP_Server_Info->pending_mid_q cifs_get_tcp_session
* ->CurrentMid
* (any changes in mid_q_entry fields)
* TCP_Server_Info->req_lock TCP_Server_Info->in_flight cifs_get_tcp_session
* ->credits
* ->echo_credits
* ->oplock_credits
* ->reconnect_instance
* cifs_ses->ses_lock (anything that is not protected by another lock and can change)
* cifs_ses->iface_lock cifs_ses->iface_list sesInfoAlloc
* ->iface_count
* ->iface_last_update
* cifs_ses->chan_lock cifs_ses->chans
* ->chans_need_reconnect
* ->chans_in_reconnect
* cifs_tcon->tc_lock (anything that is not protected by another lock and can change)
* cifsInodeInfo->open_file_lock cifsInodeInfo->openFileList cifs_alloc_inode
* cifsInodeInfo->writers_lock cifsInodeInfo->writers cifsInodeInfo_alloc
* cifsInodeInfo->lock_sem cifsInodeInfo->llist cifs_init_once
* ->can_cache_brlcks
* cifsInodeInfo->deferred_lock cifsInodeInfo->deferred_closes cifsInodeInfo_alloc
* cached_fid->fid_mutex cifs_tcon->crfid tconInfoAlloc
* cifsFileInfo->fh_mutex cifsFileInfo cifs_new_fileinfo
* cifsFileInfo->file_info_lock cifsFileInfo->count cifs_new_fileinfo
* ->invalidHandle initiate_cifs_search
* ->oplock_break_cancelled
* cifs_aio_ctx->aio_mutex cifs_aio_ctx cifs_aio_ctx_alloc
****************************************************************************/ ****************************************************************************/
#ifdef DECLARE_GLOBALS_HERE #ifdef DECLARE_GLOBALS_HERE
@ -1849,47 +1986,44 @@ require use of the stronger protocol */
* sessions (and from that the tree connections) can be found * sessions (and from that the tree connections) can be found
* by iterating over cifs_tcp_ses_list * by iterating over cifs_tcp_ses_list
*/ */
GLOBAL_EXTERN struct list_head cifs_tcp_ses_list; extern struct list_head cifs_tcp_ses_list;
/* /*
* This lock protects the cifs_tcp_ses_list, the list of smb sessions per * This lock protects the cifs_tcp_ses_list, the list of smb sessions per
* tcp session, and the list of tcon's per smb session. It also protects * tcp session, and the list of tcon's per smb session. It also protects
* the reference counters for the server, smb session, and tcon. It also * the reference counters for the server, smb session, and tcon.
* protects some fields in the TCP_Server_Info struct such as dstaddr. Finally,
* changes to the tcon->tidStatus should be done while holding this lock.
* generally the locks should be taken in order tcp_ses_lock before * generally the locks should be taken in order tcp_ses_lock before
* tcon->open_file_lock and that before file->file_info_lock since the * tcon->open_file_lock and that before file->file_info_lock since the
* structure order is cifs_socket-->cifs_ses-->cifs_tcon-->cifs_file * structure order is cifs_socket-->cifs_ses-->cifs_tcon-->cifs_file
*/ */
GLOBAL_EXTERN spinlock_t cifs_tcp_ses_lock; extern spinlock_t cifs_tcp_ses_lock;
/* /*
* Global transaction id (XID) information * Global transaction id (XID) information
*/ */
GLOBAL_EXTERN unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */ extern unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */
GLOBAL_EXTERN unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */ extern unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
GLOBAL_EXTERN unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */ extern unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */
GLOBAL_EXTERN spinlock_t GlobalMid_Lock; /* protects above & list operations */ extern spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
/* on midQ entries */
/* /*
* Global counters, updated atomically * Global counters, updated atomically
*/ */
GLOBAL_EXTERN atomic_t sesInfoAllocCount; extern atomic_t sesInfoAllocCount;
GLOBAL_EXTERN atomic_t tconInfoAllocCount; extern atomic_t tconInfoAllocCount;
GLOBAL_EXTERN atomic_t tcpSesNextId; extern atomic_t tcpSesNextId;
GLOBAL_EXTERN atomic_t tcpSesAllocCount; extern atomic_t tcpSesAllocCount;
GLOBAL_EXTERN atomic_t tcpSesReconnectCount; extern atomic_t tcpSesReconnectCount;
GLOBAL_EXTERN atomic_t tconInfoReconnectCount; extern atomic_t tconInfoReconnectCount;
/* Various Debug counters */ /* Various Debug counters */
GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */ extern atomic_t buf_alloc_count; /* current number allocated */
extern atomic_t small_buf_alloc_count;
#ifdef CONFIG_CIFS_STATS2 #ifdef CONFIG_CIFS_STATS2
GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */ extern atomic_t total_buf_alloc_count; /* total allocated over all time */
GLOBAL_EXTERN atomic_t totSmBufAllocCount; extern atomic_t total_small_buf_alloc_count;
extern unsigned int slow_rsp_threshold; /* number of secs before logging */ extern unsigned int slow_rsp_threshold; /* number of secs before logging */
#endif #endif
GLOBAL_EXTERN atomic_t smBufAllocCount;
GLOBAL_EXTERN atomic_t midCount;
/* Misc globals */ /* Misc globals */
extern bool enable_oplocks; /* enable or disable oplocks */ extern bool enable_oplocks; /* enable or disable oplocks */
@ -1906,6 +2040,7 @@ extern unsigned int cifs_min_rcv; /* min size of big ntwrk buf pool */
extern unsigned int cifs_min_small; /* min size of small buf pool */ extern unsigned int cifs_min_small; /* min size of small buf pool */
extern unsigned int cifs_max_pending; /* MAX requests at once to server*/ extern unsigned int cifs_max_pending; /* MAX requests at once to server*/
extern bool disable_legacy_dialects; /* forbid vers=1.0 and vers=2.0 mounts */ extern bool disable_legacy_dialects; /* forbid vers=1.0 and vers=2.0 mounts */
extern atomic_t mid_count;
void cifs_oplock_break(struct work_struct *work); void cifs_oplock_break(struct work_struct *work);
void cifs_queue_oplock_break(struct cifsFileInfo *cfile); void cifs_queue_oplock_break(struct cifsFileInfo *cfile);

View File

@ -134,6 +134,11 @@
*/ */
#define SMB3_SIGN_KEY_SIZE (16) #define SMB3_SIGN_KEY_SIZE (16)
/*
* Size of the smb3 encryption/decryption keys
*/
#define SMB3_ENC_DEC_KEY_SIZE (32)
#define CIFS_CLIENT_CHALLENGE_SIZE (8) #define CIFS_CLIENT_CHALLENGE_SIZE (8)
#define CIFS_SERVER_CHALLENGE_SIZE (8) #define CIFS_SERVER_CHALLENGE_SIZE (8)
#define CIFS_HMAC_MD5_HASH_SIZE (16) #define CIFS_HMAC_MD5_HASH_SIZE (16)

View File

@ -78,9 +78,6 @@ extern char *build_wildcard_path_from_dentry(struct dentry *direntry);
extern char *cifs_compose_mount_options(const char *sb_mountdata, extern char *cifs_compose_mount_options(const char *sb_mountdata,
const char *fullpath, const struct dfs_info3_param *ref, const char *fullpath, const struct dfs_info3_param *ref,
char **devname); char **devname);
/* extern void renew_parental_timestamps(struct dentry *direntry);*/
extern struct mid_q_entry *alloc_mid(const struct smb_hdr *,
struct TCP_Server_Info *);
extern void delete_mid(struct mid_q_entry *mid); extern void delete_mid(struct mid_q_entry *mid);
void __release_mid(struct kref *refcount); void __release_mid(struct kref *refcount);
extern void cifs_wake_up_task(struct mid_q_entry *mid); extern void cifs_wake_up_task(struct mid_q_entry *mid);
@ -130,7 +127,14 @@ extern int SendReceiveBlockingLock(const unsigned int xid,
struct smb_hdr *in_buf , struct smb_hdr *in_buf ,
struct smb_hdr *out_buf, struct smb_hdr *out_buf,
int *bytes_returned); int *bytes_returned);
extern int cifs_reconnect(struct TCP_Server_Info *server); void
cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
bool all_channels);
void
cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
bool mark_smb_session);
extern int cifs_reconnect(struct TCP_Server_Info *server,
bool mark_smb_session);
extern int checkSMB(char *buf, unsigned int len, struct TCP_Server_Info *srvr); extern int checkSMB(char *buf, unsigned int len, struct TCP_Server_Info *srvr);
extern bool is_valid_oplock_break(char *, struct TCP_Server_Info *); extern bool is_valid_oplock_break(char *, struct TCP_Server_Info *);
extern bool backup_cred(struct cifs_sb_info *); extern bool backup_cred(struct cifs_sb_info *);
@ -147,7 +151,7 @@ extern int cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
extern struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *, bool); extern struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *, bool);
extern int cifs_get_readable_path(struct cifs_tcon *tcon, const char *name, extern int cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
struct cifsFileInfo **ret_file); struct cifsFileInfo **ret_file);
extern unsigned int smbCalcSize(void *buf, struct TCP_Server_Info *server); extern unsigned int smbCalcSize(void *buf);
extern int decode_negTokenInit(unsigned char *security_blob, int length, extern int decode_negTokenInit(unsigned char *security_blob, int length,
struct TCP_Server_Info *server); struct TCP_Server_Info *server);
extern int cifs_convert_address(struct sockaddr *dst, const char *src, int len); extern int cifs_convert_address(struct sockaddr *dst, const char *src, int len);
@ -163,6 +167,7 @@ extern int small_smb_init_no_tc(const int smb_cmd, const int wct,
extern enum securityEnum select_sectype(struct TCP_Server_Info *server, extern enum securityEnum select_sectype(struct TCP_Server_Info *server,
enum securityEnum requested); enum securityEnum requested);
extern int CIFS_SessSetup(const unsigned int xid, struct cifs_ses *ses, extern int CIFS_SessSetup(const unsigned int xid, struct cifs_ses *ses,
struct TCP_Server_Info *server,
const struct nls_table *nls_cp); const struct nls_table *nls_cp);
extern struct timespec64 cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601); extern struct timespec64 cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
extern u64 cifs_UnixTimeToNT(struct timespec64); extern u64 cifs_UnixTimeToNT(struct timespec64);
@ -277,11 +282,15 @@ extern int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon,
const struct nls_table *nlsc); const struct nls_table *nlsc);
extern int cifs_negotiate_protocol(const unsigned int xid, extern int cifs_negotiate_protocol(const unsigned int xid,
struct cifs_ses *ses); struct cifs_ses *ses,
struct TCP_Server_Info *server);
extern int cifs_setup_session(const unsigned int xid, struct cifs_ses *ses, extern int cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
struct TCP_Server_Info *server,
struct nls_table *nls_info); struct nls_table *nls_info);
extern int cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required); extern int cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required);
extern int CIFSSMBNegotiate(const unsigned int xid, struct cifs_ses *ses); extern int CIFSSMBNegotiate(const unsigned int xid,
struct cifs_ses *ses,
struct TCP_Server_Info *server);
extern int CIFSTCon(const unsigned int xid, struct cifs_ses *ses, extern int CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
const char *tree, struct cifs_tcon *tcon, const char *tree, struct cifs_tcon *tcon,
@ -491,8 +500,10 @@ extern int setup_ntlm_response(struct cifs_ses *, const struct nls_table *);
extern int setup_ntlmv2_rsp(struct cifs_ses *, const struct nls_table *); extern int setup_ntlmv2_rsp(struct cifs_ses *, const struct nls_table *);
extern void cifs_crypto_secmech_release(struct TCP_Server_Info *server); extern void cifs_crypto_secmech_release(struct TCP_Server_Info *server);
extern int calc_seckey(struct cifs_ses *); extern int calc_seckey(struct cifs_ses *);
extern int generate_smb30signingkey(struct cifs_ses *); extern int generate_smb30signingkey(struct cifs_ses *ses,
extern int generate_smb311signingkey(struct cifs_ses *); struct TCP_Server_Info *server);
extern int generate_smb311signingkey(struct cifs_ses *ses,
struct TCP_Server_Info *server);
#ifdef CONFIG_CIFS_WEAK_PW_HASH #ifdef CONFIG_CIFS_WEAK_PW_HASH
extern int calc_lanman_hash(const char *password, const char *cryptkey, extern int calc_lanman_hash(const char *password, const char *cryptkey,
@ -595,6 +606,35 @@ bool is_server_using_iface(struct TCP_Server_Info *server,
bool is_ses_using_iface(struct cifs_ses *ses, struct cifs_server_iface *iface); bool is_ses_using_iface(struct cifs_ses *ses, struct cifs_server_iface *iface);
void cifs_ses_mark_for_reconnect(struct cifs_ses *ses); void cifs_ses_mark_for_reconnect(struct cifs_ses *ses);
int
cifs_ses_get_chan_index(struct cifs_ses *ses,
struct TCP_Server_Info *server);
void
cifs_chan_set_in_reconnect(struct cifs_ses *ses,
struct TCP_Server_Info *server);
void
cifs_chan_clear_in_reconnect(struct cifs_ses *ses,
struct TCP_Server_Info *server);
bool
cifs_chan_in_reconnect(struct cifs_ses *ses,
struct TCP_Server_Info *server);
void
cifs_chan_set_need_reconnect(struct cifs_ses *ses,
struct TCP_Server_Info *server);
void
cifs_chan_clear_need_reconnect(struct cifs_ses *ses,
struct TCP_Server_Info *server);
bool
cifs_chan_needs_reconnect(struct cifs_ses *ses,
struct TCP_Server_Info *server);
bool
cifs_chan_is_iface_active(struct cifs_ses *ses,
struct TCP_Server_Info *server);
int
cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server);
int
SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon, bool in_mount);
void extract_unc_hostname(const char *unc, const char **h, size_t *len); void extract_unc_hostname(const char *unc, const char **h, size_t *len);
int copy_path_name(char *dst, const char *src); int copy_path_name(char *dst, const char *src);
int smb2_parse_query_directory(struct cifs_tcon *tcon, struct kvec *rsp_iov, int smb2_parse_query_directory(struct cifs_tcon *tcon, struct kvec *rsp_iov,
@ -632,6 +672,7 @@ static inline int cifs_create_options(struct cifs_sb_info *cifs_sb, int options)
struct super_block *cifs_get_tcon_super(struct cifs_tcon *tcon); struct super_block *cifs_get_tcon_super(struct cifs_tcon *tcon);
void cifs_put_tcon_super(struct super_block *sb); void cifs_put_tcon_super(struct super_block *sb);
int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry);
static inline void release_mid(struct mid_q_entry *mid) static inline void release_mid(struct mid_q_entry *mid)
{ {

View File

@ -89,6 +89,15 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
struct list_head *tmp; struct list_head *tmp;
struct list_head *tmp1; struct list_head *tmp1;
/* only send once per connect */
spin_lock(&tcon->ses->ses_lock);
if ((tcon->ses->ses_status != SES_GOOD) || (tcon->status != TID_NEED_RECON)) {
spin_unlock(&tcon->ses->ses_lock);
return;
}
tcon->status = TID_IN_FILES_INVALIDATE;
spin_unlock(&tcon->ses->ses_lock);
/* list all files open on tree connection and mark them invalid */ /* list all files open on tree connection and mark them invalid */
spin_lock(&tcon->open_file_lock); spin_lock(&tcon->open_file_lock);
list_for_each_safe(tmp, tmp1, &tcon->openFileList) { list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
@ -105,6 +114,11 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
memset(tcon->crfid.fid, 0, sizeof(struct cifs_fid)); memset(tcon->crfid.fid, 0, sizeof(struct cifs_fid));
mutex_unlock(&tcon->crfid.fid_mutex); mutex_unlock(&tcon->crfid.fid_mutex);
spin_lock(&tcon->tc_lock);
if (tcon->status == TID_IN_FILES_INVALIDATE)
tcon->status = TID_NEED_TCON;
spin_unlock(&tcon->tc_lock);
/* /*
* BB Add call to invalidate_inodes(sb) for all superblocks mounted * BB Add call to invalidate_inodes(sb) for all superblocks mounted
* to this tcon. * to this tcon.
@ -118,8 +132,7 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
int rc; int rc;
struct cifs_ses *ses; struct cifs_ses *ses;
struct TCP_Server_Info *server; struct TCP_Server_Info *server;
struct nls_table *nls_codepage; struct nls_table *nls_codepage = NULL;
int retries;
/* /*
* SMBs NegProt, SessSetup, uLogoff do not have tcon yet so check for * SMBs NegProt, SessSetup, uLogoff do not have tcon yet so check for
@ -136,54 +149,48 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
* only tree disconnect, open, and write, (and ulogoff which does not * only tree disconnect, open, and write, (and ulogoff which does not
* have tcon) are allowed as we start force umount * have tcon) are allowed as we start force umount
*/ */
if (tcon->tidStatus == CifsExiting) { spin_lock(&tcon->tc_lock);
if (tcon->status == TID_EXITING) {
if (smb_command != SMB_COM_WRITE_ANDX && if (smb_command != SMB_COM_WRITE_ANDX &&
smb_command != SMB_COM_OPEN_ANDX && smb_command != SMB_COM_OPEN_ANDX &&
smb_command != SMB_COM_TREE_DISCONNECT) { smb_command != SMB_COM_TREE_DISCONNECT) {
spin_unlock(&tcon->tc_lock);
cifs_dbg(FYI, "can not send cmd %d while umounting\n", cifs_dbg(FYI, "can not send cmd %d while umounting\n",
smb_command); smb_command);
return -ENODEV; return -ENODEV;
} }
} }
spin_unlock(&tcon->tc_lock);
retries = server->nr_targets; again:
rc = cifs_wait_for_server_reconnect(server, tcon->retry);
if (rc)
return rc;
/* spin_lock(&ses->chan_lock);
* Give demultiplex thread up to 10 seconds to each target available for if (!cifs_chan_needs_reconnect(ses, server) && !tcon->need_reconnect) {
* reconnect -- should be greater than cifs socket timeout which is 7 spin_unlock(&ses->chan_lock);
* seconds.
*/
while (server->tcpStatus == CifsNeedReconnect) {
rc = wait_event_interruptible_timeout(server->response_q,
(server->tcpStatus != CifsNeedReconnect),
10 * HZ);
if (rc < 0) {
cifs_dbg(FYI, "%s: aborting reconnect due to a received signal by the process\n",
__func__);
return -ERESTARTSYS;
}
/* are we still trying to reconnect? */
if (server->tcpStatus != CifsNeedReconnect)
break;
if (retries && --retries)
continue;
/*
* on "soft" mounts we wait once. Hard mounts keep
* retrying until process is killed or server comes
* back on-line
*/
if (!tcon->retry) {
cifs_dbg(FYI, "gave up waiting on reconnect in smb_init\n");
return -EHOSTDOWN;
}
retries = server->nr_targets;
}
if (!ses->need_reconnect && !tcon->need_reconnect)
return 0; return 0;
}
spin_unlock(&ses->chan_lock);
mutex_lock(&ses->session_mutex);
/*
* Recheck after acquire mutex. If another thread is negotiating
* and the server never sends an answer the socket will be closed
* and tcpStatus set to reconnect.
*/
spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsNeedReconnect) {
spin_unlock(&server->srv_lock);
mutex_unlock(&ses->session_mutex);
if (tcon->retry)
goto again;
rc = -EHOSTDOWN;
goto out;
}
spin_unlock(&server->srv_lock);
nls_codepage = load_nls_default(); nls_codepage = load_nls_default();
@ -191,22 +198,26 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
* need to prevent multiple threads trying to simultaneously * need to prevent multiple threads trying to simultaneously
* reconnect the same SMB session * reconnect the same SMB session
*/ */
mutex_lock(&ses->session_mutex); spin_lock(&ses->ses_lock);
spin_lock(&ses->chan_lock);
if (!cifs_chan_needs_reconnect(ses, server) &&
ses->ses_status == SES_GOOD) {
spin_unlock(&ses->chan_lock);
spin_unlock(&ses->ses_lock);
/* this means that we only need to tree connect */
if (tcon->need_reconnect)
goto skip_sess_setup;
/*
* Recheck after acquire mutex. If another thread is negotiating
* and the server never sends an answer the socket will be closed
* and tcpStatus set to reconnect.
*/
if (server->tcpStatus == CifsNeedReconnect) {
rc = -EHOSTDOWN;
mutex_unlock(&ses->session_mutex); mutex_unlock(&ses->session_mutex);
goto out; goto out;
} }
spin_unlock(&ses->chan_lock);
spin_unlock(&ses->ses_lock);
rc = cifs_negotiate_protocol(0, ses); rc = cifs_negotiate_protocol(0, ses, server);
if (rc == 0 && ses->need_reconnect) if (!rc)
rc = cifs_setup_session(0, ses, nls_codepage); rc = cifs_setup_session(0, ses, server, nls_codepage);
/* do we need to reconnect tcon? */ /* do we need to reconnect tcon? */
if (rc || !tcon->need_reconnect) { if (rc || !tcon->need_reconnect) {
@ -214,6 +225,7 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
goto out; goto out;
} }
skip_sess_setup:
cifs_mark_open_files_invalid(tcon); cifs_mark_open_files_invalid(tcon);
rc = cifs_tree_connect(0, tcon, nls_codepage); rc = cifs_tree_connect(0, tcon, nls_codepage);
mutex_unlock(&ses->session_mutex); mutex_unlock(&ses->session_mutex);
@ -353,8 +365,13 @@ static int
smb_init_no_reconnect(int smb_command, int wct, struct cifs_tcon *tcon, smb_init_no_reconnect(int smb_command, int wct, struct cifs_tcon *tcon,
void **request_buf, void **response_buf) void **request_buf, void **response_buf)
{ {
if (tcon->ses->need_reconnect || tcon->need_reconnect) spin_lock(&tcon->ses->chan_lock);
if (cifs_chan_needs_reconnect(tcon->ses, tcon->ses->server) ||
tcon->need_reconnect) {
spin_unlock(&tcon->ses->chan_lock);
return -EHOSTDOWN; return -EHOSTDOWN;
}
spin_unlock(&tcon->ses->chan_lock);
return __smb_init(smb_command, wct, tcon, request_buf, response_buf); return __smb_init(smb_command, wct, tcon, request_buf, response_buf);
} }
@ -433,7 +450,7 @@ cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required)
{ {
bool srv_sign_required = server->sec_mode & server->vals->signing_required; bool srv_sign_required = server->sec_mode & server->vals->signing_required;
bool srv_sign_enabled = server->sec_mode & server->vals->signing_enabled; bool srv_sign_enabled = server->sec_mode & server->vals->signing_enabled;
bool mnt_sign_enabled = global_secflags & CIFSSEC_MAY_SIGN; bool mnt_sign_enabled;
/* /*
* Is signing required by mnt options? If not then check * Is signing required by mnt options? If not then check
@ -575,14 +592,15 @@ should_set_ext_sec_flag(enum securityEnum sectype)
} }
int int
CIFSSMBNegotiate(const unsigned int xid, struct cifs_ses *ses) CIFSSMBNegotiate(const unsigned int xid,
struct cifs_ses *ses,
struct TCP_Server_Info *server)
{ {
NEGOTIATE_REQ *pSMB; NEGOTIATE_REQ *pSMB;
NEGOTIATE_RSP *pSMBr; NEGOTIATE_RSP *pSMBr;
int rc = 0; int rc = 0;
int bytes_returned; int bytes_returned;
int i; int i;
struct TCP_Server_Info *server = ses->server;
u16 count; u16 count;
if (!server) { if (!server) {
@ -704,8 +722,12 @@ CIFSSMBTDis(const unsigned int xid, struct cifs_tcon *tcon)
* the tcon is no longer on the list, so no need to take lock before * the tcon is no longer on the list, so no need to take lock before
* checking this. * checking this.
*/ */
if ((tcon->need_reconnect) || (tcon->ses->need_reconnect)) spin_lock(&tcon->ses->chan_lock);
return 0; if ((tcon->need_reconnect) || CIFS_ALL_CHANS_NEED_RECONNECT(tcon->ses)) {
spin_unlock(&tcon->ses->chan_lock);
return -EIO;
}
spin_unlock(&tcon->ses->chan_lock);
rc = small_smb_init(SMB_COM_TREE_DISCONNECT, 0, tcon, rc = small_smb_init(SMB_COM_TREE_DISCONNECT, 0, tcon,
(void **)&smb_buffer); (void **)&smb_buffer);
@ -800,9 +822,14 @@ CIFSSMBLogoff(const unsigned int xid, struct cifs_ses *ses)
return -EIO; return -EIO;
mutex_lock(&ses->session_mutex); mutex_lock(&ses->session_mutex);
if (ses->need_reconnect) spin_lock(&ses->chan_lock);
if (CIFS_ALL_CHANS_NEED_RECONNECT(ses)) {
spin_unlock(&ses->chan_lock);
goto session_already_dead; /* no need to send SMBlogoff if uid goto session_already_dead; /* no need to send SMBlogoff if uid
already closed due to reconnect */ already closed due to reconnect */
}
spin_unlock(&ses->chan_lock);
rc = small_smb_init(SMB_COM_LOGOFF_ANDX, 2, NULL, (void **)&pSMB); rc = small_smb_init(SMB_COM_LOGOFF_ANDX, 2, NULL, (void **)&pSMB);
if (rc) { if (rc) {
mutex_unlock(&ses->session_mutex); mutex_unlock(&ses->session_mutex);
@ -1501,7 +1528,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
if (server->ops->is_session_expired && if (server->ops->is_session_expired &&
server->ops->is_session_expired(buf)) { server->ops->is_session_expired(buf)) {
cifs_reconnect(server); cifs_reconnect(server, true);
return -1; return -1;
} }

File diff suppressed because it is too large Load Diff

View File

@ -1355,9 +1355,9 @@ static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, c
cifs_dbg(VFS, "%s: failed to convert address \'%s\'. skip address matching.\n", cifs_dbg(VFS, "%s: failed to convert address \'%s\'. skip address matching.\n",
__func__, ip); __func__, ip);
} else { } else {
mutex_lock(&server->srv_mutex); cifs_server_lock(server);
match = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, &sa); match = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, &sa);
mutex_unlock(&server->srv_mutex); cifs_server_unlock(server);
} }
kfree(ip); kfree(ip);
@ -1383,7 +1383,7 @@ static void mark_for_reconnect_if_needed(struct cifs_tcon *tcon, struct dfs_cach
} }
cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__); cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
cifs_ses_mark_for_reconnect(tcon->ses); cifs_signal_cifsd_for_reconnect(tcon->ses->server, true);
} }
/* Refresh dfs referral of tcon and mark it for reconnect if needed */ /* Refresh dfs referral of tcon and mark it for reconnect if needed */
@ -1540,15 +1540,21 @@ static void refresh_mounts(struct cifs_ses **sessions)
spin_lock(&cifs_tcp_ses_lock); spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
if (!server->is_dfs_conn) spin_lock(&server->srv_lock);
if (!server->is_dfs_conn) {
spin_unlock(&server->srv_lock);
continue; continue;
}
spin_unlock(&server->srv_lock);
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
spin_lock(&tcon->tc_lock);
if (!tcon->ipc && !tcon->need_reconnect) { if (!tcon->ipc && !tcon->need_reconnect) {
tcon->tc_count++; tcon->tc_count++;
list_add_tail(&tcon->ulist, &tcons); list_add_tail(&tcon->ulist, &tcons);
} }
spin_unlock(&tcon->tc_lock);
} }
} }
} }

View File

@ -1210,7 +1210,7 @@ static const struct inode_operations cifs_ipc_inode_ops = {
static int static int
cifs_find_inode(struct inode *inode, void *opaque) cifs_find_inode(struct inode *inode, void *opaque)
{ {
struct cifs_fattr *fattr = (struct cifs_fattr *) opaque; struct cifs_fattr *fattr = opaque;
/* don't match inode with different uniqueid */ /* don't match inode with different uniqueid */
if (CIFS_I(inode)->uniqueid != fattr->cf_uniqueid) if (CIFS_I(inode)->uniqueid != fattr->cf_uniqueid)
@ -1234,7 +1234,7 @@ cifs_find_inode(struct inode *inode, void *opaque)
static int static int
cifs_init_inode(struct inode *inode, void *opaque) cifs_init_inode(struct inode *inode, void *opaque)
{ {
struct cifs_fattr *fattr = (struct cifs_fattr *) opaque; struct cifs_fattr *fattr = opaque;
CIFS_I(inode)->uniqueid = fattr->cf_uniqueid; CIFS_I(inode)->uniqueid = fattr->cf_uniqueid;
CIFS_I(inode)->createtime = fattr->cf_createtime; CIFS_I(inode)->createtime = fattr->cf_createtime;
@ -2380,7 +2380,7 @@ int cifs_getattr(const struct path *path, struct kstat *stat,
* We need to be sure that all dirty pages are written and the server * We need to be sure that all dirty pages are written and the server
* has actual ctime, mtime and file length. * has actual ctime, mtime and file length.
*/ */
if ((request_mask & (STATX_CTIME | STATX_MTIME | STATX_SIZE)) && if ((request_mask & (STATX_CTIME | STATX_MTIME | STATX_SIZE | STATX_BLOCKS)) &&
!CIFS_CACHE_READ(CIFS_I(inode)) && !CIFS_CACHE_READ(CIFS_I(inode)) &&
inode->i_mapping && inode->i_mapping->nrpages != 0) { inode->i_mapping && inode->i_mapping->nrpages != 0) {
rc = filemap_fdatawait(inode->i_mapping); rc = filemap_fdatawait(inode->i_mapping);
@ -2570,6 +2570,14 @@ set_size_out:
if (rc == 0) { if (rc == 0) {
cifsInode->server_eof = attrs->ia_size; cifsInode->server_eof = attrs->ia_size;
cifs_setsize(inode, attrs->ia_size); cifs_setsize(inode, attrs->ia_size);
/*
* i_blocks is not related to (i_size / i_blksize), but instead
* 512 byte (2**9) size is required for calculating num blocks.
* Until we can query the server for actual allocation size,
* this is best estimate we have for blocks allocated for a file
* Number of blocks must be rounded up so size 1 is not 0 blocks
*/
inode->i_blocks = (512 - 1 + attrs->ia_size) >> 9;
/* /*
* The man page of truncate says if the size changed, * The man page of truncate says if the size changed,

View File

@ -69,12 +69,14 @@ sesInfoAlloc(void)
ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL); ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
if (ret_buf) { if (ret_buf) {
atomic_inc(&sesInfoAllocCount); atomic_inc(&sesInfoAllocCount);
ret_buf->status = CifsNew; spin_lock_init(&ret_buf->ses_lock);
ret_buf->ses_status = SES_NEW;
++ret_buf->ses_count; ++ret_buf->ses_count;
INIT_LIST_HEAD(&ret_buf->smb_ses_list); INIT_LIST_HEAD(&ret_buf->smb_ses_list);
INIT_LIST_HEAD(&ret_buf->tcon_list); INIT_LIST_HEAD(&ret_buf->tcon_list);
mutex_init(&ret_buf->session_mutex); mutex_init(&ret_buf->session_mutex);
spin_lock_init(&ret_buf->iface_lock); spin_lock_init(&ret_buf->iface_lock);
INIT_LIST_HEAD(&ret_buf->iface_list);
spin_lock_init(&ret_buf->chan_lock); spin_lock_init(&ret_buf->chan_lock);
} }
return ret_buf; return ret_buf;
@ -83,6 +85,8 @@ sesInfoAlloc(void)
void void
sesInfoFree(struct cifs_ses *buf_to_free) sesInfoFree(struct cifs_ses *buf_to_free)
{ {
struct cifs_server_iface *iface = NULL, *niface = NULL;
if (buf_to_free == NULL) { if (buf_to_free == NULL) {
cifs_dbg(FYI, "Null buffer passed to sesInfoFree\n"); cifs_dbg(FYI, "Null buffer passed to sesInfoFree\n");
return; return;
@ -96,7 +100,11 @@ sesInfoFree(struct cifs_ses *buf_to_free)
kfree(buf_to_free->user_name); kfree(buf_to_free->user_name);
kfree(buf_to_free->domainName); kfree(buf_to_free->domainName);
kfree_sensitive(buf_to_free->auth_key.response); kfree_sensitive(buf_to_free->auth_key.response);
kfree(buf_to_free->iface_list); spin_lock(&buf_to_free->iface_lock);
list_for_each_entry_safe(iface, niface, &buf_to_free->iface_list,
iface_head)
kref_put(&iface->refcount, release_iface);
spin_unlock(&buf_to_free->iface_lock);
kfree_sensitive(buf_to_free); kfree_sensitive(buf_to_free);
} }
@ -115,8 +123,9 @@ tconInfoAlloc(void)
} }
atomic_inc(&tconInfoAllocCount); atomic_inc(&tconInfoAllocCount);
ret_buf->tidStatus = CifsNew; ret_buf->status = TID_NEW;
++ret_buf->tc_count; ++ret_buf->tc_count;
spin_lock_init(&ret_buf->tc_lock);
INIT_LIST_HEAD(&ret_buf->openFileList); INIT_LIST_HEAD(&ret_buf->openFileList);
INIT_LIST_HEAD(&ret_buf->tcon_list); INIT_LIST_HEAD(&ret_buf->tcon_list);
spin_lock_init(&ret_buf->open_file_lock); spin_lock_init(&ret_buf->open_file_lock);
@ -163,9 +172,9 @@ cifs_buf_get(void)
/* clear the first few header bytes */ /* clear the first few header bytes */
/* for most paths, more is cleared in header_assemble */ /* for most paths, more is cleared in header_assemble */
memset(ret_buf, 0, buf_size + 3); memset(ret_buf, 0, buf_size + 3);
atomic_inc(&bufAllocCount); atomic_inc(&buf_alloc_count);
#ifdef CONFIG_CIFS_STATS2 #ifdef CONFIG_CIFS_STATS2
atomic_inc(&totBufAllocCount); atomic_inc(&total_buf_alloc_count);
#endif /* CONFIG_CIFS_STATS2 */ #endif /* CONFIG_CIFS_STATS2 */
return ret_buf; return ret_buf;
@ -180,7 +189,7 @@ cifs_buf_release(void *buf_to_free)
} }
mempool_free(buf_to_free, cifs_req_poolp); mempool_free(buf_to_free, cifs_req_poolp);
atomic_dec(&bufAllocCount); atomic_dec(&buf_alloc_count);
return; return;
} }
@ -196,9 +205,9 @@ cifs_small_buf_get(void)
ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS); ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
/* No need to clear memory here, cleared in header assemble */ /* No need to clear memory here, cleared in header assemble */
/* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/ /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
atomic_inc(&smBufAllocCount); atomic_inc(&small_buf_alloc_count);
#ifdef CONFIG_CIFS_STATS2 #ifdef CONFIG_CIFS_STATS2
atomic_inc(&totSmBufAllocCount); atomic_inc(&total_small_buf_alloc_count);
#endif /* CONFIG_CIFS_STATS2 */ #endif /* CONFIG_CIFS_STATS2 */
return ret_buf; return ret_buf;
@ -214,7 +223,7 @@ cifs_small_buf_release(void *buf_to_free)
} }
mempool_free(buf_to_free, cifs_sm_req_poolp); mempool_free(buf_to_free, cifs_sm_req_poolp);
atomic_dec(&smBufAllocCount); atomic_dec(&small_buf_alloc_count);
return; return;
} }
@ -349,7 +358,7 @@ checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
/* otherwise, there is enough to get to the BCC */ /* otherwise, there is enough to get to the BCC */
if (check_smb_hdr(smb)) if (check_smb_hdr(smb))
return -EIO; return -EIO;
clc_len = smbCalcSize(smb, server); clc_len = smbCalcSize(smb);
if (4 + rfclen != total_read) { if (4 + rfclen != total_read) {
cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n", cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n",
@ -1123,8 +1132,10 @@ int match_target_ip(struct TCP_Server_Info *server,
goto out; goto out;
} }
spin_lock(&server->srv_lock);
*result = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, *result = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr,
&tipaddr); &tipaddr);
spin_unlock(&server->srv_lock);
cifs_dbg(FYI, "%s: ip addresses match: %u\n", __func__, *result); cifs_dbg(FYI, "%s: ip addresses match: %u\n", __func__, *result);
rc = 0; rc = 0;
@ -1152,3 +1163,47 @@ int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix)
return 0; return 0;
} }
#endif #endif
int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry)
{
int timeout = 10;
int rc;
spin_lock(&server->srv_lock);
if (server->tcpStatus != CifsNeedReconnect) {
spin_unlock(&server->srv_lock);
return 0;
}
timeout *= server->nr_targets;
spin_unlock(&server->srv_lock);
/*
* Give demultiplex thread up to 10 seconds to each target available for
* reconnect -- should be greater than cifs socket timeout which is 7
* seconds.
*
* On "soft" mounts we wait once. Hard mounts keep retrying until
* process is killed or server comes back on-line.
*/
do {
rc = wait_event_interruptible_timeout(server->response_q,
(server->tcpStatus != CifsNeedReconnect),
timeout * HZ);
if (rc < 0) {
cifs_dbg(FYI, "%s: aborting reconnect due to received signal\n",
__func__);
return -ERESTARTSYS;
}
/* are we still trying to reconnect? */
spin_lock(&server->srv_lock);
if (server->tcpStatus != CifsNeedReconnect) {
spin_unlock(&server->srv_lock);
return 0;
}
spin_unlock(&server->srv_lock);
} while (retry);
cifs_dbg(FYI, "%s: gave up waiting on reconnect\n", __func__);
return -EHOSTDOWN;
}

View File

@ -909,10 +909,7 @@ map_and_check_smb_error(struct mid_q_entry *mid, bool logErr)
if (class == ERRSRV && code == ERRbaduid) { if (class == ERRSRV && code == ERRbaduid) {
cifs_dbg(FYI, "Server returned 0x%x, reconnecting session...\n", cifs_dbg(FYI, "Server returned 0x%x, reconnecting session...\n",
code); code);
spin_lock(&GlobalMid_Lock); cifs_signal_cifsd_for_reconnect(mid->server, false);
if (mid->server->tcpStatus != CifsExiting)
mid->server->tcpStatus = CifsNeedReconnect;
spin_unlock(&GlobalMid_Lock);
} }
} }
@ -925,9 +922,9 @@ map_and_check_smb_error(struct mid_q_entry *mid, bool logErr)
* portion, the number of word parameters and the data portion of the message * portion, the number of word parameters and the data portion of the message
*/ */
unsigned int unsigned int
smbCalcSize(void *buf, struct TCP_Server_Info *server) smbCalcSize(void *buf)
{ {
struct smb_hdr *ptr = (struct smb_hdr *)buf; struct smb_hdr *ptr = buf;
return (sizeof(struct smb_hdr) + (2 * ptr->WordCount) + return (sizeof(struct smb_hdr) + (2 * ptr->WordCount) +
2 /* size of the bcc field */ + get_bcc(ptr)); 2 /* size of the bcc field */ + get_bcc(ptr));
} }

View File

@ -121,7 +121,9 @@ typedef struct _AUTHENTICATE_MESSAGE {
int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, struct cifs_ses *ses); int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, struct cifs_ses *ses);
int build_ntlmssp_negotiate_blob(unsigned char **pbuffer, u16 *buflen, int build_ntlmssp_negotiate_blob(unsigned char **pbuffer, u16 *buflen,
struct cifs_ses *ses, struct cifs_ses *ses,
struct TCP_Server_Info *server,
const struct nls_table *nls_cp); const struct nls_table *nls_cp);
int build_ntlmssp_auth_blob(unsigned char **pbuffer, u16 *buflen, int build_ntlmssp_auth_blob(unsigned char **pbuffer, u16 *buflen,
struct cifs_ses *ses, struct cifs_ses *ses,
struct TCP_Server_Info *server,
const struct nls_table *nls_cp); const struct nls_table *nls_cp);

View File

@ -805,8 +805,7 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
end_of_smb = cfile->srch_inf.ntwrk_buf_start + end_of_smb = cfile->srch_inf.ntwrk_buf_start +
server->ops->calc_smb_size( server->ops->calc_smb_size(
cfile->srch_inf.ntwrk_buf_start, cfile->srch_inf.ntwrk_buf_start);
server);
cur_ent = cfile->srch_inf.srch_entries_start; cur_ent = cfile->srch_inf.srch_entries_start;
first_entry_in_buffer = cfile->srch_inf.index_of_last_entry first_entry_in_buffer = cfile->srch_inf.index_of_last_entry
@ -1005,8 +1004,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
cifs_dbg(FYI, "loop through %d times filling dir for net buf %p\n", cifs_dbg(FYI, "loop through %d times filling dir for net buf %p\n",
num_to_fill, cifsFile->srch_inf.ntwrk_buf_start); num_to_fill, cifsFile->srch_inf.ntwrk_buf_start);
max_len = tcon->ses->server->ops->calc_smb_size( max_len = tcon->ses->server->ops->calc_smb_size(
cifsFile->srch_inf.ntwrk_buf_start, cifsFile->srch_inf.ntwrk_buf_start);
tcon->ses->server);
end_of_smb = cifsFile->srch_inf.ntwrk_buf_start + max_len; end_of_smb = cifsFile->srch_inf.ntwrk_buf_start + max_len;
tmp_buf = kmalloc(UNICODE_NAME_MAX, GFP_KERNEL); tmp_buf = kmalloc(UNICODE_NAME_MAX, GFP_KERNEL);

View File

@ -56,7 +56,7 @@ bool is_ses_using_iface(struct cifs_ses *ses, struct cifs_server_iface *iface)
spin_lock(&ses->chan_lock); spin_lock(&ses->chan_lock);
for (i = 0; i < ses->chan_count; i++) { for (i = 0; i < ses->chan_count; i++) {
if (is_server_using_iface(ses->chans[i].server, iface)) { if (ses->chans[i].iface == iface) {
spin_unlock(&ses->chan_lock); spin_unlock(&ses->chan_lock);
return true; return true;
} }
@ -65,16 +65,121 @@ bool is_ses_using_iface(struct cifs_ses *ses, struct cifs_server_iface *iface)
return false; return false;
} }
/* channel helper functions. assumed that chan_lock is held by caller. */
int
cifs_ses_get_chan_index(struct cifs_ses *ses,
struct TCP_Server_Info *server)
{
unsigned int i;
for (i = 0; i < ses->chan_count; i++) {
if (ses->chans[i].server == server)
return i;
}
/* If we didn't find the channel, it is likely a bug */
if (server)
cifs_dbg(VFS, "unable to get chan index for server: 0x%llx",
server->conn_id);
WARN_ON(1);
return CIFS_INVAL_CHAN_INDEX;
}
void
cifs_chan_set_in_reconnect(struct cifs_ses *ses,
struct TCP_Server_Info *server)
{
int chan_index = cifs_ses_get_chan_index(ses, server);
if (chan_index == CIFS_INVAL_CHAN_INDEX)
return;
ses->chans[chan_index].in_reconnect = true;
}
void
cifs_chan_clear_in_reconnect(struct cifs_ses *ses,
struct TCP_Server_Info *server)
{
unsigned int chan_index = cifs_ses_get_chan_index(ses, server);
if (chan_index == CIFS_INVAL_CHAN_INDEX)
return;
ses->chans[chan_index].in_reconnect = false;
}
bool
cifs_chan_in_reconnect(struct cifs_ses *ses,
struct TCP_Server_Info *server)
{
unsigned int chan_index = cifs_ses_get_chan_index(ses, server);
if (chan_index == CIFS_INVAL_CHAN_INDEX)
return true; /* err on the safer side */
return CIFS_CHAN_IN_RECONNECT(ses, chan_index);
}
void
cifs_chan_set_need_reconnect(struct cifs_ses *ses,
struct TCP_Server_Info *server)
{
unsigned int chan_index = cifs_ses_get_chan_index(ses, server);
if (chan_index == CIFS_INVAL_CHAN_INDEX)
return;
set_bit(chan_index, &ses->chans_need_reconnect);
cifs_dbg(FYI, "Set reconnect bitmask for chan %u; now 0x%lx\n",
chan_index, ses->chans_need_reconnect);
}
void
cifs_chan_clear_need_reconnect(struct cifs_ses *ses,
struct TCP_Server_Info *server)
{
unsigned int chan_index = cifs_ses_get_chan_index(ses, server);
if (chan_index == CIFS_INVAL_CHAN_INDEX)
return;
clear_bit(chan_index, &ses->chans_need_reconnect);
cifs_dbg(FYI, "Cleared reconnect bitmask for chan %u; now 0x%lx\n",
chan_index, ses->chans_need_reconnect);
}
bool
cifs_chan_needs_reconnect(struct cifs_ses *ses,
struct TCP_Server_Info *server)
{
unsigned int chan_index = cifs_ses_get_chan_index(ses, server);
if (chan_index == CIFS_INVAL_CHAN_INDEX)
return true; /* err on the safer side */
return CIFS_CHAN_NEEDS_RECONNECT(ses, chan_index);
}
bool
cifs_chan_is_iface_active(struct cifs_ses *ses,
struct TCP_Server_Info *server)
{
unsigned int chan_index = cifs_ses_get_chan_index(ses, server);
if (chan_index == CIFS_INVAL_CHAN_INDEX)
return true; /* err on the safer side */
return ses->chans[chan_index].iface &&
ses->chans[chan_index].iface->is_active;
}
/* returns number of channels added */ /* returns number of channels added */
int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses) int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
{ {
struct TCP_Server_Info *server = ses->server;
int old_chan_count, new_chan_count; int old_chan_count, new_chan_count;
int left; int left;
int i = 0;
int rc = 0; int rc = 0;
int tries = 0; int tries = 0;
struct cifs_server_iface *ifaces = NULL; size_t iface_weight = 0, iface_min_speed = 0;
size_t iface_count; struct cifs_server_iface *iface = NULL, *niface = NULL;
struct cifs_server_iface *last_iface = NULL;
spin_lock(&ses->chan_lock); spin_lock(&ses->chan_lock);
@ -82,86 +187,271 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
left = ses->chan_max - ses->chan_count; left = ses->chan_max - ses->chan_count;
if (left <= 0) { if (left <= 0) {
spin_unlock(&ses->chan_lock);
cifs_dbg(FYI, cifs_dbg(FYI,
"ses already at max_channels (%zu), nothing to open\n", "ses already at max_channels (%zu), nothing to open\n",
ses->chan_max); ses->chan_max);
spin_unlock(&ses->chan_lock);
return 0; return 0;
} }
if (ses->server->dialect < SMB30_PROT_ID) { if (server->dialect < SMB30_PROT_ID) {
spin_unlock(&ses->chan_lock); spin_unlock(&ses->chan_lock);
cifs_dbg(VFS, "multichannel is not supported on this protocol version, use 3.0 or above\n"); cifs_dbg(VFS, "multichannel is not supported on this protocol version, use 3.0 or above\n");
return 0; return 0;
} }
if (!(ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) { if (!(server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
ses->chan_max = 1;
spin_unlock(&ses->chan_lock); spin_unlock(&ses->chan_lock);
cifs_dbg(VFS, "server %s does not support multichannel\n", ses->server->hostname); cifs_server_dbg(VFS, "no multichannel support\n");
return 0; return 0;
} }
spin_unlock(&ses->chan_lock); spin_unlock(&ses->chan_lock);
/*
* Make a copy of the iface list at the time and use that
* instead so as to not hold the iface spinlock for opening
* channels
*/
spin_lock(&ses->iface_lock);
iface_count = ses->iface_count;
if (iface_count <= 0) {
spin_unlock(&ses->iface_lock);
cifs_dbg(VFS, "no iface list available to open channels\n");
return 0;
}
ifaces = kmemdup(ses->iface_list, iface_count*sizeof(*ifaces),
GFP_ATOMIC);
if (!ifaces) {
spin_unlock(&ses->iface_lock);
return 0;
}
spin_unlock(&ses->iface_lock);
/*
* Keep connecting to same, fastest, iface for all channels as
* long as its RSS. Try next fastest one if not RSS or channel
* creation fails.
*/
while (left > 0) { while (left > 0) {
struct cifs_server_iface *iface;
tries++; tries++;
if (tries > 3*ses->chan_max) { if (tries > 3*ses->chan_max) {
cifs_dbg(FYI, "too many channel open attempts (%d channels left to open)\n", cifs_dbg(VFS, "too many channel open attempts (%d channels left to open)\n",
left); left);
break; break;
} }
iface = &ifaces[i]; spin_lock(&ses->iface_lock);
if (is_ses_using_iface(ses, iface) && !iface->rss_capable) { if (!ses->iface_count) {
i = (i+1) % iface_count; spin_unlock(&ses->iface_lock);
continue; cifs_dbg(VFS, "server %s does not advertise interfaces\n",
ses->server->hostname);
break;
} }
if (!iface)
iface = list_first_entry(&ses->iface_list, struct cifs_server_iface,
iface_head);
last_iface = list_last_entry(&ses->iface_list, struct cifs_server_iface,
iface_head);
iface_min_speed = last_iface->speed;
list_for_each_entry_safe_from(iface, niface, &ses->iface_list,
iface_head) {
/* do not mix rdma and non-rdma interfaces */
if (iface->rdma_capable != ses->server->rdma)
continue;
/* skip ifaces that are unusable */
if (!iface->is_active ||
(is_ses_using_iface(ses, iface) &&
!iface->rss_capable))
continue;
/* check if we already allocated enough channels */
iface_weight = iface->speed / iface_min_speed;
if (iface->weight_fulfilled >= iface_weight)
continue;
/* take ref before unlock */
kref_get(&iface->refcount);
spin_unlock(&ses->iface_lock);
rc = cifs_ses_add_channel(cifs_sb, ses, iface); rc = cifs_ses_add_channel(cifs_sb, ses, iface);
spin_lock(&ses->iface_lock);
if (rc) { if (rc) {
cifs_dbg(FYI, "failed to open extra channel on iface#%d rc=%d\n", cifs_dbg(VFS, "failed to open extra channel on iface:%pIS rc=%d\n",
i, rc); &iface->sockaddr,
i = (i+1) % iface_count; rc);
kref_put(&iface->refcount, release_iface);
/* failure to add chan should increase weight */
iface->weight_fulfilled++;
continue; continue;
} }
cifs_dbg(FYI, "successfully opened new channel on iface#%d\n", iface->num_channels++;
i); iface->weight_fulfilled++;
cifs_dbg(VFS, "successfully opened new channel on iface:%pIS\n",
&iface->sockaddr);
break;
}
/* reached end of list. reset weight_fulfilled and start over */
if (list_entry_is_head(iface, &ses->iface_list, iface_head)) {
list_for_each_entry(iface, &ses->iface_list, iface_head)
iface->weight_fulfilled = 0;
spin_unlock(&ses->iface_lock);
iface = NULL;
continue;
}
spin_unlock(&ses->iface_lock);
left--; left--;
new_chan_count++; new_chan_count++;
} }
kfree(ifaces);
return new_chan_count - old_chan_count; return new_chan_count - old_chan_count;
} }
/*
* update the iface for the channel if necessary.
* will return 0 when iface is updated, 1 if removed, 2 otherwise
* Must be called with chan_lock held.
*/
int
cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
{
unsigned int chan_index;
size_t iface_weight = 0, iface_min_speed = 0;
struct cifs_server_iface *iface = NULL;
struct cifs_server_iface *old_iface = NULL;
struct cifs_server_iface *last_iface = NULL;
struct sockaddr_storage ss;
int retry = 0;
int rc = 0;
spin_lock(&ses->chan_lock);
chan_index = cifs_ses_get_chan_index(ses, server);
if (chan_index == CIFS_INVAL_CHAN_INDEX) {
spin_unlock(&ses->chan_lock);
return 0;
}
if (ses->chans[chan_index].iface) {
old_iface = ses->chans[chan_index].iface;
if (old_iface->is_active) {
spin_unlock(&ses->chan_lock);
return 1;
}
}
spin_unlock(&ses->chan_lock);
spin_lock(&server->srv_lock);
ss = server->dstaddr;
spin_unlock(&server->srv_lock);
spin_lock(&ses->iface_lock);
if (!ses->iface_count) {
spin_unlock(&ses->iface_lock);
cifs_dbg(VFS, "server %s does not advertise interfaces\n", ses->server->hostname);
return 0;
}
try_again:
last_iface = list_last_entry(&ses->iface_list, struct cifs_server_iface,
iface_head);
iface_min_speed = last_iface->speed;
/* then look for a new one */
list_for_each_entry(iface, &ses->iface_list, iface_head) {
if (!chan_index) {
/* if we're trying to get the updated iface for primary channel */
if (!cifs_match_ipaddr((struct sockaddr *) &ss,
(struct sockaddr *) &iface->sockaddr))
continue;
kref_get(&iface->refcount);
break;
}
/* do not mix rdma and non-rdma interfaces */
if (iface->rdma_capable != server->rdma)
continue;
if (!iface->is_active ||
(is_ses_using_iface(ses, iface) &&
!iface->rss_capable)) {
continue;
}
/* check if we already allocated enough channels */
iface_weight = iface->speed / iface_min_speed;
if (iface->weight_fulfilled >= iface_weight)
continue;
kref_get(&iface->refcount);
break;
}
if (list_entry_is_head(iface, &ses->iface_list, iface_head)) {
rc = 1;
list_for_each_entry(iface, &ses->iface_list, iface_head)
iface->weight_fulfilled = 0;
/* see if it can be satisfied in second attempt */
if (!retry++)
goto try_again;
iface = NULL;
cifs_dbg(FYI, "unable to find a suitable iface\n");
}
if (!chan_index && !iface) {
cifs_dbg(FYI, "unable to get the interface matching: %pIS\n",
&ss);
spin_unlock(&ses->iface_lock);
return 0;
}
/* now drop the ref to the current iface */
if (old_iface && iface) {
cifs_dbg(FYI, "replacing iface: %pIS with %pIS\n",
&old_iface->sockaddr,
&iface->sockaddr);
old_iface->num_channels--;
if (old_iface->weight_fulfilled)
old_iface->weight_fulfilled--;
iface->num_channels++;
iface->weight_fulfilled++;
kref_put(&old_iface->refcount, release_iface);
} else if (old_iface) {
cifs_dbg(FYI, "releasing ref to iface: %pIS\n",
&old_iface->sockaddr);
old_iface->num_channels--;
if (old_iface->weight_fulfilled)
old_iface->weight_fulfilled--;
kref_put(&old_iface->refcount, release_iface);
} else if (!chan_index) {
/* special case: update interface for primary channel */
cifs_dbg(FYI, "referencing primary channel iface: %pIS\n",
&iface->sockaddr);
iface->num_channels++;
iface->weight_fulfilled++;
} else {
WARN_ON(!iface);
cifs_dbg(FYI, "adding new iface: %pIS\n", &iface->sockaddr);
}
spin_unlock(&ses->iface_lock);
spin_lock(&ses->chan_lock);
chan_index = cifs_ses_get_chan_index(ses, server);
if (chan_index == CIFS_INVAL_CHAN_INDEX) {
spin_unlock(&ses->chan_lock);
return 0;
}
ses->chans[chan_index].iface = iface;
/* No iface is found. if secondary chan, drop connection */
if (!iface && CIFS_SERVER_IS_CHAN(server))
ses->chans[chan_index].server = NULL;
spin_unlock(&ses->chan_lock);
if (iface) {
spin_lock(&server->srv_lock);
memcpy(&server->dstaddr, &iface->sockaddr, sizeof(server->dstaddr));
spin_unlock(&server->srv_lock);
}
if (!iface && CIFS_SERVER_IS_CHAN(server))
cifs_put_tcp_session(server, false);
return rc;
}
/* /*
* If server is a channel of ses, return the corresponding enclosing * If server is a channel of ses, return the corresponding enclosing
* cifs_chan otherwise return NULL. * cifs_chan otherwise return NULL.
@ -265,9 +555,8 @@ cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
chan_server = cifs_get_tcp_session(&ctx, ses->server); chan_server = cifs_get_tcp_session(&ctx, ses->server);
mutex_lock(&ses->session_mutex);
spin_lock(&ses->chan_lock); spin_lock(&ses->chan_lock);
chan = ses->binding_chan = &ses->chans[ses->chan_count]; chan = &ses->chans[ses->chan_count];
chan->server = chan_server; chan->server = chan_server;
if (IS_ERR(chan->server)) { if (IS_ERR(chan->server)) {
rc = PTR_ERR(chan->server); rc = PTR_ERR(chan->server);
@ -275,8 +564,16 @@ cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
spin_unlock(&ses->chan_lock); spin_unlock(&ses->chan_lock);
goto out; goto out;
} }
chan->iface = iface;
ses->chan_count++;
atomic_set(&ses->chan_seq, 0);
/* Mark this channel as needing connect/setup */
cifs_chan_set_need_reconnect(ses, chan->server);
spin_unlock(&ses->chan_lock); spin_unlock(&ses->chan_lock);
mutex_lock(&ses->session_mutex);
/* /*
* We need to allocate the server crypto now as we will need * We need to allocate the server crypto now as we will need
* to sign packets before we generate the channel signing key * to sign packets before we generate the channel signing key
@ -285,58 +582,46 @@ cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
rc = smb311_crypto_shash_allocate(chan->server); rc = smb311_crypto_shash_allocate(chan->server);
if (rc) { if (rc) {
cifs_dbg(VFS, "%s: crypto alloc failed\n", __func__); cifs_dbg(VFS, "%s: crypto alloc failed\n", __func__);
mutex_unlock(&ses->session_mutex);
goto out; goto out;
} }
ses->binding = true; rc = cifs_negotiate_protocol(xid, ses, chan->server);
rc = cifs_negotiate_protocol(xid, ses); if (!rc)
if (rc) rc = cifs_setup_session(xid, ses, chan->server, cifs_sb->local_nls);
goto out;
rc = cifs_setup_session(xid, ses, cifs_sb->local_nls);
if (rc)
goto out;
/* success, put it on the list
* XXX: sharing ses between 2 tcp servers is not possible, the
* way "internal" linked lists works in linux makes element
* only able to belong to one list
*
* the binding session is already established so the rest of
* the code should be able to look it up, no need to add the
* ses to the new server.
*/
spin_lock(&ses->chan_lock);
ses->chan_count++;
atomic_set(&ses->chan_seq, 0);
spin_unlock(&ses->chan_lock);
out:
ses->binding = false;
ses->binding_chan = NULL;
mutex_unlock(&ses->session_mutex); mutex_unlock(&ses->session_mutex);
if (rc && chan->server) out:
cifs_put_tcp_session(chan->server, 0); if (rc && chan->server) {
/*
* we should avoid race with these delayed works before we
* remove this channel
*/
cancel_delayed_work_sync(&chan->server->echo);
cancel_delayed_work_sync(&chan->server->reconnect);
spin_lock(&ses->chan_lock);
/* we rely on all bits beyond chan_count to be clear */
cifs_chan_clear_need_reconnect(ses, chan->server);
ses->chan_count--;
/*
* chan_count should never reach 0 as at least the primary
* channel is always allocated
*/
WARN_ON(ses->chan_count < 1);
spin_unlock(&ses->chan_lock);
cifs_put_tcp_session(chan->server, 0);
}
free_xid(xid);
return rc; return rc;
} }
/* Mark all session channels for reconnect */ static __u32 cifs_ssetup_hdr(struct cifs_ses *ses,
void cifs_ses_mark_for_reconnect(struct cifs_ses *ses) struct TCP_Server_Info *server,
{ SESSION_SETUP_ANDX *pSMB)
int i;
for (i = 0; i < ses->chan_count; i++) {
spin_lock(&GlobalMid_Lock);
if (ses->chans[i].server->tcpStatus != CifsExiting)
ses->chans[i].server->tcpStatus = CifsNeedReconnect;
spin_unlock(&GlobalMid_Lock);
}
}
static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, SESSION_SETUP_ANDX *pSMB)
{ {
__u32 capabilities = 0; __u32 capabilities = 0;
@ -349,7 +634,7 @@ static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, SESSION_SETUP_ANDX *pSMB)
pSMB->req.MaxBufferSize = cpu_to_le16(min_t(u32, pSMB->req.MaxBufferSize = cpu_to_le16(min_t(u32,
CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4, CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4,
USHRT_MAX)); USHRT_MAX));
pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq); pSMB->req.MaxMpxCount = cpu_to_le16(server->maxReq);
pSMB->req.VcNumber = cpu_to_le16(1); pSMB->req.VcNumber = cpu_to_le16(1);
/* Now no need to set SMBFLG_CASELESS or obsolete CANONICAL PATH */ /* Now no need to set SMBFLG_CASELESS or obsolete CANONICAL PATH */
@ -360,7 +645,7 @@ static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, SESSION_SETUP_ANDX *pSMB)
capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS | capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS |
CAP_LARGE_WRITE_X | CAP_LARGE_READ_X; CAP_LARGE_WRITE_X | CAP_LARGE_READ_X;
if (ses->server->sign) if (server->sign)
pSMB->req.hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE; pSMB->req.hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
if (ses->capabilities & CAP_UNICODE) { if (ses->capabilities & CAP_UNICODE) {
@ -702,10 +987,10 @@ static inline void cifs_security_buffer_from_str(SECURITY_BUFFER *pbuf,
int build_ntlmssp_negotiate_blob(unsigned char **pbuffer, int build_ntlmssp_negotiate_blob(unsigned char **pbuffer,
u16 *buflen, u16 *buflen,
struct cifs_ses *ses, struct cifs_ses *ses,
struct TCP_Server_Info *server,
const struct nls_table *nls_cp) const struct nls_table *nls_cp)
{ {
int rc = 0; int rc = 0;
struct TCP_Server_Info *server = cifs_ses_server(ses);
NEGOTIATE_MESSAGE *sec_blob; NEGOTIATE_MESSAGE *sec_blob;
__u32 flags; __u32 flags;
unsigned char *tmp; unsigned char *tmp;
@ -759,6 +1044,7 @@ setup_ntlm_neg_ret:
int build_ntlmssp_auth_blob(unsigned char **pbuffer, int build_ntlmssp_auth_blob(unsigned char **pbuffer,
u16 *buflen, u16 *buflen,
struct cifs_ses *ses, struct cifs_ses *ses,
struct TCP_Server_Info *server,
const struct nls_table *nls_cp) const struct nls_table *nls_cp)
{ {
int rc; int rc;
@ -916,6 +1202,7 @@ cifs_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested)
struct sess_data { struct sess_data {
unsigned int xid; unsigned int xid;
struct cifs_ses *ses; struct cifs_ses *ses;
struct TCP_Server_Info *server;
struct nls_table *nls_cp; struct nls_table *nls_cp;
void (*func)(struct sess_data *); void (*func)(struct sess_data *);
int result; int result;
@ -982,31 +1269,27 @@ static int
sess_establish_session(struct sess_data *sess_data) sess_establish_session(struct sess_data *sess_data)
{ {
struct cifs_ses *ses = sess_data->ses; struct cifs_ses *ses = sess_data->ses;
struct TCP_Server_Info *server = sess_data->server;
mutex_lock(&ses->server->srv_mutex); cifs_server_lock(server);
if (!ses->server->session_estab) { if (!server->session_estab) {
if (ses->server->sign) { if (server->sign) {
ses->server->session_key.response = server->session_key.response =
kmemdup(ses->auth_key.response, kmemdup(ses->auth_key.response,
ses->auth_key.len, GFP_KERNEL); ses->auth_key.len, GFP_KERNEL);
if (!ses->server->session_key.response) { if (!server->session_key.response) {
mutex_unlock(&ses->server->srv_mutex); cifs_server_unlock(server);
return -ENOMEM; return -ENOMEM;
} }
ses->server->session_key.len = server->session_key.len =
ses->auth_key.len; ses->auth_key.len;
} }
ses->server->sequence_number = 0x2; server->sequence_number = 0x2;
ses->server->session_estab = true; server->session_estab = true;
} }
mutex_unlock(&ses->server->srv_mutex); cifs_server_unlock(server);
cifs_dbg(FYI, "CIFS session established successfully\n"); cifs_dbg(FYI, "CIFS session established successfully\n");
spin_lock(&GlobalMid_Lock);
ses->status = CifsGood;
ses->need_reconnect = false;
spin_unlock(&GlobalMid_Lock);
return 0; return 0;
} }
@ -1048,6 +1331,7 @@ sess_auth_lanman(struct sess_data *sess_data)
SESSION_SETUP_ANDX *pSMB; SESSION_SETUP_ANDX *pSMB;
char *bcc_ptr; char *bcc_ptr;
struct cifs_ses *ses = sess_data->ses; struct cifs_ses *ses = sess_data->ses;
struct TCP_Server_Info *server = sess_data->server;
char lnm_session_key[CIFS_AUTH_RESP_SIZE]; char lnm_session_key[CIFS_AUTH_RESP_SIZE];
__u16 bytes_remaining; __u16 bytes_remaining;
@ -1059,7 +1343,7 @@ sess_auth_lanman(struct sess_data *sess_data)
pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
bcc_ptr = sess_data->iov[2].iov_base; bcc_ptr = sess_data->iov[2].iov_base;
(void)cifs_ssetup_hdr(ses, pSMB); (void)cifs_ssetup_hdr(ses, server, pSMB);
pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE; pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE;
@ -1154,6 +1438,7 @@ sess_auth_ntlm(struct sess_data *sess_data)
SESSION_SETUP_ANDX *pSMB; SESSION_SETUP_ANDX *pSMB;
char *bcc_ptr; char *bcc_ptr;
struct cifs_ses *ses = sess_data->ses; struct cifs_ses *ses = sess_data->ses;
struct TCP_Server_Info *server = sess_data->server;
__u32 capabilities; __u32 capabilities;
__u16 bytes_remaining; __u16 bytes_remaining;
@ -1165,7 +1450,7 @@ sess_auth_ntlm(struct sess_data *sess_data)
pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
bcc_ptr = sess_data->iov[2].iov_base; bcc_ptr = sess_data->iov[2].iov_base;
capabilities = cifs_ssetup_hdr(ses, pSMB); capabilities = cifs_ssetup_hdr(ses, server, pSMB);
pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities); pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
if (ses->user_name != NULL) { if (ses->user_name != NULL) {
@ -1264,6 +1549,7 @@ sess_auth_ntlmv2(struct sess_data *sess_data)
SESSION_SETUP_ANDX *pSMB; SESSION_SETUP_ANDX *pSMB;
char *bcc_ptr; char *bcc_ptr;
struct cifs_ses *ses = sess_data->ses; struct cifs_ses *ses = sess_data->ses;
struct TCP_Server_Info *server = sess_data->server;
__u32 capabilities; __u32 capabilities;
__u16 bytes_remaining; __u16 bytes_remaining;
@ -1275,7 +1561,7 @@ sess_auth_ntlmv2(struct sess_data *sess_data)
pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
bcc_ptr = sess_data->iov[2].iov_base; bcc_ptr = sess_data->iov[2].iov_base;
capabilities = cifs_ssetup_hdr(ses, pSMB); capabilities = cifs_ssetup_hdr(ses, server, pSMB);
pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities); pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
@ -1373,6 +1659,7 @@ sess_auth_kerberos(struct sess_data *sess_data)
SESSION_SETUP_ANDX *pSMB; SESSION_SETUP_ANDX *pSMB;
char *bcc_ptr; char *bcc_ptr;
struct cifs_ses *ses = sess_data->ses; struct cifs_ses *ses = sess_data->ses;
struct TCP_Server_Info *server = sess_data->server;
__u32 capabilities; __u32 capabilities;
__u16 bytes_remaining; __u16 bytes_remaining;
struct key *spnego_key = NULL; struct key *spnego_key = NULL;
@ -1387,9 +1674,9 @@ sess_auth_kerberos(struct sess_data *sess_data)
pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
bcc_ptr = sess_data->iov[2].iov_base; bcc_ptr = sess_data->iov[2].iov_base;
capabilities = cifs_ssetup_hdr(ses, pSMB); capabilities = cifs_ssetup_hdr(ses, server, pSMB);
spnego_key = cifs_get_spnego_key(ses); spnego_key = cifs_get_spnego_key(ses, server);
if (IS_ERR(spnego_key)) { if (IS_ERR(spnego_key)) {
rc = PTR_ERR(spnego_key); rc = PTR_ERR(spnego_key);
spnego_key = NULL; spnego_key = NULL;
@ -1513,12 +1800,13 @@ _sess_auth_rawntlmssp_assemble_req(struct sess_data *sess_data)
{ {
SESSION_SETUP_ANDX *pSMB; SESSION_SETUP_ANDX *pSMB;
struct cifs_ses *ses = sess_data->ses; struct cifs_ses *ses = sess_data->ses;
struct TCP_Server_Info *server = sess_data->server;
__u32 capabilities; __u32 capabilities;
char *bcc_ptr; char *bcc_ptr;
pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
capabilities = cifs_ssetup_hdr(ses, pSMB); capabilities = cifs_ssetup_hdr(ses, server, pSMB);
if ((pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) == 0) { if ((pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) == 0) {
cifs_dbg(VFS, "NTLMSSP requires Unicode support\n"); cifs_dbg(VFS, "NTLMSSP requires Unicode support\n");
return -ENOSYS; return -ENOSYS;
@ -1552,6 +1840,7 @@ sess_auth_rawntlmssp_negotiate(struct sess_data *sess_data)
struct smb_hdr *smb_buf; struct smb_hdr *smb_buf;
SESSION_SETUP_ANDX *pSMB; SESSION_SETUP_ANDX *pSMB;
struct cifs_ses *ses = sess_data->ses; struct cifs_ses *ses = sess_data->ses;
struct TCP_Server_Info *server = sess_data->server;
__u16 bytes_remaining; __u16 bytes_remaining;
char *bcc_ptr; char *bcc_ptr;
unsigned char *ntlmsspblob = NULL; unsigned char *ntlmsspblob = NULL;
@ -1579,7 +1868,7 @@ sess_auth_rawntlmssp_negotiate(struct sess_data *sess_data)
/* Build security blob before we assemble the request */ /* Build security blob before we assemble the request */
rc = build_ntlmssp_negotiate_blob(&ntlmsspblob, rc = build_ntlmssp_negotiate_blob(&ntlmsspblob,
&blob_len, ses, &blob_len, ses, server,
sess_data->nls_cp); sess_data->nls_cp);
if (rc) if (rc)
goto out_free_ntlmsspblob; goto out_free_ntlmsspblob;
@ -1657,6 +1946,7 @@ sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data)
struct smb_hdr *smb_buf; struct smb_hdr *smb_buf;
SESSION_SETUP_ANDX *pSMB; SESSION_SETUP_ANDX *pSMB;
struct cifs_ses *ses = sess_data->ses; struct cifs_ses *ses = sess_data->ses;
struct TCP_Server_Info *server = sess_data->server;
__u16 bytes_remaining; __u16 bytes_remaining;
char *bcc_ptr; char *bcc_ptr;
unsigned char *ntlmsspblob = NULL; unsigned char *ntlmsspblob = NULL;
@ -1673,7 +1963,8 @@ sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data)
pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base; pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
smb_buf = (struct smb_hdr *)pSMB; smb_buf = (struct smb_hdr *)pSMB;
rc = build_ntlmssp_auth_blob(&ntlmsspblob, rc = build_ntlmssp_auth_blob(&ntlmsspblob,
&blob_len, ses, sess_data->nls_cp); &blob_len, ses, server,
sess_data->nls_cp);
if (rc) if (rc)
goto out_free_ntlmsspblob; goto out_free_ntlmsspblob;
sess_data->iov[1].iov_len = blob_len; sess_data->iov[1].iov_len = blob_len;
@ -1757,11 +2048,13 @@ out:
sess_data->result = rc; sess_data->result = rc;
} }
static int select_sec(struct cifs_ses *ses, struct sess_data *sess_data) static int select_sec(struct sess_data *sess_data)
{ {
int type; int type;
struct cifs_ses *ses = sess_data->ses;
struct TCP_Server_Info *server = sess_data->server;
type = cifs_select_sectype(ses->server, ses->sectype); type = cifs_select_sectype(server, ses->sectype);
cifs_dbg(FYI, "sess setup type %d\n", type); cifs_dbg(FYI, "sess setup type %d\n", type);
if (type == Unspecified) { if (type == Unspecified) {
cifs_dbg(VFS, "Unable to select appropriate authentication method!\n"); cifs_dbg(VFS, "Unable to select appropriate authentication method!\n");
@ -1807,6 +2100,7 @@ static int select_sec(struct cifs_ses *ses, struct sess_data *sess_data)
} }
int CIFS_SessSetup(const unsigned int xid, struct cifs_ses *ses, int CIFS_SessSetup(const unsigned int xid, struct cifs_ses *ses,
struct TCP_Server_Info *server,
const struct nls_table *nls_cp) const struct nls_table *nls_cp)
{ {
int rc = 0; int rc = 0;
@ -1821,15 +2115,16 @@ int CIFS_SessSetup(const unsigned int xid, struct cifs_ses *ses,
if (!sess_data) if (!sess_data)
return -ENOMEM; return -ENOMEM;
rc = select_sec(ses, sess_data);
if (rc)
goto out;
sess_data->xid = xid; sess_data->xid = xid;
sess_data->ses = ses; sess_data->ses = ses;
sess_data->server = server;
sess_data->buf0_type = CIFS_NO_BUFFER; sess_data->buf0_type = CIFS_NO_BUFFER;
sess_data->nls_cp = (struct nls_table *) nls_cp; sess_data->nls_cp = (struct nls_table *) nls_cp;
rc = select_sec(sess_data);
if (rc)
goto out;
while (sess_data->func) while (sess_data->func)
sess_data->func(sess_data); sess_data->func(sess_data);

View File

@ -49,10 +49,10 @@ send_nt_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
in_buf->WordCount = 0; in_buf->WordCount = 0;
put_bcc(0, in_buf); put_bcc(0, in_buf);
mutex_lock(&server->srv_mutex); cifs_server_lock(server);
rc = cifs_sign_smb(in_buf, server, &mid->sequence_number); rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
if (rc) { if (rc) {
mutex_unlock(&server->srv_mutex); cifs_server_unlock(server);
return rc; return rc;
} }
@ -66,7 +66,7 @@ send_nt_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
if (rc < 0) if (rc < 0)
server->sequence_number--; server->sequence_number--;
mutex_unlock(&server->srv_mutex); cifs_server_unlock(server);
cifs_dbg(FYI, "issued NT_CANCEL for mid %u, rc = %d\n", cifs_dbg(FYI, "issued NT_CANCEL for mid %u, rc = %d\n",
get_mid(in_buf), rc); get_mid(in_buf), rc);
@ -103,17 +103,17 @@ cifs_find_mid(struct TCP_Server_Info *server, char *buffer)
struct smb_hdr *buf = (struct smb_hdr *)buffer; struct smb_hdr *buf = (struct smb_hdr *)buffer;
struct mid_q_entry *mid; struct mid_q_entry *mid;
spin_lock(&GlobalMid_Lock); spin_lock(&server->mid_lock);
list_for_each_entry(mid, &server->pending_mid_q, qhead) { list_for_each_entry(mid, &server->pending_mid_q, qhead) {
if (compare_mid(mid->mid, buf) && if (compare_mid(mid->mid, buf) &&
mid->mid_state == MID_REQUEST_SUBMITTED && mid->mid_state == MID_REQUEST_SUBMITTED &&
le16_to_cpu(mid->command) == buf->Command) { le16_to_cpu(mid->command) == buf->Command) {
kref_get(&mid->refcount); kref_get(&mid->refcount);
spin_unlock(&GlobalMid_Lock); spin_unlock(&server->mid_lock);
return mid; return mid;
} }
} }
spin_unlock(&GlobalMid_Lock); spin_unlock(&server->mid_lock);
return NULL; return NULL;
} }
@ -175,9 +175,9 @@ cifs_get_next_mid(struct TCP_Server_Info *server)
{ {
__u64 mid = 0; __u64 mid = 0;
__u16 last_mid, cur_mid; __u16 last_mid, cur_mid;
bool collision; bool collision, reconnect = false;
spin_lock(&GlobalMid_Lock); spin_lock(&server->mid_lock);
/* mid is 16 bit only for CIFS/SMB */ /* mid is 16 bit only for CIFS/SMB */
cur_mid = (__u16)((server->CurrentMid) & 0xffff); cur_mid = (__u16)((server->CurrentMid) & 0xffff);
@ -227,7 +227,7 @@ cifs_get_next_mid(struct TCP_Server_Info *server)
* an eventual reconnect to clean out the pending_mid_q. * an eventual reconnect to clean out the pending_mid_q.
*/ */
if (num_mids > 32768) if (num_mids > 32768)
server->tcpStatus = CifsNeedReconnect; reconnect = true;
if (!collision) { if (!collision) {
mid = (__u64)cur_mid; mid = (__u64)cur_mid;
@ -236,7 +236,12 @@ cifs_get_next_mid(struct TCP_Server_Info *server)
} }
cur_mid++; cur_mid++;
} }
spin_unlock(&GlobalMid_Lock); spin_unlock(&server->mid_lock);
if (reconnect) {
cifs_signal_cifsd_for_reconnect(server, false);
}
return mid; return mid;
} }
@ -426,14 +431,16 @@ cifs_need_neg(struct TCP_Server_Info *server)
} }
static int static int
cifs_negotiate(const unsigned int xid, struct cifs_ses *ses) cifs_negotiate(const unsigned int xid,
struct cifs_ses *ses,
struct TCP_Server_Info *server)
{ {
int rc; int rc;
rc = CIFSSMBNegotiate(xid, ses); rc = CIFSSMBNegotiate(xid, ses, server);
if (rc == -EAGAIN) { if (rc == -EAGAIN) {
/* retry only once on 1st time connection */ /* retry only once on 1st time connection */
set_credits(ses->server, 1); set_credits(server, 1);
rc = CIFSSMBNegotiate(xid, ses); rc = CIFSSMBNegotiate(xid, ses, server);
if (rc == -EAGAIN) if (rc == -EAGAIN)
rc = -EHOSTDOWN; rc = -EHOSTDOWN;
} }

View File

@ -61,7 +61,6 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
nr_ioctl_req.Reserved = 0; nr_ioctl_req.Reserved = 0;
rc = SMB2_ioctl(xid, oparms->tcon, fid->persistent_fid, rc = SMB2_ioctl(xid, oparms->tcon, fid->persistent_fid,
fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY, fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY,
true /* is_fsctl */,
(char *)&nr_ioctl_req, sizeof(nr_ioctl_req), (char *)&nr_ioctl_req, sizeof(nr_ioctl_req),
CIFSMaxBufSize, NULL, NULL /* no return info */); CIFSMaxBufSize, NULL, NULL /* no return info */);
if (rc == -EOPNOTSUPP) { if (rc == -EOPNOTSUPP) {

View File

@ -48,6 +48,7 @@
#define SMB2_HMACSHA256_SIZE (32) #define SMB2_HMACSHA256_SIZE (32)
#define SMB2_CMACAES_SIZE (16) #define SMB2_CMACAES_SIZE (16)
#define SMB3_SIGNKEY_SIZE (16) #define SMB3_SIGNKEY_SIZE (16)
#define SMB3_GCM128_CRYPTKEY_SIZE (16)
#define SMB3_GCM256_CRYPTKEY_SIZE (32) #define SMB3_GCM256_CRYPTKEY_SIZE (32)
/* Maximum buffer size value we can send with 1 credit */ /* Maximum buffer size value we can send with 1 credit */

View File

@ -729,25 +729,27 @@ smb2_set_file_info(struct inode *inode, const char *full_path,
FILE_BASIC_INFO *buf, const unsigned int xid) FILE_BASIC_INFO *buf, const unsigned int xid)
{ {
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifsFileInfo *cfile = NULL;
struct tcon_link *tlink; struct tcon_link *tlink;
struct cifs_tcon *tcon; struct cifs_tcon *tcon;
struct cifsFileInfo *cfile; int rc = 0;
int rc;
if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) &&
(buf->LastWriteTime == 0) && (buf->ChangeTime == 0) &&
(buf->Attributes == 0))
return 0; /* would be a no op, no sense sending this */
tlink = cifs_sb_tlink(cifs_sb); tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink)) if (IS_ERR(tlink))
return PTR_ERR(tlink); return PTR_ERR(tlink);
tcon = tlink_tcon(tlink); tcon = tlink_tcon(tlink);
if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) &&
(buf->LastWriteTime == 0) && (buf->ChangeTime == 0)) {
if (buf->Attributes == 0)
goto out; /* would be a no op, no sense sending this */
cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile); cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
}
rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
FILE_WRITE_ATTRIBUTES, FILE_OPEN, FILE_WRITE_ATTRIBUTES, FILE_OPEN,
0, ACL_NO_MODE, buf, SMB2_OP_SET_INFO, cfile); 0, ACL_NO_MODE, buf, SMB2_OP_SET_INFO, cfile);
out:
cifs_put_tlink(tlink); cifs_put_tlink(tlink);
return rc; return rc;
} }

View File

@ -225,7 +225,7 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *server)
} }
} }
calc_len = smb2_calc_size(buf, server); calc_len = smb2_calc_size(buf);
/* For SMB2_IOCTL, OutputOffset and OutputLength are optional, so might /* For SMB2_IOCTL, OutputOffset and OutputLength are optional, so might
* be 0, and not a real miscalculation */ * be 0, and not a real miscalculation */
@ -413,9 +413,9 @@ smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *shdr)
* portion, the number of word parameters and the data portion of the message. * portion, the number of word parameters and the data portion of the message.
*/ */
unsigned int unsigned int
smb2_calc_size(void *buf, struct TCP_Server_Info *srvr) smb2_calc_size(void *buf)
{ {
struct smb2_pdu *pdu = (struct smb2_pdu *)buf; struct smb2_pdu *pdu = buf;
struct smb2_hdr *shdr = &pdu->hdr; struct smb2_hdr *shdr = &pdu->hdr;
int offset; /* the offset from the beginning of SMB to data area */ int offset; /* the offset from the beginning of SMB to data area */
int data_length; /* the length of the variable length data area */ int data_length; /* the length of the variable length data area */
@ -611,20 +611,22 @@ smb2_tcon_find_pending_open_lease(struct cifs_tcon *tcon,
} }
static bool static bool
smb2_is_valid_lease_break(char *buffer) smb2_is_valid_lease_break(char *buffer, struct TCP_Server_Info *server)
{ {
struct smb2_lease_break *rsp = (struct smb2_lease_break *)buffer; struct smb2_lease_break *rsp = (struct smb2_lease_break *)buffer;
struct TCP_Server_Info *server; struct TCP_Server_Info *pserver;
struct cifs_ses *ses; struct cifs_ses *ses;
struct cifs_tcon *tcon; struct cifs_tcon *tcon;
struct cifs_pending_open *open; struct cifs_pending_open *open;
cifs_dbg(FYI, "Checking for lease break\n"); cifs_dbg(FYI, "Checking for lease break\n");
/* If server is a channel, select the primary channel */
pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
/* look up tcon based on tid & uid */ /* look up tcon based on tid & uid */
spin_lock(&cifs_tcp_ses_lock); spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
spin_lock(&tcon->open_file_lock); spin_lock(&tcon->open_file_lock);
cifs_stats_inc( cifs_stats_inc(
@ -665,7 +667,6 @@ smb2_is_valid_lease_break(char *buffer)
} }
} }
} }
}
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&cifs_tcp_ses_lock);
cifs_dbg(FYI, "Can not process lease break - no lease matched\n"); cifs_dbg(FYI, "Can not process lease break - no lease matched\n");
return false; return false;
@ -689,7 +690,7 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
if (rsp->StructureSize != if (rsp->StructureSize !=
smb2_rsp_struct_sizes[SMB2_OPLOCK_BREAK_HE]) { smb2_rsp_struct_sizes[SMB2_OPLOCK_BREAK_HE]) {
if (le16_to_cpu(rsp->StructureSize) == 44) if (le16_to_cpu(rsp->StructureSize) == 44)
return smb2_is_valid_lease_break(buffer); return smb2_is_valid_lease_break(buffer, server);
else else
return false; return false;
} }
@ -865,12 +866,12 @@ smb2_handle_cancelled_mid(struct mid_q_entry *mid, struct TCP_Server_Info *serve
* SMB2 header. * SMB2 header.
*/ */
int int
smb311_update_preauth_hash(struct cifs_ses *ses, struct kvec *iov, int nvec) smb311_update_preauth_hash(struct cifs_ses *ses, struct TCP_Server_Info *server,
struct kvec *iov, int nvec)
{ {
int i, rc; int i, rc;
struct sdesc *d; struct sdesc *d;
struct smb2_hdr *hdr; struct smb2_hdr *hdr;
struct TCP_Server_Info *server = cifs_ses_server(ses);
hdr = (struct smb2_hdr *)iov[0].iov_base; hdr = (struct smb2_hdr *)iov[0].iov_base;
/* neg prot are always taken */ /* neg prot are always taken */

View File

@ -133,9 +133,13 @@ smb2_add_credits(struct TCP_Server_Info *server,
optype, scredits, add); optype, scredits, add);
} }
spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsNeedReconnect if (server->tcpStatus == CifsNeedReconnect
|| server->tcpStatus == CifsExiting) || server->tcpStatus == CifsExiting) {
spin_unlock(&server->srv_lock);
return; return;
}
spin_unlock(&server->srv_lock);
switch (rc) { switch (rc) {
case -1: case -1:
@ -210,6 +214,16 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
spin_lock(&server->req_lock); spin_lock(&server->req_lock);
while (1) { while (1) {
spin_unlock(&server->req_lock);
spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsExiting) {
spin_unlock(&server->srv_lock);
return -ENOENT;
}
spin_unlock(&server->srv_lock);
spin_lock(&server->req_lock);
if (server->credits <= 0) { if (server->credits <= 0) {
spin_unlock(&server->req_lock); spin_unlock(&server->req_lock);
cifs_num_waiters_inc(server); cifs_num_waiters_inc(server);
@ -220,11 +234,6 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
return rc; return rc;
spin_lock(&server->req_lock); spin_lock(&server->req_lock);
} else { } else {
if (server->tcpStatus == CifsExiting) {
spin_unlock(&server->req_lock);
return -ENOENT;
}
scredits = server->credits; scredits = server->credits;
/* can deadlock with reopen */ /* can deadlock with reopen */
if (scredits <= 8) { if (scredits <= 8) {
@ -318,19 +327,19 @@ smb2_get_next_mid(struct TCP_Server_Info *server)
{ {
__u64 mid; __u64 mid;
/* for SMB2 we need the current value */ /* for SMB2 we need the current value */
spin_lock(&GlobalMid_Lock); spin_lock(&server->mid_lock);
mid = server->CurrentMid++; mid = server->CurrentMid++;
spin_unlock(&GlobalMid_Lock); spin_unlock(&server->mid_lock);
return mid; return mid;
} }
static void static void
smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val) smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
{ {
spin_lock(&GlobalMid_Lock); spin_lock(&server->mid_lock);
if (server->CurrentMid >= val) if (server->CurrentMid >= val)
server->CurrentMid -= val; server->CurrentMid -= val;
spin_unlock(&GlobalMid_Lock); spin_unlock(&server->mid_lock);
} }
static struct mid_q_entry * static struct mid_q_entry *
@ -345,7 +354,7 @@ __smb2_find_mid(struct TCP_Server_Info *server, char *buf, bool dequeue)
return NULL; return NULL;
} }
spin_lock(&GlobalMid_Lock); spin_lock(&server->mid_lock);
list_for_each_entry(mid, &server->pending_mid_q, qhead) { list_for_each_entry(mid, &server->pending_mid_q, qhead) {
if ((mid->mid == wire_mid) && if ((mid->mid == wire_mid) &&
(mid->mid_state == MID_REQUEST_SUBMITTED) && (mid->mid_state == MID_REQUEST_SUBMITTED) &&
@ -355,11 +364,11 @@ __smb2_find_mid(struct TCP_Server_Info *server, char *buf, bool dequeue)
list_del_init(&mid->qhead); list_del_init(&mid->qhead);
mid->mid_flags |= MID_DELETED; mid->mid_flags |= MID_DELETED;
} }
spin_unlock(&GlobalMid_Lock); spin_unlock(&server->mid_lock);
return mid; return mid;
} }
} }
spin_unlock(&GlobalMid_Lock); spin_unlock(&server->mid_lock);
return NULL; return NULL;
} }
@ -386,7 +395,7 @@ smb2_dump_detail(void *buf, struct TCP_Server_Info *server)
shdr->Id.SyncId.ProcessId); shdr->Id.SyncId.ProcessId);
if (!server->ops->check_message(buf, server->total_read, server)) { if (!server->ops->check_message(buf, server->total_read, server)) {
cifs_server_dbg(VFS, "smb buf %p len %u\n", buf, cifs_server_dbg(VFS, "smb buf %p len %u\n", buf,
server->ops->calc_smb_size(buf, server)); server->ops->calc_smb_size(buf));
} }
#endif #endif
} }
@ -398,14 +407,16 @@ smb2_need_neg(struct TCP_Server_Info *server)
} }
static int static int
smb2_negotiate(const unsigned int xid, struct cifs_ses *ses) smb2_negotiate(const unsigned int xid,
struct cifs_ses *ses,
struct TCP_Server_Info *server)
{ {
int rc; int rc;
spin_lock(&GlobalMid_Lock); spin_lock(&server->mid_lock);
cifs_ses_server(ses)->CurrentMid = 0; server->CurrentMid = 0;
spin_unlock(&GlobalMid_Lock); spin_unlock(&server->mid_lock);
rc = SMB2_negotiate(xid, ses); rc = SMB2_negotiate(xid, ses, server);
/* BB we probably don't need to retry with modern servers */ /* BB we probably don't need to retry with modern servers */
if (rc == -EAGAIN) if (rc == -EAGAIN)
rc = -EHOSTDOWN; rc = -EHOSTDOWN;
@ -510,31 +521,154 @@ smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
static int static int
parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf, parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
size_t buf_len, size_t buf_len, struct cifs_ses *ses, bool in_mount)
struct cifs_server_iface **iface_list,
size_t *iface_count)
{ {
struct network_interface_info_ioctl_rsp *p; struct network_interface_info_ioctl_rsp *p;
struct sockaddr_in *addr4; struct sockaddr_in *addr4;
struct sockaddr_in6 *addr6; struct sockaddr_in6 *addr6;
struct iface_info_ipv4 *p4; struct iface_info_ipv4 *p4;
struct iface_info_ipv6 *p6; struct iface_info_ipv6 *p6;
struct cifs_server_iface *info; struct cifs_server_iface *info = NULL, *iface = NULL, *niface = NULL;
struct cifs_server_iface tmp_iface;
ssize_t bytes_left; ssize_t bytes_left;
size_t next = 0; size_t next = 0;
int nb_iface = 0; int nb_iface = 0;
int rc = 0; int rc = 0, ret = 0;
*iface_list = NULL;
*iface_count = 0;
/*
* Fist pass: count and sanity check
*/
bytes_left = buf_len; bytes_left = buf_len;
p = buf; p = buf;
spin_lock(&ses->iface_lock);
/* do not query too frequently, this time with lock held */
if (ses->iface_last_update &&
time_before(jiffies, ses->iface_last_update +
(SMB_INTERFACE_POLL_INTERVAL * HZ))) {
spin_unlock(&ses->iface_lock);
return 0;
}
/*
* Go through iface_list and mark them as inactive
*/
list_for_each_entry_safe(iface, niface, &ses->iface_list,
iface_head)
iface->is_active = 0;
spin_unlock(&ses->iface_lock);
/*
* Samba server e.g. can return an empty interface list in some cases,
* which would only be a problem if we were requesting multichannel
*/
if (bytes_left == 0) {
/* avoid spamming logs every 10 minutes, so log only in mount */
if ((ses->chan_max > 1) && in_mount)
cifs_dbg(VFS,
"multichannel not available\n"
"Empty network interface list returned by server %s\n",
ses->server->hostname);
rc = -EINVAL;
ses->iface_last_update = jiffies;
goto out;
}
while (bytes_left >= sizeof(*p)) { while (bytes_left >= sizeof(*p)) {
memset(&tmp_iface, 0, sizeof(tmp_iface));
/* default to 1Gbps when link speed is unset */
tmp_iface.speed = le64_to_cpu(p->LinkSpeed) ?: 1000000000;
tmp_iface.rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0;
tmp_iface.rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ? 1 : 0;
switch (p->Family) {
/*
* The kernel and wire socket structures have the same
* layout and use network byte order but make the
* conversion explicit in case either one changes.
*/
case INTERNETWORK:
addr4 = (struct sockaddr_in *)&tmp_iface.sockaddr;
p4 = (struct iface_info_ipv4 *)p->Buffer;
addr4->sin_family = AF_INET;
memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);
/* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */
addr4->sin_port = cpu_to_be16(CIFS_PORT);
cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__,
&addr4->sin_addr);
break;
case INTERNETWORKV6:
addr6 = (struct sockaddr_in6 *)&tmp_iface.sockaddr;
p6 = (struct iface_info_ipv6 *)p->Buffer;
addr6->sin6_family = AF_INET6;
memcpy(&addr6->sin6_addr, &p6->IPv6Address, 16);
/* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */
addr6->sin6_flowinfo = 0;
addr6->sin6_scope_id = 0;
addr6->sin6_port = cpu_to_be16(CIFS_PORT);
cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__,
&addr6->sin6_addr);
break;
default:
cifs_dbg(VFS,
"%s: skipping unsupported socket family\n",
__func__);
goto next_iface;
}
/*
* The iface_list is assumed to be sorted by speed.
* Check if the new interface exists in that list.
* NEVER change iface. it could be in use.
* Add a new one instead
*/
spin_lock(&ses->iface_lock);
iface = niface = NULL;
list_for_each_entry_safe(iface, niface, &ses->iface_list,
iface_head) {
ret = iface_cmp(iface, &tmp_iface);
if (!ret) {
iface->is_active = 1;
spin_unlock(&ses->iface_lock);
goto next_iface;
} else if (ret < 0) {
/* all remaining ifaces are slower */
kref_get(&iface->refcount);
break;
}
}
spin_unlock(&ses->iface_lock);
/* no match. insert the entry in the list */
info = kmalloc(sizeof(struct cifs_server_iface),
GFP_KERNEL);
if (!info) {
rc = -ENOMEM;
goto out;
}
memcpy(info, &tmp_iface, sizeof(tmp_iface));
/* add this new entry to the list */
kref_init(&info->refcount);
info->is_active = 1;
cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, ses->iface_count);
cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__,
le32_to_cpu(p->Capability));
spin_lock(&ses->iface_lock);
if (!list_entry_is_head(iface, &ses->iface_list, iface_head)) {
list_add_tail(&info->iface_head, &iface->iface_head);
kref_put(&iface->refcount, release_iface);
} else
list_add_tail(&info->iface_head, &ses->iface_list);
ses->iface_count++;
spin_unlock(&ses->iface_lock);
next_iface:
nb_iface++; nb_iface++;
next = le32_to_cpu(p->Next); next = le32_to_cpu(p->Next);
if (!next) { if (!next) {
@ -556,137 +690,72 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
cifs_dbg(VFS, "%s: incomplete interface info\n", __func__); cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
/* if (!ses->iface_count) {
* Second pass: extract info to internal structure
*/
*iface_list = kcalloc(nb_iface, sizeof(**iface_list), GFP_KERNEL);
if (!*iface_list) {
rc = -ENOMEM;
goto out;
}
info = *iface_list;
bytes_left = buf_len;
p = buf;
while (bytes_left >= sizeof(*p)) {
info->speed = le64_to_cpu(p->LinkSpeed);
info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0;
info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ? 1 : 0;
cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__,
le32_to_cpu(p->Capability));
switch (p->Family) {
/*
* The kernel and wire socket structures have the same
* layout and use network byte order but make the
* conversion explicit in case either one changes.
*/
case INTERNETWORK:
addr4 = (struct sockaddr_in *)&info->sockaddr;
p4 = (struct iface_info_ipv4 *)p->Buffer;
addr4->sin_family = AF_INET;
memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);
/* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */
addr4->sin_port = cpu_to_be16(CIFS_PORT);
cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__,
&addr4->sin_addr);
break;
case INTERNETWORKV6:
addr6 = (struct sockaddr_in6 *)&info->sockaddr;
p6 = (struct iface_info_ipv6 *)p->Buffer;
addr6->sin6_family = AF_INET6;
memcpy(&addr6->sin6_addr, &p6->IPv6Address, 16);
/* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */
addr6->sin6_flowinfo = 0;
addr6->sin6_scope_id = 0;
addr6->sin6_port = cpu_to_be16(CIFS_PORT);
cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__,
&addr6->sin6_addr);
break;
default:
cifs_dbg(VFS,
"%s: skipping unsupported socket family\n",
__func__);
goto next_iface;
}
(*iface_count)++;
info++;
next_iface:
next = le32_to_cpu(p->Next);
if (!next)
break;
p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
bytes_left -= next;
}
if (!*iface_count) {
rc = -EINVAL; rc = -EINVAL;
goto out; goto out;
} }
ses->iface_last_update = jiffies;
out: out:
if (rc) { /*
kfree(*iface_list); * Go through the list again and put the inactive entries
*iface_count = 0; */
*iface_list = NULL; spin_lock(&ses->iface_lock);
list_for_each_entry_safe(iface, niface, &ses->iface_list,
iface_head) {
if (!iface->is_active) {
list_del(&iface->iface_head);
kref_put(&iface->refcount, release_iface);
ses->iface_count--;
} }
}
spin_unlock(&ses->iface_lock);
return rc; return rc;
} }
static int compare_iface(const void *ia, const void *ib) int
{ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon, bool in_mount)
const struct cifs_server_iface *a = (struct cifs_server_iface *)ia;
const struct cifs_server_iface *b = (struct cifs_server_iface *)ib;
return a->speed == b->speed ? 0 : (a->speed > b->speed ? -1 : 1);
}
static int
SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
{ {
int rc; int rc;
unsigned int ret_data_len = 0; unsigned int ret_data_len = 0;
struct network_interface_info_ioctl_rsp *out_buf = NULL; struct network_interface_info_ioctl_rsp *out_buf = NULL;
struct cifs_server_iface *iface_list;
size_t iface_count;
struct cifs_ses *ses = tcon->ses; struct cifs_ses *ses = tcon->ses;
struct TCP_Server_Info *pserver;
/* do not query too frequently */
if (ses->iface_last_update &&
time_before(jiffies, ses->iface_last_update +
(SMB_INTERFACE_POLL_INTERVAL * HZ)))
return 0;
rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */, FSCTL_QUERY_NETWORK_INTERFACE_INFO,
NULL /* no data input */, 0 /* no data input */, NULL /* no data input */, 0 /* no data input */,
CIFSMaxBufSize, (char **)&out_buf, &ret_data_len); CIFSMaxBufSize, (char **)&out_buf, &ret_data_len);
if (rc == -EOPNOTSUPP) { if (rc == -EOPNOTSUPP) {
cifs_dbg(FYI, cifs_dbg(FYI,
"server does not support query network interfaces\n"); "server does not support query network interfaces\n");
goto out; ret_data_len = 0;
} else if (rc != 0) { } else if (rc != 0) {
cifs_tcon_dbg(VFS, "error %d on ioctl to get interface list\n", rc); cifs_tcon_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
goto out; goto out;
} }
rc = parse_server_interfaces(out_buf, ret_data_len, rc = parse_server_interfaces(out_buf, ret_data_len, ses, in_mount);
&iface_list, &iface_count);
if (rc) if (rc)
goto out; goto out;
/* sort interfaces from fastest to slowest */ /* check if iface is still active */
sort(iface_list, iface_count, sizeof(*iface_list), compare_iface, NULL); spin_lock(&ses->chan_lock);
pserver = ses->chans[0].server;
spin_lock(&ses->iface_lock); if (pserver && !cifs_chan_is_iface_active(ses, pserver)) {
kfree(ses->iface_list); spin_unlock(&ses->chan_lock);
ses->iface_list = iface_list; cifs_chan_update_iface(ses, pserver);
ses->iface_count = iface_count; spin_lock(&ses->chan_lock);
ses->iface_last_update = jiffies; }
spin_unlock(&ses->iface_lock); spin_unlock(&ses->chan_lock);
out: out:
kfree(out_buf); kfree(out_buf);
@ -958,7 +1027,7 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon,
if (rc) if (rc)
return; return;
SMB3_request_interfaces(xid, tcon); SMB3_request_interfaces(xid, tcon, true /* called during mount */);
SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid, SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
FS_ATTRIBUTE_INFORMATION); FS_ATTRIBUTE_INFORMATION);
@ -1083,9 +1152,7 @@ move_smb2_ea_to_cifs(char *dst, size_t dst_size,
size_t name_len, value_len, user_name_len; size_t name_len, value_len, user_name_len;
while (src_size > 0) { while (src_size > 0) {
name = &src->ea_data[0];
name_len = (size_t)src->ea_name_length; name_len = (size_t)src->ea_name_length;
value = &src->ea_data[src->ea_name_length + 1];
value_len = (size_t)le16_to_cpu(src->ea_value_length); value_len = (size_t)le16_to_cpu(src->ea_value_length);
if (name_len == 0) if (name_len == 0)
@ -1097,6 +1164,9 @@ move_smb2_ea_to_cifs(char *dst, size_t dst_size,
goto out; goto out;
} }
name = &src->ea_data[0];
value = &src->ea_data[src->ea_name_length + 1];
if (ea_name) { if (ea_name) {
if (ea_name_len == name_len && if (ea_name_len == name_len &&
memcmp(ea_name, name, name_len) == 0) { memcmp(ea_name, name, name_len) == 0) {
@ -1546,11 +1616,13 @@ SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
struct resume_key_req *res_key; struct resume_key_req *res_key;
rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */, FSCTL_SRV_REQUEST_RESUME_KEY, NULL, 0 /* no input */,
NULL, 0 /* no input */, CIFSMaxBufSize, CIFSMaxBufSize, (char **)&res_key, &ret_data_len);
(char **)&res_key, &ret_data_len);
if (rc) { if (rc == -EOPNOTSUPP) {
pr_warn_once("Server share %s does not support copy range\n", tcon->treeName);
goto req_res_key_exit;
} else if (rc) {
cifs_tcon_dbg(VFS, "refcpy ioctl error %d getting resume key\n", rc); cifs_tcon_dbg(VFS, "refcpy ioctl error %d getting resume key\n", rc);
goto req_res_key_exit; goto req_res_key_exit;
} }
@ -1687,7 +1759,7 @@ smb2_ioctl_query_info(const unsigned int xid,
rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE; rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
rc = SMB2_ioctl_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID, rc = SMB2_ioctl_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID,
qi.info_type, true, buffer, qi.output_buffer_length, qi.info_type, buffer, qi.output_buffer_length,
CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE - CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
MAX_SMB2_CLOSE_RESPONSE_SIZE); MAX_SMB2_CLOSE_RESPONSE_SIZE);
free_req1_func = SMB2_ioctl_free; free_req1_func = SMB2_ioctl_free;
@ -1863,9 +1935,8 @@ smb2_copychunk_range(const unsigned int xid,
retbuf = NULL; retbuf = NULL;
rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid, rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE, trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
true /* is_fsctl */, (char *)pcchunk, (char *)pcchunk, sizeof(struct copychunk_ioctl),
sizeof(struct copychunk_ioctl), CIFSMaxBufSize, CIFSMaxBufSize, (char **)&retbuf, &ret_data_len);
(char **)&retbuf, &ret_data_len);
if (rc == 0) { if (rc == 0) {
if (ret_data_len != if (ret_data_len !=
sizeof(struct copychunk_ioctl_rsp)) { sizeof(struct copychunk_ioctl_rsp)) {
@ -2025,7 +2096,6 @@ static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid, FSCTL_SET_SPARSE, cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
true /* is_fctl */,
&setsparse, 1, CIFSMaxBufSize, NULL, NULL); &setsparse, 1, CIFSMaxBufSize, NULL, NULL);
if (rc) { if (rc) {
tcon->broken_sparse_sup = true; tcon->broken_sparse_sup = true;
@ -2096,7 +2166,6 @@ smb2_duplicate_extents(const unsigned int xid,
rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid, rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
trgtfile->fid.volatile_fid, trgtfile->fid.volatile_fid,
FSCTL_DUPLICATE_EXTENTS_TO_FILE, FSCTL_DUPLICATE_EXTENTS_TO_FILE,
true /* is_fsctl */,
(char *)&dup_ext_buf, (char *)&dup_ext_buf,
sizeof(struct duplicate_extents_to_file), sizeof(struct duplicate_extents_to_file),
CIFSMaxBufSize, NULL, CIFSMaxBufSize, NULL,
@ -2131,7 +2200,6 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, return SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid, cfile->fid.volatile_fid,
FSCTL_SET_INTEGRITY_INFORMATION, FSCTL_SET_INTEGRITY_INFORMATION,
true /* is_fsctl */,
(char *)&integr_info, (char *)&integr_info,
sizeof(struct fsctl_set_integrity_information_req), sizeof(struct fsctl_set_integrity_information_req),
CIFSMaxBufSize, NULL, CIFSMaxBufSize, NULL,
@ -2184,7 +2252,6 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid, cfile->fid.volatile_fid,
FSCTL_SRV_ENUMERATE_SNAPSHOTS, FSCTL_SRV_ENUMERATE_SNAPSHOTS,
true /* is_fsctl */,
NULL, 0 /* no input data */, max_response_size, NULL, 0 /* no input data */, max_response_size,
(char **)&retbuf, (char **)&retbuf,
&ret_data_len); &ret_data_len);
@ -2513,7 +2580,9 @@ smb2_is_network_name_deleted(char *buf, struct TCP_Server_Info *server)
list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) { list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
if (tcon->tid == le32_to_cpu(shdr->Id.SyncId.TreeId)) { if (tcon->tid == le32_to_cpu(shdr->Id.SyncId.TreeId)) {
spin_lock(&tcon->tc_lock);
tcon->need_reconnect = true; tcon->need_reconnect = true;
spin_unlock(&tcon->tc_lock);
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&cifs_tcp_ses_lock);
pr_warn_once("Server share %s deleted.\n", pr_warn_once("Server share %s deleted.\n",
tcon->treeName); tcon->treeName);
@ -2889,7 +2958,6 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
do { do {
rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
FSCTL_DFS_GET_REFERRALS, FSCTL_DFS_GET_REFERRALS,
true /* is_fsctl */,
(char *)dfs_req, dfs_req_size, CIFSMaxBufSize, (char *)dfs_req, dfs_req_size, CIFSMaxBufSize,
(char **)&dfs_rsp, &dfs_rsp_size); (char **)&dfs_rsp, &dfs_rsp_size);
if (!is_retryable_error(rc)) if (!is_retryable_error(rc))
@ -3096,8 +3164,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_ioctl_init(tcon, server, rc = SMB2_ioctl_init(tcon, server,
&rqst[1], fid.persistent_fid, &rqst[1], fid.persistent_fid,
fid.volatile_fid, FSCTL_GET_REPARSE_POINT, fid.volatile_fid, FSCTL_GET_REPARSE_POINT, NULL, 0,
true /* is_fctl */, NULL, 0,
CIFSMaxBufSize - CIFSMaxBufSize -
MAX_SMB2_CREATE_RESPONSE_SIZE - MAX_SMB2_CREATE_RESPONSE_SIZE -
MAX_SMB2_CLOSE_RESPONSE_SIZE); MAX_SMB2_CLOSE_RESPONSE_SIZE);
@ -3277,8 +3344,7 @@ smb2_query_reparse_tag(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_ioctl_init(tcon, server, rc = SMB2_ioctl_init(tcon, server,
&rqst[1], COMPOUND_FID, &rqst[1], COMPOUND_FID,
COMPOUND_FID, FSCTL_GET_REPARSE_POINT, COMPOUND_FID, FSCTL_GET_REPARSE_POINT, NULL, 0,
true /* is_fctl */, NULL, 0,
CIFSMaxBufSize - CIFSMaxBufSize -
MAX_SMB2_CREATE_RESPONSE_SIZE - MAX_SMB2_CREATE_RESPONSE_SIZE -
MAX_SMB2_CLOSE_RESPONSE_SIZE); MAX_SMB2_CLOSE_RESPONSE_SIZE);
@ -3552,7 +3618,7 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len); fsctl_buf.BeyondFinalZero = cpu_to_le64(offset + len);
rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, true, cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
(char *)&fsctl_buf, (char *)&fsctl_buf,
sizeof(struct file_zero_data_information), sizeof(struct file_zero_data_information),
0, NULL, NULL); 0, NULL, NULL);
@ -3614,7 +3680,7 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
true /* is_fctl */, (char *)&fsctl_buf, (char *)&fsctl_buf,
sizeof(struct file_zero_data_information), sizeof(struct file_zero_data_information),
CIFSMaxBufSize, NULL, NULL); CIFSMaxBufSize, NULL, NULL);
free_xid(xid); free_xid(xid);
@ -3672,7 +3738,7 @@ static int smb3_simple_fallocate_range(unsigned int xid,
in_data.length = cpu_to_le64(len); in_data.length = cpu_to_le64(len);
rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid, cfile->fid.volatile_fid,
FSCTL_QUERY_ALLOCATED_RANGES, true, FSCTL_QUERY_ALLOCATED_RANGES,
(char *)&in_data, sizeof(in_data), (char *)&in_data, sizeof(in_data),
1024 * sizeof(struct file_allocated_range_buffer), 1024 * sizeof(struct file_allocated_range_buffer),
(char **)&out_data, &out_data_len); (char **)&out_data, &out_data_len);
@ -3913,7 +3979,7 @@ static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offs
rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid, cfile->fid.volatile_fid,
FSCTL_QUERY_ALLOCATED_RANGES, true, FSCTL_QUERY_ALLOCATED_RANGES,
(char *)&in_data, sizeof(in_data), (char *)&in_data, sizeof(in_data),
sizeof(struct file_allocated_range_buffer), sizeof(struct file_allocated_range_buffer),
(char **)&out_data, &out_data_len); (char **)&out_data, &out_data_len);
@ -3972,7 +4038,7 @@ static int smb3_fiemap(struct cifs_tcon *tcon,
rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid, cfile->fid.volatile_fid,
FSCTL_QUERY_ALLOCATED_RANGES, true, FSCTL_QUERY_ALLOCATED_RANGES,
(char *)&in_data, sizeof(in_data), (char *)&in_data, sizeof(in_data),
1024 * sizeof(struct file_allocated_range_buffer), 1024 * sizeof(struct file_allocated_range_buffer),
(char **)&out_data, &out_data_len); (char **)&out_data, &out_data_len);
@ -4393,21 +4459,25 @@ static void *smb2_get_aead_req(struct crypto_aead *tfm, const struct smb_rqst *r
static int static int
smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key) smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
{ {
struct TCP_Server_Info *pserver;
struct cifs_ses *ses; struct cifs_ses *ses;
u8 *ses_enc_key; u8 *ses_enc_key;
/* If server is a channel, select the primary channel */
pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
spin_lock(&cifs_tcp_ses_lock); spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
if (ses->Suid == ses_id) { if (ses->Suid == ses_id) {
spin_lock(&ses->ses_lock);
ses_enc_key = enc ? ses->smb3encryptionkey : ses_enc_key = enc ? ses->smb3encryptionkey :
ses->smb3decryptionkey; ses->smb3decryptionkey;
memcpy(key, ses_enc_key, SMB3_SIGN_KEY_SIZE); memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE);
spin_unlock(&ses->ses_lock);
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&cifs_tcp_ses_lock);
return 0; return 0;
} }
} }
}
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&cifs_tcp_ses_lock);
return -EAGAIN; return -EAGAIN;
@ -4429,7 +4499,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
int rc = 0; int rc = 0;
struct scatterlist *sg; struct scatterlist *sg;
u8 sign[SMB2_SIGNATURE_SIZE] = {}; u8 sign[SMB2_SIGNATURE_SIZE] = {};
u8 key[SMB3_SIGN_KEY_SIZE]; u8 key[SMB3_ENC_DEC_KEY_SIZE];
struct aead_request *req; struct aead_request *req;
u8 *iv; u8 *iv;
DECLARE_CRYPTO_WAIT(wait); DECLARE_CRYPTO_WAIT(wait);
@ -4439,8 +4509,8 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
rc = smb2_get_enc_key(server, le64_to_cpu(tr_hdr->SessionId), enc, key); rc = smb2_get_enc_key(server, le64_to_cpu(tr_hdr->SessionId), enc, key);
if (rc) { if (rc) {
cifs_server_dbg(VFS, "%s: Could not get %scryption key\n", __func__, cifs_server_dbg(FYI, "%s: Could not get %scryption key. sid: 0x%llx\n", __func__,
enc ? "en" : "de"); enc ? "en" : "de", le64_to_cpu(tr_hdr->SessionId));
return rc; return rc;
} }
@ -4453,10 +4523,11 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
tfm = enc ? server->secmech.ccmaesencrypt : tfm = enc ? server->secmech.ccmaesencrypt :
server->secmech.ccmaesdecrypt; server->secmech.ccmaesdecrypt;
if (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM) if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) ||
(server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
rc = crypto_aead_setkey(tfm, key, SMB3_GCM256_CRYPTKEY_SIZE); rc = crypto_aead_setkey(tfm, key, SMB3_GCM256_CRYPTKEY_SIZE);
else else
rc = crypto_aead_setkey(tfm, key, SMB3_SIGN_KEY_SIZE); rc = crypto_aead_setkey(tfm, key, SMB3_GCM128_CRYPTKEY_SIZE);
if (rc) { if (rc) {
cifs_server_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc); cifs_server_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc);
@ -4723,7 +4794,7 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
if (server->ops->is_session_expired && if (server->ops->is_session_expired &&
server->ops->is_session_expired(buf)) { server->ops->is_session_expired(buf)) {
if (!is_offloaded) if (!is_offloaded)
cifs_reconnect(server); cifs_reconnect(server, true);
return -1; return -1;
} }
@ -4896,17 +4967,21 @@ static void smb2_decrypt_offload(struct work_struct *work)
mid->callback(mid); mid->callback(mid);
} else { } else {
spin_lock(&GlobalMid_Lock); spin_lock(&dw->server->srv_lock);
if (dw->server->tcpStatus == CifsNeedReconnect) { if (dw->server->tcpStatus == CifsNeedReconnect) {
spin_lock(&dw->server->mid_lock);
mid->mid_state = MID_RETRY_NEEDED; mid->mid_state = MID_RETRY_NEEDED;
spin_unlock(&GlobalMid_Lock); spin_unlock(&dw->server->mid_lock);
spin_unlock(&dw->server->srv_lock);
mid->callback(mid); mid->callback(mid);
} else { } else {
spin_lock(&dw->server->mid_lock);
mid->mid_state = MID_REQUEST_SUBMITTED; mid->mid_state = MID_REQUEST_SUBMITTED;
mid->mid_flags &= ~(MID_DELETED); mid->mid_flags &= ~(MID_DELETED);
list_add_tail(&mid->qhead, list_add_tail(&mid->qhead,
&dw->server->pending_mid_q); &dw->server->pending_mid_q);
spin_unlock(&GlobalMid_Lock); spin_unlock(&dw->server->mid_lock);
spin_unlock(&dw->server->srv_lock);
} }
} }
release_mid(mid); release_mid(mid);
@ -5138,13 +5213,13 @@ smb3_receive_transform(struct TCP_Server_Info *server,
sizeof(struct smb2_hdr)) { sizeof(struct smb2_hdr)) {
cifs_server_dbg(VFS, "Transform message is too small (%u)\n", cifs_server_dbg(VFS, "Transform message is too small (%u)\n",
pdu_length); pdu_length);
cifs_reconnect(server); cifs_reconnect(server, true);
return -ECONNABORTED; return -ECONNABORTED;
} }
if (pdu_length < orig_len + sizeof(struct smb2_transform_hdr)) { if (pdu_length < orig_len + sizeof(struct smb2_transform_hdr)) {
cifs_server_dbg(VFS, "Transform message is broken\n"); cifs_server_dbg(VFS, "Transform message is broken\n");
cifs_reconnect(server); cifs_reconnect(server, true);
return -ECONNABORTED; return -ECONNABORTED;
} }

View File

@ -143,9 +143,8 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
struct TCP_Server_Info *server) struct TCP_Server_Info *server)
{ {
int rc = 0; int rc = 0;
struct nls_table *nls_codepage; struct nls_table *nls_codepage = NULL;
struct cifs_ses *ses; struct cifs_ses *ses;
int retries;
/* /*
* SMB2s NegProt, SessSetup, Logoff do not have tcon yet so * SMB2s NegProt, SessSetup, Logoff do not have tcon yet so
@ -162,7 +161,8 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL) if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL)
return 0; return 0;
if (tcon->tidStatus == CifsExiting) { spin_lock(&tcon->tc_lock);
if (tcon->status == TID_EXITING) {
/* /*
* only tree disconnect, open, and write, * only tree disconnect, open, and write,
* (and ulogoff which does not have tcon) * (and ulogoff which does not have tcon)
@ -171,24 +171,28 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
if ((smb2_command != SMB2_WRITE) && if ((smb2_command != SMB2_WRITE) &&
(smb2_command != SMB2_CREATE) && (smb2_command != SMB2_CREATE) &&
(smb2_command != SMB2_TREE_DISCONNECT)) { (smb2_command != SMB2_TREE_DISCONNECT)) {
spin_unlock(&tcon->tc_lock);
cifs_dbg(FYI, "can not send cmd %d while umounting\n", cifs_dbg(FYI, "can not send cmd %d while umounting\n",
smb2_command); smb2_command);
return -ENODEV; return -ENODEV;
} }
} }
if ((!tcon->ses) || (tcon->ses->status == CifsExiting) || spin_unlock(&tcon->tc_lock);
(!tcon->ses->server) || !server)
return -EIO;
ses = tcon->ses; ses = tcon->ses;
retries = server->nr_targets; if (!ses)
return -EIO;
spin_lock(&ses->ses_lock);
if (ses->ses_status == SES_EXITING) {
spin_unlock(&ses->ses_lock);
return -EIO;
}
spin_unlock(&ses->ses_lock);
if (!ses->server || !server)
return -EIO;
/* spin_lock(&server->srv_lock);
* Give demultiplex thread up to 10 seconds to each target available for if (server->tcpStatus == CifsNeedReconnect) {
* reconnect -- should be greater than cifs socket timeout which is 7
* seconds.
*/
while (server->tcpStatus == CifsNeedReconnect) {
/* /*
* Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
* here since they are implicitly done when session drops. * here since they are implicitly done when session drops.
@ -201,100 +205,100 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
case SMB2_CANCEL: case SMB2_CANCEL:
case SMB2_CLOSE: case SMB2_CLOSE:
case SMB2_OPLOCK_BREAK: case SMB2_OPLOCK_BREAK:
spin_unlock(&server->srv_lock);
return -EAGAIN; return -EAGAIN;
} }
rc = wait_event_interruptible_timeout(server->response_q,
(server->tcpStatus != CifsNeedReconnect),
10 * HZ);
if (rc < 0) {
cifs_dbg(FYI, "%s: aborting reconnect due to a received signal by the process\n",
__func__);
return -ERESTARTSYS;
} }
spin_unlock(&server->srv_lock);
/* are we still trying to reconnect? */ again:
if (server->tcpStatus != CifsNeedReconnect) rc = cifs_wait_for_server_reconnect(server, tcon->retry);
break; if (rc)
return rc;
if (retries && --retries) spin_lock(&ses->chan_lock);
continue; if (!cifs_chan_needs_reconnect(ses, server) && !tcon->need_reconnect) {
spin_unlock(&ses->chan_lock);
/*
* on "soft" mounts we wait once. Hard mounts keep
* retrying until process is killed or server comes
* back on-line
*/
if (!tcon->retry) {
cifs_dbg(FYI, "gave up waiting on reconnect in smb_init\n");
return -EHOSTDOWN;
}
retries = server->nr_targets;
}
if (!tcon->ses->need_reconnect && !tcon->need_reconnect)
return 0; return 0;
}
spin_unlock(&ses->chan_lock);
cifs_dbg(FYI, "sess reconnect mask: 0x%lx, tcon reconnect: %d",
tcon->ses->chans_need_reconnect,
tcon->need_reconnect);
nls_codepage = load_nls_default(); mutex_lock(&ses->session_mutex);
/*
* need to prevent multiple threads trying to simultaneously reconnect
* the same SMB session
*/
mutex_lock(&tcon->ses->session_mutex);
/* /*
* Recheck after acquire mutex. If another thread is negotiating * Recheck after acquire mutex. If another thread is negotiating
* and the server never sends an answer the socket will be closed * and the server never sends an answer the socket will be closed
* and tcpStatus set to reconnect. * and tcpStatus set to reconnect.
*/ */
spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsNeedReconnect) { if (server->tcpStatus == CifsNeedReconnect) {
spin_unlock(&server->srv_lock);
mutex_unlock(&ses->session_mutex);
if (tcon->retry)
goto again;
rc = -EHOSTDOWN; rc = -EHOSTDOWN;
mutex_unlock(&tcon->ses->session_mutex);
goto out; goto out;
} }
spin_unlock(&server->srv_lock);
nls_codepage = load_nls_default();
/* /*
* If we are reconnecting an extra channel, bind * need to prevent multiple threads trying to simultaneously
* reconnect the same SMB session
*/ */
if (CIFS_SERVER_IS_CHAN(server)) { spin_lock(&ses->ses_lock);
ses->binding = true; spin_lock(&ses->chan_lock);
ses->binding_chan = cifs_ses_find_chan(ses, server); if (!cifs_chan_needs_reconnect(ses, server) &&
} ses->ses_status == SES_GOOD) {
spin_unlock(&ses->chan_lock);
spin_unlock(&ses->ses_lock);
/* this means that we only need to tree connect */
if (tcon->need_reconnect)
goto skip_sess_setup;
rc = cifs_negotiate_protocol(0, tcon->ses); mutex_unlock(&ses->session_mutex);
if (!rc && tcon->ses->need_reconnect) { goto out;
rc = cifs_setup_session(0, tcon->ses, nls_codepage); }
spin_unlock(&ses->chan_lock);
spin_unlock(&ses->ses_lock);
rc = cifs_negotiate_protocol(0, ses, server);
if (!rc) {
rc = cifs_setup_session(0, ses, server, nls_codepage);
if ((rc == -EACCES) && !tcon->retry) { if ((rc == -EACCES) && !tcon->retry) {
mutex_unlock(&ses->session_mutex);
rc = -EHOSTDOWN; rc = -EHOSTDOWN;
ses->binding = false;
ses->binding_chan = NULL;
mutex_unlock(&tcon->ses->session_mutex);
goto failed; goto failed;
} else if (rc) {
mutex_unlock(&ses->session_mutex);
goto out;
} }
} } else {
/* mutex_unlock(&ses->session_mutex);
* End of channel binding
*/
ses->binding = false;
ses->binding_chan = NULL;
if (rc || !tcon->need_reconnect) {
mutex_unlock(&tcon->ses->session_mutex);
goto out; goto out;
} }
skip_sess_setup:
if (!tcon->need_reconnect) {
mutex_unlock(&ses->session_mutex);
goto out;
}
cifs_mark_open_files_invalid(tcon); cifs_mark_open_files_invalid(tcon);
if (tcon->use_persistent) if (tcon->use_persistent)
tcon->need_reopen_files = true; tcon->need_reopen_files = true;
rc = cifs_tree_connect(0, tcon, nls_codepage); rc = cifs_tree_connect(0, tcon, nls_codepage);
mutex_unlock(&tcon->ses->session_mutex); mutex_unlock(&ses->session_mutex);
cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc); cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
if (rc) { if (rc) {
/* If sess reconnected but tcon didn't, something strange ... */ /* If sess reconnected but tcon didn't, something strange ... */
pr_warn_once("reconnect tcon failed rc = %d\n", rc); cifs_dbg(VFS, "reconnect tcon failed rc = %d\n", rc);
goto out; goto out;
} }
@ -316,7 +320,6 @@ out:
case SMB2_READ: case SMB2_READ:
case SMB2_WRITE: case SMB2_WRITE:
case SMB2_LOCK: case SMB2_LOCK:
case SMB2_IOCTL:
case SMB2_QUERY_DIRECTORY: case SMB2_QUERY_DIRECTORY:
case SMB2_CHANGE_NOTIFY: case SMB2_CHANGE_NOTIFY:
case SMB2_QUERY_INFO: case SMB2_QUERY_INFO:
@ -334,7 +337,7 @@ fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon,
void *buf, void *buf,
unsigned int *total_len) unsigned int *total_len)
{ {
struct smb2_pdu *spdu = (struct smb2_pdu *)buf; struct smb2_pdu *spdu = buf;
/* lookup word count ie StructureSize from table */ /* lookup word count ie StructureSize from table */
__u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_command)]; __u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_command)];
@ -522,9 +525,10 @@ static void
assemble_neg_contexts(struct smb2_negotiate_req *req, assemble_neg_contexts(struct smb2_negotiate_req *req,
struct TCP_Server_Info *server, unsigned int *total_len) struct TCP_Server_Info *server, unsigned int *total_len)
{ {
char *pneg_ctxt;
char *hostname = NULL;
unsigned int ctxt_len, neg_context_count; unsigned int ctxt_len, neg_context_count;
struct TCP_Server_Info *pserver;
char *pneg_ctxt;
char *hostname;
if (*total_len > 200) { if (*total_len > 200) {
/* In case length corrupted don't want to overrun smb buffer */ /* In case length corrupted don't want to overrun smb buffer */
@ -555,8 +559,9 @@ assemble_neg_contexts(struct smb2_negotiate_req *req,
* secondary channels don't have the hostname field populated * secondary channels don't have the hostname field populated
* use the hostname field in the primary channel instead * use the hostname field in the primary channel instead
*/ */
hostname = CIFS_SERVER_IS_CHAN(server) ? pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
server->primary_server->hostname : server->hostname; cifs_server_lock(pserver);
hostname = pserver->hostname;
if (hostname && (hostname[0] != 0)) { if (hostname && (hostname[0] != 0)) {
ctxt_len = build_netname_ctxt((struct smb2_netname_neg_context *)pneg_ctxt, ctxt_len = build_netname_ctxt((struct smb2_netname_neg_context *)pneg_ctxt,
hostname); hostname);
@ -565,6 +570,7 @@ assemble_neg_contexts(struct smb2_negotiate_req *req,
neg_context_count = 3; neg_context_count = 3;
} else } else
neg_context_count = 2; neg_context_count = 2;
cifs_server_unlock(pserver);
build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt); build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt);
*total_len += sizeof(struct smb2_posix_neg_context); *total_len += sizeof(struct smb2_posix_neg_context);
@ -843,7 +849,9 @@ add_posix_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode)
*/ */
int int
SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses) SMB2_negotiate(const unsigned int xid,
struct cifs_ses *ses,
struct TCP_Server_Info *server)
{ {
struct smb_rqst rqst; struct smb_rqst rqst;
struct smb2_negotiate_req *req; struct smb2_negotiate_req *req;
@ -852,7 +860,6 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
struct kvec rsp_iov; struct kvec rsp_iov;
int rc = 0; int rc = 0;
int resp_buftype; int resp_buftype;
struct TCP_Server_Info *server = cifs_ses_server(ses);
int blob_offset, blob_length; int blob_offset, blob_length;
char *security_blob; char *security_blob;
int flags = CIFS_NEG_OP; int flags = CIFS_NEG_OP;
@ -943,16 +950,17 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
} else if (rc != 0) } else if (rc != 0)
goto neg_exit; goto neg_exit;
rc = -EIO;
if (strcmp(server->vals->version_string, if (strcmp(server->vals->version_string,
SMB3ANY_VERSION_STRING) == 0) { SMB3ANY_VERSION_STRING) == 0) {
if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) { if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
cifs_server_dbg(VFS, cifs_server_dbg(VFS,
"SMB2 dialect returned but not requested\n"); "SMB2 dialect returned but not requested\n");
return -EIO; goto neg_exit;
} else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) { } else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
cifs_server_dbg(VFS, cifs_server_dbg(VFS,
"SMB2.1 dialect returned but not requested\n"); "SMB2.1 dialect returned but not requested\n");
return -EIO; goto neg_exit;
} else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) { } else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
/* ops set to 3.0 by default for default so update */ /* ops set to 3.0 by default for default so update */
server->ops = &smb311_operations; server->ops = &smb311_operations;
@ -963,7 +971,7 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) { if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
cifs_server_dbg(VFS, cifs_server_dbg(VFS,
"SMB2 dialect returned but not requested\n"); "SMB2 dialect returned but not requested\n");
return -EIO; goto neg_exit;
} else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) { } else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
/* ops set to 3.0 by default for default so update */ /* ops set to 3.0 by default for default so update */
server->ops = &smb21_operations; server->ops = &smb21_operations;
@ -977,7 +985,7 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
/* if requested single dialect ensure returned dialect matched */ /* if requested single dialect ensure returned dialect matched */
cifs_server_dbg(VFS, "Invalid 0x%x dialect returned: not requested\n", cifs_server_dbg(VFS, "Invalid 0x%x dialect returned: not requested\n",
le16_to_cpu(rsp->DialectRevision)); le16_to_cpu(rsp->DialectRevision));
return -EIO; goto neg_exit;
} }
cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode); cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode);
@ -995,9 +1003,10 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
else { else {
cifs_server_dbg(VFS, "Invalid dialect returned by server 0x%x\n", cifs_server_dbg(VFS, "Invalid dialect returned by server 0x%x\n",
le16_to_cpu(rsp->DialectRevision)); le16_to_cpu(rsp->DialectRevision));
rc = -EIO;
goto neg_exit; goto neg_exit;
} }
rc = 0;
server->dialect = le16_to_cpu(rsp->DialectRevision); server->dialect = le16_to_cpu(rsp->DialectRevision);
/* /*
@ -1144,7 +1153,7 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
} }
rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */, FSCTL_VALIDATE_NEGOTIATE_INFO,
(char *)pneg_inbuf, inbuflen, CIFSMaxBufSize, (char *)pneg_inbuf, inbuflen, CIFSMaxBufSize,
(char **)&pneg_rsp, &rsplen); (char **)&pneg_rsp, &rsplen);
if (rc == -EOPNOTSUPP) { if (rc == -EOPNOTSUPP) {
@ -1224,6 +1233,7 @@ smb2_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested)
struct SMB2_sess_data { struct SMB2_sess_data {
unsigned int xid; unsigned int xid;
struct cifs_ses *ses; struct cifs_ses *ses;
struct TCP_Server_Info *server;
struct nls_table *nls_cp; struct nls_table *nls_cp;
void (*func)(struct SMB2_sess_data *); void (*func)(struct SMB2_sess_data *);
int result; int result;
@ -1245,9 +1255,10 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
{ {
int rc; int rc;
struct cifs_ses *ses = sess_data->ses; struct cifs_ses *ses = sess_data->ses;
struct TCP_Server_Info *server = sess_data->server;
struct smb2_sess_setup_req *req; struct smb2_sess_setup_req *req;
struct TCP_Server_Info *server = cifs_ses_server(ses);
unsigned int total_len; unsigned int total_len;
bool is_binding = false;
rc = smb2_plain_req_init(SMB2_SESSION_SETUP, NULL, server, rc = smb2_plain_req_init(SMB2_SESSION_SETUP, NULL, server,
(void **) &req, (void **) &req,
@ -1255,11 +1266,16 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
if (rc) if (rc)
return rc; return rc;
if (sess_data->ses->binding) { spin_lock(&ses->ses_lock);
req->hdr.SessionId = cpu_to_le64(sess_data->ses->Suid); is_binding = (ses->ses_status == SES_GOOD);
spin_unlock(&ses->ses_lock);
if (is_binding) {
req->hdr.SessionId = cpu_to_le64(ses->Suid);
req->hdr.Flags |= SMB2_FLAGS_SIGNED; req->hdr.Flags |= SMB2_FLAGS_SIGNED;
req->PreviousSessionId = 0; req->PreviousSessionId = 0;
req->Flags = SMB2_SESSION_REQ_FLAG_BINDING; req->Flags = SMB2_SESSION_REQ_FLAG_BINDING;
cifs_dbg(FYI, "Binding to sess id: %llx\n", ses->Suid);
} else { } else {
/* First session, not a reauthenticate */ /* First session, not a reauthenticate */
req->hdr.SessionId = 0; req->hdr.SessionId = 0;
@ -1269,6 +1285,8 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
*/ */
req->PreviousSessionId = cpu_to_le64(sess_data->previous_session); req->PreviousSessionId = cpu_to_le64(sess_data->previous_session);
req->Flags = 0; /* MBZ */ req->Flags = 0; /* MBZ */
cifs_dbg(FYI, "Fresh session. Previous: %llx\n",
sess_data->previous_session);
} }
/* enough to enable echos and oplocks and one max size write */ /* enough to enable echos and oplocks and one max size write */
@ -1328,7 +1346,7 @@ SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
/* BB add code to build os and lm fields */ /* BB add code to build os and lm fields */
rc = cifs_send_recv(sess_data->xid, sess_data->ses, rc = cifs_send_recv(sess_data->xid, sess_data->ses,
cifs_ses_server(sess_data->ses), sess_data->server,
&rqst, &rqst,
&sess_data->buf0_type, &sess_data->buf0_type,
CIFS_LOG_ERROR | CIFS_SESS_OP, &rsp_iov); CIFS_LOG_ERROR | CIFS_SESS_OP, &rsp_iov);
@ -1343,15 +1361,15 @@ SMB2_sess_establish_session(struct SMB2_sess_data *sess_data)
{ {
int rc = 0; int rc = 0;
struct cifs_ses *ses = sess_data->ses; struct cifs_ses *ses = sess_data->ses;
struct TCP_Server_Info *server = cifs_ses_server(ses); struct TCP_Server_Info *server = sess_data->server;
mutex_lock(&server->srv_mutex); cifs_server_lock(server);
if (server->ops->generate_signingkey) { if (server->ops->generate_signingkey) {
rc = server->ops->generate_signingkey(ses); rc = server->ops->generate_signingkey(ses, server);
if (rc) { if (rc) {
cifs_dbg(FYI, cifs_dbg(FYI,
"SMB3 session key generation failed\n"); "SMB3 session key generation failed\n");
mutex_unlock(&server->srv_mutex); cifs_server_unlock(server);
return rc; return rc;
} }
} }
@ -1359,17 +1377,9 @@ SMB2_sess_establish_session(struct SMB2_sess_data *sess_data)
server->sequence_number = 0x2; server->sequence_number = 0x2;
server->session_estab = true; server->session_estab = true;
} }
mutex_unlock(&server->srv_mutex); cifs_server_unlock(server);
cifs_dbg(FYI, "SMB2/3 session established successfully\n"); cifs_dbg(FYI, "SMB2/3 session established successfully\n");
/* keep existing ses state if binding */
if (!ses->binding) {
spin_lock(&GlobalMid_Lock);
ses->status = CifsGood;
ses->need_reconnect = false;
spin_unlock(&GlobalMid_Lock);
}
return rc; return rc;
} }
@ -1379,15 +1389,17 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
{ {
int rc; int rc;
struct cifs_ses *ses = sess_data->ses; struct cifs_ses *ses = sess_data->ses;
struct TCP_Server_Info *server = sess_data->server;
struct cifs_spnego_msg *msg; struct cifs_spnego_msg *msg;
struct key *spnego_key = NULL; struct key *spnego_key = NULL;
struct smb2_sess_setup_rsp *rsp = NULL; struct smb2_sess_setup_rsp *rsp = NULL;
bool is_binding = false;
rc = SMB2_sess_alloc_buffer(sess_data); rc = SMB2_sess_alloc_buffer(sess_data);
if (rc) if (rc)
goto out; goto out;
spnego_key = cifs_get_spnego_key(ses); spnego_key = cifs_get_spnego_key(ses, server);
if (IS_ERR(spnego_key)) { if (IS_ERR(spnego_key)) {
rc = PTR_ERR(spnego_key); rc = PTR_ERR(spnego_key);
if (rc == -ENOKEY) if (rc == -ENOKEY)
@ -1408,8 +1420,12 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
goto out_put_spnego_key; goto out_put_spnego_key;
} }
spin_lock(&ses->ses_lock);
is_binding = (ses->ses_status == SES_GOOD);
spin_unlock(&ses->ses_lock);
/* keep session key if binding */ /* keep session key if binding */
if (!ses->binding) { if (!is_binding) {
ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len, ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
GFP_KERNEL); GFP_KERNEL);
if (!ses->auth_key.response) { if (!ses->auth_key.response) {
@ -1430,7 +1446,7 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base; rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
/* keep session id and flags if binding */ /* keep session id and flags if binding */
if (!ses->binding) { if (!is_binding) {
ses->Suid = le64_to_cpu(rsp->hdr.SessionId); ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
ses->session_flags = le16_to_cpu(rsp->SessionFlags); ses->session_flags = le16_to_cpu(rsp->SessionFlags);
} }
@ -1462,10 +1478,12 @@ SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data)
{ {
int rc; int rc;
struct cifs_ses *ses = sess_data->ses; struct cifs_ses *ses = sess_data->ses;
struct TCP_Server_Info *server = sess_data->server;
struct smb2_sess_setup_rsp *rsp = NULL; struct smb2_sess_setup_rsp *rsp = NULL;
unsigned char *ntlmssp_blob = NULL; unsigned char *ntlmssp_blob = NULL;
bool use_spnego = false; /* else use raw ntlmssp */ bool use_spnego = false; /* else use raw ntlmssp */
u16 blob_length = 0; u16 blob_length = 0;
bool is_binding = false;
/* /*
* If memory allocation is successful, caller of this function * If memory allocation is successful, caller of this function
@ -1483,7 +1501,7 @@ SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data)
goto out_err; goto out_err;
rc = build_ntlmssp_negotiate_blob(&ntlmssp_blob, rc = build_ntlmssp_negotiate_blob(&ntlmssp_blob,
&blob_length, ses, &blob_length, ses, server,
sess_data->nls_cp); sess_data->nls_cp);
if (rc) if (rc)
goto out; goto out;
@ -1522,8 +1540,12 @@ SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data)
cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n"); cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n");
spin_lock(&ses->ses_lock);
is_binding = (ses->ses_status == SES_GOOD);
spin_unlock(&ses->ses_lock);
/* keep existing ses id and flags if binding */ /* keep existing ses id and flags if binding */
if (!ses->binding) { if (!is_binding) {
ses->Suid = le64_to_cpu(rsp->hdr.SessionId); ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
ses->session_flags = le16_to_cpu(rsp->SessionFlags); ses->session_flags = le16_to_cpu(rsp->SessionFlags);
} }
@ -1548,11 +1570,13 @@ SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data)
{ {
int rc; int rc;
struct cifs_ses *ses = sess_data->ses; struct cifs_ses *ses = sess_data->ses;
struct TCP_Server_Info *server = sess_data->server;
struct smb2_sess_setup_req *req; struct smb2_sess_setup_req *req;
struct smb2_sess_setup_rsp *rsp = NULL; struct smb2_sess_setup_rsp *rsp = NULL;
unsigned char *ntlmssp_blob = NULL; unsigned char *ntlmssp_blob = NULL;
bool use_spnego = false; /* else use raw ntlmssp */ bool use_spnego = false; /* else use raw ntlmssp */
u16 blob_length = 0; u16 blob_length = 0;
bool is_binding = false;
rc = SMB2_sess_alloc_buffer(sess_data); rc = SMB2_sess_alloc_buffer(sess_data);
if (rc) if (rc)
@ -1561,7 +1585,8 @@ SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data)
req = (struct smb2_sess_setup_req *) sess_data->iov[0].iov_base; req = (struct smb2_sess_setup_req *) sess_data->iov[0].iov_base;
req->hdr.SessionId = cpu_to_le64(ses->Suid); req->hdr.SessionId = cpu_to_le64(ses->Suid);
rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses, rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length,
ses, server,
sess_data->nls_cp); sess_data->nls_cp);
if (rc) { if (rc) {
cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n", rc); cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n", rc);
@ -1583,8 +1608,12 @@ SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data)
rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base; rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
spin_lock(&ses->ses_lock);
is_binding = (ses->ses_status == SES_GOOD);
spin_unlock(&ses->ses_lock);
/* keep existing ses id and flags if binding */ /* keep existing ses id and flags if binding */
if (!ses->binding) { if (!is_binding) {
ses->Suid = le64_to_cpu(rsp->hdr.SessionId); ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
ses->session_flags = le16_to_cpu(rsp->SessionFlags); ses->session_flags = le16_to_cpu(rsp->SessionFlags);
} }
@ -1615,11 +1644,13 @@ out:
} }
static int static int
SMB2_select_sec(struct cifs_ses *ses, struct SMB2_sess_data *sess_data) SMB2_select_sec(struct SMB2_sess_data *sess_data)
{ {
int type; int type;
struct cifs_ses *ses = sess_data->ses;
struct TCP_Server_Info *server = sess_data->server;
type = smb2_select_sectype(cifs_ses_server(ses), ses->sectype); type = smb2_select_sectype(server, ses->sectype);
cifs_dbg(FYI, "sess setup type %d\n", type); cifs_dbg(FYI, "sess setup type %d\n", type);
if (type == Unspecified) { if (type == Unspecified) {
cifs_dbg(VFS, "Unable to select appropriate authentication method!\n"); cifs_dbg(VFS, "Unable to select appropriate authentication method!\n");
@ -1643,10 +1674,10 @@ SMB2_select_sec(struct cifs_ses *ses, struct SMB2_sess_data *sess_data)
int int
SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses, SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
struct TCP_Server_Info *server,
const struct nls_table *nls_cp) const struct nls_table *nls_cp)
{ {
int rc = 0; int rc = 0;
struct TCP_Server_Info *server = cifs_ses_server(ses);
struct SMB2_sess_data *sess_data; struct SMB2_sess_data *sess_data;
cifs_dbg(FYI, "Session Setup\n"); cifs_dbg(FYI, "Session Setup\n");
@ -1660,15 +1691,17 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
if (!sess_data) if (!sess_data)
return -ENOMEM; return -ENOMEM;
rc = SMB2_select_sec(ses, sess_data);
if (rc)
goto out;
sess_data->xid = xid; sess_data->xid = xid;
sess_data->ses = ses; sess_data->ses = ses;
sess_data->server = server;
sess_data->buf0_type = CIFS_NO_BUFFER; sess_data->buf0_type = CIFS_NO_BUFFER;
sess_data->nls_cp = (struct nls_table *) nls_cp; sess_data->nls_cp = (struct nls_table *) nls_cp;
sess_data->previous_session = ses->Suid; sess_data->previous_session = ses->Suid;
rc = SMB2_select_sec(sess_data);
if (rc)
goto out;
/* /*
* Initialize the session hash with the server one. * Initialize the session hash with the server one.
*/ */
@ -1707,8 +1740,12 @@ SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
return -EIO; return -EIO;
/* no need to send SMB logoff if uid already closed due to reconnect */ /* no need to send SMB logoff if uid already closed due to reconnect */
if (ses->need_reconnect) spin_lock(&ses->chan_lock);
if (CIFS_ALL_CHANS_NEED_RECONNECT(ses)) {
spin_unlock(&ses->chan_lock);
goto smb2_session_already_dead; goto smb2_session_already_dead;
}
spin_unlock(&ses->chan_lock);
rc = smb2_plain_req_init(SMB2_LOGOFF, NULL, ses->server, rc = smb2_plain_req_init(SMB2_LOGOFF, NULL, ses->server,
(void **) &req, &total_len); (void **) &req, &total_len);
@ -1869,8 +1906,6 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
tcon->share_flags = le32_to_cpu(rsp->ShareFlags); tcon->share_flags = le32_to_cpu(rsp->ShareFlags);
tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */ tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */
tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess); tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess);
tcon->tidStatus = CifsGood;
tcon->need_reconnect = false;
tcon->tid = le32_to_cpu(rsp->hdr.Id.SyncId.TreeId); tcon->tid = le32_to_cpu(rsp->hdr.Id.SyncId.TreeId);
strlcpy(tcon->treeName, tree, sizeof(tcon->treeName)); strlcpy(tcon->treeName, tree, sizeof(tcon->treeName));
@ -1915,8 +1950,13 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
if (!ses || !(ses->server)) if (!ses || !(ses->server))
return -EIO; return -EIO;
if ((tcon->need_reconnect) || (tcon->ses->need_reconnect)) spin_lock(&ses->chan_lock);
if ((tcon->need_reconnect) ||
(CIFS_ALL_CHANS_NEED_RECONNECT(tcon->ses))) {
spin_unlock(&ses->chan_lock);
return 0; return 0;
}
spin_unlock(&ses->chan_lock);
close_shroot_lease(&tcon->crfid); close_shroot_lease(&tcon->crfid);
@ -3010,7 +3050,7 @@ int
SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server, SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
struct smb_rqst *rqst, struct smb_rqst *rqst,
u64 persistent_fid, u64 volatile_fid, u32 opcode, u64 persistent_fid, u64 volatile_fid, u32 opcode,
bool is_fsctl, char *in_data, u32 indatalen, char *in_data, u32 indatalen,
__u32 max_response_size) __u32 max_response_size)
{ {
struct smb2_ioctl_req *req; struct smb2_ioctl_req *req;
@ -3085,10 +3125,8 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
req->hdr.CreditCharge = req->hdr.CreditCharge =
cpu_to_le16(DIV_ROUND_UP(max(indatalen, max_response_size), cpu_to_le16(DIV_ROUND_UP(max(indatalen, max_response_size),
SMB2_MAX_BUFFER_SIZE)); SMB2_MAX_BUFFER_SIZE));
if (is_fsctl) /* always an FSCTL (for now) */
req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL); req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
else
req->Flags = 0;
/* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */ /* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */
if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
@ -3115,9 +3153,9 @@ SMB2_ioctl_free(struct smb_rqst *rqst)
*/ */
int int
SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
u64 volatile_fid, u32 opcode, bool is_fsctl, u64 volatile_fid, u32 opcode, char *in_data, u32 indatalen,
char *in_data, u32 indatalen, u32 max_out_data_len, u32 max_out_data_len, char **out_data,
char **out_data, u32 *plen /* returned data len */) u32 *plen /* returned data len */)
{ {
struct smb_rqst rqst; struct smb_rqst rqst;
struct smb2_ioctl_rsp *rsp = NULL; struct smb2_ioctl_rsp *rsp = NULL;
@ -3159,7 +3197,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
rc = SMB2_ioctl_init(tcon, server, rc = SMB2_ioctl_init(tcon, server,
&rqst, persistent_fid, volatile_fid, opcode, &rqst, persistent_fid, volatile_fid, opcode,
is_fsctl, in_data, indatalen, max_out_data_len); in_data, indatalen, max_out_data_len);
if (rc) if (rc)
goto ioctl_exit; goto ioctl_exit;
@ -3251,7 +3289,7 @@ SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
cpu_to_le16(COMPRESSION_FORMAT_DEFAULT); cpu_to_le16(COMPRESSION_FORMAT_DEFAULT);
rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
FSCTL_SET_COMPRESSION, true /* is_fsctl */, FSCTL_SET_COMPRESSION,
(char *)&fsctl_input /* data input */, (char *)&fsctl_input /* data input */,
2 /* in data len */, CIFSMaxBufSize /* max out data */, 2 /* in data len */, CIFSMaxBufSize /* max out data */,
&ret_data /* out data */, NULL); &ret_data /* out data */, NULL);
@ -3739,27 +3777,41 @@ void smb2_reconnect_server(struct work_struct *work)
{ {
struct TCP_Server_Info *server = container_of(work, struct TCP_Server_Info *server = container_of(work,
struct TCP_Server_Info, reconnect.work); struct TCP_Server_Info, reconnect.work);
struct cifs_ses *ses; struct TCP_Server_Info *pserver;
struct cifs_ses *ses, *ses2;
struct cifs_tcon *tcon, *tcon2; struct cifs_tcon *tcon, *tcon2;
struct list_head tmp_list; struct list_head tmp_list, tmp_ses_list;
int tcon_exist = false; bool tcon_exist = false, ses_exist = false;
bool tcon_selected = false;
int rc; int rc;
int resched = false; bool resched = false;
/* If server is a channel, select the primary channel */
pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
/* Prevent simultaneous reconnects that can corrupt tcon->rlist list */ /* Prevent simultaneous reconnects that can corrupt tcon->rlist list */
mutex_lock(&server->reconnect_mutex); mutex_lock(&pserver->reconnect_mutex);
INIT_LIST_HEAD(&tmp_list); INIT_LIST_HEAD(&tmp_list);
cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n"); INIT_LIST_HEAD(&tmp_ses_list);
cifs_dbg(FYI, "Reconnecting tcons and channels\n");
spin_lock(&cifs_tcp_ses_lock); spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
spin_lock(&ses->ses_lock);
if (ses->ses_status == SES_EXITING) {
spin_unlock(&ses->ses_lock);
continue;
}
spin_unlock(&ses->ses_lock);
tcon_selected = false;
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
if (tcon->need_reconnect || tcon->need_reopen_files) { if (tcon->need_reconnect || tcon->need_reopen_files) {
tcon->tc_count++; tcon->tc_count++;
list_add_tail(&tcon->rlist, &tmp_list); list_add_tail(&tcon->rlist, &tmp_list);
tcon_exist = true; tcon_selected = tcon_exist = true;
} }
} }
/* /*
@ -3768,15 +3820,27 @@ void smb2_reconnect_server(struct work_struct *work)
*/ */
if (ses->tcon_ipc && ses->tcon_ipc->need_reconnect) { if (ses->tcon_ipc && ses->tcon_ipc->need_reconnect) {
list_add_tail(&ses->tcon_ipc->rlist, &tmp_list); list_add_tail(&ses->tcon_ipc->rlist, &tmp_list);
tcon_exist = true; tcon_selected = tcon_exist = true;
ses->ses_count++; ses->ses_count++;
} }
/*
* handle the case where channel needs to reconnect
* binding session, but tcon is healthy (some other channel
* is active)
*/
spin_lock(&ses->chan_lock);
if (!tcon_selected && cifs_chan_needs_reconnect(ses, server)) {
list_add_tail(&ses->rlist, &tmp_ses_list);
ses_exist = true;
ses->ses_count++;
}
spin_unlock(&ses->chan_lock);
} }
/* /*
* Get the reference to server struct to be sure that the last call of * Get the reference to server struct to be sure that the last call of
* cifs_put_tcon() in the loop below won't release the server pointer. * cifs_put_tcon() in the loop below won't release the server pointer.
*/ */
if (tcon_exist) if (tcon_exist || ses_exist)
server->srv_count++; server->srv_count++;
spin_unlock(&cifs_tcp_ses_lock); spin_unlock(&cifs_tcp_ses_lock);
@ -3794,13 +3858,43 @@ void smb2_reconnect_server(struct work_struct *work)
cifs_put_tcon(tcon); cifs_put_tcon(tcon);
} }
cifs_dbg(FYI, "Reconnecting tcons finished\n"); if (!ses_exist)
goto done;
/* allocate a dummy tcon struct used for reconnect */
tcon = tconInfoAlloc();
if (!tcon) {
resched = true;
list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {
list_del_init(&ses->rlist);
cifs_put_smb_ses(ses);
}
goto done;
}
tcon->status = TID_GOOD;
tcon->retry = false;
tcon->need_reconnect = false;
/* now reconnect sessions for necessary channels */
list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {
tcon->ses = ses;
rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server);
if (rc)
resched = true;
list_del_init(&ses->rlist);
cifs_put_smb_ses(ses);
}
tconInfoFree(tcon);
done:
cifs_dbg(FYI, "Reconnecting tcons and channels finished\n");
if (resched) if (resched)
queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ); queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ);
mutex_unlock(&server->reconnect_mutex); mutex_unlock(&pserver->reconnect_mutex);
/* now we can safely release srv struct */ /* now we can safely release srv struct */
if (tcon_exist) if (tcon_exist || ses_exist)
cifs_put_tcp_session(server, 1); cifs_put_tcp_session(server, 1);
} }
@ -3814,13 +3908,17 @@ SMB2_echo(struct TCP_Server_Info *server)
.rq_nvec = 1 }; .rq_nvec = 1 };
unsigned int total_len; unsigned int total_len;
cifs_dbg(FYI, "In echo request\n"); cifs_dbg(FYI, "In echo request for conn_id %lld\n", server->conn_id);
if (server->tcpStatus == CifsNeedNegotiate) { spin_lock(&server->srv_lock);
if (server->ops->need_neg &&
server->ops->need_neg(server)) {
spin_unlock(&server->srv_lock);
/* No need to send echo on newly established connections */ /* No need to send echo on newly established connections */
mod_delayed_work(cifsiod_wq, &server->reconnect, 0); mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
return rc; return rc;
} }
spin_unlock(&server->srv_lock);
rc = smb2_plain_req_init(SMB2_ECHO, NULL, server, rc = smb2_plain_req_init(SMB2_ECHO, NULL, server,
(void **)&req, &total_len); (void **)&req, &total_len);

View File

@ -23,7 +23,7 @@ struct smb_rqst;
extern int map_smb2_to_linux_error(char *buf, bool log_err); extern int map_smb2_to_linux_error(char *buf, bool log_err);
extern int smb2_check_message(char *buf, unsigned int length, extern int smb2_check_message(char *buf, unsigned int length,
struct TCP_Server_Info *server); struct TCP_Server_Info *server);
extern unsigned int smb2_calc_size(void *buf, struct TCP_Server_Info *server); extern unsigned int smb2_calc_size(void *buf);
extern char *smb2_get_data_area_len(int *off, int *len, extern char *smb2_get_data_area_len(int *off, int *len,
struct smb2_hdr *shdr); struct smb2_hdr *shdr);
extern __le16 *cifs_convert_path_to_utf16(const char *from, extern __le16 *cifs_convert_path_to_utf16(const char *from,
@ -37,8 +37,6 @@ extern struct mid_q_entry *smb2_setup_request(struct cifs_ses *ses,
struct smb_rqst *rqst); struct smb_rqst *rqst);
extern struct mid_q_entry *smb2_setup_async_request( extern struct mid_q_entry *smb2_setup_async_request(
struct TCP_Server_Info *server, struct smb_rqst *rqst); struct TCP_Server_Info *server, struct smb_rqst *rqst);
extern struct cifs_ses *smb2_find_smb_ses(struct TCP_Server_Info *server,
__u64 ses_id);
extern struct cifs_tcon *smb2_find_smb_tcon(struct TCP_Server_Info *server, extern struct cifs_tcon *smb2_find_smb_tcon(struct TCP_Server_Info *server,
__u64 ses_id, __u32 tid); __u64 ses_id, __u32 tid);
extern int smb2_calc_signature(struct smb_rqst *rqst, extern int smb2_calc_signature(struct smb_rqst *rqst,
@ -119,8 +117,11 @@ extern void smb2_set_related(struct smb_rqst *rqst);
* SMB2 Worker functions - most of protocol specific implementation details * SMB2 Worker functions - most of protocol specific implementation details
* are contained within these calls. * are contained within these calls.
*/ */
extern int SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses); extern int SMB2_negotiate(const unsigned int xid,
struct cifs_ses *ses,
struct TCP_Server_Info *server);
extern int SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses, extern int SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
struct TCP_Server_Info *server,
const struct nls_table *nls_cp); const struct nls_table *nls_cp);
extern int SMB2_logoff(const unsigned int xid, struct cifs_ses *ses); extern int SMB2_logoff(const unsigned int xid, struct cifs_ses *ses);
extern int SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, extern int SMB2_tcon(const unsigned int xid, struct cifs_ses *ses,
@ -140,13 +141,13 @@ extern int SMB2_open_init(struct cifs_tcon *tcon,
extern void SMB2_open_free(struct smb_rqst *rqst); extern void SMB2_open_free(struct smb_rqst *rqst);
extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, u32 opcode, u64 persistent_fid, u64 volatile_fid, u32 opcode,
bool is_fsctl, char *in_data, u32 indatalen, u32 maxoutlen, char *in_data, u32 indatalen, u32 maxoutlen,
char **out_data, u32 *plen /* returned data len */); char **out_data, u32 *plen /* returned data len */);
extern int SMB2_ioctl_init(struct cifs_tcon *tcon, extern int SMB2_ioctl_init(struct cifs_tcon *tcon,
struct TCP_Server_Info *server, struct TCP_Server_Info *server,
struct smb_rqst *rqst, struct smb_rqst *rqst,
u64 persistent_fid, u64 volatile_fid, u32 opcode, u64 persistent_fid, u64 volatile_fid, u32 opcode,
bool is_fsctl, char *in_data, u32 indatalen, char *in_data, u32 indatalen,
__u32 max_response_size); __u32 max_response_size);
extern void SMB2_ioctl_free(struct smb_rqst *rqst); extern void SMB2_ioctl_free(struct smb_rqst *rqst);
extern int SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon, extern int SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
@ -274,6 +275,7 @@ extern void smb2_copy_fs_info_to_kstatfs(
struct kstatfs *kst); struct kstatfs *kst);
extern int smb311_crypto_shash_allocate(struct TCP_Server_Info *server); extern int smb311_crypto_shash_allocate(struct TCP_Server_Info *server);
extern int smb311_update_preauth_hash(struct cifs_ses *ses, extern int smb311_update_preauth_hash(struct cifs_ses *ses,
struct TCP_Server_Info *server,
struct kvec *iov, int nvec); struct kvec *iov, int nvec);
extern int smb2_query_info_compound(const unsigned int xid, extern int smb2_query_info_compound(const unsigned int xid,
struct cifs_tcon *tcon, struct cifs_tcon *tcon,

View File

@ -78,35 +78,44 @@ err:
static static
int smb2_get_sign_key(__u64 ses_id, struct TCP_Server_Info *server, u8 *key) int smb3_get_sign_key(__u64 ses_id, struct TCP_Server_Info *server, u8 *key)
{ {
struct cifs_chan *chan; struct cifs_chan *chan;
struct TCP_Server_Info *pserver;
struct cifs_ses *ses = NULL; struct cifs_ses *ses = NULL;
struct TCP_Server_Info *it = NULL;
int i; int i;
int rc = 0; int rc = 0;
bool is_binding = false;
spin_lock(&cifs_tcp_ses_lock); spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(it, &cifs_tcp_ses_list, tcp_ses_list) { /* If server is a channel, select the primary channel */
list_for_each_entry(ses, &it->smb_ses_list, smb_ses_list) { pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
if (ses->Suid == ses_id) if (ses->Suid == ses_id)
goto found; goto found;
} }
} cifs_server_dbg(FYI, "%s: Could not find session 0x%llx\n",
cifs_server_dbg(VFS, "%s: Could not find session 0x%llx\n",
__func__, ses_id); __func__, ses_id);
rc = -ENOENT; rc = -ENOENT;
goto out; goto out;
found: found:
if (ses->binding) { spin_lock(&ses->ses_lock);
spin_lock(&ses->chan_lock);
is_binding = (cifs_chan_needs_reconnect(ses, server) &&
ses->ses_status == SES_GOOD);
if (is_binding) {
/* /*
* If we are in the process of binding a new channel * If we are in the process of binding a new channel
* to an existing session, use the master connection * to an existing session, use the master connection
* session key * session key
*/ */
memcpy(key, ses->smb3signingkey, SMB3_SIGN_KEY_SIZE); memcpy(key, ses->smb3signingkey, SMB3_SIGN_KEY_SIZE);
spin_unlock(&ses->chan_lock);
spin_unlock(&ses->ses_lock);
goto out; goto out;
} }
@ -118,9 +127,13 @@ found:
chan = ses->chans + i; chan = ses->chans + i;
if (chan->server == server) { if (chan->server == server) {
memcpy(key, chan->signkey, SMB3_SIGN_KEY_SIZE); memcpy(key, chan->signkey, SMB3_SIGN_KEY_SIZE);
spin_unlock(&ses->chan_lock);
spin_unlock(&ses->ses_lock);
goto out; goto out;
} }
} }
spin_unlock(&ses->chan_lock);
spin_unlock(&ses->ses_lock);
cifs_dbg(VFS, cifs_dbg(VFS,
"%s: Could not find channel signing key for session 0x%llx\n", "%s: Could not find channel signing key for session 0x%llx\n",
@ -144,23 +157,55 @@ smb2_find_smb_ses_unlocked(struct TCP_Server_Info *server, __u64 ses_id)
list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) { list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
if (ses->Suid != ses_id) if (ses->Suid != ses_id)
continue; continue;
spin_lock(&ses->ses_lock);
if (ses->ses_status == SES_EXITING) {
spin_unlock(&ses->ses_lock);
continue;
}
++ses->ses_count; ++ses->ses_count;
spin_unlock(&ses->ses_lock);
return ses; return ses;
} }
return NULL; return NULL;
} }
struct cifs_ses * static int smb2_get_sign_key(struct TCP_Server_Info *server,
smb2_find_smb_ses(struct TCP_Server_Info *server, __u64 ses_id) __u64 ses_id, u8 *key)
{ {
struct cifs_ses *ses; struct cifs_ses *ses;
int rc = -ENOENT;
if (CIFS_SERVER_IS_CHAN(server))
server = server->primary_server;
spin_lock(&cifs_tcp_ses_lock); spin_lock(&cifs_tcp_ses_lock);
ses = smb2_find_smb_ses_unlocked(server, ses_id); list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
spin_unlock(&cifs_tcp_ses_lock); if (ses->Suid != ses_id)
continue;
return ses; rc = 0;
spin_lock(&ses->ses_lock);
switch (ses->ses_status) {
case SES_EXITING: /* SMB2_LOGOFF */
case SES_GOOD:
if (likely(ses->auth_key.response)) {
memcpy(key, ses->auth_key.response,
SMB2_NTLMV2_SESSKEY_SIZE);
} else {
rc = -EIO;
}
break;
default:
rc = -EAGAIN;
break;
}
spin_unlock(&ses->ses_lock);
break;
}
spin_unlock(&cifs_tcp_ses_lock);
return rc;
} }
static struct cifs_tcon * static struct cifs_tcon *
@ -217,16 +262,18 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server,
unsigned char *sigptr = smb2_signature; unsigned char *sigptr = smb2_signature;
struct kvec *iov = rqst->rq_iov; struct kvec *iov = rqst->rq_iov;
struct smb2_hdr *shdr = (struct smb2_hdr *)iov[0].iov_base; struct smb2_hdr *shdr = (struct smb2_hdr *)iov[0].iov_base;
struct cifs_ses *ses;
struct shash_desc *shash; struct shash_desc *shash;
struct crypto_shash *hash; struct crypto_shash *hash;
struct sdesc *sdesc = NULL; struct sdesc *sdesc = NULL;
struct smb_rqst drqst; struct smb_rqst drqst;
__u64 sid = le64_to_cpu(shdr->SessionId);
u8 key[SMB2_NTLMV2_SESSKEY_SIZE];
ses = smb2_find_smb_ses(server, le64_to_cpu(shdr->SessionId)); rc = smb2_get_sign_key(server, sid, key);
if (!ses) { if (unlikely(rc)) {
cifs_server_dbg(VFS, "%s: Could not find session\n", __func__); cifs_server_dbg(FYI, "%s: [sesid=0x%llx] couldn't find signing key: %d\n",
return 0; __func__, sid, rc);
return rc;
} }
memset(smb2_signature, 0x0, SMB2_HMACSHA256_SIZE); memset(smb2_signature, 0x0, SMB2_HMACSHA256_SIZE);
@ -245,8 +292,7 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server,
shash = &server->secmech.sdeschmacsha256->shash; shash = &server->secmech.sdeschmacsha256->shash;
} }
rc = crypto_shash_setkey(hash, ses->auth_key.response, rc = crypto_shash_setkey(shash->tfm, key, sizeof(key));
SMB2_NTLMV2_SESSKEY_SIZE);
if (rc) { if (rc) {
cifs_server_dbg(VFS, cifs_server_dbg(VFS,
"%s: Could not update with response\n", "%s: Could not update with response\n",
@ -288,8 +334,6 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server,
out: out:
if (allocate_crypto) if (allocate_crypto)
cifs_free_hash(&hash, &sdesc); cifs_free_hash(&hash, &sdesc);
if (ses)
cifs_put_smb_ses(ses);
return rc; return rc;
} }
@ -298,7 +342,8 @@ static int generate_key(struct cifs_ses *ses, struct kvec label,
{ {
unsigned char zero = 0x0; unsigned char zero = 0x0;
__u8 i[4] = {0, 0, 0, 1}; __u8 i[4] = {0, 0, 0, 1};
__u8 L[4] = {0, 0, 0, 128}; __u8 L128[4] = {0, 0, 0, 128};
__u8 L256[4] = {0, 0, 1, 0};
int rc = 0; int rc = 0;
unsigned char prfhash[SMB2_HMACSHA256_SIZE]; unsigned char prfhash[SMB2_HMACSHA256_SIZE];
unsigned char *hashptr = prfhash; unsigned char *hashptr = prfhash;
@ -354,8 +399,14 @@ static int generate_key(struct cifs_ses *ses, struct kvec label,
goto smb3signkey_ret; goto smb3signkey_ret;
} }
if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) ||
(server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)) {
rc = crypto_shash_update(&server->secmech.sdeschmacsha256->shash, rc = crypto_shash_update(&server->secmech.sdeschmacsha256->shash,
L, 4); L256, 4);
} else {
rc = crypto_shash_update(&server->secmech.sdeschmacsha256->shash,
L128, 4);
}
if (rc) { if (rc) {
cifs_server_dbg(VFS, "%s: Could not update with L\n", __func__); cifs_server_dbg(VFS, "%s: Could not update with L\n", __func__);
goto smb3signkey_ret; goto smb3signkey_ret;
@ -387,9 +438,28 @@ struct derivation_triplet {
static int static int
generate_smb3signingkey(struct cifs_ses *ses, generate_smb3signingkey(struct cifs_ses *ses,
struct TCP_Server_Info *server,
const struct derivation_triplet *ptriplet) const struct derivation_triplet *ptriplet)
{ {
int rc; int rc;
bool is_binding = false;
int chan_index = 0;
spin_lock(&ses->ses_lock);
spin_lock(&ses->chan_lock);
is_binding = (cifs_chan_needs_reconnect(ses, server) &&
ses->ses_status == SES_GOOD);
chan_index = cifs_ses_get_chan_index(ses, server);
if (chan_index == CIFS_INVAL_CHAN_INDEX) {
spin_unlock(&ses->chan_lock);
spin_unlock(&ses->ses_lock);
return -EINVAL;
}
spin_unlock(&ses->chan_lock);
spin_unlock(&ses->ses_lock);
/* /*
* All channels use the same encryption/decryption keys but * All channels use the same encryption/decryption keys but
@ -401,10 +471,10 @@ generate_smb3signingkey(struct cifs_ses *ses,
* master connection signing key stored in the session * master connection signing key stored in the session
*/ */
if (ses->binding) { if (is_binding) {
rc = generate_key(ses, ptriplet->signing.label, rc = generate_key(ses, ptriplet->signing.label,
ptriplet->signing.context, ptriplet->signing.context,
cifs_ses_binding_channel(ses)->signkey, ses->chans[chan_index].signkey,
SMB3_SIGN_KEY_SIZE); SMB3_SIGN_KEY_SIZE);
if (rc) if (rc)
return rc; return rc;
@ -416,17 +486,20 @@ generate_smb3signingkey(struct cifs_ses *ses,
if (rc) if (rc)
return rc; return rc;
memcpy(ses->chans[0].signkey, ses->smb3signingkey, /* safe to access primary channel, since it will never go away */
spin_lock(&ses->chan_lock);
memcpy(ses->chans[chan_index].signkey, ses->smb3signingkey,
SMB3_SIGN_KEY_SIZE); SMB3_SIGN_KEY_SIZE);
spin_unlock(&ses->chan_lock);
rc = generate_key(ses, ptriplet->encryption.label, rc = generate_key(ses, ptriplet->encryption.label,
ptriplet->encryption.context, ptriplet->encryption.context,
ses->smb3encryptionkey, ses->smb3encryptionkey,
SMB3_SIGN_KEY_SIZE); SMB3_ENC_DEC_KEY_SIZE);
rc = generate_key(ses, ptriplet->decryption.label, rc = generate_key(ses, ptriplet->decryption.label,
ptriplet->decryption.context, ptriplet->decryption.context,
ses->smb3decryptionkey, ses->smb3decryptionkey,
SMB3_SIGN_KEY_SIZE); SMB3_ENC_DEC_KEY_SIZE);
if (rc) if (rc)
return rc; return rc;
} }
@ -442,20 +515,30 @@ generate_smb3signingkey(struct cifs_ses *ses,
*/ */
cifs_dbg(VFS, "Session Id %*ph\n", (int)sizeof(ses->Suid), cifs_dbg(VFS, "Session Id %*ph\n", (int)sizeof(ses->Suid),
&ses->Suid); &ses->Suid);
cifs_dbg(VFS, "Cipher type %d\n", server->cipher_type);
cifs_dbg(VFS, "Session Key %*ph\n", cifs_dbg(VFS, "Session Key %*ph\n",
SMB2_NTLMV2_SESSKEY_SIZE, ses->auth_key.response); SMB2_NTLMV2_SESSKEY_SIZE, ses->auth_key.response);
cifs_dbg(VFS, "Signing Key %*ph\n", cifs_dbg(VFS, "Signing Key %*ph\n",
SMB3_SIGN_KEY_SIZE, ses->smb3signingkey); SMB3_SIGN_KEY_SIZE, ses->smb3signingkey);
if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) ||
(server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)) {
cifs_dbg(VFS, "ServerIn Key %*ph\n", cifs_dbg(VFS, "ServerIn Key %*ph\n",
SMB3_SIGN_KEY_SIZE, ses->smb3encryptionkey); SMB3_GCM256_CRYPTKEY_SIZE, ses->smb3encryptionkey);
cifs_dbg(VFS, "ServerOut Key %*ph\n", cifs_dbg(VFS, "ServerOut Key %*ph\n",
SMB3_SIGN_KEY_SIZE, ses->smb3decryptionkey); SMB3_GCM256_CRYPTKEY_SIZE, ses->smb3decryptionkey);
} else {
cifs_dbg(VFS, "ServerIn Key %*ph\n",
SMB3_GCM128_CRYPTKEY_SIZE, ses->smb3encryptionkey);
cifs_dbg(VFS, "ServerOut Key %*ph\n",
SMB3_GCM128_CRYPTKEY_SIZE, ses->smb3decryptionkey);
}
#endif #endif
return rc; return rc;
} }
int int
generate_smb30signingkey(struct cifs_ses *ses) generate_smb30signingkey(struct cifs_ses *ses,
struct TCP_Server_Info *server)
{ {
struct derivation_triplet triplet; struct derivation_triplet triplet;
@ -479,11 +562,12 @@ generate_smb30signingkey(struct cifs_ses *ses)
d->context.iov_base = "ServerOut"; d->context.iov_base = "ServerOut";
d->context.iov_len = 10; d->context.iov_len = 10;
return generate_smb3signingkey(ses, &triplet); return generate_smb3signingkey(ses, server, &triplet);
} }
int int
generate_smb311signingkey(struct cifs_ses *ses) generate_smb311signingkey(struct cifs_ses *ses,
struct TCP_Server_Info *server)
{ {
struct derivation_triplet triplet; struct derivation_triplet triplet;
@ -507,7 +591,7 @@ generate_smb311signingkey(struct cifs_ses *ses)
d->context.iov_base = ses->preauth_sha_hash; d->context.iov_base = ses->preauth_sha_hash;
d->context.iov_len = 64; d->context.iov_len = 64;
return generate_smb3signingkey(ses, &triplet); return generate_smb3signingkey(ses, server, &triplet);
} }
int int
@ -525,9 +609,11 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server,
struct smb_rqst drqst; struct smb_rqst drqst;
u8 key[SMB3_SIGN_KEY_SIZE]; u8 key[SMB3_SIGN_KEY_SIZE];
rc = smb2_get_sign_key(le64_to_cpu(shdr->SessionId), server, key); rc = smb3_get_sign_key(le64_to_cpu(shdr->SessionId), server, key);
if (rc) if (unlikely(rc)) {
return 0; cifs_server_dbg(FYI, "%s: Could not get signing key\n", __func__);
return rc;
}
if (allocate_crypto) { if (allocate_crypto) {
rc = cifs_alloc_hash("cmac(aes)", &hash, &sdesc); rc = cifs_alloc_hash("cmac(aes)", &hash, &sdesc);
@ -609,8 +695,13 @@ smb2_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server)
if (!is_signed) if (!is_signed)
return 0; return 0;
if (server->tcpStatus == CifsNeedNegotiate) spin_lock(&server->srv_lock);
if (server->ops->need_neg &&
server->ops->need_neg(server)) {
spin_unlock(&server->srv_lock);
return 0; return 0;
}
spin_unlock(&server->srv_lock);
if (!is_binding && !server->session_estab) { if (!is_binding && !server->session_estab) {
strncpy(shdr->Signature, "BSRSPYL", 8); strncpy(shdr->Signature, "BSRSPYL", 8);
return 0; return 0;
@ -714,7 +805,7 @@ smb2_mid_entry_alloc(const struct smb2_hdr *shdr,
temp->callback = cifs_wake_up_task; temp->callback = cifs_wake_up_task;
temp->callback_data = current; temp->callback_data = current;
atomic_inc(&midCount); atomic_inc(&mid_count);
temp->mid_state = MID_REQUEST_ALLOCATED; temp->mid_state = MID_REQUEST_ALLOCATED;
trace_smb3_cmd_enter(le32_to_cpu(shdr->Id.SyncId.TreeId), trace_smb3_cmd_enter(le32_to_cpu(shdr->Id.SyncId.TreeId),
le64_to_cpu(shdr->SessionId), le64_to_cpu(shdr->SessionId),
@ -726,37 +817,50 @@ static int
smb2_get_mid_entry(struct cifs_ses *ses, struct TCP_Server_Info *server, smb2_get_mid_entry(struct cifs_ses *ses, struct TCP_Server_Info *server,
struct smb2_hdr *shdr, struct mid_q_entry **mid) struct smb2_hdr *shdr, struct mid_q_entry **mid)
{ {
if (server->tcpStatus == CifsExiting) spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsExiting) {
spin_unlock(&server->srv_lock);
return -ENOENT; return -ENOENT;
}
if (server->tcpStatus == CifsNeedReconnect) { if (server->tcpStatus == CifsNeedReconnect) {
spin_unlock(&server->srv_lock);
cifs_dbg(FYI, "tcp session dead - return to caller to retry\n"); cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
return -EAGAIN; return -EAGAIN;
} }
if (server->tcpStatus == CifsNeedNegotiate && if (server->tcpStatus == CifsNeedNegotiate &&
shdr->Command != SMB2_NEGOTIATE) shdr->Command != SMB2_NEGOTIATE) {
spin_unlock(&server->srv_lock);
return -EAGAIN; return -EAGAIN;
}
spin_unlock(&server->srv_lock);
if (ses->status == CifsNew) { spin_lock(&ses->ses_lock);
if (ses->ses_status == SES_NEW) {
if ((shdr->Command != SMB2_SESSION_SETUP) && if ((shdr->Command != SMB2_SESSION_SETUP) &&
(shdr->Command != SMB2_NEGOTIATE)) (shdr->Command != SMB2_NEGOTIATE)) {
spin_unlock(&ses->ses_lock);
return -EAGAIN; return -EAGAIN;
}
/* else ok - we are setting up session */ /* else ok - we are setting up session */
} }
if (ses->status == CifsExiting) { if (ses->ses_status == SES_EXITING) {
if (shdr->Command != SMB2_LOGOFF) if (shdr->Command != SMB2_LOGOFF) {
spin_unlock(&ses->ses_lock);
return -EAGAIN; return -EAGAIN;
}
/* else ok - we are shutting down the session */ /* else ok - we are shutting down the session */
} }
spin_unlock(&ses->ses_lock);
*mid = smb2_mid_entry_alloc(shdr, server); *mid = smb2_mid_entry_alloc(shdr, server);
if (*mid == NULL) if (*mid == NULL)
return -ENOMEM; return -ENOMEM;
spin_lock(&GlobalMid_Lock); spin_lock(&server->mid_lock);
list_add_tail(&(*mid)->qhead, &server->pending_mid_q); list_add_tail(&(*mid)->qhead, &server->pending_mid_q);
spin_unlock(&GlobalMid_Lock); spin_unlock(&server->mid_lock);
return 0; return 0;
} }
@ -822,9 +926,13 @@ smb2_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
(struct smb2_hdr *)rqst->rq_iov[0].iov_base; (struct smb2_hdr *)rqst->rq_iov[0].iov_base;
struct mid_q_entry *mid; struct mid_q_entry *mid;
spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsNeedNegotiate && if (server->tcpStatus == CifsNeedNegotiate &&
shdr->Command != SMB2_NEGOTIATE) shdr->Command != SMB2_NEGOTIATE) {
spin_unlock(&server->srv_lock);
return ERR_PTR(-EAGAIN); return ERR_PTR(-EAGAIN);
}
spin_unlock(&server->srv_lock);
smb2_seq_num_into_buf(server, shdr); smb2_seq_num_into_buf(server, shdr);

View File

@ -1391,9 +1391,9 @@ void smbd_destroy(struct TCP_Server_Info *server)
log_rdma_event(INFO, "freeing mr list\n"); log_rdma_event(INFO, "freeing mr list\n");
wake_up_interruptible_all(&info->wait_mr); wake_up_interruptible_all(&info->wait_mr);
while (atomic_read(&info->mr_used_count)) { while (atomic_read(&info->mr_used_count)) {
mutex_unlock(&server->srv_mutex); cifs_server_unlock(server);
msleep(1000); msleep(1000);
mutex_lock(&server->srv_mutex); cifs_server_lock(server);
} }
destroy_mr_list(info); destroy_mr_list(info);

View File

@ -39,7 +39,7 @@ cifs_wake_up_task(struct mid_q_entry *mid)
wake_up_process(mid->callback_data); wake_up_process(mid->callback_data);
} }
struct mid_q_entry * static struct mid_q_entry *
alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{ {
struct mid_q_entry *temp; struct mid_q_entry *temp;
@ -70,7 +70,7 @@ alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
temp->callback = cifs_wake_up_task; temp->callback = cifs_wake_up_task;
temp->callback_data = current; temp->callback_data = current;
atomic_inc(&midCount); atomic_inc(&mid_count);
temp->mid_state = MID_REQUEST_ALLOCATED; temp->mid_state = MID_REQUEST_ALLOCATED;
return temp; return temp;
} }
@ -94,7 +94,7 @@ void __release_mid(struct kref *refcount)
server->ops->handle_cancelled_mid(midEntry, server); server->ops->handle_cancelled_mid(midEntry, server);
midEntry->mid_state = MID_FREE; midEntry->mid_state = MID_FREE;
atomic_dec(&midCount); atomic_dec(&mid_count);
if (midEntry->large_buf) if (midEntry->large_buf)
cifs_buf_release(midEntry->resp_buf); cifs_buf_release(midEntry->resp_buf);
else else
@ -158,12 +158,12 @@ void __release_mid(struct kref *refcount)
void void
delete_mid(struct mid_q_entry *mid) delete_mid(struct mid_q_entry *mid)
{ {
spin_lock(&GlobalMid_Lock); spin_lock(&mid->server->mid_lock);
if (!(mid->mid_flags & MID_DELETED)) { if (!(mid->mid_flags & MID_DELETED)) {
list_del_init(&mid->qhead); list_del_init(&mid->qhead);
mid->mid_flags |= MID_DELETED; mid->mid_flags |= MID_DELETED;
} }
spin_unlock(&GlobalMid_Lock); spin_unlock(&mid->server->mid_lock);
release_mid(mid); release_mid(mid);
} }
@ -290,7 +290,7 @@ static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst, __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
struct smb_rqst *rqst) struct smb_rqst *rqst)
{ {
int rc = 0; int rc;
struct kvec *iov; struct kvec *iov;
int n_vec; int n_vec;
unsigned int send_length = 0; unsigned int send_length = 0;
@ -301,6 +301,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
struct msghdr smb_msg = {}; struct msghdr smb_msg = {};
__be32 rfc1002_marker; __be32 rfc1002_marker;
cifs_in_send_inc(server);
if (cifs_rdma_enabled(server)) { if (cifs_rdma_enabled(server)) {
/* return -EAGAIN when connecting or reconnecting */ /* return -EAGAIN when connecting or reconnecting */
rc = -EAGAIN; rc = -EAGAIN;
@ -309,14 +310,17 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
goto smbd_done; goto smbd_done;
} }
rc = -EAGAIN;
if (ssocket == NULL) if (ssocket == NULL)
return -EAGAIN; goto out;
rc = -ERESTARTSYS;
if (fatal_signal_pending(current)) { if (fatal_signal_pending(current)) {
cifs_dbg(FYI, "signal pending before send request\n"); cifs_dbg(FYI, "signal pending before send request\n");
return -ERESTARTSYS; goto out;
} }
rc = 0;
/* cork the socket */ /* cork the socket */
tcp_sock_set_cork(ssocket->sk, true); tcp_sock_set_cork(ssocket->sk, true);
@ -417,9 +421,7 @@ unmask:
* be taken as the remainder of this one. We need to kill the * be taken as the remainder of this one. We need to kill the
* socket so the server throws away the partial SMB * socket so the server throws away the partial SMB
*/ */
spin_lock(&GlobalMid_Lock); cifs_signal_cifsd_for_reconnect(server, false);
server->tcpStatus = CifsNeedReconnect;
spin_unlock(&GlobalMid_Lock);
trace_smb3_partial_send_reconnect(server->CurrentMid, trace_smb3_partial_send_reconnect(server->CurrentMid,
server->conn_id, server->hostname); server->conn_id, server->hostname);
} }
@ -433,13 +435,11 @@ smbd_done:
cifs_server_dbg(VFS, "Error %d sending data on socket to server\n", cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
rc); rc);
rc = -ECONNABORTED; rc = -ECONNABORTED;
spin_lock(&GlobalMid_Lock); cifs_signal_cifsd_for_reconnect(server, false);
if (server->tcpStatus != CifsExiting)
server->tcpStatus = CifsNeedReconnect;
spin_unlock(&GlobalMid_Lock);
} else if (rc > 0) } else if (rc > 0)
rc = 0; rc = 0;
out:
cifs_in_send_dec(server);
return rc; return rc;
} }
@ -463,13 +463,12 @@ smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
return -EIO; return -EIO;
} }
tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS); tr_hdr = kzalloc(sizeof(*tr_hdr), GFP_NOFS);
if (!tr_hdr) if (!tr_hdr)
return -ENOMEM; return -ENOMEM;
memset(&cur_rqst[0], 0, sizeof(cur_rqst)); memset(&cur_rqst[0], 0, sizeof(cur_rqst));
memset(&iov, 0, sizeof(iov)); memset(&iov, 0, sizeof(iov));
memset(tr_hdr, 0, sizeof(*tr_hdr));
iov.iov_base = tr_hdr; iov.iov_base = tr_hdr;
iov.iov_len = sizeof(*tr_hdr); iov.iov_len = sizeof(*tr_hdr);
@ -550,6 +549,16 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
} }
while (1) { while (1) {
spin_unlock(&server->req_lock);
spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsExiting) {
spin_unlock(&server->srv_lock);
return -ENOENT;
}
spin_unlock(&server->srv_lock);
spin_lock(&server->req_lock);
if (*credits < num_credits) { if (*credits < num_credits) {
scredits = *credits; scredits = *credits;
spin_unlock(&server->req_lock); spin_unlock(&server->req_lock);
@ -575,11 +584,6 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
return -ERESTARTSYS; return -ERESTARTSYS;
spin_lock(&server->req_lock); spin_lock(&server->req_lock);
} else { } else {
if (server->tcpStatus == CifsExiting) {
spin_unlock(&server->req_lock);
return -ENOENT;
}
/* /*
* For normal commands, reserve the last MAX_COMPOUND * For normal commands, reserve the last MAX_COMPOUND
* credits to compound requests. * credits to compound requests.
@ -720,35 +724,32 @@ cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf, static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
struct mid_q_entry **ppmidQ) struct mid_q_entry **ppmidQ)
{ {
if (ses->server->tcpStatus == CifsExiting) { spin_lock(&ses->ses_lock);
return -ENOENT; if (ses->ses_status == SES_NEW) {
}
if (ses->server->tcpStatus == CifsNeedReconnect) {
cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
return -EAGAIN;
}
if (ses->status == CifsNew) {
if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) && if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
(in_buf->Command != SMB_COM_NEGOTIATE)) (in_buf->Command != SMB_COM_NEGOTIATE)) {
spin_unlock(&ses->ses_lock);
return -EAGAIN; return -EAGAIN;
}
/* else ok - we are setting up session */ /* else ok - we are setting up session */
} }
if (ses->status == CifsExiting) { if (ses->ses_status == SES_EXITING) {
/* check if SMB session is bad because we are setting it up */ /* check if SMB session is bad because we are setting it up */
if (in_buf->Command != SMB_COM_LOGOFF_ANDX) if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
spin_unlock(&ses->ses_lock);
return -EAGAIN; return -EAGAIN;
}
/* else ok - we are shutting down session */ /* else ok - we are shutting down session */
} }
spin_unlock(&ses->ses_lock);
*ppmidQ = alloc_mid(in_buf, ses->server); *ppmidQ = alloc_mid(in_buf, ses->server);
if (*ppmidQ == NULL) if (*ppmidQ == NULL)
return -ENOMEM; return -ENOMEM;
spin_lock(&GlobalMid_Lock); spin_lock(&ses->server->mid_lock);
list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q); list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
spin_unlock(&GlobalMid_Lock); spin_unlock(&ses->server->mid_lock);
return 0; return 0;
} }
@ -821,7 +822,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
} else } else
instance = exist_credits->instance; instance = exist_credits->instance;
mutex_lock(&server->srv_mutex); cifs_server_lock(server);
/* /*
* We can't use credits obtained from the previous session to send this * We can't use credits obtained from the previous session to send this
@ -829,14 +830,14 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
* return -EAGAIN in such cases to let callers handle it. * return -EAGAIN in such cases to let callers handle it.
*/ */
if (instance != server->reconnect_instance) { if (instance != server->reconnect_instance) {
mutex_unlock(&server->srv_mutex); cifs_server_unlock(server);
add_credits_and_wake_if(server, &credits, optype); add_credits_and_wake_if(server, &credits, optype);
return -EAGAIN; return -EAGAIN;
} }
mid = server->ops->setup_async_request(server, rqst); mid = server->ops->setup_async_request(server, rqst);
if (IS_ERR(mid)) { if (IS_ERR(mid)) {
mutex_unlock(&server->srv_mutex); cifs_server_unlock(server);
add_credits_and_wake_if(server, &credits, optype); add_credits_and_wake_if(server, &credits, optype);
return PTR_ERR(mid); return PTR_ERR(mid);
} }
@ -848,18 +849,16 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
mid->mid_state = MID_REQUEST_SUBMITTED; mid->mid_state = MID_REQUEST_SUBMITTED;
/* put it on the pending_mid_q */ /* put it on the pending_mid_q */
spin_lock(&GlobalMid_Lock); spin_lock(&server->mid_lock);
list_add_tail(&mid->qhead, &server->pending_mid_q); list_add_tail(&mid->qhead, &server->pending_mid_q);
spin_unlock(&GlobalMid_Lock); spin_unlock(&server->mid_lock);
/* /*
* Need to store the time in mid before calling I/O. For call_async, * Need to store the time in mid before calling I/O. For call_async,
* I/O response may come back and free the mid entry on another thread. * I/O response may come back and free the mid entry on another thread.
*/ */
cifs_save_when_sent(mid); cifs_save_when_sent(mid);
cifs_in_send_inc(server);
rc = smb_send_rqst(server, 1, rqst, flags); rc = smb_send_rqst(server, 1, rqst, flags);
cifs_in_send_dec(server);
if (rc < 0) { if (rc < 0) {
revert_current_mid(server, mid->credits); revert_current_mid(server, mid->credits);
@ -867,7 +866,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
delete_mid(mid); delete_mid(mid);
} }
mutex_unlock(&server->srv_mutex); cifs_server_unlock(server);
if (rc == 0) if (rc == 0)
return 0; return 0;
@ -911,10 +910,10 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n", cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
__func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state); __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);
spin_lock(&GlobalMid_Lock); spin_lock(&server->mid_lock);
switch (mid->mid_state) { switch (mid->mid_state) {
case MID_RESPONSE_READY: case MID_RESPONSE_READY:
spin_unlock(&GlobalMid_Lock); spin_unlock(&server->mid_lock);
return rc; return rc;
case MID_RETRY_NEEDED: case MID_RETRY_NEEDED:
rc = -EAGAIN; rc = -EAGAIN;
@ -934,7 +933,7 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
__func__, mid->mid, mid->mid_state); __func__, mid->mid, mid->mid_state);
rc = -EIO; rc = -EIO;
} }
spin_unlock(&GlobalMid_Lock); spin_unlock(&server->mid_lock);
release_mid(mid); release_mid(mid);
return rc; return rc;
@ -1041,23 +1040,45 @@ cifs_cancelled_callback(struct mid_q_entry *mid)
struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses) struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
{ {
uint index = 0; uint index = 0;
unsigned int min_in_flight = UINT_MAX, max_in_flight = 0;
struct TCP_Server_Info *server = NULL;
int i, start, cur;
if (!ses) if (!ses)
return NULL; return NULL;
spin_lock(&ses->chan_lock); spin_lock(&ses->chan_lock);
if (!ses->binding) { start = atomic_inc_return(&ses->chan_seq);
/* round robin */ for (i = 0; i < ses->chan_count; i++) {
if (ses->chan_count > 1) { cur = (start + i) % ses->chan_count;
index = (uint)atomic_inc_return(&ses->chan_seq); server = ses->chans[cur].server;
index %= ses->chan_count; if (!server)
continue;
/*
* strictly speaking, we should pick up req_lock to read
* server->in_flight. But it shouldn't matter much here if we
* race while reading this data. The worst that can happen is
* that we could use a channel that's not least loaded. Avoiding
* taking the lock could help reduce wait time, which is
* important for this function
*/
if (server->in_flight < min_in_flight) {
min_in_flight = server->in_flight;
index = cur;
} }
spin_unlock(&ses->chan_lock); if (server->in_flight > max_in_flight)
return ses->chans[index].server; max_in_flight = server->in_flight;
} else {
spin_unlock(&ses->chan_lock);
return cifs_ses_server(ses);
} }
/* if all channels are equally loaded, fall back to round-robin */
if (min_in_flight == max_in_flight)
index = (uint)start % ses->chan_count;
server = ses->chans[index].server;
spin_unlock(&ses->chan_lock);
return server;
} }
int int
@ -1085,8 +1106,12 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
return -EIO; return -EIO;
} }
if (server->tcpStatus == CifsExiting) spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsExiting) {
spin_unlock(&server->srv_lock);
return -ENOENT; return -ENOENT;
}
spin_unlock(&server->srv_lock);
/* /*
* Wait for all the requests to become available. * Wait for all the requests to become available.
@ -1112,7 +1137,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
* of smb data. * of smb data.
*/ */
mutex_lock(&server->srv_mutex); cifs_server_lock(server);
/* /*
* All the parts of the compound chain belong obtained credits from the * All the parts of the compound chain belong obtained credits from the
@ -1122,7 +1147,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
* handle it. * handle it.
*/ */
if (instance != server->reconnect_instance) { if (instance != server->reconnect_instance) {
mutex_unlock(&server->srv_mutex); cifs_server_unlock(server);
for (j = 0; j < num_rqst; j++) for (j = 0; j < num_rqst; j++)
add_credits(server, &credits[j], optype); add_credits(server, &credits[j], optype);
return -EAGAIN; return -EAGAIN;
@ -1134,7 +1159,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
revert_current_mid(server, i); revert_current_mid(server, i);
for (j = 0; j < i; j++) for (j = 0; j < i; j++)
delete_mid(midQ[j]); delete_mid(midQ[j]);
mutex_unlock(&server->srv_mutex); cifs_server_unlock(server);
/* Update # of requests on wire to server */ /* Update # of requests on wire to server */
for (j = 0; j < num_rqst; j++) for (j = 0; j < num_rqst; j++)
@ -1154,9 +1179,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
else else
midQ[i]->callback = cifs_compound_last_callback; midQ[i]->callback = cifs_compound_last_callback;
} }
cifs_in_send_inc(server);
rc = smb_send_rqst(server, num_rqst, rqst, flags); rc = smb_send_rqst(server, num_rqst, rqst, flags);
cifs_in_send_dec(server);
for (i = 0; i < num_rqst; i++) for (i = 0; i < num_rqst; i++)
cifs_save_when_sent(midQ[i]); cifs_save_when_sent(midQ[i]);
@ -1166,7 +1189,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
server->sequence_number -= 2; server->sequence_number -= 2;
} }
mutex_unlock(&server->srv_mutex); cifs_server_unlock(server);
/* /*
* If sending failed for some reason or it is an oplock break that we * If sending failed for some reason or it is an oplock break that we
@ -1189,12 +1212,17 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
/* /*
* Compounding is never used during session establish. * Compounding is never used during session establish.
*/ */
if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) { spin_lock(&ses->ses_lock);
mutex_lock(&server->srv_mutex); if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
smb311_update_preauth_hash(ses, rqst[0].rq_iov, spin_unlock(&ses->ses_lock);
rqst[0].rq_nvec);
mutex_unlock(&server->srv_mutex); cifs_server_lock(server);
smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
cifs_server_unlock(server);
spin_lock(&ses->ses_lock);
} }
spin_unlock(&ses->ses_lock);
for (i = 0; i < num_rqst; i++) { for (i = 0; i < num_rqst; i++) {
rc = wait_for_response(server, midQ[i]); rc = wait_for_response(server, midQ[i]);
@ -1206,7 +1234,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n", cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
midQ[i]->mid, le16_to_cpu(midQ[i]->command)); midQ[i]->mid, le16_to_cpu(midQ[i]->command));
send_cancel(server, &rqst[i], midQ[i]); send_cancel(server, &rqst[i], midQ[i]);
spin_lock(&GlobalMid_Lock); spin_lock(&server->mid_lock);
midQ[i]->mid_flags |= MID_WAIT_CANCELLED; midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED || if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED ||
midQ[i]->mid_state == MID_RESPONSE_RECEIVED) { midQ[i]->mid_state == MID_RESPONSE_RECEIVED) {
@ -1214,7 +1242,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
cancelled_mid[i] = true; cancelled_mid[i] = true;
credits[i].value = 0; credits[i].value = 0;
} }
spin_unlock(&GlobalMid_Lock); spin_unlock(&server->mid_lock);
} }
} }
@ -1258,15 +1286,19 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
/* /*
* Compounding is never used during session establish. * Compounding is never used during session establish.
*/ */
if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) { spin_lock(&ses->ses_lock);
if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
struct kvec iov = { struct kvec iov = {
.iov_base = resp_iov[0].iov_base, .iov_base = resp_iov[0].iov_base,
.iov_len = resp_iov[0].iov_len .iov_len = resp_iov[0].iov_len
}; };
mutex_lock(&server->srv_mutex); spin_unlock(&ses->ses_lock);
smb311_update_preauth_hash(ses, &iov, 1); cifs_server_lock(server);
mutex_unlock(&server->srv_mutex); smb311_update_preauth_hash(ses, server, &iov, 1);
cifs_server_unlock(server);
spin_lock(&ses->ses_lock);
} }
spin_unlock(&ses->ses_lock);
out: out:
/* /*
@ -1355,8 +1387,12 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
return -EIO; return -EIO;
} }
if (server->tcpStatus == CifsExiting) spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsExiting) {
spin_unlock(&server->srv_lock);
return -ENOENT; return -ENOENT;
}
spin_unlock(&server->srv_lock);
/* Ensure that we do not send more than 50 overlapping requests /* Ensure that we do not send more than 50 overlapping requests
to the same server. We may make this configurable later or to the same server. We may make this configurable later or
@ -1376,11 +1412,11 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
and avoid races inside tcp sendmsg code that could cause corruption and avoid races inside tcp sendmsg code that could cause corruption
of smb data */ of smb data */
mutex_lock(&server->srv_mutex); cifs_server_lock(server);
rc = allocate_mid(ses, in_buf, &midQ); rc = allocate_mid(ses, in_buf, &midQ);
if (rc) { if (rc) {
mutex_unlock(&server->srv_mutex); cifs_server_unlock(server);
/* Update # of requests on wire to server */ /* Update # of requests on wire to server */
add_credits(server, &credits, 0); add_credits(server, &credits, 0);
return rc; return rc;
@ -1388,21 +1424,19 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number); rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
if (rc) { if (rc) {
mutex_unlock(&server->srv_mutex); cifs_server_unlock(server);
goto out; goto out;
} }
midQ->mid_state = MID_REQUEST_SUBMITTED; midQ->mid_state = MID_REQUEST_SUBMITTED;
cifs_in_send_inc(server);
rc = smb_send(server, in_buf, len); rc = smb_send(server, in_buf, len);
cifs_in_send_dec(server);
cifs_save_when_sent(midQ); cifs_save_when_sent(midQ);
if (rc < 0) if (rc < 0)
server->sequence_number -= 2; server->sequence_number -= 2;
mutex_unlock(&server->srv_mutex); cifs_server_unlock(server);
if (rc < 0) if (rc < 0)
goto out; goto out;
@ -1410,16 +1444,16 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
rc = wait_for_response(server, midQ); rc = wait_for_response(server, midQ);
if (rc != 0) { if (rc != 0) {
send_cancel(server, &rqst, midQ); send_cancel(server, &rqst, midQ);
spin_lock(&GlobalMid_Lock); spin_lock(&server->mid_lock);
if (midQ->mid_state == MID_REQUEST_SUBMITTED || if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
midQ->mid_state == MID_RESPONSE_RECEIVED) { midQ->mid_state == MID_RESPONSE_RECEIVED) {
/* no longer considered to be "in-flight" */ /* no longer considered to be "in-flight" */
midQ->callback = release_mid; midQ->callback = release_mid;
spin_unlock(&GlobalMid_Lock); spin_unlock(&server->mid_lock);
add_credits(server, &credits, 0); add_credits(server, &credits, 0);
return rc; return rc;
} }
spin_unlock(&GlobalMid_Lock); spin_unlock(&server->mid_lock);
} }
rc = cifs_sync_mid_result(midQ, server); rc = cifs_sync_mid_result(midQ, server);
@ -1497,8 +1531,12 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
return -EIO; return -EIO;
} }
if (server->tcpStatus == CifsExiting) spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsExiting) {
spin_unlock(&server->srv_lock);
return -ENOENT; return -ENOENT;
}
spin_unlock(&server->srv_lock);
/* Ensure that we do not send more than 50 overlapping requests /* Ensure that we do not send more than 50 overlapping requests
to the same server. We may make this configurable later or to the same server. We may make this configurable later or
@ -1518,31 +1556,29 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
and avoid races inside tcp sendmsg code that could cause corruption and avoid races inside tcp sendmsg code that could cause corruption
of smb data */ of smb data */
mutex_lock(&server->srv_mutex); cifs_server_lock(server);
rc = allocate_mid(ses, in_buf, &midQ); rc = allocate_mid(ses, in_buf, &midQ);
if (rc) { if (rc) {
mutex_unlock(&server->srv_mutex); cifs_server_unlock(server);
return rc; return rc;
} }
rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number); rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
if (rc) { if (rc) {
delete_mid(midQ); delete_mid(midQ);
mutex_unlock(&server->srv_mutex); cifs_server_unlock(server);
return rc; return rc;
} }
midQ->mid_state = MID_REQUEST_SUBMITTED; midQ->mid_state = MID_REQUEST_SUBMITTED;
cifs_in_send_inc(server);
rc = smb_send(server, in_buf, len); rc = smb_send(server, in_buf, len);
cifs_in_send_dec(server);
cifs_save_when_sent(midQ); cifs_save_when_sent(midQ);
if (rc < 0) if (rc < 0)
server->sequence_number -= 2; server->sequence_number -= 2;
mutex_unlock(&server->srv_mutex); cifs_server_unlock(server);
if (rc < 0) { if (rc < 0) {
delete_mid(midQ); delete_mid(midQ);
@ -1557,11 +1593,13 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
(server->tcpStatus != CifsNew))); (server->tcpStatus != CifsNew)));
/* Were we interrupted by a signal ? */ /* Were we interrupted by a signal ? */
spin_lock(&server->srv_lock);
if ((rc == -ERESTARTSYS) && if ((rc == -ERESTARTSYS) &&
(midQ->mid_state == MID_REQUEST_SUBMITTED || (midQ->mid_state == MID_REQUEST_SUBMITTED ||
midQ->mid_state == MID_RESPONSE_RECEIVED) && midQ->mid_state == MID_RESPONSE_RECEIVED) &&
((server->tcpStatus == CifsGood) || ((server->tcpStatus == CifsGood) ||
(server->tcpStatus == CifsNew))) { (server->tcpStatus == CifsNew))) {
spin_unlock(&server->srv_lock);
if (in_buf->Command == SMB_COM_TRANSACTION2) { if (in_buf->Command == SMB_COM_TRANSACTION2) {
/* POSIX lock. We send a NT_CANCEL SMB to cause the /* POSIX lock. We send a NT_CANCEL SMB to cause the
@ -1588,20 +1626,22 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
rc = wait_for_response(server, midQ); rc = wait_for_response(server, midQ);
if (rc) { if (rc) {
send_cancel(server, &rqst, midQ); send_cancel(server, &rqst, midQ);
spin_lock(&GlobalMid_Lock); spin_lock(&server->mid_lock);
if (midQ->mid_state == MID_REQUEST_SUBMITTED || if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
midQ->mid_state == MID_RESPONSE_RECEIVED) { midQ->mid_state == MID_RESPONSE_RECEIVED) {
/* no longer considered to be "in-flight" */ /* no longer considered to be "in-flight" */
midQ->callback = release_mid; midQ->callback = release_mid;
spin_unlock(&GlobalMid_Lock); spin_unlock(&server->mid_lock);
return rc; return rc;
} }
spin_unlock(&GlobalMid_Lock); spin_unlock(&server->mid_lock);
} }
/* We got the response - restart system call. */ /* We got the response - restart system call. */
rstart = 1; rstart = 1;
spin_lock(&server->srv_lock);
} }
spin_unlock(&server->srv_lock);
rc = cifs_sync_mid_result(midQ, server); rc = cifs_sync_mid_result(midQ, server);
if (rc != 0) if (rc != 0)

View File

@ -639,6 +639,7 @@ enum {
#define EXT4_EX_NOCACHE 0x40000000 #define EXT4_EX_NOCACHE 0x40000000
#define EXT4_EX_FORCE_CACHE 0x20000000 #define EXT4_EX_FORCE_CACHE 0x20000000
#define EXT4_EX_NOFAIL 0x10000000 #define EXT4_EX_NOFAIL 0x10000000
#define EXT4_EX_FILTER 0x70000000
/* /*
* Flags used by ext4_free_blocks * Flags used by ext4_free_blocks

View File

@ -1534,7 +1534,7 @@ static int ext4_ext_search_left(struct inode *inode,
static int ext4_ext_search_right(struct inode *inode, static int ext4_ext_search_right(struct inode *inode,
struct ext4_ext_path *path, struct ext4_ext_path *path,
ext4_lblk_t *logical, ext4_fsblk_t *phys, ext4_lblk_t *logical, ext4_fsblk_t *phys,
struct ext4_extent *ret_ex) struct ext4_extent *ret_ex, int flags)
{ {
struct buffer_head *bh = NULL; struct buffer_head *bh = NULL;
struct ext4_extent_header *eh; struct ext4_extent_header *eh;
@ -1608,7 +1608,8 @@ got_index:
ix++; ix++;
while (++depth < path->p_depth) { while (++depth < path->p_depth) {
/* subtract from p_depth to get proper eh_depth */ /* subtract from p_depth to get proper eh_depth */
bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0); bh = read_extent_tree_block(inode, ix, path->p_depth - depth,
flags);
if (IS_ERR(bh)) if (IS_ERR(bh))
return PTR_ERR(bh); return PTR_ERR(bh);
eh = ext_block_hdr(bh); eh = ext_block_hdr(bh);
@ -1616,7 +1617,7 @@ got_index:
put_bh(bh); put_bh(bh);
} }
bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0); bh = read_extent_tree_block(inode, ix, path->p_depth - depth, flags);
if (IS_ERR(bh)) if (IS_ERR(bh))
return PTR_ERR(bh); return PTR_ERR(bh);
eh = ext_block_hdr(bh); eh = ext_block_hdr(bh);
@ -2933,6 +2934,7 @@ int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
struct partial_cluster partial; struct partial_cluster partial;
handle_t *handle; handle_t *handle;
int i = 0, err = 0; int i = 0, err = 0;
int flags = EXT4_EX_NOCACHE | EXT4_EX_NOFAIL;
partial.pclu = 0; partial.pclu = 0;
partial.lblk = 0; partial.lblk = 0;
@ -2963,8 +2965,7 @@ again:
ext4_fsblk_t pblk; ext4_fsblk_t pblk;
/* find extent for or closest extent to this block */ /* find extent for or closest extent to this block */
path = ext4_find_extent(inode, end, NULL, path = ext4_find_extent(inode, end, NULL, flags);
EXT4_EX_NOCACHE | EXT4_EX_NOFAIL);
if (IS_ERR(path)) { if (IS_ERR(path)) {
ext4_journal_stop(handle); ext4_journal_stop(handle);
return PTR_ERR(path); return PTR_ERR(path);
@ -3029,7 +3030,7 @@ again:
*/ */
lblk = ex_end + 1; lblk = ex_end + 1;
err = ext4_ext_search_right(inode, path, &lblk, &pblk, err = ext4_ext_search_right(inode, path, &lblk, &pblk,
NULL); NULL, flags);
if (err < 0) if (err < 0)
goto out; goto out;
if (pblk) { if (pblk) {
@ -3106,8 +3107,7 @@ again:
i + 1, ext4_idx_pblock(path[i].p_idx)); i + 1, ext4_idx_pblock(path[i].p_idx));
memset(path + i + 1, 0, sizeof(*path)); memset(path + i + 1, 0, sizeof(*path));
bh = read_extent_tree_block(inode, path[i].p_idx, bh = read_extent_tree_block(inode, path[i].p_idx,
depth - i - 1, depth - i - 1, flags);
EXT4_EX_NOCACHE);
if (IS_ERR(bh)) { if (IS_ERR(bh)) {
/* should we reset i_size? */ /* should we reset i_size? */
err = PTR_ERR(bh); err = PTR_ERR(bh);
@ -4295,7 +4295,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
/* find extent for this block */ /* find extent for this block */
path = ext4_find_extent(inode, map->m_lblk, NULL, 0); path = ext4_find_extent(inode, map->m_lblk, NULL, flags);
if (IS_ERR(path)) { if (IS_ERR(path)) {
err = PTR_ERR(path); err = PTR_ERR(path);
path = NULL; path = NULL;
@ -4413,7 +4413,8 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
if (err) if (err)
goto out2; goto out2;
ar.lright = map->m_lblk; ar.lright = map->m_lblk;
err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2); err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright,
&ex2, flags);
if (err < 0) if (err < 0)
goto out2; goto out2;
@ -5029,8 +5030,14 @@ int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
break; break;
} }
} }
/*
* Do not cache any unrelated extents, as it does not hold the
* i_rwsem or invalidate_lock, which could corrupt the extent
* status tree.
*/
ret = ext4_map_blocks(handle, inode, &map, ret = ext4_map_blocks(handle, inode, &map,
EXT4_GET_BLOCKS_IO_CONVERT_EXT); EXT4_GET_BLOCKS_IO_CONVERT_EXT |
EXT4_EX_NOCACHE);
if (ret <= 0) if (ret <= 0)
ext4_warning(inode->i_sb, ext4_warning(inode->i_sb,
"inode #%lu: block %u: len %u: " "inode #%lu: block %u: len %u: "

View File

@ -478,6 +478,38 @@ static void ext4_map_blocks_es_recheck(handle_t *handle,
} }
#endif /* ES_AGGRESSIVE_TEST */ #endif /* ES_AGGRESSIVE_TEST */
static int ext4_map_query_blocks(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map, int flags)
{
unsigned int status;
int ret, retval;
flags &= EXT4_EX_FILTER;
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
retval = ext4_ext_map_blocks(handle, inode, map, flags);
else
retval = ext4_ind_map_blocks(handle, inode, map, flags);
if (retval <= 0)
return retval;
if (unlikely(retval != map->m_len)) {
ext4_warning(inode->i_sb,
"ES len assertion failed for inode "
"%lu: retval %d != map->m_len %d",
inode->i_ino, retval, map->m_len);
WARN_ON(1);
}
status = map->m_flags & EXT4_MAP_UNWRITTEN ?
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
map->m_pblk, status);
if (ret != 0)
retval = ret;
return retval;
}
/* /*
* The ext4_map_blocks() function tries to look up the requested blocks, * The ext4_map_blocks() function tries to look up the requested blocks,
* and returns if the blocks are already mapped. * and returns if the blocks are already mapped.
@ -526,6 +558,13 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS)) if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
return -EFSCORRUPTED; return -EFSCORRUPTED;
/*
* Do not allow caching of unrelated ranges of extents during I/O
* submission.
*/
if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
WARN_ON_ONCE(!(flags & EXT4_EX_NOCACHE));
/* Lookup extent status tree firstly */ /* Lookup extent status tree firstly */
if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) { if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) { if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
@ -561,10 +600,10 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
down_read(&EXT4_I(inode)->i_data_sem); down_read(&EXT4_I(inode)->i_data_sem);
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
retval = ext4_ext_map_blocks(handle, inode, map, flags & retval = ext4_ext_map_blocks(handle, inode, map, flags &
EXT4_GET_BLOCKS_KEEP_SIZE); (EXT4_GET_BLOCKS_KEEP_SIZE|EXT4_EX_NOCACHE));
} else { } else {
retval = ext4_ind_map_blocks(handle, inode, map, flags & retval = ext4_ind_map_blocks(handle, inode, map, flags &
EXT4_GET_BLOCKS_KEEP_SIZE); (EXT4_GET_BLOCKS_KEEP_SIZE|EXT4_EX_NOCACHE));
} }
if (retval > 0) { if (retval > 0) {
unsigned int status; unsigned int status;
@ -1879,12 +1918,10 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
/* Lookup extent status tree firstly */ /* Lookup extent status tree firstly */
if (ext4_es_lookup_extent(inode, iblock, &es)) { if (ext4_es_lookup_extent(inode, iblock, &es)) {
if (ext4_es_is_hole(&es)) { if (ext4_es_is_hole(&es))
retval = 0;
down_read(&EXT4_I(inode)->i_data_sem);
goto add_delayed; goto add_delayed;
}
found:
/* /*
* Delayed extent could be allocated by fallocate. * Delayed extent could be allocated by fallocate.
* So we need to check it. * So we need to check it.
@ -1921,52 +1958,42 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
down_read(&EXT4_I(inode)->i_data_sem); down_read(&EXT4_I(inode)->i_data_sem);
if (ext4_has_inline_data(inode)) if (ext4_has_inline_data(inode))
retval = 0; retval = 0;
else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
retval = ext4_ext_map_blocks(NULL, inode, map, 0);
else else
retval = ext4_ind_map_blocks(NULL, inode, map, 0); retval = ext4_map_query_blocks(NULL, inode, map, 0);
up_read(&EXT4_I(inode)->i_data_sem);
if (retval)
return retval;
add_delayed: add_delayed:
if (retval == 0) { down_write(&EXT4_I(inode)->i_data_sem);
int ret;
/* /*
* XXX: __block_prepare_write() unmaps passed block, * Page fault path (ext4_page_mkwrite does not take i_rwsem)
* is it OK? * and fallocate path (no folio lock) can race. Make sure we
* lookup the extent status tree here again while i_data_sem
* is held in write mode, before inserting a new da entry in
* the extent status tree.
*/ */
if (ext4_es_lookup_extent(inode, iblock, &es)) {
ret = ext4_insert_delayed_block(inode, map->m_lblk); if (!ext4_es_is_hole(&es)) {
if (ret != 0) { up_write(&EXT4_I(inode)->i_data_sem);
retval = ret; goto found;
goto out_unlock;
} }
} else if (!ext4_has_inline_data(inode)) {
retval = ext4_map_query_blocks(NULL, inode, map, 0);
if (retval) {
up_write(&EXT4_I(inode)->i_data_sem);
return retval;
}
}
retval = ext4_insert_delayed_block(inode, map->m_lblk);
up_write(&EXT4_I(inode)->i_data_sem);
if (retval)
return retval;
map_bh(bh, inode->i_sb, invalid_block); map_bh(bh, inode->i_sb, invalid_block);
set_buffer_new(bh); set_buffer_new(bh);
set_buffer_delay(bh); set_buffer_delay(bh);
} else if (retval > 0) {
int ret;
unsigned int status;
if (unlikely(retval != map->m_len)) {
ext4_warning(inode->i_sb,
"ES len assertion failed for inode "
"%lu: retval %d != map->m_len %d",
inode->i_ino, retval, map->m_len);
WARN_ON(1);
}
status = map->m_flags & EXT4_MAP_UNWRITTEN ?
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
map->m_pblk, status);
if (ret != 0)
retval = ret;
}
out_unlock:
up_read((&EXT4_I(inode)->i_data_sem));
return retval; return retval;
} }
@ -2472,7 +2499,9 @@ static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
* previously reserved. However we must not fail because we're in * previously reserved. However we must not fail because we're in
* writeback and there is nothing we can do about it so it might result * writeback and there is nothing we can do about it so it might result
* in data loss. So use reserved blocks to allocate metadata if * in data loss. So use reserved blocks to allocate metadata if
* possible. * possible. In addition, do not cache any unrelated extents, as it
* only holds the folio lock but does not hold the i_rwsem or
* invalidate_lock, which could corrupt the extent status tree.
* *
* We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if
* the blocks in question are delalloc blocks. This indicates * the blocks in question are delalloc blocks. This indicates
@ -2481,7 +2510,9 @@ static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
*/ */
get_blocks_flags = EXT4_GET_BLOCKS_CREATE | get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
EXT4_GET_BLOCKS_METADATA_NOFAIL | EXT4_GET_BLOCKS_METADATA_NOFAIL |
EXT4_GET_BLOCKS_IO_SUBMIT; EXT4_GET_BLOCKS_IO_SUBMIT |
EXT4_EX_NOCACHE;
dioread_nolock = ext4_should_dioread_nolock(inode); dioread_nolock = ext4_should_dioread_nolock(inode);
if (dioread_nolock) if (dioread_nolock)
get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT; get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;

View File

@ -596,17 +596,13 @@ static int freeze_go_xmote_bh(struct gfs2_glock *gl)
if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) { if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
j_gl->gl_ops->go_inval(j_gl, DIO_METADATA); j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
error = gfs2_find_jhead(sdp->sd_jdesc, &head, false); error = gfs2_find_jhead(sdp->sd_jdesc, &head);
if (error) if (gfs2_assert_withdraw_delayed(sdp, !error))
gfs2_consist(sdp); return error;
if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) if (gfs2_assert_withdraw_delayed(sdp, head.lh_flags &
gfs2_consist(sdp); GFS2_LOG_HEAD_UNMOUNT))
return -EIO;
/* Initialize some head of the log stuff */ gfs2_log_pointers_init(sdp, &head);
if (!gfs2_withdrawn(sdp)) {
sdp->sd_log_sequence = head.lh_sequence + 1;
gfs2_log_pointers_init(sdp, head.lh_blkno);
}
} }
return 0; return 0;
} }

View File

@ -895,10 +895,10 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
} }
/** /**
* ail_drain - drain the ail lists after a withdraw * gfs2_ail_drain - drain the ail lists after a withdraw
* @sdp: Pointer to GFS2 superblock * @sdp: Pointer to GFS2 superblock
*/ */
static void ail_drain(struct gfs2_sbd *sdp) void gfs2_ail_drain(struct gfs2_sbd *sdp)
{ {
struct gfs2_trans *tr; struct gfs2_trans *tr;
@ -925,6 +925,7 @@ static void ail_drain(struct gfs2_sbd *sdp)
list_del(&tr->tr_list); list_del(&tr->tr_list);
gfs2_trans_free(sdp, tr); gfs2_trans_free(sdp, tr);
} }
gfs2_drain_revokes(sdp);
spin_unlock(&sdp->sd_ail_lock); spin_unlock(&sdp->sd_ail_lock);
} }
@ -1113,7 +1114,6 @@ out_withdraw:
if (tr && list_empty(&tr->tr_list)) if (tr && list_empty(&tr->tr_list))
list_add(&tr->tr_list, &sdp->sd_ail1_list); list_add(&tr->tr_list, &sdp->sd_ail1_list);
spin_unlock(&sdp->sd_ail_lock); spin_unlock(&sdp->sd_ail_lock);
ail_drain(sdp); /* frees all transactions */
tr = NULL; tr = NULL;
goto out_end; goto out_end;
} }

View File

@ -47,17 +47,6 @@ __releases(&sdp->sd_log_lock)
spin_unlock(&sdp->sd_log_lock); spin_unlock(&sdp->sd_log_lock);
} }
static inline void gfs2_log_pointers_init(struct gfs2_sbd *sdp,
unsigned int value)
{
if (++value == sdp->sd_jdesc->jd_blocks) {
value = 0;
}
sdp->sd_log_tail = value;
sdp->sd_log_flush_tail = value;
sdp->sd_log_head = value;
}
static inline void gfs2_ordered_add_inode(struct gfs2_inode *ip) static inline void gfs2_ordered_add_inode(struct gfs2_inode *ip)
{ {
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
@ -96,5 +85,6 @@ int gfs2_logd(void *data);
void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd); void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
void gfs2_glock_remove_revoke(struct gfs2_glock *gl); void gfs2_glock_remove_revoke(struct gfs2_glock *gl);
void gfs2_flush_revokes(struct gfs2_sbd *sdp); void gfs2_flush_revokes(struct gfs2_sbd *sdp);
void gfs2_ail_drain(struct gfs2_sbd *sdp);
#endif /* __LOG_DOT_H__ */ #endif /* __LOG_DOT_H__ */

View File

@ -510,8 +510,7 @@ static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs)
* *
* Returns: 0 on success, errno otherwise * Returns: 0 on success, errno otherwise
*/ */
int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head, int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head)
bool keep_cache)
{ {
struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
struct address_space *mapping = jd->jd_inode->i_mapping; struct address_space *mapping = jd->jd_inode->i_mapping;
@ -601,7 +600,6 @@ out:
if (!ret) if (!ret)
ret = filemap_check_wb_err(mapping, since); ret = filemap_check_wb_err(mapping, since);
if (!keep_cache)
truncate_inode_pages(mapping, 0); truncate_inode_pages(mapping, 0);
return ret; return ret;
@ -884,7 +882,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
gfs2_log_write_page(sdp, page); gfs2_log_write_page(sdp, page);
} }
static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) void gfs2_drain_revokes(struct gfs2_sbd *sdp)
{ {
struct list_head *head = &sdp->sd_log_revokes; struct list_head *head = &sdp->sd_log_revokes;
struct gfs2_bufdata *bd; struct gfs2_bufdata *bd;
@ -899,6 +897,11 @@ static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
} }
} }
static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
gfs2_drain_revokes(sdp);
}
static void revoke_lo_before_scan(struct gfs2_jdesc *jd, static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
struct gfs2_log_header_host *head, int pass) struct gfs2_log_header_host *head, int pass)
{ {

View File

@ -22,7 +22,9 @@ void gfs2_log_write(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
void gfs2_log_submit_bio(struct bio **biop, int opf); void gfs2_log_submit_bio(struct bio **biop, int opf);
void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh); void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh);
int gfs2_find_jhead(struct gfs2_jdesc *jd, int gfs2_find_jhead(struct gfs2_jdesc *jd,
struct gfs2_log_header_host *head, bool keep_cache); struct gfs2_log_header_host *head);
void gfs2_drain_revokes(struct gfs2_sbd *sdp);
static inline unsigned int buf_limit(struct gfs2_sbd *sdp) static inline unsigned int buf_limit(struct gfs2_sbd *sdp)
{ {
return sdp->sd_ldptrs; return sdp->sd_ldptrs;

View File

@ -265,16 +265,12 @@ static void clean_journal(struct gfs2_jdesc *jd,
struct gfs2_log_header_host *head) struct gfs2_log_header_host *head)
{ {
struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
u32 lblock = head->lh_blkno;
gfs2_replay_incr_blk(jd, &lblock); gfs2_replay_incr_blk(jd, &head->lh_blkno);
gfs2_write_log_header(sdp, jd, head->lh_sequence + 1, 0, lblock, head->lh_sequence++;
gfs2_write_log_header(sdp, jd, head->lh_sequence, 0, head->lh_blkno,
GFS2_LOG_HEAD_UNMOUNT | GFS2_LOG_HEAD_RECOVERY, GFS2_LOG_HEAD_UNMOUNT | GFS2_LOG_HEAD_RECOVERY,
REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC); REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC);
if (jd->jd_jid == sdp->sd_lockstruct.ls_jid) {
sdp->sd_log_flush_head = lblock;
gfs2_log_incr_head(sdp);
}
} }
@ -459,7 +455,7 @@ void gfs2_recover_func(struct work_struct *work)
if (error) if (error)
goto fail_gunlock_ji; goto fail_gunlock_ji;
error = gfs2_find_jhead(jd, &head, true); error = gfs2_find_jhead(jd, &head);
if (error) if (error)
goto fail_gunlock_ji; goto fail_gunlock_ji;
t_jhd = ktime_get(); t_jhd = ktime_get();
@ -535,6 +531,9 @@ void gfs2_recover_func(struct work_struct *work)
ktime_ms_delta(t_rep, t_tlck)); ktime_ms_delta(t_rep, t_tlck));
} }
if (jd->jd_jid == sdp->sd_lockstruct.ls_jid)
gfs2_log_pointers_init(sdp, &head);
gfs2_recovery_done(sdp, jd->jd_jid, LM_RD_SUCCESS); gfs2_recovery_done(sdp, jd->jd_jid, LM_RD_SUCCESS);
if (jlocked) { if (jlocked) {
@ -582,3 +581,13 @@ int gfs2_recover_journal(struct gfs2_jdesc *jd, bool wait)
return wait ? jd->jd_recover_error : 0; return wait ? jd->jd_recover_error : 0;
} }
/*
 * gfs2_log_pointers_init - prime the in-core log pointers from a log header.
 * @sdp:  the filesystem superblock data
 * @head: the log header found/written during recovery
 *
 * The next sequence number to use follows the recovered header's, and every
 * log pointer (head, tail, flush head/tail) starts at the block just past
 * that header.
 *
 * NOTE(review): gfs2_replay_incr_blk() advances @head->lh_blkno in place,
 * so the caller's copy of @head is modified as a side effect.
 */
void gfs2_log_pointers_init(struct gfs2_sbd *sdp,
			    struct gfs2_log_header_host *head)
{
	sdp->sd_log_sequence = head->lh_sequence + 1;
	/* Step past the header block (wraps within the journal extent). */
	gfs2_replay_incr_blk(sdp->sd_jdesc, &head->lh_blkno);
	sdp->sd_log_tail = head->lh_blkno;
	sdp->sd_log_flush_head = head->lh_blkno;
	sdp->sd_log_flush_tail = head->lh_blkno;
	sdp->sd_log_head = head->lh_blkno;
}

View File

@ -32,6 +32,8 @@ void gfs2_recover_func(struct work_struct *work);
int __get_log_header(struct gfs2_sbd *sdp, int __get_log_header(struct gfs2_sbd *sdp,
const struct gfs2_log_header *lh, unsigned int blkno, const struct gfs2_log_header *lh, unsigned int blkno,
struct gfs2_log_header_host *head); struct gfs2_log_header_host *head);
void gfs2_log_pointers_init(struct gfs2_sbd *sdp,
struct gfs2_log_header_host *head);
#endif /* __RECOVERY_DOT_H__ */ #endif /* __RECOVERY_DOT_H__ */

View File

@ -137,28 +137,22 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
{ {
struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode); struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
struct gfs2_glock *j_gl = ip->i_gl; struct gfs2_glock *j_gl = ip->i_gl;
struct gfs2_log_header_host head;
int error; int error;
j_gl->gl_ops->go_inval(j_gl, DIO_METADATA); j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
if (gfs2_withdrawn(sdp)) if (gfs2_withdrawn(sdp))
return -EIO; return -EIO;
error = gfs2_find_jhead(sdp->sd_jdesc, &head, false); if (sdp->sd_log_sequence == 0) {
if (error || gfs2_withdrawn(sdp)) fs_err(sdp, "unknown status of our own journal jid %d",
return error; sdp->sd_lockstruct.ls_jid);
if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
gfs2_consist(sdp);
return -EIO; return -EIO;
} }
/* Initialize some head of the log stuff */
sdp->sd_log_sequence = head.lh_sequence + 1;
gfs2_log_pointers_init(sdp, head.lh_blkno);
error = gfs2_quota_init(sdp); error = gfs2_quota_init(sdp);
if (!error && !gfs2_withdrawn(sdp)) if (!error && gfs2_withdrawn(sdp))
error = -EIO;
if (!error)
set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags); set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
return error; return error;
} }
@ -391,7 +385,7 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
error = gfs2_jdesc_check(jd); error = gfs2_jdesc_check(jd);
if (error) if (error)
break; break;
error = gfs2_find_jhead(jd, &lh, false); error = gfs2_find_jhead(jd, &lh);
if (error) if (error)
break; break;
if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) { if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {

View File

@ -74,7 +74,7 @@ int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
"mount.\n"); "mount.\n");
goto out_unlock; goto out_unlock;
} }
error = gfs2_find_jhead(jd, &head, false); error = gfs2_find_jhead(jd, &head);
if (error) { if (error) {
if (verbose) if (verbose)
fs_err(sdp, "Error parsing journal for spectator " fs_err(sdp, "Error parsing journal for spectator "
@ -129,6 +129,7 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) || !sdp->sd_jdesc) if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) || !sdp->sd_jdesc)
return; return;
gfs2_ail_drain(sdp); /* frees all transactions */
inode = sdp->sd_jdesc->jd_inode; inode = sdp->sd_jdesc->jd_inode;
ip = GFS2_I(inode); ip = GFS2_I(inode);
i_gl = ip->i_gl; i_gl = ip->i_gl;

20
include/linux/rh_waived.h Normal file
View File

@ -0,0 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/rh_waived.h
 *
 * rh_waived cmdline parameter interface.
 *
 * Copyright (C) 2024, Red Hat, Inc. Ricardo Robaina <rrobaina@redhat.com>
 */
#ifndef _RH_WAIVED_H
#define _RH_WAIVED_H

/*
 * Identifiers for items that can be waived (toggled) via the "rh_waived="
 * kernel boot parameter.  Each entry indexes kernel/rh_waived.c's
 * rh_waived_list[] table.
 */
enum rh_waived_items {
	CVE_2025_38085,		/* hugetlb huge_pmd_unshare TLB-flush mitigation */
	/* RH_WAIVED_ITEMS must always be the last item in the enum */
	RH_WAIVED_ITEMS,
};

/* Returns true if @feat was marked as waived on the kernel cmdline. */
bool is_rh_waived(enum rh_waived_items feat);

#endif /* _RH_WAIVED_H */

View File

@ -12,7 +12,7 @@ obj-y = fork.o exec_domain.o panic.o \
notifier.o ksysfs.o cred.o reboot.o \ notifier.o ksysfs.o cred.o reboot.o \
async.o range.o smpboot.o ucount.o regset.o async.o range.o smpboot.o ucount.o regset.o
obj-$(CONFIG_RHEL_DIFFERENCES) += rh_messages.o rh_flags.o obj-$(CONFIG_RHEL_DIFFERENCES) += rh_messages.o rh_flags.o rh_waived.o
obj-$(CONFIG_USERMODE_DRIVER) += usermode_driver.o obj-$(CONFIG_USERMODE_DRIVER) += usermode_driver.o
obj-$(CONFIG_MODULES) += kmod.o obj-$(CONFIG_MODULES) += kmod.o
obj-$(CONFIG_MULTIUSER) += groups.o obj-$(CONFIG_MULTIUSER) += groups.o

147
kernel/rh_waived.c Normal file
View File

@ -0,0 +1,147 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* kernel/rh_waived.c
*
* rh_waived cmdline parameter support.
*
* Copyright (C) 2024, Red Hat, Inc. Ricardo Robaina <rrobaina@redhat.com>
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/printk.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/rh_flags.h>
#include <linux/rh_waived.h>
/*
 * RH_INSERT_WAIVED_ITEM
 * This macro is intended to be used to insert items into the
 * rh_waived_list array.  It expects to get an item from
 * enum rh_waived_items as its first argument, a string holding
 * the item name as its second argument, an optional alias string
 * (or NULL) as its third argument, and the item class as its
 * fourth argument.
 *
 * The item name is also utilized as the token for the
 * boot parameter parser.
 *
 * Example usage:
 * struct rh_waived_item foo[RH_WAIVED_ITEMS] = {
 *	RH_INSERT_WAIVED_ITEM(FOO_FEAT, "foo_feat_short_str", "alias", RH_WAIVED_FEAT),
 * };
 */
#define RH_INSERT_WAIVED_ITEM(enum_item, item, item_alt, class)	\
	[(enum_item)] = { .name = (item), .alias = (item_alt),	\
			  .type = (class), .waived = 0, }
/* Indicates if the rh_flag 'rh_waived' should be added (set at early boot,
 * consumed by the late_initcall below). */
bool __initdata add_rh_flag = false;

/* Classification of a waived item: an on-demand feature, a security
 * mitigation (CVE), or a wildcard used only when matching. */
typedef enum {
	RH_WAIVED_FEAT,
	RH_WAIVED_CVE,
	RH_WAIVED_ANY
} rh_waived_t;

/* One waivable item: cmdline token, optional alias, class, and state. */
struct rh_waived_item {
	char *name, *alias;
	rh_waived_t type;
	unsigned int waived;
};

/* Always use the macro RH_INSERT_WAIVED_ITEM to insert items into this
 * array; it is indexed by enum rh_waived_items. */
struct rh_waived_item rh_waived_list[RH_WAIVED_ITEMS] = {
	RH_INSERT_WAIVED_ITEM(CVE_2025_38085, "CVE-2025-38085",
			      "no-cve-2025-38085", RH_WAIVED_CVE),
};
/*
 * is_rh_waived() - report whether @item was waived on the kernel cmdline.
 * @item: entry of enum rh_waived_items to query.
 *
 * Returns true when the corresponding rh_waived_list[] entry was marked
 * waived during early boot parameter parsing.
 */
__inline__ bool is_rh_waived(enum rh_waived_items item)
{
	return rh_waived_list[item].waived != 0;
}
EXPORT_SYMBOL(is_rh_waived);
/*
 * rh_waived_parser() - mark items in rh_waived_list[] as waived.
 * @s:    comma-separated list of item names/aliases, or NULL to waive
 *        every item matching @type.
 * @type: item class to match; RH_WAIVED_ANY matches all classes.
 *
 * Runs from early_param parsing, so only early console output is
 * available.  Fixes vs. the previous revision:
 *  - pr_info() already supplies KERN_INFO; embedding KERN_CONT in its
 *    format string put the continuation marker mid-message.  Use
 *    pr_cont() for continuation lines instead.
 *  - the terminating newline was only printed when the last array entry
 *    matched @type; it is now printed unconditionally.
 */
static void __init rh_waived_parser(char *s, rh_waived_t type)
{
	int i;
	char *token;

	pr_info("rh_waived: ");

	if (!s) {
		/* No explicit list: waive everything of the given class. */
		for (i = 0; i < RH_WAIVED_ITEMS; i++) {
			if (type != RH_WAIVED_ANY && rh_waived_list[i].type != type)
				continue;
			rh_waived_list[i].waived = 1;
			pr_cont("%s ", rh_waived_list[i].name);
		}
		pr_cont("\n");
		add_rh_flag = true;
		return;
	}

	/* Walk the comma-separated list, matching names and aliases. */
	while ((token = strsep(&s, ",")) != NULL) {
		for (i = 0; i < RH_WAIVED_ITEMS; i++) {
			char *alias = rh_waived_list[i].alias;

			if (type != RH_WAIVED_ANY && rh_waived_list[i].type != type)
				continue;
			if (!strcmp(token, rh_waived_list[i].name) ||
			    (alias && !strcmp(token, alias))) {
				rh_waived_list[i].waived = 1;
				pr_cont("%s ", rh_waived_list[i].name);
			}
		}
	}
	pr_cont("\n");
	add_rh_flag = true;
}
/*
 * rh_waived_setup() - early_param handler for "rh_waived=".
 * @s: the parameter value, or NULL when given bare on the cmdline.
 *
 * A bare "rh_waived" (or "rh_waived=features") waives every listed
 * feature, preserving the original contract; "rh_waived=cves" waives
 * every listed security mitigation; anything else is treated as an
 * explicit comma-separated list of item names/aliases.
 */
static int __init rh_waived_setup(char *s)
{
	if (!s || strcmp(s, "features") == 0)
		rh_waived_parser(NULL, RH_WAIVED_FEAT);
	else if (strcmp(s, "cves") == 0)
		rh_waived_parser(NULL, RH_WAIVED_CVE);
	else
		rh_waived_parser(s, RH_WAIVED_ANY);

	return 0;
}
early_param("rh_waived", rh_waived_setup);
/*
 * rh_flags is initialized at subsys_initcall time, so calling
 * rh_add_flag() directly from rh_waived_setup() (which runs during
 * early_param parsing) would leave the system unable to boot.
 * Deferring the registration of the 'rh_waived' rh_flag to a
 * late_initcall avoids this ordering problem.
 */
static int __init __add_rh_flag(void)
{
	if (add_rh_flag)
		rh_add_flag("rh_waived");
	return 0;
}
late_initcall(__add_rh_flag);

View File

@ -40,6 +40,7 @@
#include <linux/node.h> #include <linux/node.h>
#include <linux/userfaultfd_k.h> #include <linux/userfaultfd_k.h>
#include <linux/page_owner.h> #include <linux/page_owner.h>
#include <linux/rh_waived.h>
#include "internal.h" #include "internal.h"
int hugetlb_max_hstate __read_mostly; int hugetlb_max_hstate __read_mostly;
@ -5667,8 +5668,14 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
* using this page table as a normal, non-hugetlb page table. * using this page table as a normal, non-hugetlb page table.
* Wait for pending gup_fast() in other threads to finish before letting * Wait for pending gup_fast() in other threads to finish before letting
* that happen. * that happen.
*
* RHEL-120391: some customers reported severe interference/performance
* degradation on particular database workloads, thus we are including
* a waiving flag to allow for disabling this CVE mitigation
*/ */
if (likely(!is_rh_waived(CVE_2025_38085)))
tlb_remove_table_sync_one(); tlb_remove_table_sync_one();
put_page(virt_to_page(ptep)); put_page(virt_to_page(ptep));
mm_dec_nr_pmds(mm); mm_dec_nr_pmds(mm);
/* /*

View File

@ -3713,6 +3713,10 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
break; break;
goto queue; goto queue;
case WLAN_CATEGORY_S1G: case WLAN_CATEGORY_S1G:
if (len < offsetofend(typeof(*mgmt),
u.action.u.s1g.action_code))
break;
switch (mgmt->u.action.u.s1g.action_code) { switch (mgmt->u.action.u.s1g.action_code) {
case WLAN_S1G_TWT_SETUP: case WLAN_S1G_TWT_SETUP:
case WLAN_S1G_TWT_TEARDOWN: case WLAN_S1G_TWT_TEARDOWN:

View File

@ -1665,6 +1665,7 @@ cfg80211_update_known_bss(struct cfg80211_registered_device *rdev,
*/ */
f = rcu_access_pointer(new->pub.beacon_ies); f = rcu_access_pointer(new->pub.beacon_ies);
if (!new->pub.hidden_beacon_bss)
kfree_rcu((struct cfg80211_bss_ies *)f, rcu_head); kfree_rcu((struct cfg80211_bss_ies *)f, rcu_head);
return false; return false;
} }