Import of kernel-6.12.0-124.52.1.el10_1

This commit is contained in:
almalinux-bot-kernel 2026-04-24 05:07:24 +00:00
parent d3c36ff4dc
commit 0adfe8916d
39 changed files with 419 additions and 175 deletions

View File

@ -12,7 +12,7 @@ RHEL_MINOR = 1
#
# Use this spot to avoid future merge conflicts.
# Do not trim this comment.
RHEL_RELEASE = 124.49.1
RHEL_RELEASE = 124.52.1
#
# RHEL_REBASE_NUM

View File

@ -498,6 +498,9 @@ CONFIG_PPC_TRANSACTIONAL_MEM=y
CONFIG_PPC_UV=y
# CONFIG_LD_HEAD_STUB_CATCH is not set
CONFIG_MPROFILE_KERNEL=y
CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY=y
CONFIG_PPC_FTRACE_OUT_OF_LINE=y
CONFIG_PPC_FTRACE_OUT_OF_LINE_NUM_RESERVE=32768
CONFIG_HOTPLUG_CPU=y
CONFIG_INTERRUPT_SANITIZE_REGISTERS=y
CONFIG_PPC_QUEUED_SPINLOCKS=y
@ -721,6 +724,7 @@ CONFIG_FUNCTION_ALIGNMENT_4B=y
CONFIG_FUNCTION_ALIGNMENT=4
CONFIG_CC_HAS_MIN_FUNCTION_ALIGNMENT=y
CONFIG_CC_HAS_SANE_FUNCTION_ALIGNMENT=y
CONFIG_ARCH_WANTS_PRE_LINK_VMLINUX=y
# end of General architecture-dependent options
CONFIG_RT_MUTEXES=y
@ -5026,6 +5030,7 @@ CONFIG_HID_KUNIT_TEST=m
#
# HID-BPF support
#
CONFIG_HID_BPF=y
# end of HID-BPF support
CONFIG_I2C_HID=y
@ -7095,6 +7100,8 @@ CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y
CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y
CONFIG_HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS=y
CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
@ -7114,6 +7121,8 @@ CONFIG_FUNCTION_TRACER=y
CONFIG_FUNCTION_GRAPH_TRACER=y
CONFIG_DYNAMIC_FTRACE=y
CONFIG_DYNAMIC_FTRACE_WITH_REGS=y
CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y
CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS=y
CONFIG_DYNAMIC_FTRACE_WITH_ARGS=y
CONFIG_FPROBE=y
CONFIG_FUNCTION_PROFILER=y
@ -7138,7 +7147,7 @@ CONFIG_BPF_EVENTS=y
CONFIG_DYNAMIC_EVENTS=y
CONFIG_PROBE_EVENTS=y
CONFIG_FTRACE_MCOUNT_RECORD=y
CONFIG_FTRACE_MCOUNT_USE_CC=y
CONFIG_FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY=y
CONFIG_TRACING_MAP=y
CONFIG_SYNTH_EVENTS=y
# CONFIG_USER_EVENTS is not set
@ -7164,6 +7173,8 @@ CONFIG_RV_REACTORS=y
CONFIG_RV_REACT_PRINTK=y
CONFIG_RV_REACT_PANIC=y
# CONFIG_SAMPLES is not set
CONFIG_HAVE_SAMPLE_FTRACE_DIRECT=y
CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI=y
CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y
CONFIG_STRICT_DEVMEM=y
# CONFIG_IO_STRICT_DEVMEM is not set

View File

@ -422,22 +422,6 @@ static void cache_tag_flush_devtlb_psi(struct dmar_domain *domain, struct cache_
domain->qi_batch);
}
static void cache_tag_flush_devtlb_all(struct dmar_domain *domain, struct cache_tag *tag)
{
struct intel_iommu *iommu = tag->iommu;
struct device_domain_info *info;
u16 sid;
info = dev_iommu_priv_get(tag->dev);
sid = PCI_DEVID(info->bus, info->devfn);
qi_batch_add_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep, 0,
MAX_AGAW_PFN_WIDTH, domain->qi_batch);
if (info->dtlb_extra_inval)
qi_batch_add_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep, 0,
MAX_AGAW_PFN_WIDTH, domain->qi_batch);
}
/*
* Invalidates a range of IOVA from @start (inclusive) to @end (inclusive)
* when the memory mappings in the target domain have been modified.
@ -450,7 +434,13 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
struct cache_tag *tag;
unsigned long flags;
addr = calculate_psi_aligned_address(start, end, &pages, &mask);
if (start == 0 && end == ULONG_MAX) {
addr = 0;
pages = -1;
mask = MAX_AGAW_PFN_WIDTH;
} else {
addr = calculate_psi_aligned_address(start, end, &pages, &mask);
}
spin_lock_irqsave(&domain->cache_lock, flags);
list_for_each_entry(tag, &domain->cache_tags, node) {
@ -491,31 +481,7 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
*/
void cache_tag_flush_all(struct dmar_domain *domain)
{
struct intel_iommu *iommu = NULL;
struct cache_tag *tag;
unsigned long flags;
spin_lock_irqsave(&domain->cache_lock, flags);
list_for_each_entry(tag, &domain->cache_tags, node) {
if (iommu && iommu != tag->iommu)
qi_batch_flush_descs(iommu, domain->qi_batch);
iommu = tag->iommu;
switch (tag->type) {
case CACHE_TAG_IOTLB:
case CACHE_TAG_NESTING_IOTLB:
cache_tag_flush_iotlb(domain, tag, 0, -1, 0, 0);
break;
case CACHE_TAG_DEVTLB:
case CACHE_TAG_NESTING_DEVTLB:
cache_tag_flush_devtlb_all(domain, tag);
break;
}
trace_cache_tag_flush_all(tag);
}
qi_batch_flush_descs(iommu, domain->qi_batch);
spin_unlock_irqrestore(&domain->cache_lock, flags);
cache_tag_flush_range(domain, 0, ULONG_MAX, 0);
}
/*

View File

@ -130,11 +130,6 @@ DEFINE_EVENT(cache_tag_log, cache_tag_unassign,
TP_ARGS(tag)
);
DEFINE_EVENT(cache_tag_log, cache_tag_flush_all,
TP_PROTO(struct cache_tag *tag),
TP_ARGS(tag)
);
DECLARE_EVENT_CLASS(cache_tag_flush,
TP_PROTO(struct cache_tag *tag, unsigned long start, unsigned long end,
unsigned long addr, unsigned long pages, unsigned long mask),

View File

@ -1225,7 +1225,7 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio,
int i = 0;
struct bio *behind_bio = NULL;
behind_bio = bio_alloc_bioset(NULL, vcnt, 0, GFP_NOIO,
behind_bio = bio_alloc_bioset(NULL, vcnt, bio->bi_opf, GFP_NOIO,
&r1_bio->mddev->bio_set);
/* discard op, we don't support writezero/writesame yet */

View File

@ -1418,4 +1418,15 @@ static inline struct i40e_veb *i40e_pf_get_main_veb(struct i40e_pf *pf)
return (pf->lan_veb != I40E_NO_VEB) ? pf->veb[pf->lan_veb] : NULL;
}
static inline u32 i40e_get_max_num_descriptors(const struct i40e_pf *pf)
{
const struct i40e_hw *hw = &pf->hw;
switch (hw->mac.type) {
case I40E_MAC_XL710:
return I40E_MAX_NUM_DESCRIPTORS_XL710;
default:
return I40E_MAX_NUM_DESCRIPTORS;
}
}
#endif /* _I40E_H_ */

View File

@ -2012,18 +2012,6 @@ static void i40e_get_drvinfo(struct net_device *netdev,
drvinfo->n_priv_flags += I40E_GL_PRIV_FLAGS_STR_LEN;
}
static u32 i40e_get_max_num_descriptors(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
switch (hw->mac.type) {
case I40E_MAC_XL710:
return I40E_MAX_NUM_DESCRIPTORS_XL710;
default:
return I40E_MAX_NUM_DESCRIPTORS;
}
}
static void i40e_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,

View File

@ -656,7 +656,7 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
/* ring_len has to be multiple of 8 */
if (!IS_ALIGNED(info->ring_len, 8) ||
info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) {
info->ring_len > i40e_get_max_num_descriptors(pf)) {
ret = -EINVAL;
goto error_context;
}
@ -726,7 +726,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
/* ring_len has to be multiple of 32 */
if (!IS_ALIGNED(info->ring_len, 32) ||
info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) {
info->ring_len > i40e_get_max_num_descriptors(pf)) {
ret = -EINVAL;
goto error_param;
}

View File

@ -1765,6 +1765,9 @@ static int netvsc_set_rxfh(struct net_device *dev,
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
if (!ndc->rx_table_sz)
return -EOPNOTSUPP;
rndis_dev = ndev->extension;
if (rxfh->indir) {
for (i = 0; i < ndc->rx_table_sz; i++)

View File

@ -1292,7 +1292,7 @@ void qla2xxx_process_purls_iocb(void **pkt, struct rsp_que **rsp)
a.reason = FCNVME_RJT_RC_LOGIC;
a.explanation = FCNVME_RJT_EXP_NONE;
xmt_reject = true;
kfree(item);
qla24xx_free_purex_item(item);
goto out;
}

View File

@ -369,7 +369,7 @@ void netfs_readahead(struct readahead_control *ractl)
return netfs_put_request(rreq, netfs_rreq_trace_put_return);
cleanup_free:
return netfs_put_request(rreq, netfs_rreq_trace_put_failed);
return netfs_put_failed_request(rreq);
}
EXPORT_SYMBOL(netfs_readahead);
@ -472,7 +472,7 @@ static int netfs_read_gaps(struct file *file, struct folio *folio)
return ret < 0 ? ret : 0;
discard:
netfs_put_request(rreq, netfs_rreq_trace_put_discard);
netfs_put_failed_request(rreq);
alloc_error:
folio_unlock(folio);
return ret;
@ -532,7 +532,7 @@ int netfs_read_folio(struct file *file, struct folio *folio)
return ret < 0 ? ret : 0;
discard:
netfs_put_request(rreq, netfs_rreq_trace_put_discard);
netfs_put_failed_request(rreq);
alloc_error:
folio_unlock(folio);
return ret;
@ -699,7 +699,7 @@ have_folio_no_wait:
return 0;
error_put:
netfs_put_request(rreq, netfs_rreq_trace_put_failed);
netfs_put_failed_request(rreq);
error:
if (folio) {
folio_unlock(folio);
@ -754,7 +754,7 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio,
return ret < 0 ? ret : 0;
error_put:
netfs_put_request(rreq, netfs_rreq_trace_put_discard);
netfs_put_failed_request(rreq);
error:
_leave(" = %d", ret);
return ret;

View File

@ -347,7 +347,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
folio_put(folio);
ret = filemap_write_and_wait_range(mapping, fpos, fpos + flen - 1);
if (ret < 0)
goto error_folio_unlock;
goto out;
continue;
copied:

View File

@ -131,6 +131,7 @@ static ssize_t netfs_unbuffered_read(struct netfs_io_request *rreq, bool sync)
if (rreq->len == 0) {
pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
netfs_put_request(rreq, netfs_rreq_trace_put_discard);
return -EIO;
}
@ -205,7 +206,7 @@ ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *i
if (user_backed_iter(iter)) {
ret = netfs_extract_user_iter(iter, rreq->len, &rreq->buffer.iter, 0);
if (ret < 0)
goto out;
goto error_put;
rreq->direct_bv = (struct bio_vec *)rreq->buffer.iter.bvec;
rreq->direct_bv_count = ret;
rreq->direct_bv_unpin = iov_iter_extract_will_pin(iter);
@ -238,6 +239,10 @@ out:
if (ret > 0)
orig_count -= ret;
return ret;
error_put:
netfs_put_failed_request(rreq);
return ret;
}
EXPORT_SYMBOL(netfs_unbuffered_read_iter_locked);

View File

@ -57,7 +57,7 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
n = netfs_extract_user_iter(iter, len, &wreq->buffer.iter, 0);
if (n < 0) {
ret = n;
goto out;
goto error_put;
}
wreq->direct_bv = (struct bio_vec *)wreq->buffer.iter.bvec;
wreq->direct_bv_count = n;
@ -101,6 +101,10 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
out:
netfs_put_request(wreq, netfs_rreq_trace_put_return);
return ret;
error_put:
netfs_put_failed_request(wreq);
return ret;
}
EXPORT_SYMBOL(netfs_unbuffered_write_iter_locked);

View File

@ -87,6 +87,7 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
void netfs_clear_subrequests(struct netfs_io_request *rreq);
void netfs_put_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
void netfs_put_failed_request(struct netfs_io_request *rreq);
struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq);
static inline void netfs_see_request(struct netfs_io_request *rreq,

View File

@ -116,10 +116,8 @@ static void netfs_free_request_rcu(struct rcu_head *rcu)
netfs_stat_d(&netfs_n_rh_rreq);
}
static void netfs_free_request(struct work_struct *work)
static void netfs_deinit_request(struct netfs_io_request *rreq)
{
struct netfs_io_request *rreq =
container_of(work, struct netfs_io_request, cleanup_work);
struct netfs_inode *ictx = netfs_inode(rreq->inode);
unsigned int i;
@ -149,6 +147,14 @@ static void netfs_free_request(struct work_struct *work)
if (atomic_dec_and_test(&ictx->io_count))
wake_up_var(&ictx->io_count);
}
static void netfs_free_request(struct work_struct *work)
{
struct netfs_io_request *rreq =
container_of(work, struct netfs_io_request, cleanup_work);
netfs_deinit_request(rreq);
call_rcu(&rreq->rcu, netfs_free_request_rcu);
}
@ -167,6 +173,24 @@ void netfs_put_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace
}
}
/*
* Free a request (synchronously) that was just allocated but has
* failed before it could be submitted.
*/
void netfs_put_failed_request(struct netfs_io_request *rreq)
{
int r = refcount_read(&rreq->ref);
/* new requests have two references (see
* netfs_alloc_request(), and this function is only allowed on
* new request objects
*/
WARN_ON_ONCE(r != 2);
trace_netfs_rreq_ref(rreq->debug_id, r, netfs_rreq_trace_put_failed);
netfs_free_request(&rreq->cleanup_work);
}
/*
* Allocate and partially initialise an I/O request structure.
*/

View File

@ -137,7 +137,7 @@ static void netfs_read_unlock_folios(struct netfs_io_request *rreq,
rreq->front_folio_order = order;
fsize = PAGE_SIZE << order;
fpos = folio_pos(folio);
fend = umin(fpos + fsize, rreq->i_size);
fend = fpos + fsize;
trace_netfs_collect_folio(rreq, folio, fend, collected_to);
@ -281,8 +281,10 @@ reassess:
} else if (test_bit(NETFS_RREQ_SHORT_TRANSFER, &rreq->flags)) {
notes |= MADE_PROGRESS;
} else {
if (!stream->failed)
if (!stream->failed) {
stream->transferred += transferred;
stream->transferred_valid = true;
}
if (front->transferred < front->len)
set_bit(NETFS_RREQ_SHORT_TRANSFER, &rreq->flags);
notes |= MADE_PROGRESS;
@ -544,6 +546,15 @@ void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq)
}
}
/* If need retry is set, error should not matter unless we hit too many
* retries. Pause the generation of new subreqs
*/
if (test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
trace_netfs_rreq(rreq, netfs_rreq_trace_set_pause);
set_bit(NETFS_RREQ_PAUSE, &rreq->flags);
goto skip_error_checks;
}
if (unlikely(subreq->error < 0)) {
trace_netfs_failure(rreq, subreq, subreq->error, netfs_fail_read);
if (subreq->source == NETFS_READ_FROM_CACHE) {
@ -557,6 +568,7 @@ void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq)
set_bit(NETFS_RREQ_PAUSE, &rreq->flags);
}
skip_error_checks:
trace_netfs_sreq(subreq, netfs_sreq_trace_terminated);
netfs_subreq_clear_in_progress(subreq);
netfs_put_subrequest(subreq, netfs_sreq_trace_put_terminated);

View File

@ -118,7 +118,7 @@ static struct netfs_io_request *netfs_pgpriv2_begin_copy_to_cache(
return creq;
cancel_put:
netfs_put_request(creq, netfs_rreq_trace_put_return);
netfs_put_failed_request(creq);
cancel:
rreq->copy_to_cache = ERR_PTR(-ENOBUFS);
clear_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags);

View File

@ -12,6 +12,7 @@
static void netfs_reissue_read(struct netfs_io_request *rreq,
struct netfs_io_subrequest *subreq)
{
subreq->error = 0;
__clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
netfs_stat(&netfs_n_rh_retry_read_subreq);
@ -242,8 +243,7 @@ abandon_after:
subreq = list_next_entry(subreq, rreq_link);
abandon:
list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
if (!subreq->error &&
!test_bit(NETFS_SREQ_FAILED, &subreq->flags) &&
if (!test_bit(NETFS_SREQ_FAILED, &subreq->flags) &&
!test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags))
continue;
subreq->error = -ENOMEM;

View File

@ -189,7 +189,7 @@ ssize_t netfs_read_single(struct inode *inode, struct file *file, struct iov_ite
return ret;
cleanup_free:
netfs_put_request(rreq, netfs_rreq_trace_put_failed);
netfs_put_failed_request(rreq);
return ret;
}
EXPORT_SYMBOL(netfs_read_single);

View File

@ -254,6 +254,7 @@ reassess_streams:
if (front->start + front->transferred > stream->collected_to) {
stream->collected_to = front->start + front->transferred;
stream->transferred = stream->collected_to - wreq->start;
stream->transferred_valid = true;
notes |= MADE_PROGRESS;
}
if (test_bit(NETFS_SREQ_FAILED, &front->flags)) {
@ -356,6 +357,7 @@ bool netfs_write_collection(struct netfs_io_request *wreq)
{
struct netfs_inode *ictx = netfs_inode(wreq->inode);
size_t transferred;
bool transferred_valid = false;
int s;
_enter("R=%x", wreq->debug_id);
@ -376,12 +378,16 @@ bool netfs_write_collection(struct netfs_io_request *wreq)
continue;
if (!list_empty(&stream->subrequests))
return false;
if (stream->transferred < transferred)
if (stream->transferred_valid &&
stream->transferred < transferred) {
transferred = stream->transferred;
transferred_valid = true;
}
}
/* Okay, declare that all I/O is complete. */
wreq->transferred = transferred;
if (transferred_valid)
wreq->transferred = transferred;
trace_netfs_rreq(wreq, netfs_rreq_trace_write_done);
if (wreq->io_streams[1].active &&
@ -486,11 +492,11 @@ void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error)
if (IS_ERR_VALUE(transferred_or_error)) {
subreq->error = transferred_or_error;
if (subreq->error == -EAGAIN)
set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
else
/* if need retry is set, error should not matter */
if (!test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
set_bit(NETFS_SREQ_FAILED, &subreq->flags);
trace_netfs_failure(wreq, subreq, transferred_or_error, netfs_fail_write);
trace_netfs_failure(wreq, subreq, transferred_or_error, netfs_fail_write);
}
switch (subreq->source) {
case NETFS_WRITE_TO_CACHE:

View File

@ -118,12 +118,12 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
wreq->io_streams[0].prepare_write = ictx->ops->prepare_write;
wreq->io_streams[0].issue_write = ictx->ops->issue_write;
wreq->io_streams[0].collected_to = start;
wreq->io_streams[0].transferred = LONG_MAX;
wreq->io_streams[0].transferred = 0;
wreq->io_streams[1].stream_nr = 1;
wreq->io_streams[1].source = NETFS_WRITE_TO_CACHE;
wreq->io_streams[1].collected_to = start;
wreq->io_streams[1].transferred = LONG_MAX;
wreq->io_streams[1].transferred = 0;
if (fscache_resources_valid(&wreq->cache_resources)) {
wreq->io_streams[1].avail = true;
wreq->io_streams[1].active = true;
@ -133,8 +133,7 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
return wreq;
nomem:
wreq->error = -ENOMEM;
netfs_put_request(wreq, netfs_rreq_trace_put_failed);
netfs_put_failed_request(wreq);
return ERR_PTR(-ENOMEM);
}
@ -251,6 +250,7 @@ void netfs_reissue_write(struct netfs_io_stream *stream,
iov_iter_truncate(&subreq->io_iter, size);
subreq->retry_count++;
subreq->error = 0;
__clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
netfs_stat(&netfs_n_wh_retry_write_subreq);

View File

@ -98,7 +98,6 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
subreq->start = start;
subreq->len = len;
__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
subreq->retry_count++;
trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
/* Renegotiate max_len (wsize) */

View File

@ -152,7 +152,7 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
struct cached_fid *cfid;
struct cached_fids *cfids;
const char *npath;
int retries = 0, cur_sleep = 1;
int retries = 0, cur_sleep = 0;
__le32 lease_flags = 0;
if (cifs_sb->root == NULL)
@ -302,6 +302,10 @@ replay_again:
smb2_set_related(&rqst[1]);
if (retries) {
/* Back-off before retry */
if (cur_sleep)
msleep(cur_sleep);
smb2_set_replay(server, &rqst[0]);
smb2_set_replay(server, &rqst[1]);
}

View File

@ -36,10 +36,10 @@ struct cached_fid {
struct list_head entry;
struct cached_fids *cfids;
const char *path;
bool has_lease:1;
bool is_open:1;
bool on_list:1;
bool file_all_info_is_valid:1;
bool has_lease;
bool is_open;
bool on_list;
bool file_all_info_is_valid;
unsigned long time; /* jiffies of when lease was taken */
unsigned long last_access_time; /* jiffies of when last accessed */
struct kref refcount;

View File

@ -1538,6 +1538,8 @@ struct cifs_io_subrequest {
int result;
bool have_xid;
bool replay;
unsigned int retries; /* number of retries so far */
unsigned int cur_sleep; /* time to sleep before replay */
struct kvec iov[2];
struct TCP_Server_Info *server;
#ifdef CONFIG_CIFS_SMB_DIRECT

View File

@ -1958,6 +1958,10 @@ static int match_session(struct cifs_ses *ses,
case Kerberos:
if (!uid_eq(ctx->cred_uid, ses->cred_uid))
return 0;
if (strncmp(ses->user_name ?: "",
ctx->username ?: "",
CIFS_MAX_USERNAME_LEN))
return 0;
break;
case NTLMv2:
case RawNTLMSSP:

View File

@ -177,6 +177,9 @@ int smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, __u32
rc = SMB2_open(xid, oparms, smb2_path, &smb2_oplock, smb2_data, NULL, &err_iov,
&err_buftype);
if (rc == -EACCES && retry_without_read_attributes) {
free_rsp_buf(err_buftype, err_iov.iov_base);
memset(&err_iov, 0, sizeof(err_iov));
err_buftype = CIFS_NO_BUFFER;
oparms->desired_access &= ~FILE_READ_ATTRIBUTES;
rc = SMB2_open(xid, oparms, smb2_path, &smb2_oplock, smb2_data, NULL, &err_iov,
&err_buftype);

View File

@ -185,7 +185,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
struct reparse_data_buffer *rbuf;
struct TCP_Server_Info *server;
int resp_buftype[MAX_COMPOUND];
int retries = 0, cur_sleep = 1;
int retries = 0, cur_sleep = 0;
__u8 delete_pending[8] = {1,};
struct kvec *rsp_iov, *iov;
struct inode *inode = NULL;
@ -635,18 +635,26 @@ replay_again:
num_rqst++;
if (cfile) {
if (retries)
if (retries) {
/* Back-off before retry */
if (cur_sleep)
msleep(cur_sleep);
for (i = 1; i < num_rqst - 2; i++)
smb2_set_replay(server, &rqst[i]);
}
rc = compound_send_recv(xid, ses, server,
flags, num_rqst - 2,
&rqst[1], &resp_buftype[1],
&rsp_iov[1]);
} else {
if (retries)
if (retries) {
/* Back-off before retry */
if (cur_sleep)
msleep(cur_sleep);
for (i = 0; i < num_rqst; i++)
smb2_set_replay(server, &rqst[i]);
}
rc = compound_send_recv(xid, ses, server,
flags, num_rqst,
@ -1177,7 +1185,7 @@ smb2_unlink(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
{
struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
__le16 *utf16_path __free(kfree) = NULL;
int retries = 0, cur_sleep = 1;
int retries = 0, cur_sleep = 0;
struct TCP_Server_Info *server;
struct cifs_open_parms oparms;
struct smb2_create_req *creq;
@ -1205,6 +1213,7 @@ again:
memset(resp_buftype, 0, sizeof(resp_buftype));
memset(rsp_iov, 0, sizeof(rsp_iov));
memset(open_iov, 0, sizeof(open_iov));
rqst[0].rq_iov = open_iov;
rqst[0].rq_nvec = ARRAY_SIZE(open_iov);
@ -1229,16 +1238,20 @@ again:
creq = rqst[0].rq_iov[0].iov_base;
creq->ShareAccess = FILE_SHARE_DELETE_LE;
memset(&close_iov, 0, sizeof(close_iov));
rqst[1].rq_iov = &close_iov;
rqst[1].rq_nvec = 1;
rc = SMB2_close_init(tcon, server, &rqst[1],
COMPOUND_FID, COMPOUND_FID, false);
smb2_set_related(&rqst[1]);
if (rc)
goto err_free;
smb2_set_related(&rqst[1]);
if (retries) {
/* Back-off before retry */
if (cur_sleep)
msleep(cur_sleep);
for (int i = 0; i < ARRAY_SIZE(rqst); i++)
smb2_set_replay(server, &rqst[i]);
}
@ -1259,7 +1272,7 @@ again:
if (rc == -EINVAL && dentry) {
dentry = NULL;
retries = 0;
cur_sleep = 1;
cur_sleep = 0;
goto again;
}
/*

View File

@ -1174,10 +1174,11 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
struct smb2_file_full_ea_info *ea;
struct smb2_query_info_rsp *rsp;
int rc, used_len = 0;
int retries = 0, cur_sleep = 1;
int retries = 0, cur_sleep = 0;
replay_again:
/* reinitialize for possible replay */
used_len = 0;
flags = CIFS_CP_CREATE_CLOSE_OP;
oplock = SMB2_OPLOCK_LEVEL_NONE;
server = cifs_pick_channel(ses);
@ -1304,6 +1305,9 @@ replay_again:
smb2_set_related(&rqst[2]);
if (retries) {
/* Back-off before retry */
if (cur_sleep)
msleep(cur_sleep);
smb2_set_replay(server, &rqst[0]);
smb2_set_replay(server, &rqst[1]);
smb2_set_replay(server, &rqst[2]);
@ -1572,10 +1576,11 @@ smb2_ioctl_query_info(const unsigned int xid,
void *data[2];
int create_options = is_dir ? CREATE_NOT_FILE : CREATE_NOT_DIR;
void (*free_req1_func)(struct smb_rqst *r);
int retries = 0, cur_sleep = 1;
int retries = 0, cur_sleep = 0;
replay_again:
/* reinitialize for possible replay */
buffer = NULL;
flags = CIFS_CP_CREATE_CLOSE_OP;
oplock = SMB2_OPLOCK_LEVEL_NONE;
server = cifs_pick_channel(ses);
@ -1721,6 +1726,9 @@ replay_again:
smb2_set_related(&rqst[2]);
if (retries) {
/* Back-off before retry */
if (cur_sleep)
msleep(cur_sleep);
smb2_set_replay(server, &rqst[0]);
smb2_set_replay(server, &rqst[1]);
smb2_set_replay(server, &rqst[2]);
@ -2337,7 +2345,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
struct smb2_query_directory_rsp *qd_rsp = NULL;
struct smb2_create_rsp *op_rsp = NULL;
struct TCP_Server_Info *server;
int retries = 0, cur_sleep = 1;
int retries = 0, cur_sleep = 0;
replay_again:
/* reinitialize for possible replay */
@ -2395,6 +2403,9 @@ replay_again:
smb2_set_related(&rqst[1]);
if (retries) {
/* Back-off before retry */
if (cur_sleep)
msleep(cur_sleep);
smb2_set_replay(server, &rqst[0]);
smb2_set_replay(server, &rqst[1]);
}
@ -2671,10 +2682,14 @@ bool smb2_should_replay(struct cifs_tcon *tcon,
return false;
if (tcon->retry || (*pretries)++ < tcon->ses->server->retrans) {
msleep(*pcur_sleep);
(*pcur_sleep) = ((*pcur_sleep) << 1);
if ((*pcur_sleep) > CIFS_MAX_SLEEP)
(*pcur_sleep) = CIFS_MAX_SLEEP;
/* Update sleep time for exponential backoff */
if (!(*pcur_sleep))
(*pcur_sleep) = 1;
else {
(*pcur_sleep) = ((*pcur_sleep) << 1);
if ((*pcur_sleep) > CIFS_MAX_SLEEP)
(*pcur_sleep) = CIFS_MAX_SLEEP;
}
return true;
}
@ -2705,7 +2720,7 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
int rc;
__le16 *utf16_path;
struct cached_fid *cfid;
int retries = 0, cur_sleep = 1;
int retries = 0, cur_sleep = 0;
replay_again:
/* reinitialize for possible replay */
@ -2795,6 +2810,9 @@ replay_again:
smb2_set_related(&rqst[2]);
if (retries) {
/* Back-off before retry */
if (cur_sleep)
msleep(cur_sleep);
if (!cfid) {
smb2_set_replay(server, &rqst[0]);
smb2_set_replay(server, &rqst[2]);

View File

@ -2839,10 +2839,11 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
unsigned int total_len;
__le16 *utf16_path = NULL;
struct TCP_Server_Info *server;
int retries = 0, cur_sleep = 1;
int retries = 0, cur_sleep = 0;
replay_again:
/* reinitialize for possible replay */
pc_buf = NULL;
flags = 0;
n_iov = 2;
server = cifs_pick_channel(ses);
@ -2951,8 +2952,12 @@ replay_again:
trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, full_path, CREATE_NOT_FILE,
FILE_WRITE_ATTRIBUTES);
if (retries)
if (retries) {
/* Back-off before retry */
if (cur_sleep)
msleep(cur_sleep);
smb2_set_replay(server, &rqst);
}
/* resource #4: response buffer */
rc = cifs_send_recv(xid, ses, server,
@ -3200,7 +3205,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
int resp_buftype = CIFS_NO_BUFFER;
int rc = 0;
int flags = 0;
int retries = 0, cur_sleep = 1;
int retries = 0, cur_sleep = 0;
replay_again:
/* reinitialize for possible replay */
@ -3228,8 +3233,12 @@ replay_again:
trace_smb3_open_enter(xid, tcon->tid, tcon->ses->Suid, oparms->path,
oparms->create_options, oparms->desired_access);
if (retries)
if (retries) {
/* Back-off before retry */
if (cur_sleep)
msleep(cur_sleep);
smb2_set_replay(server, &rqst);
}
rc = cifs_send_recv(xid, ses, server,
&rqst, &resp_buftype, flags,
@ -3413,7 +3422,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
int resp_buftype = CIFS_NO_BUFFER;
int rc = 0;
int flags = 0;
int retries = 0, cur_sleep = 1;
int retries = 0, cur_sleep = 0;
if (!tcon)
return -EIO;
@ -3453,8 +3462,12 @@ replay_again:
if (rc)
goto ioctl_exit;
if (retries)
if (retries) {
/* Back-off before retry */
if (cur_sleep)
msleep(cur_sleep);
smb2_set_replay(server, &rqst);
}
rc = cifs_send_recv(xid, ses, server,
&rqst, &resp_buftype, flags,
@ -3608,7 +3621,7 @@ __SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
int rc = 0;
int flags = 0;
bool query_attrs = false;
int retries = 0, cur_sleep = 1;
int retries = 0, cur_sleep = 0;
replay_again:
/* reinitialize for possible replay */
@ -3640,8 +3653,12 @@ replay_again:
if (rc)
goto close_exit;
if (retries)
if (retries) {
/* Back-off before retry */
if (cur_sleep)
msleep(cur_sleep);
smb2_set_replay(server, &rqst);
}
rc = cifs_send_recv(xid, ses, server,
&rqst, &resp_buftype, flags, &rsp_iov);
@ -3811,7 +3828,7 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
struct TCP_Server_Info *server;
int flags = 0;
bool allocated = false;
int retries = 0, cur_sleep = 1;
int retries = 0, cur_sleep = 0;
cifs_dbg(FYI, "Query Info\n");
@ -3845,8 +3862,12 @@ replay_again:
trace_smb3_query_info_enter(xid, persistent_fid, tcon->tid,
ses->Suid, info_class, (__u32)info_type);
if (retries)
if (retries) {
/* Back-off before retry */
if (cur_sleep)
msleep(cur_sleep);
smb2_set_replay(server, &rqst);
}
rc = cifs_send_recv(xid, ses, server,
&rqst, &resp_buftype, flags, &rsp_iov);
@ -4001,7 +4022,7 @@ SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
int resp_buftype = CIFS_NO_BUFFER;
int flags = 0;
int rc = 0;
int retries = 0, cur_sleep = 1;
int retries = 0, cur_sleep = 0;
replay_again:
/* reinitialize for possible replay */
@ -4032,8 +4053,12 @@ replay_again:
trace_smb3_notify_enter(xid, persistent_fid, tcon->tid, ses->Suid,
(u8)watch_tree, completion_filter);
if (retries)
if (retries) {
/* Back-off before retry */
if (cur_sleep)
msleep(cur_sleep);
smb2_set_replay(server, &rqst);
}
rc = cifs_send_recv(xid, ses, server,
&rqst, &resp_buftype, flags, &rsp_iov);
@ -4335,7 +4360,7 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
int resp_buftype = CIFS_NO_BUFFER;
int flags = 0;
int rc = 0;
int retries = 0, cur_sleep = 1;
int retries = 0, cur_sleep = 0;
replay_again:
/* reinitialize for possible replay */
@ -4361,8 +4386,12 @@ replay_again:
trace_smb3_flush_enter(xid, persistent_fid, tcon->tid, ses->Suid);
if (retries)
if (retries) {
/* Back-off before retry */
if (cur_sleep)
msleep(cur_sleep);
smb2_set_replay(server, &rqst);
}
rc = cifs_send_recv(xid, ses, server,
&rqst, &resp_buftype, flags, &rsp_iov);
@ -4554,9 +4583,19 @@ smb2_readv_callback(struct mid_q_entry *mid)
iov_iter_truncate(&rqst.rq_iter, rdata->got_bytes);
rc = smb2_verify_signature(&rqst, server);
if (rc)
if (rc) {
cifs_tcon_dbg(VFS, "SMB signature verification returned error = %d\n",
rc);
rc);
rdata->subreq.error = rc;
rdata->result = rc;
if (is_replayable_error(rc)) {
trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_retry_needed);
__set_bit(NETFS_SREQ_NEED_RETRY, &rdata->subreq.flags);
} else
trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_bad);
} else
trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_progress);
}
/* FIXME: should this be counted toward the initiating task? */
task_io_account_read(rdata->got_bytes);
@ -4631,6 +4670,14 @@ do_retry:
if (rdata->got_bytes)
__set_bit(NETFS_SREQ_MADE_PROGRESS, &rdata->subreq.flags);
}
/* see if we need to retry */
if (is_replayable_error(rdata->result) &&
smb2_should_replay(tcon,
&rdata->retries,
&rdata->cur_sleep))
rdata->replay = true;
trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, rdata->credits.value,
server->credits, server->in_flight,
0, cifs_trace_rw_credits_read_response_clear);
@ -4679,7 +4726,7 @@ smb2_async_readv(struct cifs_io_subrequest *rdata)
rc = smb2_new_read_req(
(void **) &buf, &total_len, &io_parms, rdata, 0, 0);
if (rc)
return rc;
goto out;
if (smb3_encryption_required(io_parms.tcon))
flags |= CIFS_TRANSFORM_REQ;
@ -4691,6 +4738,13 @@ smb2_async_readv(struct cifs_io_subrequest *rdata)
shdr = (struct smb2_hdr *)buf;
if (rdata->replay) {
/* Back-off before retry */
if (rdata->cur_sleep)
msleep(rdata->cur_sleep);
smb2_set_replay(server, &rqst);
}
if (rdata->credits.value > 0) {
shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(io_parms.length,
SMB2_MAX_BUFFER_SIZE));
@ -4726,6 +4780,17 @@ smb2_async_readv(struct cifs_io_subrequest *rdata)
async_readv_out:
cifs_small_buf_release(buf);
out:
/* if the send error is retryable, let netfs know about it */
if (is_replayable_error(rc) &&
smb2_should_replay(tcon,
&rdata->retries,
&rdata->cur_sleep)) {
trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_retry_needed);
__set_bit(NETFS_SREQ_NEED_RETRY, &rdata->subreq.flags);
}
return rc;
}
@ -4839,14 +4904,20 @@ smb2_writev_callback(struct mid_q_entry *mid)
switch (mid->mid_state) {
case MID_RESPONSE_RECEIVED:
trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_progress);
credits.value = le16_to_cpu(rsp->hdr.CreditRequest);
credits.instance = server->reconnect_instance;
result = smb2_check_receive(mid, server, 0);
if (result != 0) {
trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_bad);
if (is_replayable_error(result)) {
trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_retry_needed);
__set_bit(NETFS_SREQ_NEED_RETRY, &wdata->subreq.flags);
} else {
wdata->subreq.error = result;
trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_bad);
}
break;
}
trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_progress);
written = le32_to_cpu(rsp->DataLength);
/*
@ -4861,7 +4932,7 @@ smb2_writev_callback(struct mid_q_entry *mid)
cifs_stats_bytes_written(tcon, written);
if (written < wdata->subreq.len) {
wdata->result = -ENOSPC;
result = -ENOSPC;
} else if (written > 0) {
wdata->subreq.len = written;
__set_bit(NETFS_SREQ_MADE_PROGRESS, &wdata->subreq.flags);
@ -4902,6 +4973,7 @@ smb2_writev_callback(struct mid_q_entry *mid)
}
#endif
if (result) {
wdata->result = result;
cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
trace_smb3_write_err(wdata->rreq->debug_id,
wdata->subreq.debug_index,
@ -4924,6 +4996,14 @@ smb2_writev_callback(struct mid_q_entry *mid)
server->credits, server->in_flight,
0, cifs_trace_rw_credits_write_response_clear);
wdata->credits.value = 0;
/* see if we need to retry */
if (is_replayable_error(wdata->result) &&
smb2_should_replay(tcon,
&wdata->retries,
&wdata->cur_sleep))
wdata->replay = true;
cifs_write_subrequest_terminated(wdata, result ?: written);
release_mid(mid);
trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, 0,
@ -5042,8 +5122,12 @@ smb2_async_writev(struct cifs_io_subrequest *wdata)
}
#endif
if (wdata->subreq.retry_count > 0)
if (wdata->replay) {
/* Back-off before retry */
if (wdata->cur_sleep)
msleep(wdata->cur_sleep);
smb2_set_replay(server, &rqst);
}
cifs_dbg(FYI, "async write at %llu %u bytes iter=%zx\n",
io_parms->offset, io_parms->length, iov_iter_count(&wdata->subreq.io_iter));
@ -5089,6 +5173,16 @@ smb2_async_writev(struct cifs_io_subrequest *wdata)
async_writev_out:
cifs_small_buf_release(req);
out:
/* if the send error is retryable, let netfs know about it */
if (is_replayable_error(rc) &&
smb2_should_replay(tcon,
&wdata->retries,
&wdata->cur_sleep)) {
wdata->replay = true;
trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_retry_needed);
__set_bit(NETFS_SREQ_NEED_RETRY, &wdata->subreq.flags);
}
if (rc) {
trace_smb3_rw_credits(wdata->rreq->debug_id,
wdata->subreq.debug_index,
@ -5120,7 +5214,7 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
int flags = 0;
unsigned int total_len;
struct TCP_Server_Info *server;
int retries = 0, cur_sleep = 1;
int retries = 0, cur_sleep = 0;
replay_again:
/* reinitialize for possible replay */
@ -5168,8 +5262,12 @@ replay_again:
rqst.rq_iov = iov;
rqst.rq_nvec = n_vec + 1;
if (retries)
if (retries) {
/* Back-off before retry */
if (cur_sleep)
msleep(cur_sleep);
smb2_set_replay(server, &rqst);
}
rc = cifs_send_recv(xid, io_parms->tcon->ses, server,
&rqst,
@ -5520,7 +5618,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_ses *ses = tcon->ses;
struct TCP_Server_Info *server;
int flags = 0;
int retries = 0, cur_sleep = 1;
int retries = 0, cur_sleep = 0;
replay_again:
/* reinitialize for possible replay */
@ -5545,8 +5643,12 @@ replay_again:
if (rc)
goto qdir_exit;
if (retries)
if (retries) {
/* Back-off before retry */
if (cur_sleep)
msleep(cur_sleep);
smb2_set_replay(server, &rqst);
}
rc = cifs_send_recv(xid, ses, server,
&rqst, &resp_buftype, flags, &rsp_iov);
@ -5655,7 +5757,7 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_ses *ses = tcon->ses;
struct TCP_Server_Info *server;
int flags = 0;
int retries = 0, cur_sleep = 1;
int retries = 0, cur_sleep = 0;
replay_again:
/* reinitialize for possible replay */
@ -5688,8 +5790,12 @@ replay_again:
return rc;
}
if (retries)
if (retries) {
/* Back-off before retry */
if (cur_sleep)
msleep(cur_sleep);
smb2_set_replay(server, &rqst);
}
rc = cifs_send_recv(xid, ses, server,
&rqst, &resp_buftype, flags,
@ -5768,7 +5874,7 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
struct kvec iov[1];
struct kvec rsp_iov;
int resp_buf_type;
int retries = 0, cur_sleep = 1;
int retries = 0, cur_sleep = 0;
replay_again:
/* reinitialize for possible replay */
@ -5798,8 +5904,12 @@ replay_again:
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
if (retries)
if (retries) {
/* Back-off before retry */
if (cur_sleep)
msleep(cur_sleep);
smb2_set_replay(server, &rqst);
}
rc = cifs_send_recv(xid, ses, server,
&rqst, &resp_buf_type, flags, &rsp_iov);
@ -5901,7 +6011,7 @@ SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
struct TCP_Server_Info *server;
FILE_SYSTEM_POSIX_INFO *info = NULL;
int flags = 0;
int retries = 0, cur_sleep = 1;
int retries = 0, cur_sleep = 0;
replay_again:
/* reinitialize for possible replay */
@ -5922,8 +6032,12 @@ replay_again:
rqst.rq_iov = &iov;
rqst.rq_nvec = 1;
if (retries)
if (retries) {
/* Back-off before retry */
if (cur_sleep)
msleep(cur_sleep);
smb2_set_replay(server, &rqst);
}
rc = cifs_send_recv(xid, ses, server,
&rqst, &resp_buftype, flags, &rsp_iov);
@ -5966,7 +6080,7 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
struct TCP_Server_Info *server;
unsigned int rsp_len, offset;
int flags = 0;
int retries = 0, cur_sleep = 1;
int retries = 0, cur_sleep = 0;
replay_again:
/* reinitialize for possible replay */
@ -6003,8 +6117,12 @@ replay_again:
rqst.rq_iov = &iov;
rqst.rq_nvec = 1;
if (retries)
if (retries) {
/* Back-off before retry */
if (cur_sleep)
msleep(cur_sleep);
smb2_set_replay(server, &rqst);
}
rc = cifs_send_recv(xid, ses, server,
&rqst, &resp_buftype, flags, &rsp_iov);
@ -6066,7 +6184,7 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
int flags = CIFS_NO_RSP_BUF;
unsigned int total_len;
struct TCP_Server_Info *server;
int retries = 0, cur_sleep = 1;
int retries = 0, cur_sleep = 0;
replay_again:
/* reinitialize for possible replay */
@ -6102,8 +6220,12 @@ replay_again:
rqst.rq_iov = iov;
rqst.rq_nvec = 2;
if (retries)
if (retries) {
/* Back-off before retry */
if (cur_sleep)
msleep(cur_sleep);
smb2_set_replay(server, &rqst);
}
rc = cifs_send_recv(xid, tcon->ses, server,
&rqst, &resp_buf_type, flags,

View File

@ -744,6 +744,9 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
sd.pos = kiocb.ki_pos;
if (ret <= 0)
break;
WARN_ONCE(ret > sd.total_len - left,
"Splice Exceeded! ret=%zd tot=%zu left=%zu\n",
ret, sd.total_len, left);
sd.num_spliced += ret;
sd.total_len -= ret;

View File

@ -850,15 +850,16 @@ sparse_alloc:
* invalid inode records, such as records that start at agbno 0
* or extend beyond the AG.
*
* Set min agbno to the first aligned, non-zero agbno and max to
* the last aligned agbno that is at least one full chunk from
* the end of the AG.
* Set min agbno to the first chunk aligned, non-zero agbno and
* max to one less than the last chunk aligned agbno from the
* end of the AG. We subtract 1 from max so that the cluster
* allocation alignment takes over and allows allocation within
* the last full inode chunk in the AG.
*/
args.min_agbno = args.mp->m_sb.sb_inoalignmt;
args.max_agbno = round_down(xfs_ag_block_count(args.mp,
pag->pag_agno),
args.mp->m_sb.sb_inoalignmt) -
igeo->ialloc_blks;
args.mp->m_sb.sb_inoalignmt) - 1;
error = xfs_alloc_vextent_near_bno(&args,
XFS_AGB_TO_FSB(args.mp, pag->pag_agno,

View File

@ -150,6 +150,7 @@ struct netfs_io_stream {
bool active; /* T if stream is active */
bool need_retry; /* T if this stream needs retrying */
bool failed; /* T if this stream failed */
bool transferred_valid; /* T if ->transferred is valid */
};
/*

View File

@ -1594,7 +1594,6 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
qdisc_drop_reason(skb, sch, to_free, SKB_DROP_REASON_QDISC_OVERLIMIT);
sch->q.qlen--;
qdisc_tree_reduce_backlog(sch, 1, len);
cake_heapify(q, 0);
@ -1740,14 +1739,14 @@ static void cake_reconfigure(struct Qdisc *sch);
static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
u32 idx, tin, prev_qlen, prev_backlog, drop_id;
struct cake_sched_data *q = qdisc_priv(sch);
int len = qdisc_pkt_len(skb);
int ret;
int len = qdisc_pkt_len(skb), ret;
struct sk_buff *ack = NULL;
ktime_t now = ktime_get();
struct cake_tin_data *b;
struct cake_flow *flow;
u32 idx;
bool same_flow = false;
/* choose flow to insert into */
idx = cake_classify(sch, &b, skb, q->flow_mode, &ret);
@ -1757,6 +1756,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
__qdisc_drop(skb, to_free);
return ret;
}
tin = (u32)(b - q->tins);
idx--;
flow = &b->flows[idx];
@ -1819,6 +1819,8 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
consume_skb(skb);
} else {
/* not splitting */
int ack_pkt_len = 0;
cobalt_set_enqueue_time(skb, now);
get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
flow_queue_add(flow, skb);
@ -1829,13 +1831,13 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
if (ack) {
b->ack_drops++;
sch->qstats.drops++;
b->bytes += qdisc_pkt_len(ack);
len -= qdisc_pkt_len(ack);
ack_pkt_len = qdisc_pkt_len(ack);
b->bytes += ack_pkt_len;
q->buffer_used += skb->truesize - ack->truesize;
if (q->rate_flags & CAKE_FLAG_INGRESS)
cake_advance_shaper(q, b, ack, now, true);
qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
qdisc_tree_reduce_backlog(sch, 1, ack_pkt_len);
consume_skb(ack);
} else {
sch->q.qlen++;
@ -1844,11 +1846,11 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
/* stats */
b->packets++;
b->bytes += len;
b->backlogs[idx] += len;
b->tin_backlog += len;
sch->qstats.backlog += len;
q->avg_window_bytes += len;
b->bytes += len - ack_pkt_len;
b->backlogs[idx] += len - ack_pkt_len;
b->tin_backlog += len - ack_pkt_len;
sch->qstats.backlog += len - ack_pkt_len;
q->avg_window_bytes += len - ack_pkt_len;
}
if (q->overflow_timeout)
@ -1923,15 +1925,29 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
if (q->buffer_used > q->buffer_max_used)
q->buffer_max_used = q->buffer_used;
if (q->buffer_used > q->buffer_limit) {
u32 dropped = 0;
if (q->buffer_used <= q->buffer_limit)
return NET_XMIT_SUCCESS;
while (q->buffer_used > q->buffer_limit) {
dropped++;
cake_drop(sch, to_free);
}
b->drop_overlimit += dropped;
prev_qlen = sch->q.qlen;
prev_backlog = sch->qstats.backlog;
while (q->buffer_used > q->buffer_limit) {
drop_id = cake_drop(sch, to_free);
if ((drop_id >> 16) == tin &&
(drop_id & 0xFFFF) == idx)
same_flow = true;
}
prev_qlen -= sch->q.qlen;
prev_backlog -= sch->qstats.backlog;
b->drop_overlimit += prev_qlen;
if (same_flow) {
qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
prev_backlog - len);
return NET_XMIT_CN;
}
qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
return NET_XMIT_SUCCESS;
}

View File

@ -1,3 +1,35 @@
* Sat Apr 11 2026 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [6.12.0-124.52.1.el10_1]
- md/raid1: fix data lost for writemostly rdev (Nigel Croxon) [RHEL-143660]
Resolves: RHEL-143660
* Thu Apr 09 2026 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [6.12.0-124.51.1.el10_1]
- smb: client: fix krb5 mount with username option (Paulo Alcantara) [RHEL-158989]
Resolves: RHEL-158989
* Thu Apr 02 2026 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [6.12.0-124.50.1.el10_1]
- scsi: qla2xxx: Fix improper freeing of purex item (CKI Backport Bot) [RHEL-159225] {CVE-2025-68741}
- net: hv_netvsc: reject RSS hash key programming without RX indirection table (Vinay Mulugund) [RHEL-150573]
- smb: client: fix oops due to uninitialised var in smb2_unlink() (Paulo Alcantara) [RHEL-154414]
- cifs: some missing initializations on replay (Paulo Alcantara) [RHEL-154414]
- smb: client: split cached_fid bitfields to avoid shared-byte RMW races (Paulo Alcantara) [RHEL-154414]
- cifs: make retry logic in read/write path consistent with other paths (Paulo Alcantara) [RHEL-154414]
- cifs: on replayable errors back-off before replay, not after (Paulo Alcantara) [RHEL-154414]
- smb: client: fix potential UAF and double free in smb2_open_file() (Paulo Alcantara) [RHEL-154414]
- smb/client: fix memory leak in smb2_open_file() (Paulo Alcantara) [RHEL-154414]
- netfs: avoid double increment of retry_count in subreq (Paulo Alcantara) [RHEL-154414]
- netfs: when subreq is marked for retry, do not check if it faced an error (Paulo Alcantara) [RHEL-154414]
- netfs: Fix early read unlock of page with EOF in middle (Paulo Alcantara) [RHEL-154414]
- netfs: fix reference leak (Paulo Alcantara) [RHEL-154414]
- netfs: Prevent duplicate unlocking (Paulo Alcantara) [RHEL-154414]
- netfs: Fix unbuffered write error handling (Paulo Alcantara) [RHEL-154414]
- net/sched: sch_cake: Fix incorrect qlen reduction in cake_drop (CKI Backport Bot) [RHEL-150451] {CVE-2025-39766}
- net/sched: Make cake_enqueue return NET_XMIT_CN when past buffer_limit (CKI Backport Bot) [RHEL-150451] {CVE-2025-39766}
- xfs: set max_agbno to allow sparse alloc of last full inode chunk (Brian Foster) [RHEL-142607]
- iommu/vt-d: Deduplicate cache_tag_flush_all by reusing flush_range (Jerry Snitselaar) [RHEL-144317]
- iommu/vt-d: Fix missing PASID in dev TLB flush with cache_tag_flush_all (Jerry Snitselaar) [RHEL-144317]
- i40e: validate ring_len parameter against hardware-specific values (CKI Backport Bot) [RHEL-141694]
Resolves: RHEL-141694, RHEL-142607, RHEL-144317, RHEL-150451, RHEL-150573, RHEL-154414, RHEL-159225
* Tue Mar 31 2026 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [6.12.0-124.49.1.el10_1]
- net/mlx5: Fix ECVF vports unload on shutdown flow (CKI Backport Bot) [RHEL-154540] {CVE-2025-38109}
- mm/damon/sysfs: cleanup attrs subdirs on context dir setup failure (Rafael Aquini) [RHEL-150480] {CVE-2026-23144}

View File

@ -1,3 +1,3 @@
sbat,1,SBAT Version,sbat,1,https://github.com/rhboot/shim/blob/main/SBAT.md
kernel-uki-virt-addons.centos,1,Red Hat,kernel-uki-virt-addons,6.12.0-124.49.1.el10.x86_64,mailto:secalert@redhat.com
kernel-uki-virt-addons.almalinux,1,AlmaLinux,kernel-uki-virt-addons,6.12.0-124.49.1.el10.x86_64,mailto:security@almalinux.org
kernel-uki-virt-addons.centos,1,Red Hat,kernel-uki-virt-addons,6.12.0-124.52.1.el10.x86_64,mailto:secalert@redhat.com
kernel-uki-virt-addons.almalinux,1,AlmaLinux,kernel-uki-virt-addons,6.12.0-124.52.1.el10.x86_64,mailto:security@almalinux.org

View File

@ -1,3 +1,3 @@
sbat,1,SBAT Version,sbat,1,https://github.com/rhboot/shim/blob/main/SBAT.md
kernel-uki-virt.centos,1,Red Hat,kernel-uki-virt,6.12.0-124.49.1.el10.x86_64,mailto:secalert@redhat.com
kernel-uki-virt.almalinux,1,AlmaLinux,kernel-uki-virt,6.12.0-124.49.1.el10.x86_64,mailto:security@almalinux.org
kernel-uki-virt.centos,1,Red Hat,kernel-uki-virt,6.12.0-124.52.1.el10.x86_64,mailto:secalert@redhat.com
kernel-uki-virt.almalinux,1,AlmaLinux,kernel-uki-virt,6.12.0-124.52.1.el10.x86_64,mailto:security@almalinux.org