Import of kernel-6.12.0-55.31.1.el10_0
parent e9ab258a78
commit 93a7f3b72f
@@ -12,7 +12,7 @@ RHEL_MINOR = 0
#
# Use this spot to avoid future merge conflicts.
# Do not trim this comment.
RHEL_RELEASE = 55.30.1
RHEL_RELEASE = 55.31.1
#
# RHEL_REBASE_NUM
@@ -340,20 +340,24 @@ static int add_or_reset_cxl_resource(struct resource *parent, struct resource *r

DEFINE_FREE(put_cxlrd, struct cxl_root_decoder *,
	    if (!IS_ERR_OR_NULL(_T)) put_device(&_T->cxlsd.cxld.dev))
DEFINE_FREE(del_cxl_resource, struct resource *, if (_T) del_cxl_resource(_T))
static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
			     struct cxl_cfmws_context *ctx)
static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
			   const unsigned long end)
{
	int target_map[CXL_DECODER_MAX_INTERLEAVE];
	struct cxl_cfmws_context *ctx = arg;
	struct cxl_port *root_port = ctx->root_port;
	struct cxl_cxims_context cxims_ctx;
	struct device *dev = ctx->dev;
	struct acpi_cedt_cfmws *cfmws;
	struct cxl_decoder *cxld;
	unsigned int ways, i, ig;
	int rc;

	cfmws = (struct acpi_cedt_cfmws *) header;

	rc = cxl_acpi_cfmws_verify(dev, cfmws);
	if (rc)
		return rc;
		return 0;

	rc = eiw_to_ways(cfmws->interleave_ways, &ways);
	if (rc)
@@ -378,7 +382,7 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
		cxl_root_decoder_alloc(root_port, ways);

	if (IS_ERR(cxlrd))
		return PTR_ERR(cxlrd);
		return 0;

	cxld = &cxlrd->cxlsd.cxld;
	cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
@@ -421,29 +425,16 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
	rc = cxl_decoder_add(cxld, target_map);
	if (rc)
		return rc;
	return cxl_root_decoder_autoremove(dev, no_free_ptr(cxlrd));
}
	rc = cxl_root_decoder_autoremove(dev, no_free_ptr(cxlrd));
	if (rc) {
		dev_err(dev, "Failed to add decode range: %pr", res);
		return rc;
	}
	dev_dbg(dev, "add: %s node: %d range [%#llx - %#llx]\n",
		dev_name(&cxld->dev),
		phys_to_target_node(cxld->hpa_range.start),
		cxld->hpa_range.start, cxld->hpa_range.end);

static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
			   const unsigned long end)
{
	struct acpi_cedt_cfmws *cfmws = (struct acpi_cedt_cfmws *)header;
	struct cxl_cfmws_context *ctx = arg;
	struct device *dev = ctx->dev;
	int rc;

	rc = __cxl_parse_cfmws(cfmws, ctx);
	if (rc)
		dev_err(dev,
			"Failed to add decode range: [%#llx - %#llx] (%d)\n",
			cfmws->base_hpa,
			cfmws->base_hpa + cfmws->window_size - 1, rc);
	else
		dev_dbg(dev, "decode range: node: %d range [%#llx - %#llx]\n",
			phys_to_target_node(cfmws->base_hpa), cfmws->base_hpa,
			cfmws->base_hpa + cfmws->window_size - 1);

	/* never fail cxl_acpi load for a single window failure */
	return 0;
}
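The cxl hunks above lean on the scope-based cleanup helpers from <linux/cleanup.h> (DEFINE_FREE(), __free(), no_free_ptr()). A minimal standalone sketch of that pattern follows; the names demo_free and demo_alloc are hypothetical, only the helper macros themselves are real kernel APIs:

#include <linux/cleanup.h>
#include <linux/slab.h>

/* Teach cleanup.h how to dispose of one kind of pointer. */
DEFINE_FREE(demo_free, void *, if (_T) kfree(_T))

static int demo_alloc(void **out)
{
	/* Freed automatically when 'buf' goes out of scope on any return path... */
	void *buf __free(demo_free) = kzalloc(64, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/*
	 * ...unless ownership is handed off with no_free_ptr(), the same way
	 * the cxl code passes no_free_ptr(cxlrd) once registration succeeds.
	 */
	*out = no_free_ptr(buf);
	return 0;
}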
@@ -359,7 +359,7 @@ static long udmabuf_create(struct miscdevice *device,
		return -ENOMEM;

	INIT_LIST_HEAD(&ubuf->unpin_list);
	pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
	pglimit = ((u64)size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
	for (i = 0; i < head->count; i++) {
		if (!PAGE_ALIGNED(list[i].offset))
			goto err;
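A quick illustration (not part of the commit) of why the (u64) cast in the udmabuf hunk matters: with a 32-bit size_limit_mb, the multiply wraps before the shift. Unsigned arithmetic and a 4 KiB page size are assumed here to keep the wraparound well defined:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int size_limit_mb = 4096;	/* hypothetical parameter value */
	const unsigned int page_shift = 12;	/* assumes 4 KiB pages */

	/* 4096 * 1024 * 1024 wraps to 0 in 32-bit arithmetic... */
	uint64_t bad = (size_limit_mb * 1024 * 1024) >> page_shift;
	/* ...but is 2^32 when widened first, giving the intended page limit. */
	uint64_t good = ((uint64_t)size_limit_mb * 1024 * 1024) >> page_shift;

	printf("bad=%llu good=%llu\n",
	       (unsigned long long)bad, (unsigned long long)good);
	return 0;
}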
@@ -860,11 +860,23 @@ void drm_framebuffer_free(struct kref *kref)
int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
			 const struct drm_framebuffer_funcs *funcs)
{
	unsigned int i;
	int ret;
	bool exists;

	if (WARN_ON_ONCE(fb->dev != dev || !fb->format))
		return -EINVAL;

	for (i = 0; i < fb->format->num_planes; i++) {
		if (drm_WARN_ON_ONCE(dev, fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i)))
			fb->internal_flags &= ~DRM_FRAMEBUFFER_HAS_HANDLE_REF(i);
		if (fb->obj[i]) {
			exists = drm_gem_object_handle_get_if_exists_unlocked(fb->obj[i]);
			if (exists)
				fb->internal_flags |= DRM_FRAMEBUFFER_HAS_HANDLE_REF(i);
		}
	}

	INIT_LIST_HEAD(&fb->filp_head);

	fb->funcs = funcs;
@@ -873,7 +885,7 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
	ret = __drm_mode_object_add(dev, &fb->base, DRM_MODE_OBJECT_FB,
				    false, drm_framebuffer_free);
	if (ret)
		goto out;
		goto err;

	mutex_lock(&dev->mode_config.fb_lock);
	dev->mode_config.num_fb++;
@@ -881,7 +893,16 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
	mutex_unlock(&dev->mode_config.fb_lock);

	drm_mode_object_register(dev, &fb->base);
out:

	return 0;

err:
	for (i = 0; i < fb->format->num_planes; i++) {
		if (fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i)) {
			drm_gem_object_handle_put_unlocked(fb->obj[i]);
			fb->internal_flags &= ~DRM_FRAMEBUFFER_HAS_HANDLE_REF(i);
		}
	}
	return ret;
}
EXPORT_SYMBOL(drm_framebuffer_init);
@@ -958,6 +979,12 @@ EXPORT_SYMBOL(drm_framebuffer_unregister_private);
void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
{
	struct drm_device *dev = fb->dev;
	unsigned int i;

	for (i = 0; i < fb->format->num_planes; i++) {
		if (fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i))
			drm_gem_object_handle_put_unlocked(fb->obj[i]);
	}

	mutex_lock(&dev->mode_config.fb_lock);
	list_del(&fb->head);
@@ -186,6 +186,46 @@ void drm_gem_private_object_fini(struct drm_gem_object *obj)
}
EXPORT_SYMBOL(drm_gem_private_object_fini);

static void drm_gem_object_handle_get(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_WARN_ON(dev, !mutex_is_locked(&dev->object_name_lock));

	if (obj->handle_count++ == 0)
		drm_gem_object_get(obj);
}

/**
 * drm_gem_object_handle_get_if_exists_unlocked - acquire reference on user-space handle, if any
 * @obj: GEM object
 *
 * Acquires a reference on the GEM buffer object's handle. Required to keep
 * the GEM object alive. Call drm_gem_object_handle_put_if_exists_unlocked()
 * to release the reference. Does nothing if the buffer object has no handle.
 *
 * Returns:
 * True if a handle exists, or false otherwise
 */
bool drm_gem_object_handle_get_if_exists_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	guard(mutex)(&dev->object_name_lock);

	/*
	 * First ref taken during GEM object creation, if any. Some
	 * drivers set up internal framebuffers with GEM objects that
	 * do not have a GEM handle. Hence, this counter can be zero.
	 */
	if (!obj->handle_count)
		return false;

	drm_gem_object_handle_get(obj);

	return true;
}

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
@@ -216,20 +256,26 @@ static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
	}
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
/**
 * drm_gem_object_handle_put_unlocked - releases reference on user-space handle
 * @obj: GEM object
 *
 * Releases a reference on the GEM buffer object's handle. Possibly releases
 * the GEM buffer object and associated dma-buf objects.
 */
void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	bool final = false;

	if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
	if (drm_WARN_ON(dev, READ_ONCE(obj->handle_count) == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name
	 */
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before
	 * we checked for a name.
	 */

	mutex_lock(&dev->object_name_lock);
	if (--obj->handle_count == 0) {
@@ -363,8 +409,8 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
	if (obj->handle_count++ == 0)
		drm_gem_object_get(obj);

	drm_gem_object_handle_get(obj);

	/*
	 * Get the user-visible handle using idr. Preload and perform
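The drm_gem hunk above uses guard(mutex) from <linux/cleanup.h>, which drops the lock automatically when the enclosing scope ends, so both the early "return false" and the final "return true" unlock. A small sketch of that idiom with hypothetical names (demo_lock, demo_count); only guard() and DEFINE_MUTEX() are real kernel APIs:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static unsigned int demo_count;

static bool demo_get_if_nonzero(void)
{
	guard(mutex)(&demo_lock);	/* released at end of scope */

	if (!demo_count)
		return false;		/* mutex dropped here, too */

	demo_count++;
	return true;			/* and here */
}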
@@ -153,6 +153,8 @@ void drm_sysfs_lease_event(struct drm_device *dev);

/* drm_gem.c */
int drm_gem_init(struct drm_device *dev);
bool drm_gem_object_handle_get_if_exists_unlocked(struct drm_gem_object *obj);
void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj);
int drm_gem_handle_create_tail(struct drm_file *file_priv,
			       struct drm_gem_object *obj,
			       u32 *handlep);
@@ -245,17 +245,19 @@ static int __init vkms_init(void)
	if (!config)
		return -ENOMEM;

	default_config = config;

	config->cursor = enable_cursor;
	config->writeback = enable_writeback;
	config->overlay = enable_overlay;

	ret = vkms_create(config);
	if (ret)
	if (ret) {
		kfree(config);
		return ret;
	}

	return ret;
	default_config = config;

	return 0;
}

static void vkms_destroy(struct vkms_config *config)
@@ -279,9 +281,10 @@ static void vkms_destroy(struct vkms_config *config)

static void __exit vkms_exit(void)
{
	if (default_config->dev)
		vkms_destroy(default_config);
	if (!default_config)
		return;

	vkms_destroy(default_config);
	kfree(default_config);
}
@@ -124,15 +124,18 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
	struct bio *bio = NULL;
	int ret;

	if (has_metadata && !supports_metadata)
		return -EINVAL;

	if (has_metadata && !supports_metadata) {
		ret = -EINVAL;
		goto out;
	}
	if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
		struct iov_iter iter;

		/* fixedbufs is only for non-vectored io */
		if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC))
			return -EINVAL;
		if (flags & NVME_IOCTL_VEC) {
			ret = -EINVAL;
			goto out;
		}
		ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
				rq_data_dir(req), &iter, ioucmd);
		if (ret < 0)
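The nvme hunk converts early returns into "ret = -EINVAL; goto out;", routing every failure through one unwind label. A generic sketch of that shape (not the nvme function itself; the struct, field and function names are placeholders):

#include <linux/errno.h>
#include <linux/types.h>

struct demo_ctx {
	int refs;
};

static int demo_setup(struct demo_ctx *ctx, bool want_metadata, bool supported)
{
	int ret = 0;

	ctx->refs++;			/* resource taken before any checks */

	if (want_metadata && !supported) {
		ret = -EINVAL;
		goto out;		/* still runs the unwind below */
	}

	/* ... further setup, each failure also jumping to 'out' ... */

out:
	if (ret)
		ctx->refs--;		/* undo the acquisition on any error */
	return ret;
}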
@@ -23,6 +23,7 @@
#ifndef __DRM_FRAMEBUFFER_H__
#define __DRM_FRAMEBUFFER_H__

#include <linux/bits.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/sched.h>
@@ -100,6 +101,8 @@ struct drm_framebuffer_funcs {
		     unsigned num_clips);
};

#define DRM_FRAMEBUFFER_HAS_HANDLE_REF(_i) BIT(0u + (_i))

/**
 * struct drm_framebuffer - frame buffer object
 *
@@ -188,6 +191,10 @@ struct drm_framebuffer {
	 * DRM_MODE_FB_MODIFIERS.
	 */
	int flags;
	/**
	 * @internal_flags: Framebuffer flags like DRM_FRAMEBUFFER_HAS_HANDLE_REF.
	 */
	unsigned int internal_flags;
	/**
	 * @filp_head: Placed on &drm_file.fbs, protected by &drm_file.fbs_lock.
	 */
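The new DRM_FRAMEBUFFER_HAS_HANDLE_REF(_i) macro reserves one bit of internal_flags per plane index. A tiny sketch of how such a per-index flag is set, tested and cleared; the demo names are hypothetical, only BIT() is the real helper:

#include <linux/bits.h>

#define DEMO_HAS_HANDLE_REF(_i)	BIT(0u + (_i))

static void demo_track_plane_refs(unsigned int *flags, unsigned int num_planes)
{
	unsigned int i;

	for (i = 0; i < num_planes; i++)
		*flags |= DEMO_HAS_HANDLE_REF(i);	/* mark plane i as holding a ref */

	if (*flags & DEMO_HAS_HANDLE_REF(0))		/* test plane 0 */
		*flags &= ~DEMO_HAS_HANDLE_REF(0);	/* drop plane 0's mark */
}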
@@ -689,21 +689,16 @@ u64 avg_vruntime(struct cfs_rq *cfs_rq)
 *
 * XXX could add max_slice to the augmented data to track this.
 */
static s64 entity_lag(u64 avruntime, struct sched_entity *se)
static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	s64 vlag, limit;

	vlag = avruntime - se->vruntime;
	limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);

	return clamp(vlag, -limit, limit);
}

static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	SCHED_WARN_ON(!se->on_rq);

	se->vlag = entity_lag(avg_vruntime(cfs_rq), se);
	vlag = avg_vruntime(cfs_rq) - se->vruntime;
	limit = calc_delta_fair(max_t(u64, 2*se->slice, TICK_NSEC), se);

	se->vlag = clamp(vlag, -limit, limit);
}

/*
@@ -3774,137 +3769,33 @@ static inline void
dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
#endif

static void reweight_eevdf(struct sched_entity *se, u64 avruntime,
			   unsigned long weight)
{
	unsigned long old_weight = se->load.weight;
	s64 vlag, vslice;

	/*
	 * VRUNTIME
	 * --------
	 *
	 * COROLLARY #1: The virtual runtime of the entity needs to be
	 * adjusted if re-weight at !0-lag point.
	 *
	 * Proof: For contradiction assume this is not true, so we can
	 * re-weight without changing vruntime at !0-lag point.
	 *
	 *             Weight   VRuntime   Avg-VRuntime
	 *     before     w        v           V
	 *      after     w'       v'          V'
	 *
	 * Since lag needs to be preserved through re-weight:
	 *
	 * lag = (V - v)*w = (V'- v')*w', where v = v'
	 * ==> V' = (V - v)*w/w' + v			(1)
	 *
	 * Let W be the total weight of the entities before reweight,
	 * since V' is the new weighted average of entities:
	 *
	 * V' = (WV + w'v - wv) / (W + w' - w)		(2)
	 *
	 * by using (1) & (2) we obtain:
	 *
	 * (WV + w'v - wv) / (W + w' - w) = (V - v)*w/w' + v
	 * ==> (WV-Wv+Wv+w'v-wv)/(W+w'-w) = (V - v)*w/w' + v
	 * ==> (WV - Wv)/(W + w' - w) + v = (V - v)*w/w' + v
	 * ==> (V - v)*W/(W + w' - w) = (V - v)*w/w'	(3)
	 *
	 * Since we are doing at !0-lag point which means V != v, we
	 * can simplify (3):
	 *
	 * ==> W / (W + w' - w) = w / w'
	 * ==> Ww' = Ww + ww' - ww
	 * ==> W * (w' - w) = w * (w' - w)
	 * ==> W = w	(re-weight indicates w' != w)
	 *
	 * So the cfs_rq contains only one entity, hence vruntime of
	 * the entity @v should always equal to the cfs_rq's weighted
	 * average vruntime @V, which means we will always re-weight
	 * at 0-lag point, thus breach assumption. Proof completed.
	 *
	 *
	 * COROLLARY #2: Re-weight does NOT affect weighted average
	 * vruntime of all the entities.
	 *
	 * Proof: According to corollary #1, Eq. (1) should be:
	 *
	 * (V - v)*w = (V' - v')*w'
	 * ==> v' = V' - (V - v)*w/w'			(4)
	 *
	 * According to the weighted average formula, we have:
	 *
	 * V' = (WV - wv + w'v') / (W - w + w')
	 *    = (WV - wv + w'(V' - (V - v)w/w')) / (W - w + w')
	 *    = (WV - wv + w'V' - Vw + wv) / (W - w + w')
	 *    = (WV + w'V' - Vw) / (W - w + w')
	 *
	 * ==> V'*(W - w + w') = WV + w'V' - Vw
	 * ==> V' * (W - w) = (W - w) * V		(5)
	 *
	 * If the entity is the only one in the cfs_rq, then reweight
	 * always occurs at 0-lag point, so V won't change. Or else
	 * there are other entities, hence W != w, then Eq. (5) turns
	 * into V' = V. So V won't change in either case, proof done.
	 *
	 *
	 * So according to corollary #1 & #2, the effect of re-weight
	 * on vruntime should be:
	 *
	 * v' = V' - (V - v) * w / w'			(4)
	 *    = V  - (V - v) * w / w'
	 *    = V  - vl * w / w'
	 *    = V  - vl'
	 */
	if (avruntime != se->vruntime) {
		vlag = entity_lag(avruntime, se);
		vlag = div_s64(vlag * old_weight, weight);
		se->vruntime = avruntime - vlag;
	}

	/*
	 * DEADLINE
	 * --------
	 *
	 * When the weight changes, the virtual time slope changes and
	 * we should adjust the relative virtual deadline accordingly.
	 *
	 * d' = v' + (d - v)*w/w'
	 *    = V' - (V - v)*w/w' + (d - v)*w/w'
	 *    = V  - (V - v)*w/w' + (d - v)*w/w'
	 *    = V  + (d - V)*w/w'
	 */
	vslice = (s64)(se->deadline - avruntime);
	vslice = div_s64(vslice * old_weight, weight);
	se->deadline = avruntime + vslice;
}
static void place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags);

static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
			    unsigned long weight)
{
	bool curr = cfs_rq->curr == se;
	u64 avruntime;

	if (se->on_rq) {
		/* commit outstanding execution time */
		update_curr(cfs_rq);
		avruntime = avg_vruntime(cfs_rq);
		update_entity_lag(cfs_rq, se);
		se->deadline -= se->vruntime;
		se->rel_deadline = 1;
		cfs_rq->nr_running--;
		if (!curr)
			__dequeue_entity(cfs_rq, se);
		update_load_sub(&cfs_rq->load, se->load.weight);
	}
	dequeue_load_avg(cfs_rq, se);

	if (se->on_rq) {
		reweight_eevdf(se, avruntime, weight);
	} else {
		/*
		 * Because we keep se->vlag = V - v_i, while: lag_i = w_i*(V - v_i),
		 * we need to scale se->vlag when w_i changes.
		 */
		se->vlag = div_s64(se->vlag * se->load.weight, weight);
	}
	/*
	 * Because we keep se->vlag = V - v_i, while: lag_i = w_i*(V - v_i),
	 * we need to scale se->vlag when w_i changes.
	 */
	se->vlag = div_s64(se->vlag * se->load.weight, weight);
	if (se->rel_deadline)
		se->deadline = div_s64(se->deadline * se->load.weight, weight);

	update_load_set(&se->load, weight);
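Restated in equation form, the relations that the removed reweight_eevdf() comment block derives (w and w' are the old and new weights, v and d the entity's vruntime and deadline, V the queue's weighted average vruntime, which corollary #2 shows is unchanged by the reweight):

\begin{aligned}
\text{lag preserved:}\quad & (V - v)\,w = (V - v')\,w'
  &&\Rightarrow\quad v' = V - (V - v)\,\frac{w}{w'} \\
\text{deadline rescaled:}\quad & d' = v' + (d - v)\,\frac{w}{w'}
  &&\Rightarrow\quad d' = V + (d - V)\,\frac{w}{w'}
\end{aligned}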
@@ -3918,9 +3809,11 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,

	enqueue_load_avg(cfs_rq, se);
	if (se->on_rq) {
		place_entity(cfs_rq, se, 0);
		update_load_add(&cfs_rq->load, se->load.weight);
		if (!curr)
			__enqueue_entity(cfs_rq, se);
		cfs_rq->nr_running++;

		/*
		 * The entity's vruntime has been adjusted, so let's check
@@ -4065,7 +3958,11 @@ static void update_cfs_group(struct sched_entity *se)
	struct cfs_rq *gcfs_rq = group_cfs_rq(se);
	long shares;

	if (!gcfs_rq)
	/*
	 * When a group becomes empty, preserve its weight. This matters for
	 * DELAY_DEQUEUE.
	 */
	if (!gcfs_rq || !gcfs_rq->load.weight)
		return;

	if (throttled_hierarchy(gcfs_rq))
@@ -5291,7 +5188,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
	 *
	 * EEVDF: placement strategy #1 / #2
	 */
	if (sched_feat(PLACE_LAG) && cfs_rq->nr_running) {
	if (sched_feat(PLACE_LAG) && cfs_rq->nr_running && se->vlag) {
		struct sched_entity *curr = cfs_rq->curr;
		unsigned long load;

@@ -5361,7 +5258,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)

	se->vruntime = vruntime - lag;

	if (sched_feat(PLACE_REL_DEADLINE) && se->rel_deadline) {
	if (se->rel_deadline) {
		se->deadline += se->vruntime;
		se->rel_deadline = 0;
		return;
@@ -784,15 +784,12 @@ static u32 qdisc_alloc_handle(struct net_device *dev)

void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
{
	bool qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED;
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	u32 parentid;
	bool notify;
	int drops;

	if (n == 0 && len == 0)
		return;
	drops = max_t(int, n, 0);
	rcu_read_lock();
	while ((parentid = sch->parent)) {
@@ -801,17 +798,8 @@ void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)

		if (sch->flags & TCQ_F_NOPARENT)
			break;
		/* Notify parent qdisc only if child qdisc becomes empty.
		 *
		 * If child was empty even before update then backlog
		 * counter is screwed and we skip notification because
		 * parent class is already passive.
		 *
		 * If the original child was offloaded then it is allowed
		 * to be seem as empty, so the parent is notified anyway.
		 */
		notify = !sch->q.qlen && !WARN_ON_ONCE(!n &&
						       !qdisc_is_offloaded);
		/* Notify parent qdisc only if child qdisc becomes empty. */
		notify = !sch->q.qlen;
		/* TODO: perform the search on a per txq basis */
		sch = qdisc_lookup_rcu(qdisc_dev(sch), TC_H_MAJ(parentid));
		if (sch == NULL) {
@@ -820,6 +808,9 @@ void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
		}
		cops = sch->ops->cl_ops;
		if (notify && cops->qlen_notify) {
			/* Note that qlen_notify must be idempotent as it may get called
			 * multiple times.
			 */
			cl = cops->find(sch, parentid);
			cops->qlen_notify(sch, cl);
		}
@@ -65,10 +65,7 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
			    &q->stats, qdisc_pkt_len, codel_get_enqueue_time,
			    drop_func, dequeue_func);

	/* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
	 * or HTB crashes. Defer it for next round.
	 */
	if (q->stats.drop_count && sch->q.qlen) {
	if (q->stats.drop_count) {
		qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
		q->stats.drop_count = 0;
		q->stats.drop_len = 0;
@@ -105,6 +105,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
		return -ENOBUFS;

	gnet_stats_basic_sync_init(&cl->bstats);
	INIT_LIST_HEAD(&cl->alist);
	cl->common.classid = classid;
	cl->quantum = quantum;
	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
@@ -229,7 +230,7 @@ static void drr_qlen_notify(struct Qdisc *csh, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	list_del(&cl->alist);
	list_del_init(&cl->alist);
}

static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
@@ -390,7 +391,7 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
		if (unlikely(skb == NULL))
			goto out;
		if (cl->qdisc->q.qlen == 0)
			list_del(&cl->alist);
			list_del_init(&cl->alist);

		bstats_update(&cl->bstats, skb);
		qdisc_bstats_update(sch, skb);
@@ -431,7 +432,7 @@ static void drr_reset_qdisc(struct Qdisc *sch)
	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (cl->qdisc->q.qlen)
				list_del(&cl->alist);
				list_del_init(&cl->alist);
			qdisc_reset(cl->qdisc);
		}
	}
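Several sch_* hunks in this import swap list_del() for list_del_init(). The point: list_del_init() leaves the node pointing at itself, so a repeated removal (for instance from a ->qlen_notify() that, per the new sch_api.c comment, must be idempotent) is harmless, whereas a second plain list_del() would follow poisoned pointers. A standalone sketch with a hypothetical class type:

#include <linux/list.h>

struct demo_class {
	struct list_head alist;
};

static void demo_qlen_notify(struct demo_class *cl)
{
	/*
	 * Safe to call more than once: after the first removal the node is
	 * re-initialised (empty), so a later call simply returns.
	 */
	if (list_empty(&cl->alist))
		return;

	list_del_init(&cl->alist);
}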
@@ -649,6 +649,12 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,

	sch_tree_lock(sch);

	for (i = nbands; i < oldbands; i++) {
		if (i >= q->nstrict && q->classes[i].qdisc->q.qlen)
			list_del_init(&q->classes[i].alist);
		qdisc_purge_queue(q->classes[i].qdisc);
	}

	WRITE_ONCE(q->nbands, nbands);
	for (i = nstrict; i < q->nstrict; i++) {
		if (q->classes[i].qdisc->q.qlen) {
@@ -656,11 +662,6 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
			q->classes[i].deficit = quanta[i];
		}
	}
	for (i = q->nbands; i < oldbands; i++) {
		if (i >= q->nstrict && q->classes[i].qdisc->q.qlen)
			list_del_init(&q->classes[i].alist);
		qdisc_tree_flush_backlog(q->classes[i].qdisc);
	}
	WRITE_ONCE(q->nstrict, nstrict);
	memcpy(q->prio2band, priomap, sizeof(priomap));
@@ -314,10 +314,8 @@ begin:
	}
	qdisc_bstats_update(sch, skb);
	flow->deficit -= qdisc_pkt_len(skb);
	/* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
	 * or HTB crashes. Defer it for next round.
	 */
	if (q->cstats.drop_count && sch->q.qlen) {

	if (q->cstats.drop_count) {
		qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
					  q->cstats.drop_len);
		q->cstats.drop_count = 0;
@@ -348,7 +348,8 @@ static void htb_add_to_wait_tree(struct htb_sched *q,
 */
static inline void htb_next_rb_node(struct rb_node **n)
{
	*n = rb_next(*n);
	if (*n)
		*n = rb_next(*n);
}

/**
@@ -609,8 +610,8 @@ static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
 */
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(!cl->prio_activity);

	if (!cl->prio_activity)
		return;
	htb_deactivate_prios(q, cl);
	cl->prio_activity = 0;
}
@@ -1738,8 +1739,7 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg,
	if (cl->parent)
		cl->parent->children--;

	if (cl->prio_activity)
		htb_deactivate(q, cl);
	htb_deactivate(q, cl);

	if (cl->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&cl->pq_node,
@@ -1947,8 +1947,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
		/* turn parent into inner node */
		qdisc_purge_queue(parent->leaf.q);
		parent_qdisc = parent->leaf.q;
		if (parent->prio_activity)
			htb_deactivate(q, parent);
		htb_deactivate(q, parent);

		/* remove from evt list because of level change */
		if (parent->cmode != HTB_CAN_SEND) {
|
||||
struct qfq_aggregate *agg = cl->agg;
|
||||
|
||||
|
||||
list_del(&cl->alist); /* remove from RR queue of the aggregate */
|
||||
list_del_init(&cl->alist); /* remove from RR queue of the aggregate */
|
||||
if (list_empty(&agg->active)) /* agg is now inactive */
|
||||
qfq_deactivate_agg(q, agg);
|
||||
}
|
||||
@ -477,6 +477,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
|
||||
gnet_stats_basic_sync_init(&cl->bstats);
|
||||
cl->common.classid = classid;
|
||||
cl->deficit = lmax;
|
||||
INIT_LIST_HEAD(&cl->alist);
|
||||
|
||||
cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
|
||||
classid, NULL);
|
||||
@ -985,7 +986,7 @@ static struct sk_buff *agg_dequeue(struct qfq_aggregate *agg,
|
||||
cl->deficit -= (int) len;
|
||||
|
||||
if (cl->qdisc->q.qlen == 0) /* no more packets, remove from list */
|
||||
list_del(&cl->alist);
|
||||
list_del_init(&cl->alist);
|
||||
else if (cl->deficit < qdisc_pkt_len(cl->qdisc->ops->peek(cl->qdisc))) {
|
||||
cl->deficit += agg->lmax;
|
||||
list_move_tail(&cl->alist, &agg->active);
|
||||
@ -1418,6 +1419,8 @@ static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
|
||||
struct qfq_sched *q = qdisc_priv(sch);
|
||||
struct qfq_class *cl = (struct qfq_class *)arg;
|
||||
|
||||
if (list_empty(&cl->alist))
|
||||
return;
|
||||
qfq_deactivate_class(q, cl);
|
||||
}
|
||||
|
||||
|