Import of kernel-4.18.0-553.75.1.el8_10

parent b32a85fff3
commit 4cdf272fc1
@@ -12,7 +12,7 @@ RHEL_MINOR = 10
 #
 # Use this spot to avoid future merge conflicts.
 # Do not trim this comment.
-RHEL_RELEASE = 553.74.1
+RHEL_RELEASE = 553.75.1
 
 #
 # ZSTREAM
@@ -844,11 +844,23 @@ void drm_framebuffer_free(struct kref *kref)
 int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
			 const struct drm_framebuffer_funcs *funcs)
 {
+	unsigned int i;
 	int ret;
+	bool exists;
 
 	if (WARN_ON_ONCE(fb->dev != dev || !fb->format))
 		return -EINVAL;
 
+	for (i = 0; i < fb->format->num_planes; i++) {
+		if (drm_WARN_ON_ONCE(dev, fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i)))
+			fb->internal_flags &= ~DRM_FRAMEBUFFER_HAS_HANDLE_REF(i);
+		if (fb->obj[i]) {
+			exists = drm_gem_object_handle_get_if_exists_unlocked(fb->obj[i]);
+			if (exists)
+				fb->internal_flags |= DRM_FRAMEBUFFER_HAS_HANDLE_REF(i);
+		}
+	}
+
 	INIT_LIST_HEAD(&fb->filp_head);
 
 	fb->funcs = funcs;
@@ -857,7 +869,7 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
 	ret = __drm_mode_object_add(dev, &fb->base, DRM_MODE_OBJECT_FB,
				    false, drm_framebuffer_free);
 	if (ret)
-		goto out;
+		goto err;
 
 	mutex_lock(&dev->mode_config.fb_lock);
 	dev->mode_config.num_fb++;
@@ -865,7 +877,16 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
 	mutex_unlock(&dev->mode_config.fb_lock);
 
 	drm_mode_object_register(dev, &fb->base);
-out:
+
+	return 0;
+
+err:
+	for (i = 0; i < fb->format->num_planes; i++) {
+		if (fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i)) {
+			drm_gem_object_handle_put_unlocked(fb->obj[i]);
+			fb->internal_flags &= ~DRM_FRAMEBUFFER_HAS_HANDLE_REF(i);
+		}
+	}
 	return ret;
 }
 EXPORT_SYMBOL(drm_framebuffer_init);
@@ -943,6 +964,13 @@ void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
 {
 	struct drm_device *dev = fb->dev;
 
+	unsigned int i;
+
+	for (i = 0; i < fb->format->num_planes; i++) {
+		if (fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i))
+			drm_gem_object_handle_put_unlocked(fb->obj[i]);
+	}
+
 	mutex_lock(&dev->mode_config.fb_lock);
 	list_del(&fb->head);
 	dev->mode_config.num_fb--;
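Note on the pattern above: each plane's GEM handle reference is recorded as a per-plane bit in fb->internal_flags, so the error path and drm_framebuffer_cleanup() drop only references that were actually taken. Below is a minimal sketch of that bookkeeping with invented names (not the driver's API); the get/put callbacks stand in for the real GEM helpers.

	#include <linux/bits.h>
	#include <linux/types.h>

	#define HAS_REF(i)	BIT(0u + (i))	/* one flag bit per plane, as in the patch */

	struct fb_like {
		unsigned int internal_flags;
		void *obj[4];
	};

	/* Try to take a reference per plane and remember which ones succeeded. */
	static void take_refs(struct fb_like *fb, unsigned int nplanes,
			      bool (*get_ref)(void *obj))
	{
		unsigned int i;

		for (i = 0; i < nplanes; i++) {
			if (fb->obj[i] && get_ref(fb->obj[i]))
				fb->internal_flags |= HAS_REF(i);
		}
	}

	/* Drop only the references recorded in internal_flags. */
	static void drop_refs(struct fb_like *fb, unsigned int nplanes,
			      void (*put_ref)(void *obj))
	{
		unsigned int i;

		for (i = 0; i < nplanes; i++) {
			if (fb->internal_flags & HAS_REF(i)) {
				put_ref(fb->obj[i]);
				fb->internal_flags &= ~HAS_REF(i);
			}
		}
	}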
@@ -183,6 +183,46 @@ void drm_gem_private_object_fini(struct drm_gem_object *obj)
 }
 EXPORT_SYMBOL(drm_gem_private_object_fini);
 
+static void drm_gem_object_handle_get(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+
+	drm_WARN_ON(dev, !mutex_is_locked(&dev->object_name_lock));
+
+	if (obj->handle_count++ == 0)
+		drm_gem_object_get(obj);
+}
+
+/**
+ * drm_gem_object_handle_get_if_exists_unlocked - acquire reference on user-space handle, if any
+ * @obj: GEM object
+ *
+ * Acquires a reference on the GEM buffer object's handle. Required to keep
+ * the GEM object alive. Call drm_gem_object_handle_put_if_exists_unlocked()
+ * to release the reference. Does nothing if the buffer object has no handle.
+ *
+ * Returns:
+ * True if a handle exists, or false otherwise
+ */
+bool drm_gem_object_handle_get_if_exists_unlocked(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+
+	guard(mutex)(&dev->object_name_lock);
+
+	/*
+	 * First ref taken during GEM object creation, if any. Some
+	 * drivers set up internal framebuffers with GEM objects that
+	 * do not have a GEM handle. Hence, this counter can be zero.
+	 */
+	if (!obj->handle_count)
+		return false;
+
+	drm_gem_object_handle_get(obj);
+
+	return true;
+}
+
 /**
  * drm_gem_object_handle_free - release resources bound to userspace handles
  * @obj: GEM object to clean up.
@@ -213,20 +253,26 @@ static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
 	}
 }
 
-static void
-drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
+/**
+ * drm_gem_object_handle_put_unlocked - releases reference on user-space handle
+ * @obj: GEM object
+ *
+ * Releases a reference on the GEM buffer object's handle. Possibly releases
+ * the GEM buffer object and associated dma-buf objects.
+ */
+void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	bool final = false;
 
-	if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
+	if (drm_WARN_ON(dev, READ_ONCE(obj->handle_count) == 0))
 		return;
 
 	/*
-	* Must bump handle count first as this may be the last
-	* ref, in which case the object would disappear before we
-	* checked for a name
-	*/
+	 * Must bump handle count first as this may be the last
+	 * ref, in which case the object would disappear before
+	 * we checked for a name
+	 */
 
 	mutex_lock(&dev->object_name_lock);
 	if (--obj->handle_count == 0) {
@@ -367,8 +413,8 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
 	int ret;
 
 	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
-	if (obj->handle_count++ == 0)
-		drm_gem_object_get(obj);
+
+	drm_gem_object_handle_get(obj);
 
 	/*
 	 * Get the user-visible handle using idr. Preload and perform
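Note: drm_gem_object_handle_get_if_exists_unlocked() above relies on guard(mutex) from <linux/cleanup.h>, which releases the mutex automatically whenever the enclosing scope is left, so every return path is covered without explicit unlock calls. A minimal sketch of the idiom with a made-up lock and counter:

	#include <linux/cleanup.h>
	#include <linux/mutex.h>
	#include <linux/types.h>

	static DEFINE_MUTEX(example_lock);
	static unsigned int example_count;

	/* Returns true if a count was already present, taking another reference. */
	static bool example_get_if_exists(void)
	{
		guard(mutex)(&example_lock);	/* unlocked automatically on return */

		if (!example_count)
			return false;		/* early return still drops the lock */

		example_count++;
		return true;
	}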
@@ -159,6 +159,8 @@ void drm_sysfs_lease_event(struct drm_device *dev);
 
 /* drm_gem.c */
 int drm_gem_init(struct drm_device *dev);
+bool drm_gem_object_handle_get_if_exists_unlocked(struct drm_gem_object *obj);
+void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj);
 int drm_gem_handle_create_tail(struct drm_file *file_priv,
			       struct drm_gem_object *obj,
			       u32 *handlep);
@@ -96,7 +96,7 @@ static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
  */
 static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
 {
-	mutex_lock(&cq->cq_lock);
+	spin_lock(&cq->cq_lock);
 
 	/* free ring buffers and the ring itself */
 	idpf_ctlq_dealloc_ring_res(hw, cq);
@@ -104,8 +104,7 @@ static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
 	/* Set ring_size to 0 to indicate uninitialized queue */
 	cq->ring_size = 0;
 
-	mutex_unlock(&cq->cq_lock);
-	mutex_destroy(&cq->cq_lock);
+	spin_unlock(&cq->cq_lock);
 }
 
 /**
@@ -173,7 +172,7 @@ int idpf_ctlq_add(struct idpf_hw *hw,
 
 	idpf_ctlq_init_regs(hw, cq, is_rxq);
 
-	mutex_init(&cq->cq_lock);
+	spin_lock_init(&cq->cq_lock);
 
 	list_add(&cq->cq_list, &hw->cq_list_head);
 
@@ -272,7 +271,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 	int err = 0;
 	int i;
 
-	mutex_lock(&cq->cq_lock);
+	spin_lock(&cq->cq_lock);
 
 	/* Ensure there are enough descriptors to send all messages */
 	num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
@@ -332,7 +331,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 	wr32(hw, cq->reg.tail, cq->next_to_use);
 
 err_unlock:
-	mutex_unlock(&cq->cq_lock);
+	spin_unlock(&cq->cq_lock);
 
 	return err;
 }
@@ -364,7 +363,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
 	if (*clean_count > cq->ring_size)
 		return -EBADR;
 
-	mutex_lock(&cq->cq_lock);
+	spin_lock(&cq->cq_lock);
 
 	ntc = cq->next_to_clean;
 
@@ -397,7 +396,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
 
 	cq->next_to_clean = ntc;
 
-	mutex_unlock(&cq->cq_lock);
+	spin_unlock(&cq->cq_lock);
 
 	/* Return number of descriptors actually cleaned */
 	*clean_count = i;
@@ -435,7 +434,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
 	if (*buff_count > 0)
 		buffs_avail = true;
 
-	mutex_lock(&cq->cq_lock);
+	spin_lock(&cq->cq_lock);
 
 	if (tbp >= cq->ring_size)
 		tbp = 0;
@@ -524,7 +523,7 @@ post_buffs_out:
 		wr32(hw, cq->reg.tail, cq->next_to_post);
 	}
 
-	mutex_unlock(&cq->cq_lock);
+	spin_unlock(&cq->cq_lock);
 
 	/* return the number of buffers that were not posted */
 	*buff_count = *buff_count - i;
@@ -552,7 +551,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
 	u16 i;
 
 	/* take the lock before we start messing with the ring */
-	mutex_lock(&cq->cq_lock);
+	spin_lock(&cq->cq_lock);
 
 	ntc = cq->next_to_clean;
 
@@ -614,7 +613,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
 
 	cq->next_to_clean = ntc;
 
-	mutex_unlock(&cq->cq_lock);
+	spin_unlock(&cq->cq_lock);
 
 	*num_q_msg = i;
 	if (*num_q_msg == 0)
@@ -99,7 +99,7 @@ struct idpf_ctlq_info {
 
 	enum idpf_ctlq_type cq_type;
 	int q_id;
-	struct mutex cq_lock;	/* control queue lock */
+	spinlock_t cq_lock;	/* control queue lock */
 	/* used for interrupt processing */
 	u16 next_to_use;
 	u16 next_to_clean;
@@ -2338,8 +2338,12 @@ void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem, u64 size)
 	struct idpf_adapter *adapter = hw->back;
 	size_t sz = ALIGN(size, 4096);
 
-	mem->va = dma_alloc_coherent(&adapter->pdev->dev, sz,
-				     &mem->pa, GFP_KERNEL);
+	/* The control queue resources are freed under a spinlock, contiguous
+	 * pages will avoid IOMMU remapping and the use vmap (and vunmap in
+	 * dma_free_*() path.
+	 */
+	mem->va = dma_alloc_attrs(&adapter->pdev->dev, sz, &mem->pa,
				  GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS);
 	mem->size = sz;
 
 	return mem->va;
@@ -2354,8 +2358,8 @@ void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem)
 {
 	struct idpf_adapter *adapter = hw->back;
 
-	dma_free_coherent(&adapter->pdev->dev, mem->size,
-			  mem->va, mem->pa);
+	dma_free_attrs(&adapter->pdev->dev, mem->size,
		       mem->va, mem->pa, DMA_ATTR_FORCE_CONTIGUOUS);
 	mem->size = 0;
 	mem->va = NULL;
 	mem->pa = 0;
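Note: with cq_lock now a spinlock, the control-queue teardown can no longer sleep, which is why the DMA memory above is allocated with DMA_ATTR_FORCE_CONTIGUOUS (physically contiguous pages keep the free path free of vunmap work). A hedged sketch of the attrs-based alloc/free pairing; the device pointer and sizes are placeholders:

	#include <linux/dma-mapping.h>
	#include <linux/kernel.h>

	struct example_dma_buf {
		void		*va;
		dma_addr_t	pa;
		size_t		size;
	};

	static int example_alloc(struct device *dev, struct example_dma_buf *buf,
				 size_t size)
	{
		buf->size = ALIGN(size, 4096);
		/* Force contiguous pages so the free path avoids vunmap(). */
		buf->va = dma_alloc_attrs(dev, buf->size, &buf->pa, GFP_KERNEL,
					  DMA_ATTR_FORCE_CONTIGUOUS);
		return buf->va ? 0 : -ENOMEM;
	}

	static void example_free(struct device *dev, struct example_dma_buf *buf)
	{
		dma_free_attrs(dev, buf->size, buf->va, buf->pa,
			       DMA_ATTR_FORCE_CONTIGUOUS);
		buf->va = NULL;
		buf->size = 0;
		buf->pa = 0;
	}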
@@ -2213,6 +2213,13 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 		size = (rx_cmd_a & RX_CMD_A_LEN) - RXW_PADDING;
 		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
 
+		if (unlikely(size > skb->len)) {
+			netif_dbg(dev, rx_err, dev->net,
+				  "size err rx_cmd_a=0x%08x\n",
+				  rx_cmd_a);
+			return 0;
+		}
+
 		if (unlikely(rx_cmd_a & RX_CMD_A_RED)) {
 			netif_dbg(dev, rx_err, dev->net,
 				  "Error rx_cmd_a=0x%08x\n", rx_cmd_a);
@@ -1878,8 +1878,8 @@ vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
 					  rq->data_ring.base,
 					  rq->data_ring.basePA);
 			rq->data_ring.base = NULL;
-			rq->data_ring.desc_size = 0;
 		}
+		rq->data_ring.desc_size = 0;
 	}
 }
 
@@ -993,7 +993,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
 {
 	struct pci_dev *dev;
 	resource_size_t min_align, align, size, size0, size1;
-	resource_size_t aligns[18]; /* Alignments from 1MB to 128GB */
+	resource_size_t aligns[24]; /* Alignments from 1MB to 8TB */
 	int order, max_order;
 	struct resource *b_res = find_bus_resource_of_type(bus,
					mask | IORESOURCE_PREFETCH, type);
@@ -613,7 +613,6 @@ xfs_attr_rmtval_set_blk(
 	if (error)
 		return error;
 
-	ASSERT(nmap == 1);
 	ASSERT((map->br_startblock != DELAYSTARTBLOCK) &&
	       (map->br_startblock != HOLESTARTBLOCK));
 
@@ -4114,8 +4114,10 @@ xfs_bmapi_allocate(
 	} else {
 		error = xfs_bmap_alloc_userdata(bma);
 	}
-	if (error || bma->blkno == NULLFSBLOCK)
+	if (error)
 		return error;
+	if (bma->blkno == NULLFSBLOCK)
+		return -ENOSPC;
 
 	if (bma->flags & XFS_BMAPI_ZERO) {
 		error = xfs_zero_extent(bma->ip, bma->blkno, bma->length);
@@ -4295,6 +4297,15 @@ xfs_bmapi_finish(
  * extent state if necessary. Details behaviour is controlled by the flags
  * parameter. Only allocates blocks from a single allocation group, to avoid
  * locking problems.
+ *
+ * Returns 0 on success and places the extent mappings in mval. nmaps is used
+ * as an input/output parameter where the caller specifies the maximum number
+ * of mappings that may be returned and xfs_bmapi_write passes back the number
+ * of mappings (including existing mappings) it found.
+ *
+ * Returns a negative error code on failure, including -ENOSPC when it could not
+ * allocate any blocks and -ENOSR when it did allocate blocks to convert a
+ * delalloc range, but those blocks were before the passed in range.
  */
 int
 xfs_bmapi_write(
@@ -4423,10 +4434,16 @@ xfs_bmapi_write(
 			ASSERT(len > 0);
 			ASSERT(bma.length > 0);
 			error = xfs_bmapi_allocate(&bma);
-			if (error)
+			if (error) {
+				/*
+				 * If we already allocated space in a previous
+				 * iteration return what we go so far when
+				 * running out of space.
+				 */
+				if (error == -ENOSPC && bma.nallocs)
+					break;
 				goto error0;
-			if (bma.blkno == NULLFSBLOCK)
-				break;
+			}
 
 			/*
 			 * If this is a CoW allocation, record the data in
@@ -4464,7 +4481,6 @@ xfs_bmapi_write(
 		if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got))
 			eof = true;
 	}
-	*nmap = n;
 
 	error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
		whichfork);
@@ -4475,7 +4491,22 @@ xfs_bmapi_write(
	       ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork));
 	xfs_bmapi_finish(&bma, whichfork, 0);
 	xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
-			      orig_nmap, *nmap);
+			      orig_nmap, n);
+
+	/*
+	 * When converting delayed allocations, xfs_bmapi_allocate ignores
+	 * the passed in bno and always converts from the start of the found
+	 * delalloc extent.
+	 *
+	 * To avoid a successful return with *nmap set to 0, return the magic
+	 * -ENOSR error code for this particular case so that the caller can
+	 * handle it.
+	 */
+	if (!n) {
+		ASSERT(bma.nallocs >= *nmap);
+		return -ENOSR;
+	}
+	*nmap = n;
 	return 0;
 error0:
 	xfs_bmapi_finish(&bma, whichfork, error);
@@ -4579,9 +4610,6 @@ xfs_bmapi_convert_delalloc(
 	if (error)
 		goto out_finish;
 
-	error = -ENOSPC;
-	if (WARN_ON_ONCE(bma.blkno == NULLFSBLOCK))
-		goto out_finish;
 	error = -EFSCORRUPTED;
 	if (WARN_ON_ONCE(!xfs_valid_startblock(ip, bma.got.br_startblock)))
 		goto out_finish;
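Note: after this change xfs_bmapi_write() never returns success with *nmap == 0; it reports "nothing allocated" as -ENOSPC and "blocks allocated, but before the requested offset" as -ENOSR. A sketch of the caller-side pattern this enables, using invented helper names rather than the real XFS interfaces, and assuming the allocator eventually makes forward progress:

	#include <linux/errno.h>

	/* Illustrative only -- not the real XFS calling convention. */
	struct ex_map {
		unsigned long long	blockcount;	/* blocks covered on success */
	};

	/* Hypothetical allocator with the same error contract as xfs_bmapi_write(). */
	int ex_bmapi_write(unsigned long long off, unsigned long long len,
			   struct ex_map *map);

	static int ex_alloc_range(unsigned long long off, unsigned long long len)
	{
		struct ex_map map;
		int error;

		while (len > 0) {
			error = ex_bmapi_write(off, len, &map);
			if (error == -ENOSR)
				continue;	/* blocks landed before 'off'; retry same offset */
			if (error)
				return error;	/* -ENOSPC etc.: nothing usable was allocated */
			off += map.blockcount;
			len -= map.blockcount;
		}
		return 0;
	}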
@@ -2146,8 +2146,8 @@ xfs_da_grow_inode_int(
 	struct xfs_inode	*dp = args->dp;
 	int			w = args->whichfork;
 	xfs_rfsblock_t		nblks = dp->i_nblocks;
-	struct xfs_bmbt_irec	map, *mapp;
-	int			nmap, error, got, i, mapi;
+	struct xfs_bmbt_irec	map, *mapp = &map;
+	int			nmap, error, got, i, mapi = 1;
 
 	/*
 	 * Find a spot in the file space to put the new block.
@@ -2163,14 +2163,8 @@ xfs_da_grow_inode_int(
 	error = xfs_bmapi_write(tp, dp, *bno, count,
			xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
			args->total, &map, &nmap);
-	if (error)
-		return error;
 
-	ASSERT(nmap <= 1);
-	if (nmap == 1) {
-		mapp = &map;
-		mapi = 1;
-	} else if (nmap == 0 && count > 1) {
+	if (error == -ENOSPC && count > 1) {
 		xfs_fileoff_t	b;
 		int		c;
 
@@ -2187,16 +2181,13 @@ xfs_da_grow_inode_int(
					args->total, &mapp[mapi], &nmap);
			if (error)
				goto out_free_map;
-			if (nmap < 1)
-				break;
			mapi += nmap;
			b = mapp[mapi - 1].br_startoff +
			    mapp[mapi - 1].br_blockcount;
		}
-	} else {
-		mapi = 0;
-		mapp = NULL;
	}
+	if (error)
+		goto out_free_map;
 
	/*
	 * Count the blocks we got, make sure it matches the total.
@@ -778,12 +778,10 @@ xfs_alloc_file_space(
 {
 	xfs_mount_t		*mp = ip->i_mount;
 	xfs_off_t		count;
-	xfs_filblks_t		allocated_fsb;
 	xfs_filblks_t		allocatesize_fsb;
 	xfs_extlen_t		extsz, temp;
 	xfs_fileoff_t		startoffset_fsb;
 	xfs_fileoff_t		endoffset_fsb;
-	int			nimaps;
 	int			rt;
 	xfs_trans_t		*tp;
 	xfs_bmbt_irec_t		imaps[1], *imapp;
@@ -806,7 +804,6 @@ xfs_alloc_file_space(
 
 	count = len;
 	imapp = &imaps[0];
-	nimaps = 1;
 	startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
 	endoffset_fsb = XFS_B_TO_FSB(mp, offset + count);
 	allocatesize_fsb = endoffset_fsb - startoffset_fsb;
@@ -817,6 +814,7 @@ xfs_alloc_file_space(
 	while (allocatesize_fsb && !error) {
 		xfs_fileoff_t	s, e;
 		unsigned int	dblocks, rblocks, resblks;
+		int		nimaps = 1;
 
 		/*
 		 * Determine space reservations for data/realtime.
@@ -863,29 +861,32 @@ xfs_alloc_file_space(
 		if (error)
 			goto error;
 
+		/*
+		 * If the allocator cannot find a single free extent large
+		 * enough to cover the start block of the requested range,
+		 * xfs_bmapi_write will return -ENOSR.
+		 *
+		 * In that case we simply need to keep looping with the same
+		 * startoffset_fsb so that one of the following allocations
+		 * will eventually reach the requested range.
+		 */
 		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
-				allocatesize_fsb, XFS_BMAPI_PREALLOC, 0, imapp,
-				&nimaps);
-		if (error)
-			goto error;
+				allocatesize_fsb, XFS_BMAPI_PREALLOC, 0, imapp,
+				&nimaps);
+		if (error) {
+			if (error != -ENOSR)
+				goto error;
+			error = 0;
+		} else {
+			startoffset_fsb += imapp->br_blockcount;
+			allocatesize_fsb -= imapp->br_blockcount;
+		}
 
 		ip->i_diflags |= XFS_DIFLAG_PREALLOC;
 		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 
 		error = xfs_trans_commit(tp);
 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
 		if (error)
 			break;
-
-		allocated_fsb = imapp->br_blockcount;
-
-		if (nimaps == 0) {
-			error = -ENOSPC;
-			break;
-		}
-
-		startoffset_fsb += allocated_fsb;
-		allocatesize_fsb -= allocated_fsb;
 	}
 
 	return error;
@@ -328,7 +328,6 @@ xfs_dquot_disk_alloc(
 	if (error)
 		return error;
 	ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
-	ASSERT(nmaps == 1);
 	ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
	       (map.br_startblock != HOLESTARTBLOCK));
 
@@ -300,14 +300,6 @@ xfs_iomap_write_direct(
 	if (error)
 		goto out_unlock;
 
-	/*
-	 * Copy any maps to caller's array and return any error.
-	 */
-	if (nimaps == 0) {
-		error = -ENOSPC;
-		goto out_unlock;
-	}
-
 	if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock)))
 		error = xfs_alert_fsblock_zero(ip, imap);
 
@@ -428,13 +428,6 @@ xfs_reflink_fill_cow_hole(
 	if (error)
 		return error;
 
-	/*
-	 * Allocation succeeded but the requested range was not even partially
-	 * satisfied? Bail out!
-	 */
-	if (nimaps == 0)
-		return -ENOSPC;
-
 convert:
 	return xfs_reflink_convert_unwritten(ip, imap, cmap, convert_now);
 
@@ -498,12 +491,6 @@ xfs_reflink_fill_delalloc(
 		if (error)
 			return error;
 
-		/*
-		 * Allocation succeeded but the requested range was not even
-		 * partially satisfied? Bail out!
-		 */
-		if (nimaps == 0)
-			return -ENOSPC;
 	} while (cmap->br_startoff + cmap->br_blockcount <= imap->br_startoff);
 
 	return xfs_reflink_convert_unwritten(ip, imap, cmap, convert_now);
@@ -815,8 +815,6 @@ xfs_growfs_rt_alloc(
 		nmap = 1;
 		error = xfs_bmapi_write(tp, ip, oblocks, nblocks - oblocks,
				XFS_BMAPI_METADATA, 0, &map, &nmap);
-		if (!error && nmap < 1)
-			error = -ENOSPC;
 		if (error)
 			goto out_trans_cancel;
 		/*
@@ -23,6 +23,7 @@
 #ifndef __DRM_FRAMEBUFFER_H__
 #define __DRM_FRAMEBUFFER_H__
 
+#include <linux/bits.h>
 #include <linux/ctype.h>
 #include <linux/list.h>
 #include <linux/sched.h>
@@ -100,6 +101,8 @@ struct drm_framebuffer_funcs {
 			     unsigned num_clips);
 };
 
+#define DRM_FRAMEBUFFER_HAS_HANDLE_REF(_i)	BIT(0u + (_i))
+
 /**
  * struct drm_framebuffer - frame buffer object
  *
@@ -193,6 +196,11 @@ struct drm_framebuffer {
 	 * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR
 	 * universal plane.
 	 */
+	/**
+	 * @internal_flags: Framebuffer flags like DRM_FRAMEBUFFER_HAS_HANDLE_REF.
+	 */
+	unsigned int internal_flags;
+
 	int hot_x;
 	/**
 	 * @hot_y: Y coordinate of the cursor hotspot. Used by the legacy cursor
@@ -1635,7 +1635,6 @@ extern struct pid *cad_pid;
 #define PF_MEMALLOC		0x00000800	/* Allocating memory */
 #define PF_NPROC_EXCEEDED	0x00001000	/* set_user() noticed that RLIMIT_NPROC was exceeded */
 #define PF_USED_MATH		0x00002000	/* If unset the fpu must be initialized before use */
-#define PF_USED_ASYNC		0x00004000	/* Used async_schedule*(), used by module init */
 #define PF_NOFREEZE		0x00008000	/* This thread should not be frozen */
 #define PF_FROZEN		0x00010000	/* Frozen for system suspend */
 #define PF_KSWAPD		0x00020000	/* I am kswapd */
@@ -209,9 +209,6 @@ async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
 	atomic_inc(&entry_count);
 	spin_unlock_irqrestore(&async_lock, flags);
 
-	/* mark that this task has queued an async job, used by module init */
-	current->flags |= PF_USED_ASYNC;
-
 	/* schedule for execution */
 	queue_work_node(node, system_unbound_wq, &entry->work);
 
@@ -3519,12 +3519,6 @@ static noinline int do_init_module(struct module *mod)
 	}
 	freeinit->module_init = mod->init_layout.base;
 
-	/*
-	 * We want to find out whether @mod uses async during init. Clear
-	 * PF_USED_ASYNC. async_schedule*() will set it.
-	 */
-	current->flags &= ~PF_USED_ASYNC;
-
 	do_mod_ctors(mod);
 	/* Start the module */
 	if (mod->init != NULL)
@@ -3547,22 +3541,13 @@ static noinline int do_init_module(struct module *mod)
 
 	/*
 	 * We need to finish all async code before the module init sequence
-	 * is done. This has potential to deadlock. For example, a newly
-	 * detected block device can trigger request_module() of the
-	 * default iosched from async probing task. Once userland helper
-	 * reaches here, async_synchronize_full() will wait on the async
-	 * task waiting on request_module() and deadlock.
+	 * is done. This has potential to deadlock if synchronous module
+	 * loading is requested from async (which is not allowed!).
 	 *
-	 * This deadlock is avoided by perfomring async_synchronize_full()
-	 * iff module init queued any async jobs. This isn't a full
-	 * solution as it will deadlock the same if module loading from
-	 * async jobs nests more than once; however, due to the various
-	 * constraints, this hack seems to be the best option for now.
-	 * Please refer to the following thread for details.
-	 *
-	 * http://thread.gmane.org/gmane.linux.kernel/1420814
+	 * See commit 0fdff3ec6d87 ("async, kmod: warn on synchronous
+	 * request_module() from async workers") for more details.
 	 */
-	if (!mod->async_probe_requested && (current->flags & PF_USED_ASYNC))
+	if (!mod->async_probe_requested)
 		async_synchronize_full();
 
 	ftrace_free_mem(mod, mod->init_layout.base, mod->init_layout.base +
@@ -1868,6 +1868,11 @@ static int *tgid_map;
 
 #define SAVED_CMDLINES_DEFAULT 128
 #define NO_CMDLINE_MAP UINT_MAX
+/*
+ * Preemption must be disabled before acquiring trace_cmdline_lock.
+ * The various trace_arrays' max_lock must be acquired in a context
+ * where interrupt is disabled.
+ */
 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 struct saved_cmdlines_buffer {
 	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
@@ -2083,7 +2088,11 @@ static int trace_save_cmdline(struct task_struct *tsk)
 	 * the lock, but we also don't want to spin
 	 * nor do we want to disable interrupts,
 	 * so if we miss here, then better luck next time.
+	 *
+	 * This is called within the scheduler and wake up, so interrupts
+	 * had better been disabled and run queue lock been held.
 	 */
+	lockdep_assert_preemption_disabled();
 	if (!arch_spin_trylock(&trace_cmdline_lock))
 		return 0;
 
@@ -4927,9 +4936,11 @@ tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
 	char buf[64];
 	int r;
 
+	preempt_disable();
 	arch_spin_lock(&trace_cmdline_lock);
 	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
 	arch_spin_unlock(&trace_cmdline_lock);
+	preempt_enable();
 
 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
 }
@@ -4954,10 +4965,12 @@ static int tracing_resize_saved_cmdlines(unsigned int val)
 		return -ENOMEM;
 	}
 
+	preempt_disable();
 	arch_spin_lock(&trace_cmdline_lock);
 	savedcmd_temp = savedcmd;
 	savedcmd = s;
 	arch_spin_unlock(&trace_cmdline_lock);
+	preempt_enable();
 	free_saved_cmdlines_buffer(savedcmd_temp);
 
 	return 0;
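Note: the trace.c hunks wrap every arch_spin_lock(&trace_cmdline_lock) in preempt_disable()/preempt_enable(), since arch_spinlock_t is a raw lock that does not disable preemption by itself. A minimal sketch of the pairing with stand-in data:

	#include <linux/preempt.h>
	#include <linux/spinlock.h>

	static arch_spinlock_t example_lock = __ARCH_SPIN_LOCK_UNLOCKED;
	static unsigned int example_value;

	static unsigned int example_read(void)
	{
		unsigned int v;

		preempt_disable();		/* arch_spin_lock() does not do this for us */
		arch_spin_lock(&example_lock);
		v = example_value;
		arch_spin_unlock(&example_lock);
		preempt_enable();

		return v;
	}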
@@ -1385,9 +1385,21 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 	/*
 	 * Ensure proper count is passed which otherwise would stuck in the
 	 * below while (list_empty(list)) loop.
+	 *
+	 * RHEL-85453: count should never be negative, but if it is, we will
+	 * risk getting ourselves in the same predicament described above,
+	 * and the clamping below will not be of any use to us, unfortunately.
+	 * Upstream has indirectly addressed this condition when commit
+	 * 44042b449872 ("mm/page_alloc: allow high-order pages to be stored
+	 * on the per-cpu lists") changed the invariant for the outer while
+	 * loop, preventing it from being executed when count <= 0.
+	 * We do the same here, but we also include a warn_on assertion for
+	 * allowing us to capture a coredump on the rare occasions where
+	 * count would become less than zero.
 	 */
 	count = min(pcp->count, count);
-	while (count) {
+	WARN_ON_ONCE(unlikely(count < 0));
+	while (count > 0) {
 		struct list_head *list;
 
 		/*
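Note: the page allocator change clamps count and makes the loop condition strictly positive, so a corrupted negative count can only trigger a one-time warning instead of keeping the loop spinning. A tiny stand-alone sketch of the same guard (not the mm code itself):

	#include <linux/bug.h>
	#include <linux/kernel.h>

	static void example_drain(int count, int available)
	{
		/* Never drain more than what is actually on the list. */
		count = min(available, count);

		WARN_ON_ONCE(count < 0);	/* record the broken caller once */

		while (count > 0) {		/* a negative count now simply skips the loop */
			/* ... free one element ... */
			count--;
		}
	}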
@@ -781,15 +781,12 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
 
 void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
 {
-	bool qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED;
 	const struct Qdisc_class_ops *cops;
 	unsigned long cl;
 	u32 parentid;
 	bool notify;
 	int drops;
 
 	if (n == 0 && len == 0)
 		return;
 	drops = max_t(int, n, 0);
 	rcu_read_lock();
 	while ((parentid = sch->parent)) {
@@ -798,17 +795,8 @@ void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
 
 		if (sch->flags & TCQ_F_NOPARENT)
 			break;
-		/* Notify parent qdisc only if child qdisc becomes empty.
-		 *
-		 * If child was empty even before update then backlog
-		 * counter is screwed and we skip notification because
-		 * parent class is already passive.
-		 *
-		 * If the original child was offloaded then it is allowed
-		 * to be seem as empty, so the parent is notified anyway.
-		 */
-		notify = !sch->q.qlen && !WARN_ON_ONCE(!n &&
-						       !qdisc_is_offloaded);
+		/* Notify parent qdisc only if child qdisc becomes empty. */
+		notify = !sch->q.qlen;
 		/* TODO: perform the search on a per txq basis */
 		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
 		if (sch == NULL) {
@@ -817,6 +805,9 @@ void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
 		}
 		cops = sch->ops->cl_ops;
 		if (notify && cops->qlen_notify) {
+			/* Note that qlen_notify must be idempotent as it may get called
+			 * multiple times.
+			 */
 			cl = cops->find(sch, parentid);
 			cops->qlen_notify(sch, cl);
 		}
@@ -1342,6 +1342,8 @@ static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg)
 {
 	struct cbq_class *cl = (struct cbq_class *)arg;
 
+	if (!cl->next_alive)
+		return;
 	cbq_deactivate_class(cl);
 }
 
@@ -95,10 +95,7 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
			    &q->stats, qdisc_pkt_len, codel_get_enqueue_time,
			    drop_func, dequeue_func);
 
-	/* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
-	 * or HTB crashes. Defer it for next round.
-	 */
-	if (q->stats.drop_count && sch->q.qlen) {
+	if (q->stats.drop_count) {
 		qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
 		q->stats.drop_count = 0;
 		q->stats.drop_len = 0;
@@ -109,6 +109,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 		return -ENOBUFS;
 
 	gnet_stats_basic_sync_init(&cl->bstats);
+	INIT_LIST_HEAD(&cl->alist);
 	cl->common.classid = classid;
 	cl->quantum = quantum;
 	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
@@ -231,7 +232,7 @@ static void drr_qlen_notify(struct Qdisc *csh, unsigned long arg)
 {
 	struct drr_class *cl = (struct drr_class *)arg;
 
-	list_del(&cl->alist);
+	list_del_init(&cl->alist);
 }
 
 static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
@@ -392,7 +393,7 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
 		if (unlikely(skb == NULL))
 			goto out;
 		if (cl->qdisc->q.qlen == 0)
-			list_del(&cl->alist);
+			list_del_init(&cl->alist);
 
 		bstats_update(&cl->bstats, skb);
 		qdisc_bstats_update(sch, skb);
@@ -433,7 +434,7 @@ static void drr_reset_qdisc(struct Qdisc *sch)
 	for (i = 0; i < q->clhash.hashsize; i++) {
 		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
 			if (cl->qdisc->q.qlen)
-				list_del(&cl->alist);
+				list_del_init(&cl->alist);
 			qdisc_reset(cl->qdisc);
 		}
 	}
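Note: the list_del() to list_del_init() conversions across these schedulers matter because qlen_notify() may now be called more than once for the same class; list_del_init() leaves the node self-linked, so a repeated removal (or a list_empty() check) is harmless, whereas a second plain list_del() would dereference poisoned pointers. A small sketch of the difference, not tied to any scheduler:

	#include <linux/list.h>

	struct example_class {
		struct list_head alist;
	};

	static void example_deactivate(struct example_class *cl)
	{
		/*
		 * Safe to call repeatedly: after the first call the node points
		 * to itself, so list_empty() is true and nothing further happens,
		 * instead of touching LIST_POISON pointers as a second plain
		 * list_del() would.
		 */
		if (!list_empty(&cl->alist))
			list_del_init(&cl->alist);
	}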
@@ -649,23 +649,24 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
 
 	sch_tree_lock(sch);
 
-	q->nbands = nbands;
+	for (i = nbands; i < oldbands; i++) {
+		if (i >= q->nstrict && q->classes[i].qdisc->q.qlen)
+			list_del_init(&q->classes[i].alist);
+		qdisc_purge_queue(q->classes[i].qdisc);
+	}
+
+	WRITE_ONCE(q->nbands, nbands);
 	for (i = nstrict; i < q->nstrict; i++) {
 		if (q->classes[i].qdisc->q.qlen) {
 			list_add_tail(&q->classes[i].alist, &q->active);
 			q->classes[i].deficit = quanta[i];
 		}
 	}
-	for (i = q->nbands; i < oldbands; i++) {
-		if (i >= q->nstrict && q->classes[i].qdisc->q.qlen)
-			list_del_init(&q->classes[i].alist);
-		qdisc_tree_flush_backlog(q->classes[i].qdisc);
-	}
-	q->nstrict = nstrict;
+	WRITE_ONCE(q->nstrict, nstrict);
 	memcpy(q->prio2band, priomap, sizeof(priomap));
 
 	for (i = 0; i < q->nbands; i++)
-		q->classes[i].quantum = quanta[i];
+		WRITE_ONCE(q->classes[i].quantum, quanta[i]);
 
 	for (i = oldbands; i < q->nbands; i++) {
 		q->classes[i].qdisc = queues[i];
@@ -679,7 +680,7 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
 	for (i = q->nbands; i < oldbands; i++) {
 		qdisc_put(q->classes[i].qdisc);
 		q->classes[i].qdisc = NULL;
-		q->classes[i].quantum = 0;
+		WRITE_ONCE(q->classes[i].quantum, 0);
 		q->classes[i].deficit = 0;
 		gnet_stats_basic_sync_init(&q->classes[i].bstats);
 		memset(&q->classes[i].qstats, 0, sizeof(q->classes[i].qstats));
@@ -736,6 +737,7 @@ static int ets_qdisc_dump(struct Qdisc *sch, struct sk_buff *skb)
 	struct ets_sched *q = qdisc_priv(sch);
 	struct nlattr *opts;
 	struct nlattr *nest;
+	u8 nbands, nstrict;
 	int band;
 	int prio;
 	int err;
@@ -748,21 +750,22 @@ static int ets_qdisc_dump(struct Qdisc *sch, struct sk_buff *skb)
 	if (!opts)
 		goto nla_err;
 
-	if (nla_put_u8(skb, TCA_ETS_NBANDS, q->nbands))
+	nbands = READ_ONCE(q->nbands);
+	if (nla_put_u8(skb, TCA_ETS_NBANDS, nbands))
 		goto nla_err;
 
-	if (q->nstrict &&
-	    nla_put_u8(skb, TCA_ETS_NSTRICT, q->nstrict))
+	nstrict = READ_ONCE(q->nstrict);
+	if (nstrict && nla_put_u8(skb, TCA_ETS_NSTRICT, nstrict))
 		goto nla_err;
 
-	if (q->nbands > q->nstrict) {
+	if (nbands > nstrict) {
 		nest = nla_nest_start(skb, TCA_ETS_QUANTA);
 		if (!nest)
 			goto nla_err;
 
-		for (band = q->nstrict; band < q->nbands; band++) {
+		for (band = nstrict; band < nbands; band++) {
 			if (nla_put_u32(skb, TCA_ETS_QUANTA_BAND,
-					q->classes[band].quantum))
+					READ_ONCE(q->classes[band].quantum)))
 				goto nla_err;
 		}
 
@@ -774,7 +777,8 @@ static int ets_qdisc_dump(struct Qdisc *sch, struct sk_buff *skb)
 		goto nla_err;
 
 	for (prio = 0; prio <= TC_PRIO_MAX; prio++) {
-		if (nla_put_u8(skb, TCA_ETS_PRIOMAP_BAND, q->prio2band[prio]))
+		if (nla_put_u8(skb, TCA_ETS_PRIOMAP_BAND,
			       READ_ONCE(q->prio2band[prio])))
 			goto nla_err;
 	}
 
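Note: the ets changes pair WRITE_ONCE() in the locked configuration path with READ_ONCE() in the lockless dump path, so a dump racing with a configuration change still sees each field as one consistent load. A minimal sketch of the idiom with placeholder fields:

	#include <linux/compiler.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_cfg_lock);
	static unsigned int example_nbands;

	/* Writer: runs under the lock, but readers may not take it. */
	static void example_change(unsigned int nbands)
	{
		spin_lock_bh(&example_cfg_lock);
		WRITE_ONCE(example_nbands, nbands);
		spin_unlock_bh(&example_cfg_lock);
	}

	/* Lockless reader: one marked load, reused consistently afterwards. */
	static unsigned int example_dump(void)
	{
		return READ_ONCE(example_nbands);
	}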
@@ -318,10 +318,8 @@ begin:
 	}
 	qdisc_bstats_update(sch, skb);
 	flow->deficit -= qdisc_pkt_len(skb);
-	/* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
-	 * or HTB crashes. Defer it for next round.
-	 */
-	if (q->cstats.drop_count && sch->q.qlen) {
+
+	if (q->cstats.drop_count) {
 		qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
					  q->cstats.drop_len);
 		q->cstats.drop_count = 0;
@@ -353,7 +353,8 @@ static void htb_add_to_wait_tree(struct htb_sched *q,
  */
 static inline void htb_next_rb_node(struct rb_node **n)
 {
-	*n = rb_next(*n);
+	if (*n)
+		*n = rb_next(*n);
 }
 
 /**
@@ -614,8 +615,8 @@ static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
  */
 static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
 {
-	WARN_ON(!cl->prio_activity);
-
+	if (!cl->prio_activity)
+		return;
 	htb_deactivate_prios(q, cl);
 	cl->prio_activity = 0;
 }
@@ -1753,8 +1754,7 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg,
 	if (cl->parent)
 		cl->parent->children--;
 
-	if (cl->prio_activity)
-		htb_deactivate(q, cl);
+	htb_deactivate(q, cl);
 
 	if (cl->cmode != HTB_CAN_SEND)
 		htb_safe_rb_erase(&cl->pq_node,
@@ -1970,8 +1970,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 		/* turn parent into inner node */
 		qdisc_purge_queue(parent->leaf.q);
 		parent_qdisc = parent->leaf.q;
-		if (parent->prio_activity)
-			htb_deactivate(q, parent);
+		htb_deactivate(q, parent);
 
 		/* remove from evt list because of level change */
 		if (parent->cmode != HTB_CAN_SEND) {
@@ -352,7 +352,7 @@ static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
 	struct qfq_aggregate *agg = cl->agg;
 
 
-	list_del(&cl->alist); /* remove from RR queue of the aggregate */
+	list_del_init(&cl->alist); /* remove from RR queue of the aggregate */
 	if (list_empty(&agg->active)) /* agg is now inactive */
 		qfq_deactivate_agg(q, agg);
 }
@@ -485,6 +485,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 	gnet_stats_basic_sync_init(&cl->bstats);
 	cl->common.classid = classid;
 	cl->deficit = lmax;
+	INIT_LIST_HEAD(&cl->alist);
 
 	cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
				      classid, NULL);
@@ -997,7 +998,7 @@ static struct sk_buff *agg_dequeue(struct qfq_aggregate *agg,
 	cl->deficit -= (int) len;
 
 	if (cl->qdisc->q.qlen == 0) /* no more packets, remove from list */
-		list_del(&cl->alist);
+		list_del_init(&cl->alist);
 	else if (cl->deficit < qdisc_pkt_len(cl->qdisc->ops->peek(cl->qdisc))) {
 		cl->deficit += agg->lmax;
 		list_move_tail(&cl->alist, &agg->active);
@@ -1430,6 +1431,8 @@ static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
 	struct qfq_sched *q = qdisc_priv(sch);
 	struct qfq_class *cl = (struct qfq_class *)arg;
 
+	if (list_empty(&cl->alist))
+		return;
 	qfq_deactivate_class(q, cl);
 }
 