Import of kernel-4.18.0-553.62.1.el8_10

parent f45d456b82
commit 11c08197f9

Documentation/core-api/cleanup.rst (new file, 8 lines)
@@ -0,0 +1,8 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===========================
+Scope-based Cleanup Helpers
+===========================
+
+.. kernel-doc:: include/linux/cleanup.h
+   :doc: scope-based cleanup helpers
@@ -12,6 +12,25 @@ Core utilities
   :maxdepth: 1

   kernel-api
   workqueue
   watch_queue
   printk-basics
   printk-formats
   printk-index
   symbol-namespaces
   asm-annotations

Data structures and low-level utilities
=======================================

Library functionality that is used throughout the kernel.

.. toctree::
   :maxdepth: 1

   kobject
   kref
   cleanup
   assoc_array
   atomic_ops
   cachetlb
@@ -1,4 +1,4 @@
-#! /usr/bin/python3.6
+#! /usr/libexec/platform-python
# -*- coding: utf-8; mode: python -*-
# pylint: disable=R0903, C0330, R0914, R0912, E0401
@@ -1,4 +1,4 @@
-#! /usr/bin/python3.6
+#! /usr/libexec/platform-python
# -*- coding: utf-8; mode: python -*-
# pylint: disable=C0330, R0903, R0912
@@ -21,6 +21,7 @@ before actually making adjustments.
Currently, these files are in /proc/sys/fs:
- aio-max-nr
- aio-nr
+- dentry-fs-klimit
- dentry-state
- dquot-max
- dquot-nr
@@ -54,6 +55,33 @@ of any kernel data structures.

+==============================================================
+
+dentry-fs-klimit:
+
+The number of positive dentries for a filesystem is limited by the
+total number of files in that filesystem. However, there is no limit
+on negative dentries, which can grow to an exceedingly large number.
+
+This integer value specifies a soft limit on the maximum number of
+dentries (in thousands) that are allowed in any one of the mounted
+filesystems. It has a default value of 0, which means no limit. A value
+of 10, for example, means that the limit will be 10,000.
+
+If the limit is exceeded, the system will attempt to reclaim currently
+unused dentries from the LRU lists of the filesystem to reduce its total
+number. Enabling this limit does impose a performance cost, especially
+when the limit is low, so it should be enabled only if an excessive
+number of negative dentries is becoming a problem in a particular
+workload environment.
+
+With dentry-fs-klimit properly set, dentry reclaim should be invoked
+infrequently, if at all. If it happens frequently, either the value is
+set too low for the current workload or misbehaving applications are
+generating too many negative dentries. The dentry-state sysctl file
+described below can be used to view the current mix of positive and
+negative dentries in the system.
+
==============================================================

dentry-state:

From linux/include/linux/dcache.h:
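For illustration, a minimal userspace sketch (not part of the patch; the path follows the documentation above) that enables a 10,000-dentry soft limit per filesystem:

    /* Hypothetical example: write "10" (thousands) to the sysctl file. */
    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/proc/sys/fs/dentry-fs-klimit", "w");

        if (!f) {
            perror("dentry-fs-klimit");
            return 1;
        }
        fprintf(f, "10\n");    /* 10 (thousands) => limit of 10,000 dentries */
        return fclose(f) ? 1 : 0;
    }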
@@ -1,4 +1,4 @@
-#! /usr/bin/python3.6
+#! /usr/libexec/platform-python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
@@ -1,4 +1,4 @@
-#! /usr/bin/python3.6
+#! /usr/libexec/platform-python
# add symbolic names to read_msr / write_msr in trace
# decode_msr msr-index.h < trace
import sys
@@ -12,7 +12,7 @@ RHEL_MINOR = 10
#
# Use this spot to avoid future merge conflicts.
# Do not trim this comment.
-RHEL_RELEASE = 553.60.1
+RHEL_RELEASE = 553.62.1

#
# ZSTREAM
@@ -928,6 +928,10 @@ static int __init setup_hwcaps(void)
    case 0x3932:
        strcpy(elf_platform, "z16");
        break;
+   case 0x9175:
+   case 0x9176:
+       strcpy(elf_platform, "z17");
+       break;
    }

    /*
@@ -835,7 +835,7 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
        return ret;
    }

-   for_each_node(nid) {
+   for_each_node_with_cpus(nid) {
        cpu = cpumask_first(cpumask_of_node(nid));
        c = &cpu_data(cpu);
@@ -5,7 +5,7 @@
#

#
-# Compiler: gcc (GCC) 8.5.0 20210514 (Red Hat 8.5.0-26)
+# Compiler: gcc (GCC) 8.5.0 20210514 (Red Hat 8.5.0-28)
#
CONFIG_ARM64=y
CONFIG_64BIT=y
@@ -5,7 +5,7 @@
#

#
-# Compiler: gcc (GCC) 8.5.0 20210514 (Red Hat 8.5.0-26)
+# Compiler: gcc (GCC) 8.5.0 20210514 (Red Hat 8.5.0-28)
#
CONFIG_ARM64=y
CONFIG_64BIT=y
@@ -5,7 +5,7 @@
#

#
-# Compiler: gcc (GCC) 8.5.0 20210514 (Red Hat 8.5.0-26)
+# Compiler: gcc (GCC) 8.5.0 20210514 (Red Hat 8.5.0-28)
#
CONFIG_PPC64=y
@@ -5,7 +5,7 @@
#

#
-# Compiler: gcc (GCC) 8.5.0 20210514 (Red Hat 8.5.0-26)
+# Compiler: gcc (GCC) 8.5.0 20210514 (Red Hat 8.5.0-28)
#
CONFIG_PPC64=y
@@ -5,7 +5,7 @@
#

#
-# Compiler: gcc (GCC) 8.5.0 20210514 (Red Hat 8.5.0-26)
+# Compiler: gcc (GCC) 8.5.0 20210514 (Red Hat 8.5.0-28)
#
CONFIG_MMU=y
CONFIG_ZONE_DMA=y
@@ -5,7 +5,7 @@
#

#
-# Compiler: gcc (GCC) 8.5.0 20210514 (Red Hat 8.5.0-26)
+# Compiler: gcc (GCC) 8.5.0 20210514 (Red Hat 8.5.0-28)
#
CONFIG_MMU=y
CONFIG_ZONE_DMA=y
@@ -5,7 +5,7 @@
#

#
-# Compiler: gcc (GCC) 8.5.0 20210514 (Red Hat 8.5.0-26)
+# Compiler: gcc (GCC) 8.5.0 20210514 (Red Hat 8.5.0-28)
#
CONFIG_MMU=y
CONFIG_ZONE_DMA=y
@@ -5,7 +5,7 @@
#

#
-# Compiler: gcc (GCC) 8.5.0 20210514 (Red Hat 8.5.0-26)
+# Compiler: gcc (GCC) 8.5.0 20210514 (Red Hat 8.5.0-28)
#
CONFIG_64BIT=y
CONFIG_X86_64=y
@@ -5,7 +5,7 @@
#

#
-# Compiler: gcc (GCC) 8.5.0 20210514 (Red Hat 8.5.0-26)
+# Compiler: gcc (GCC) 8.5.0 20210514 (Red Hat 8.5.0-28)
#
CONFIG_64BIT=y
CONFIG_X86_64=y
@@ -60,6 +60,8 @@ static inline void subsys_put(struct subsys_private *sp)
    kset_put(&sp->subsys);
}

+struct subsys_private *bus_to_subsys(const struct bus_type *bus);
+
struct driver_private {
    struct kobject kobj;
    struct klist klist_devices;
@@ -167,6 +169,22 @@ extern void driver_remove_groups(struct device_driver *drv,
                                 const struct attribute_group **groups);
void device_driver_detach(struct device *dev);

+static inline void device_set_driver(struct device *dev, const struct device_driver *drv)
+{
+   /*
+    * The majority of (all?) read accesses to dev->driver happen either
+    * while holding the device lock or in bus/driver code that is only
+    * invoked when the device is bound to a driver, so there is no
+    * concern of the pointer being changed while it is being read.
+    * However, when reading the device's uevent file we read the driver
+    * pointer without taking the device lock (so we do not block there
+    * for an arbitrary amount of time). We use WRITE_ONCE() here to
+    * prevent tearing so that READ_ONCE() can safely be used in the
+    * uevent code.
+    */
+   /* FIXME - this cast should not be needed "soon" */
+   WRITE_ONCE(dev->driver, (struct device_driver *)drv);
+}
+
extern int devres_release_all(struct device *dev);
extern void device_block_probing(void);
extern void device_unblock_probing(void);
@@ -56,7 +56,7 @@ static int __must_check bus_rescan_devices_helper(struct device *dev,
 * NULL. A call to subsys_put() must be done when finished with the pointer in
 * order for it to be properly freed.
 */
-static struct subsys_private *bus_to_subsys(const struct bus_type *bus)
+struct subsys_private *bus_to_subsys(const struct bus_type *bus)
{
    struct subsys_private *sp = NULL;
    struct kobject *kobj;
@@ -9,6 +9,7 @@
 */

#include <linux/acpi.h>
+#include <linux/cleanup.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -2469,6 +2470,37 @@ static const char *dev_uevent_name(struct kset *kset, const struct kobject *kobj)
    return NULL;
}

+/*
+ * Try filling the "DRIVER=<name>" uevent variable for a device. Because
+ * this function may race with binding and unbinding the device from a
+ * driver, we need to be careful. Binding is generally safe; at worst we
+ * miss the fact that the device is already bound to a driver (but driver
+ * information delivered through uevents is best-effort and may become
+ * obsolete as soon as it is generated anyway). Unbinding is riskier, as
+ * the driver pointer is transitioning to NULL, so READ_ONCE() should be
+ * used to make sure we are dealing with the same pointer. To ensure that
+ * the driver structure does not disappear from under us, we take the bus'
+ * drivers klist lock: only a registered driver can be bound to a device,
+ * and bus code takes the same lock to unregister a driver.
+ */
+static void dev_driver_uevent(const struct device *dev, struct kobj_uevent_env *env)
+{
+   struct subsys_private *sp = bus_to_subsys(dev->bus);
+
+   if (sp) {
+       CLASS(spinlock, scope)(&sp->klist_drivers.k_lock);
+       if (__guard_ptr(spinlock)(&scope) || !__is_cond_ptr(spinlock)) {
+           struct device_driver *drv = READ_ONCE(dev->driver);
+
+           if (drv)
+               add_uevent_var(env, "DRIVER=%s", drv->name);
+       }
+
+       subsys_put(sp);
+   }
+}
+
static int dev_uevent(struct kset *kset, struct kobject *kobj,
                      struct kobj_uevent_env *env)
{
@@ -2501,8 +2533,8 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
    if (dev->type && dev->type->name)
        add_uevent_var(env, "DEVTYPE=%s", dev->type->name);

-   if (dev->driver)
-       add_uevent_var(env, "DRIVER=%s", dev->driver->name);
+   /* Add "DRIVER=%s" variable if the device is bound to a driver */
+   dev_driver_uevent(dev, env);

    /* Add common DT information about the device */
    of_device_uevent(dev, env);
@@ -2572,11 +2604,8 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
    if (!env)
        return -ENOMEM;

-   /* Synchronize with really_probe() */
-   device_lock(dev);
    /* let the kset specific function add its keys */
    retval = kset->uevent_ops->uevent(kset, &dev->kobj, env);
-   device_unlock(dev);
    if (retval)
        goto out;

@@ -3603,7 +3632,7 @@ done:
    device_pm_remove(dev);
    dpm_sysfs_remove(dev);
DPMError:
-   dev->driver = NULL;
+   device_set_driver(dev, NULL);
    bus_remove_device(dev);
BusError:
    device_remove_attrs(dev);
@@ -578,7 +578,7 @@ static void device_unbind_cleanup(struct device *dev)
    arch_teardown_dma_ops(dev);
    kfree(dev->dma_range_map);
    dev->dma_range_map = NULL;
-   dev->driver = NULL;
+   device_set_driver(dev, NULL);
    dev_set_drvdata(dev, NULL);
    if (dev->pm_domain && dev->pm_domain->dismiss)
        dev->pm_domain->dismiss(dev);
@@ -653,7 +653,7 @@ static int really_probe(struct device *dev, struct device_driver *drv)
    WARN_ON(!list_empty(&dev->devres_head));

re_probe:
-   dev->driver = drv;
+   device_set_driver(dev, drv);

    /* If using pinctrl, bind pins now before probing */
    ret = pinctrl_bind_pins(dev);
@@ -986,7 +986,7 @@ static int __device_attach(struct device *dev, bool allow_async)
        if (ret == 0)
            ret = 1;
        else {
-           dev->driver = NULL;
+           device_set_driver(dev, NULL);
            ret = 0;
        }
    } else {
@@ -8335,6 +8335,7 @@ static void *md_seq_start(struct seq_file *seq, loff_t *pos)
    list_for_each(tmp, &all_mddevs)
        if (!l--) {
            mddev = list_entry(tmp, struct mddev, all_mddevs);
+           mddev_get(mddev);
            spin_unlock(&all_mddevs_lock);
            return mddev;
        }
@@ -1532,6 +1532,40 @@ static void uvc_ctrl_send_slave_event(struct uvc_video_chain *chain,
    uvc_ctrl_send_event(chain, handle, ctrl, mapping, val, changes);
}

+static void uvc_ctrl_set_handle(struct uvc_fh *handle, struct uvc_control *ctrl,
+                                struct uvc_fh *new_handle)
+{
+   lockdep_assert_held(&handle->chain->ctrl_mutex);
+
+   if (new_handle) {
+       if (ctrl->handle)
+           dev_warn_ratelimited(&handle->stream->dev->udev->dev,
+                                "UVC non compliance: Setting an async control with a pending operation.");
+
+       if (new_handle == ctrl->handle)
+           return;
+
+       if (ctrl->handle) {
+           WARN_ON(!ctrl->handle->pending_async_ctrls);
+           if (ctrl->handle->pending_async_ctrls)
+               ctrl->handle->pending_async_ctrls--;
+       }
+
+       ctrl->handle = new_handle;
+       handle->pending_async_ctrls++;
+       return;
+   }
+
+   /* Cannot clear the handle for a control not owned by us. */
+   if (WARN_ON(ctrl->handle != handle))
+       return;
+
+   ctrl->handle = NULL;
+   if (WARN_ON(!handle->pending_async_ctrls))
+       return;
+   handle->pending_async_ctrls--;
+}
+
void uvc_ctrl_status_event(struct uvc_video_chain *chain,
                           struct uvc_control *ctrl, const u8 *data)
{
@@ -1541,8 +1575,12 @@ void uvc_ctrl_status_event(struct uvc_video_chain *chain,

    mutex_lock(&chain->ctrl_mutex);

    /* Flush the control cache, the data might have changed. */
    ctrl->loaded = 0;

    handle = ctrl->handle;
-   ctrl->handle = NULL;
+   if (handle)
+       uvc_ctrl_set_handle(handle, ctrl, NULL);

    list_for_each_entry(mapping, &ctrl->info.mappings, list) {
        s32 value = __uvc_ctrl_get_value(mapping, data);
@@ -1593,10 +1631,8 @@ bool uvc_ctrl_status_event_async(struct urb *urb, struct uvc_video_chain *chain,
    struct uvc_device *dev = chain->dev;
    struct uvc_ctrl_work *w = &dev->async_ctrl;

-   if (list_empty(&ctrl->info.mappings)) {
-       ctrl->handle = NULL;
+   if (list_empty(&ctrl->info.mappings))
        return false;
-   }

    w->data = data;
    w->urb = urb;
@@ -1764,7 +1800,10 @@ int uvc_ctrl_begin(struct uvc_video_chain *chain)
}

static int uvc_ctrl_commit_entity(struct uvc_device *dev,
-                                 struct uvc_entity *entity, int rollback, struct uvc_control **err_ctrl)
+                                 struct uvc_fh *handle,
+                                 struct uvc_entity *entity,
+                                 int rollback,
+                                 struct uvc_control **err_ctrl)
{
    struct uvc_control *ctrl;
    unsigned int i;
@@ -1812,6 +1851,10 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
            *err_ctrl = ctrl;
            return ret;
        }
+
+       if (!rollback && handle &&
+           ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
+           uvc_ctrl_set_handle(handle, ctrl, handle);
    }

    return 0;
@@ -1848,18 +1891,20 @@ int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback,

    /* Find the control. */
    list_for_each_entry(entity, &chain->entities, chain) {
-       ret = uvc_ctrl_commit_entity(chain->dev, entity, rollback,
-                                    &err_ctrl);
-       if (ret < 0)
+       ret = uvc_ctrl_commit_entity(chain->dev, handle, entity,
+                                    rollback, &err_ctrl);
+       if (ret < 0) {
+           if (ctrls)
+               ctrls->error_idx =
+                   uvc_ctrl_find_ctrl_idx(entity, ctrls,
+                                          err_ctrl);
            goto done;
+       }
    }

    if (!rollback)
        uvc_ctrl_send_events(handle, ctrls->controls, ctrls->count);
done:
-   if (ret < 0 && ctrls)
-       ctrls->error_idx = uvc_ctrl_find_ctrl_idx(entity, ctrls,
-                                                 err_ctrl);
    mutex_unlock(&chain->ctrl_mutex);
    return ret;
}
@@ -1892,6 +1937,8 @@ int uvc_ctrl_set(struct uvc_fh *handle,
    s32 max;
    int ret;

+   lockdep_assert_held(&chain->ctrl_mutex);
+
    if (__uvc_query_v4l2_class(chain, xctrl->id, 0) >= 0)
        return -EACCES;

@@ -1997,9 +2044,6 @@ int uvc_ctrl_set(struct uvc_fh *handle,
    mapping->set(mapping, value,
                 uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT));

-   if (ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
-       ctrl->handle = handle;
-
    ctrl->dirty = 1;
    ctrl->modified = 1;
    return 0;
@@ -2165,7 +2209,7 @@ static int uvc_ctrl_init_xu_ctrl(struct uvc_device *dev,
int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
                      struct uvc_xu_control_query *xqry)
{
-   struct uvc_entity *entity;
+   struct uvc_entity *entity, *iter;
    struct uvc_control *ctrl;
    unsigned int i;
    bool found;
@@ -2175,16 +2219,16 @@ int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
    int ret;

    /* Find the extension unit. */
-   found = false;
-   list_for_each_entry(entity, &chain->entities, chain) {
-       if (UVC_ENTITY_TYPE(entity) == UVC_VC_EXTENSION_UNIT &&
-           entity->id == xqry->unit) {
-           found = true;
+   entity = NULL;
+   list_for_each_entry(iter, &chain->entities, chain) {
+       if (UVC_ENTITY_TYPE(iter) == UVC_VC_EXTENSION_UNIT &&
+           iter->id == xqry->unit) {
+           entity = iter;
            break;
        }
    }

-   if (!found) {
+   if (!entity) {
        uvc_dbg(chain->dev, CONTROL, "Extension unit %u not found\n",
                xqry->unit);
        return -ENOENT;
@@ -2321,7 +2365,7 @@ int uvc_ctrl_restore_values(struct uvc_device *dev)
            ctrl->dirty = 1;
        }

-       ret = uvc_ctrl_commit_entity(dev, entity, 0, NULL);
+       ret = uvc_ctrl_commit_entity(dev, NULL, entity, 0, NULL);
        if (ret < 0)
            return ret;
    }
@@ -2743,6 +2787,31 @@ int uvc_ctrl_init_device(struct uvc_device *dev)
    return 0;
}

+void uvc_ctrl_cleanup_fh(struct uvc_fh *handle)
+{
+   struct uvc_entity *entity;
+   unsigned int i;
+
+   mutex_lock(&handle->chain->ctrl_mutex);
+
+   if (!handle->pending_async_ctrls)
+       goto done;
+
+   list_for_each_entry(entity, &handle->chain->dev->entities, list) {
+       for (i = 0; i < entity->ncontrols; ++i) {
+           if (entity->controls[i].handle != handle)
+               continue;
+           uvc_ctrl_set_handle(handle, &entity->controls[i], NULL);
+       }
+   }
+
+   WARN_ON(handle->pending_async_ctrls);
+
+done:
+   mutex_unlock(&handle->chain->ctrl_mutex);
+   return;
+}
+
/*
 * Cleanup device controls.
 */
@@ -31,7 +31,7 @@

unsigned int uvc_clock_param = CLOCK_MONOTONIC;
unsigned int uvc_hw_timestamps_param;
-unsigned int uvc_no_drop_param;
+unsigned int uvc_no_drop_param = 1;
static unsigned int uvc_quirks_param = -1;
unsigned int uvc_dbg_param;
unsigned int uvc_timeout_param = UVC_CTRL_STREAMING_TIMEOUT;
@@ -1919,7 +1919,7 @@ int uvc_register_video_device(struct uvc_device *dev,
    int ret;

    /* Initialize the video buffers queue. */
-   ret = uvc_queue_init(queue, type, !uvc_no_drop_param);
+   ret = uvc_queue_init(queue, type);
    if (ret)
        return ret;

@@ -2351,8 +2351,25 @@ module_param_call(clock, uvc_clock_param_set, uvc_clock_param_get,
MODULE_PARM_DESC(clock, "Video buffers timestamp clock");
module_param_named(hwtimestamps, uvc_hw_timestamps_param, uint, 0644);
MODULE_PARM_DESC(hwtimestamps, "Use hardware timestamps");
-module_param_named(nodrop, uvc_no_drop_param, uint, 0644);
+
+static int param_set_nodrop(const char *val, const struct kernel_param *kp)
+{
+   pr_warn_once("uvcvideo: "
+                DEPRECATED
+                "nodrop parameter will be eventually removed.\n");
+   return param_set_bool(val, kp);
+}
+
+static const struct kernel_param_ops param_ops_nodrop = {
+   .set = param_set_nodrop,
+   .get = param_get_uint,
+};
+
+param_check_uint(nodrop, &uvc_no_drop_param);
+module_param_cb(nodrop, &param_ops_nodrop, &uvc_no_drop_param, 0644);
+__MODULE_PARM_TYPE(nodrop, "uint");
MODULE_PARM_DESC(nodrop, "Don't drop incomplete frames");
+
module_param_named(quirks, uvc_quirks_param, uint, 0644);
MODULE_PARM_DESC(quirks, "Forced device quirks");
module_param_named(trace, uvc_dbg_param, uint, 0644);
@@ -212,8 +212,7 @@ static const struct vb2_ops uvc_meta_queue_qops = {
    .stop_streaming = uvc_stop_streaming,
};

-int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
-                  int drop_corrupted)
+int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type)
{
    int ret;

@@ -243,7 +242,6 @@ int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
    mutex_init(&queue->mutex);
    spin_lock_init(&queue->irqlock);
    INIT_LIST_HEAD(&queue->irqqueue);
-   queue->flags = drop_corrupted ? UVC_QUEUE_DROP_CORRUPTED : 0;

    return 0;
}
@@ -475,14 +473,15 @@ static void uvc_queue_buffer_complete(struct kref *ref)
    struct vb2_buffer *vb = &buf->buf.vb2_buf;
    struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);

-   if ((queue->flags & UVC_QUEUE_DROP_CORRUPTED) && buf->error) {
+   if (buf->error && !uvc_no_drop_param) {
        uvc_queue_buffer_requeue(queue, buf);
        return;
    }

    buf->state = buf->error ? UVC_BUF_STATE_ERROR : UVC_BUF_STATE_DONE;
    vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused);
-   vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
+   vb2_buffer_done(&buf->buf.vb2_buf, buf->error ? VB2_BUF_STATE_ERROR :
+                   VB2_BUF_STATE_DONE);
}

/*
@@ -269,6 +269,7 @@ int uvc_status_init(struct uvc_device *dev)
    dev->int_urb = usb_alloc_urb(0, GFP_KERNEL);
    if (!dev->int_urb) {
        kfree(dev->status);
+       dev->status = NULL;
        return -ENOMEM;
    }
@@ -658,6 +658,8 @@ static int uvc_v4l2_release(struct file *file)

    uvc_dbg(stream->dev, CALLS, "%s\n", __func__);

+   uvc_ctrl_cleanup_fh(handle);
+
    /* Only free resources if this is a privileged handle. */
    if (uvc_has_privileges(handle))
        uvc_queue_release(&stream->queue);
@@ -308,7 +308,6 @@ struct uvc_buffer {
};

#define UVC_QUEUE_DISCONNECTED    (1 << 0)
-#define UVC_QUEUE_DROP_CORRUPTED  (1 << 1)

struct uvc_video_queue {
    struct vb2_queue queue;
@@ -329,7 +328,11 @@ struct uvc_video_chain {
    struct uvc_entity *processing;    /* Processing unit */
    struct uvc_entity *selector;      /* Selector unit */

-   struct mutex ctrl_mutex;          /* Protects ctrl.info */
+   struct mutex ctrl_mutex;          /*
+                                      * Protects ctrl.info,
+                                      * ctrl.handle and
+                                      * uvc_fh.pending_async_ctrls
+                                      */

    struct v4l2_prio_state prio;      /* V4L2 priority state */
    u32 caps;                         /* V4L2 chain-wide caps */
@@ -604,6 +607,7 @@ struct uvc_fh {
    struct uvc_video_chain *chain;
    struct uvc_streaming *stream;
    enum uvc_handle_state state;
+   unsigned int pending_async_ctrls;
};

struct uvc_driver {
@@ -666,8 +670,7 @@ extern struct uvc_driver uvc_driver;
struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id);

/* Video buffers queue management. */
-int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
-                  int drop_corrupted);
+int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type);
void uvc_queue_release(struct uvc_video_queue *queue);
int uvc_request_buffers(struct uvc_video_queue *queue,
                        struct v4l2_requestbuffers *rb);
@@ -788,6 +791,8 @@ int uvc_ctrl_is_accessible(struct uvc_video_chain *chain, u32 v4l2_id,
int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
                      struct uvc_xu_control_query *xqry);

+void uvc_ctrl_cleanup_fh(struct uvc_fh *handle);
+
/* Utility functions */
struct usb_host_endpoint *uvc_find_endpoint(struct usb_host_interface *alts,
                                            u8 epaddr);
@@ -854,6 +854,7 @@ static int qp_notify_peer_local(bool attach, struct vmci_handle handle)
    u32 context_id = vmci_get_context_id();
    struct vmci_event_qp ev;

+   memset(&ev, 0, sizeof(ev));
    ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
    ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
                                      VMCI_CONTEXT_RESOURCE_ID);
@@ -1467,6 +1468,7 @@ static int qp_notify_peer(bool attach,
     * kernel.
     */

+   memset(&ev, 0, sizeof(ev));
    ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER);
    ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
                                      VMCI_CONTEXT_RESOURCE_ID);
@@ -263,11 +263,17 @@ static struct airq_info *new_airq_info(int index)
static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
                                        u64 *first, void **airq_info)
{
-   int i, j;
+   int i, j, queue_idx, highest_queue_idx = -1;
    struct airq_info *info;
    unsigned long indicator_addr = 0;
    unsigned long bit, flags;

+   /* Array entries without an actual queue pointer must be ignored. */
+   for (i = 0; i < nvqs; i++) {
+       if (vqs[i])
+           highest_queue_idx++;
+   }
+
    for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
        mutex_lock(&airq_areas_lock);
        if (!airq_areas[i])
@@ -277,7 +283,7 @@ static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
        if (!info)
            return 0;
        write_lock_irqsave(&info->lock, flags);
-       bit = airq_iv_alloc(info->aiv, nvqs);
+       bit = airq_iv_alloc(info->aiv, highest_queue_idx + 1);
        if (bit == -1UL) {
            /* Not enough vacancies. */
            write_unlock_irqrestore(&info->lock, flags);
@@ -286,8 +292,10 @@ static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
        *first = bit;
        *airq_info = info;
        indicator_addr = (unsigned long)info->aiv->vector;
-       for (j = 0; j < nvqs; j++) {
-           airq_iv_set_ptr(info->aiv, bit + j,
+       for (j = 0, queue_idx = 0; j < nvqs; j++) {
+           if (!vqs[j])
+               continue;
+           airq_iv_set_ptr(info->aiv, bit + queue_idx++,
                            (unsigned long)vqs[j]);
        }
        write_unlock_irqrestore(&info->lock, flags);
@@ -84,6 +84,9 @@ parse_mf_symlink(const u8 *buf, unsigned int buf_len, unsigned int *_link_len,
    if (rc != 1)
        return -EINVAL;

+   if (link_len > CIFS_MF_SYMLINK_LINK_MAXLEN)
+       return -EINVAL;
+
    rc = symlink_hash(link_len, link_str, md5_hash);
    if (rc) {
        cifs_dbg(FYI, "%s: MD5 hash failure: %d\n", __func__, rc);
fs/dcache.c (166 lines changed)
@@ -117,6 +117,20 @@ struct dentry_stat_t dentry_stat = {
    .age_limit = 45,
};

+/*
+ * dentry_fs_klimit_sysctl:
+ * This is the sysctl parameter "dentry-fs-klimit" which specifies a soft
+ * limit for the maximum number of dentries (in thousands) allowed in any
+ * one of the mounted filesystems. The default is 0, which means no limit.
+ */
+int dentry_fs_klimit_sysctl __read_mostly;
+EXPORT_SYMBOL_GPL(dentry_fs_klimit_sysctl);
+static long dentry_nlru_limit __read_mostly;  /* Limit per list_lru_node */
+static struct dentry *dentry_nlru_shrink_anchor;
+static DEFINE_STATIC_KEY_FALSE(dentry_fs_klimit_on);
+static void dentry_nlru_shrink_wfunc(struct work_struct *work);
+static DECLARE_WORK(dentry_nlru_shrink_work, dentry_nlru_shrink_wfunc);
+
static DEFINE_PER_CPU(long, nr_dentry);
static DEFINE_PER_CPU(long, nr_dentry_unused);
static DEFINE_PER_CPU(long, nr_dentry_negative);
@@ -171,8 +185,73 @@ int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer,
    dentry_stat.nr_negative = get_nr_dentry_negative();
    return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}

+/*
+ * Sysctl proc handler for dentry_fs_klimit_sysctl.
+ */
+int proc_dentry_fs_klimit(struct ctl_table *ctl, int write,
+                          void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+   int ret, old = dentry_fs_klimit_sysctl;
+
+   ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+
+   if (!write || ret || (dentry_fs_klimit_sysctl == old))
+       return ret;
+
+   if (!dentry_fs_klimit_sysctl && old) {
+       static_branch_disable(&dentry_fs_klimit_on);
+
+       /* Wait for any existing shrinking work to finish */
+       if (READ_ONCE(dentry_nlru_shrink_anchor))
+           flush_work(&dentry_nlru_shrink_work);
+       dentry_nlru_limit = 0;
+       return 0;
+   }
+
+   /*
+    * Assuming an even distribution of dentries across the nodes of a
+    * multi-node system, compute the per-node limit with a little bit
+    * of margin for some unevenness.
+    */
+   if (nr_online_nodes > 1)
+       dentry_nlru_limit = (dentry_fs_klimit_sysctl * 1000L + 500)/nr_online_nodes;
+   else
+       dentry_nlru_limit = dentry_fs_klimit_sysctl * 1000L;
+
+   if (dentry_fs_klimit_sysctl && !old)
+       static_branch_enable(&dentry_fs_klimit_on);
+
+   return 0;
+}
+EXPORT_SYMBOL_GPL(proc_dentry_fs_klimit);
#endif

+/*
+ * Initialize a work item to shrink the list_lru_node containing the given
+ * dentry if that dentry can be saved into dentry_nlru_shrink_anchor.
+ */
+static void dentry_nlru_shrink(struct dentry *dentry)
+{
+   bool queued;
+
+   lockdep_assert_held(&dentry->d_lock);
+
+   if (cmpxchg(&dentry_nlru_shrink_anchor, NULL, dentry) != NULL)
+       return;
+   queued = queue_work(system_long_wq, &dentry_nlru_shrink_work);
+
+   /*
+    * In the unlikely event that the anchor is cleared but the work
+    * isn't totally finished, queue_work() will fail and we have to
+    * revert the change.
+    */
+   if (unlikely(!queued))
+       WRITE_ONCE(dentry_nlru_shrink_anchor, NULL);
+   else
+       dentry->d_lockref.count++;
+}
+
/*
 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
 * The strings are both count bytes long, and count is non-zero.
@@ -398,12 +477,24 @@ static void dentry_unlink_inode(struct dentry * dentry)
#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
static void d_lru_add(struct dentry *dentry)
{
+   long nitems;
+   bool negative = d_is_negative(dentry);
+
    D_FLAG_VERIFY(dentry, 0);
    dentry->d_flags |= DCACHE_LRU_LIST;
    this_cpu_inc(nr_dentry_unused);
-   if (d_is_negative(dentry))
+   if (negative)
        this_cpu_inc(nr_dentry_negative);
-   WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
+   nitems = list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru);
+   WARN_ON_ONCE(!nitems);
+   /*
+    * If a negative dentry is present and dentry_nlru_limit is exceeded,
+    * call dentry_nlru_shrink() to attempt to shrink the corresponding
+    * list_lru_node.
+    */
+   if (static_branch_unlikely(&dentry_fs_klimit_on) && negative &&
+       (nitems > dentry_nlru_limit) && !dentry_nlru_shrink_anchor)
+       dentry_nlru_shrink(dentry);
}

static void d_lru_del(struct dentry *dentry)
@@ -1217,6 +1308,77 @@ static enum lru_status dentry_lru_isolate(struct list_head *item,
    return LRU_REMOVED;
}

+/*
+ * Actual work function to shrink dentries in a list_lru_node structure that
+ * has exceeded the limit imposed by the dentry-fs-klimit sysctl parameter.
+ * The same dentry_lru_isolate() callback function used by memory reclaim is
+ * called to shrink the LRU list.
+ */
+static void dentry_nlru_shrink_wfunc(struct work_struct *work __maybe_unused)
+{
+   struct dentry *dentry = READ_ONCE(dentry_nlru_shrink_anchor);
+   int nid = page_to_nid(virt_to_page(dentry));
+   unsigned long start = jiffies;
+   struct super_block *sb = NULL;
+   long total_freed = 0;
+   long target_dcnt;
+   LIST_HEAD(dispose);
+   char *fsname;
+
+   /* Set the target dentry count to 3/4 of dentry_nlru_limit */
+   target_dcnt = dentry_nlru_limit - dentry_nlru_limit/4;
+
+   /* Acquire a read lock on sb->s_umount before reclaiming */
+   spin_lock(&dentry->d_lock);
+   if (((dentry->d_flags &
+         (DCACHE_LRU_LIST|DCACHE_SHRINK_LIST)) == DCACHE_LRU_LIST) &&
+       down_read_trylock(&dentry->d_sb->s_umount)) {
+       sb = dentry->d_sb;
+       fsname = sb->s_id;
+   }
+   spin_unlock(&dentry->d_lock);
+
+   /*
+    * For simplicity, only one list_lru_node is reclaimed. If other
+    * nodes of the same filesystem have exceeded the limit, new work
+    * items will be initiated to reclaim them sooner or later.
+    */
+   while (sb) {
+       struct list_lru *lru = &sb->s_dentry_lru;
+       unsigned long node_cnt = list_lru_count_node(lru, nid);
+       unsigned long nr_to_walk;
+       long freed;
+
+       /*
+        * If the current node count is less than or just a bit more
+        * than the target dentry count, consider it done.
+        */
+       if (node_cnt <= target_dcnt + 16)
+           break;
+
+       /*
+        * To avoid holding nlru->lock for too long, limit nr_to_walk
+        * to no more than 4k per shrink operation.
+        */
+       nr_to_walk = min(node_cnt - target_dcnt, 1L << 12);
+       freed = list_lru_walk_node(lru, nid, dentry_lru_isolate,
+                                  &dispose, &nr_to_walk);
+       total_freed += freed;
+       shrink_dentry_list(&dispose);
+       cond_resched();
+       if (!freed || (node_cnt - freed <= target_dcnt))
+           break;  /* Done */
+   }
+   WRITE_ONCE(dentry_nlru_shrink_anchor, NULL);
+   dput(dentry);
+
+   if (total_freed)
+       pr_info("dentry-fs-klimit: %ld dentries freed from node %d of %s in %u ms\n",
+               total_freed, nid, fsname, jiffies_to_msecs(jiffies - start));
+   if (sb)
+       up_read(&sb->s_umount);
+}
+
/**
 * prune_dcache_sb - shrink the dcache
 * @sb: superblock
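To make the per-node arithmetic in proc_dentry_fs_klimit() and dentry_nlru_shrink_wfunc() concrete, a worked example (illustrative only, derived from the code above):

    /*
     * With dentry-fs-klimit = 10 on a 4-node system:
     *   dentry_nlru_limit = (10 * 1000 + 500) / 4 = 2625 dentries per node
     * and the shrink work function targets 3/4 of that:
     *   target_dcnt = 2625 - 2625 / 4 = 1969 dentries.
     * On a single-node system the limit is simply 10 * 1000 = 10000.
     */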
@@ -1803,7 +1803,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
     * split it in half by count; each resulting block will have at least
     * half the space free.
     */
-   if (i > 0)
+   if (i >= 0)
        split = count - move;
    else
        split = count/2;
@@ -1137,15 +1137,24 @@ ext4_xattr_inode_dec_ref_all(handle_t *handle, struct inode *parent,
{
    struct inode *ea_inode;
    struct ext4_xattr_entry *entry;
+   struct ext4_iloc iloc;
    bool dirty = false;
    unsigned int ea_ino;
    int err;
    int credits;
+   void *end;
+
+   if (block_csum)
+       end = (void *)bh->b_data + bh->b_size;
+   else {
+       ext4_get_inode_loc(parent, &iloc);
+       end = (void *)ext4_raw_inode(&iloc) + EXT4_SB(parent->i_sb)->s_inode_size;
+   }

    /* One credit for dec ref on ea_inode, one for orphan list addition, */
    credits = 2 + extra_credits;

-   for (entry = first; !IS_LAST_ENTRY(entry);
+   for (entry = first; (void *)entry < end && !IS_LAST_ENTRY(entry);
         entry = EXT4_XATTR_NEXT(entry)) {
        if (!entry->e_value_inum)
            continue;
include/linux/cleanup.h
Normal file
436
include/linux/cleanup.h
Normal file
@ -0,0 +1,436 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef _LINUX_CLEANUP_H
|
||||
#define _LINUX_CLEANUP_H
|
||||
|
||||
#include <linux/compiler.h>
|
||||
|
||||
/**
|
||||
* DOC: scope-based cleanup helpers
|
||||
*
|
||||
* The "goto error" pattern is notorious for introducing subtle resource
|
||||
* leaks. It is tedious and error prone to add new resource acquisition
|
||||
* constraints into code paths that already have several unwind
|
||||
* conditions. The "cleanup" helpers enable the compiler to help with
|
||||
* this tedium and can aid in maintaining LIFO (last in first out)
|
||||
* unwind ordering to avoid unintentional leaks.
|
||||
*
|
||||
* As drivers make up the majority of the kernel code base, here is an
|
||||
* example of using these helpers to clean up PCI drivers. The target of
|
||||
* the cleanups are occasions where a goto is used to unwind a device
|
||||
* reference (pci_dev_put()), or unlock the device (pci_dev_unlock())
|
||||
* before returning.
|
||||
*
|
||||
* The DEFINE_FREE() macro can arrange for PCI device references to be
|
||||
* dropped when the associated variable goes out of scope::
|
||||
*
|
||||
* DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
|
||||
* ...
|
||||
* struct pci_dev *dev __free(pci_dev_put) =
|
||||
* pci_get_slot(parent, PCI_DEVFN(0, 0));
|
||||
*
|
||||
* The above will automatically call pci_dev_put() if @dev is non-NULL
|
||||
* when @dev goes out of scope (automatic variable scope). If a function
|
||||
* wants to invoke pci_dev_put() on error, but return @dev (i.e. without
|
||||
* freeing it) on success, it can do::
|
||||
*
|
||||
* return no_free_ptr(dev);
|
||||
*
|
||||
* ...or::
|
||||
*
|
||||
* return_ptr(dev);
|
||||
*
|
||||
* The DEFINE_GUARD() macro can arrange for the PCI device lock to be
|
||||
* dropped when the scope where guard() is invoked ends::
|
||||
*
|
||||
* DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))
|
||||
* ...
|
||||
* guard(pci_dev)(dev);
|
||||
*
|
||||
* The lifetime of the lock obtained by the guard() helper follows the
|
||||
* scope of automatic variable declaration. Take the following example::
|
||||
*
|
||||
* func(...)
|
||||
* {
|
||||
* if (...) {
|
||||
* ...
|
||||
* guard(pci_dev)(dev); // pci_dev_lock() invoked here
|
||||
* ...
|
||||
* } // <- implied pci_dev_unlock() triggered here
|
||||
* }
|
||||
*
|
||||
* Observe the lock is held for the remainder of the "if ()" block not
|
||||
* the remainder of "func()".
|
||||
*
|
||||
* Now, when a function uses both __free() and guard(), or multiple
|
||||
* instances of __free(), the LIFO order of variable definition order
|
||||
* matters. GCC documentation says:
|
||||
*
|
||||
* "When multiple variables in the same scope have cleanup attributes,
|
||||
* at exit from the scope their associated cleanup functions are run in
|
||||
* reverse order of definition (last defined, first cleanup)."
|
||||
*
|
||||
* When the unwind order matters it requires that variables be defined
|
||||
* mid-function scope rather than at the top of the file. Take the
|
||||
* following example and notice the bug highlighted by "!!"::
|
||||
*
|
||||
* LIST_HEAD(list);
|
||||
* DEFINE_MUTEX(lock);
|
||||
*
|
||||
* struct object {
|
||||
* struct list_head node;
|
||||
* };
|
||||
*
|
||||
* static struct object *alloc_add(void)
|
||||
* {
|
||||
* struct object *obj;
|
||||
*
|
||||
* lockdep_assert_held(&lock);
|
||||
* obj = kzalloc(sizeof(*obj), GFP_KERNEL);
|
||||
* if (obj) {
|
||||
* LIST_HEAD_INIT(&obj->node);
|
||||
* list_add(obj->node, &list):
|
||||
* }
|
||||
* return obj;
|
||||
* }
|
||||
*
|
||||
* static void remove_free(struct object *obj)
|
||||
* {
|
||||
* lockdep_assert_held(&lock);
|
||||
* list_del(&obj->node);
|
||||
* kfree(obj);
|
||||
* }
|
||||
*
|
||||
* DEFINE_FREE(remove_free, struct object *, if (_T) remove_free(_T))
|
||||
* static int init(void)
|
||||
* {
|
||||
* struct object *obj __free(remove_free) = NULL;
|
||||
* int err;
|
||||
*
|
||||
* guard(mutex)(&lock);
|
||||
* obj = alloc_add();
|
||||
*
|
||||
* if (!obj)
|
||||
* return -ENOMEM;
|
||||
*
|
||||
* err = other_init(obj);
|
||||
* if (err)
|
||||
* return err; // remove_free() called without the lock!!
|
||||
*
|
||||
* no_free_ptr(obj);
|
||||
* return 0;
|
||||
* }
|
||||
*
|
||||
* That bug is fixed by changing init() to call guard() and define +
|
||||
* initialize @obj in this order::
|
||||
*
|
||||
* guard(mutex)(&lock);
|
||||
* struct object *obj __free(remove_free) = alloc_add();
|
||||
*
|
||||
* Given that the "__free(...) = NULL" pattern for variables defined at
|
||||
* the top of the function poses this potential interdependency problem
|
||||
* the recommendation is to always define and assign variables in one
|
||||
* statement and not group variable definitions at the top of the
|
||||
* function when __free() is used.
|
||||
*
|
||||
* Lastly, given that the benefit of cleanup helpers is removal of
|
||||
* "goto", and that the "goto" statement can jump between scopes, the
|
||||
* expectation is that usage of "goto" and cleanup helpers is never
|
||||
* mixed in the same function. I.e. for a given routine, convert all
|
||||
* resources that need a "goto" cleanup to scope-based cleanup, or
|
||||
* convert none of them.
|
||||
*/
|
||||
|
||||
/*
|
||||
* DEFINE_FREE(name, type, free):
|
||||
* simple helper macro that defines the required wrapper for a __free()
|
||||
* based cleanup function. @free is an expression using '_T' to access the
|
||||
* variable. @free should typically include a NULL test before calling a
|
||||
* function, see the example below.
|
||||
*
|
||||
* __free(name):
|
||||
* variable attribute to add a scoped based cleanup to the variable.
|
||||
*
|
||||
* no_free_ptr(var):
|
||||
* like a non-atomic xchg(var, NULL), such that the cleanup function will
|
||||
* be inhibited -- provided it sanely deals with a NULL value.
|
||||
*
|
||||
* NOTE: this has __must_check semantics so that it is harder to accidentally
|
||||
* leak the resource.
|
||||
*
|
||||
* return_ptr(p):
|
||||
* returns p while inhibiting the __free().
|
||||
*
|
||||
* Ex.
|
||||
*
|
||||
* DEFINE_FREE(kfree, void *, if (_T) kfree(_T))
|
||||
*
|
||||
* void *alloc_obj(...)
|
||||
* {
|
||||
* struct obj *p __free(kfree) = kmalloc(...);
|
||||
* if (!p)
|
||||
* return NULL;
|
||||
*
|
||||
* if (!init_obj(p))
|
||||
* return NULL;
|
||||
*
|
||||
* return_ptr(p);
|
||||
* }
|
||||
*
|
||||
* NOTE: the DEFINE_FREE()'s @free expression includes a NULL test even though
|
||||
* kfree() is fine to be called with a NULL value. This is on purpose. This way
|
||||
* the compiler sees the end of our alloc_obj() function as:
|
||||
*
|
||||
* tmp = p;
|
||||
* p = NULL;
|
||||
* if (p)
|
||||
* kfree(p);
|
||||
* return tmp;
|
||||
*
|
||||
* And through the magic of value-propagation and dead-code-elimination, it
|
||||
* eliminates the actual cleanup call and compiles into:
|
||||
*
|
||||
* return p;
|
||||
*
|
||||
* Without the NULL test it turns into a mess and the compiler can't help us.
|
||||
*/
|
||||
|
||||
#define DEFINE_FREE(_name, _type, _free) \
|
||||
static inline void __free_##_name(void *p) { _type _T = *(_type *)p; _free; }
|
||||
|
||||
#define __free(_name) __cleanup(__free_##_name)
|
||||
|
||||
#define __get_and_null(p, nullvalue) \
|
||||
({ \
|
||||
__auto_type __ptr = &(p); \
|
||||
__auto_type __val = *__ptr; \
|
||||
*__ptr = nullvalue; \
|
||||
__val; \
|
||||
})
|
||||
|
||||
static inline __must_check
|
||||
const volatile void * __must_check_fn(const volatile void *val)
|
||||
{ return val; }
|
||||
|
||||
#define no_free_ptr(p) \
|
||||
((typeof(p)) __must_check_fn(__get_and_null(p, NULL)))
|
||||
|
||||
#define return_ptr(p) return no_free_ptr(p)
|
||||
|
||||
|
||||
/*
|
||||
* DEFINE_CLASS(name, type, exit, init, init_args...):
|
||||
* helper to define the destructor and constructor for a type.
|
||||
* @exit is an expression using '_T' -- similar to FREE above.
|
||||
* @init is an expression in @init_args resulting in @type
|
||||
*
|
||||
* EXTEND_CLASS(name, ext, init, init_args...):
|
||||
* extends class @name to @name@ext with the new constructor
|
||||
*
|
||||
* CLASS(name, var)(args...):
|
||||
* declare the variable @var as an instance of the named class
|
||||
*
|
||||
* Ex.
|
||||
*
|
||||
* DEFINE_CLASS(fdget, struct fd, fdput(_T), fdget(fd), int fd)
|
||||
*
|
||||
* CLASS(fdget, f)(fd);
|
||||
* if (!f.file)
|
||||
* return -EBADF;
|
||||
*
|
||||
* // use 'f' without concern
|
||||
*/
|
||||
|
||||
#define DEFINE_CLASS(_name, _type, _exit, _init, _init_args...) \
|
||||
typedef _type class_##_name##_t; \
|
||||
static inline void class_##_name##_destructor(_type *p) \
|
||||
{ _type _T = *p; _exit; } \
|
||||
static inline _type class_##_name##_constructor(_init_args) \
|
||||
{ _type t = _init; return t; }
|
||||
|
||||
#define EXTEND_CLASS(_name, ext, _init, _init_args...) \
|
||||
typedef class_##_name##_t class_##_name##ext##_t; \
|
||||
static inline void class_##_name##ext##_destructor(class_##_name##_t *p)\
|
||||
{ class_##_name##_destructor(p); } \
|
||||
static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
|
||||
{ class_##_name##_t t = _init; return t; }
|
||||
|
||||
#define CLASS(_name, var) \
|
||||
class_##_name##_t var __cleanup(class_##_name##_destructor) = \
|
||||
class_##_name##_constructor
|
||||
|
||||
|
||||
/*
|
||||
* DEFINE_GUARD(name, type, lock, unlock):
|
||||
* trivial wrapper around DEFINE_CLASS() above specifically
|
||||
* for locks.
|
||||
*
|
||||
* DEFINE_GUARD_COND(name, ext, condlock)
|
||||
* wrapper around EXTEND_CLASS above to add conditional lock
|
||||
* variants to a base class, eg. mutex_trylock() or
|
||||
* mutex_lock_interruptible().
|
||||
*
|
||||
* guard(name):
|
||||
* an anonymous instance of the (guard) class, not recommended for
|
||||
* conditional locks.
|
||||
*
|
||||
* if_not_guard(name, args...) { <error handling> }:
|
||||
* convenience macro for conditional guards that calls the statement that
|
||||
* follows only if the lock was not acquired (typically an error return).
|
||||
*
|
||||
* Only for conditional locks.
|
||||
*
|
||||
* scoped_guard (name, args...) { }:
|
||||
* similar to CLASS(name, scope)(args), except the variable (with the
|
||||
* explicit name 'scope') is declard in a for-loop such that its scope is
|
||||
* bound to the next (compound) statement.
|
||||
*
|
||||
* for conditional locks the loop body is skipped when the lock is not
|
||||
* acquired.
|
||||
*
|
||||
* scoped_cond_guard (name, fail, args...) { }:
|
||||
* similar to scoped_guard(), except it does fail when the lock
|
||||
* acquire fails.
|
||||
*
|
||||
* Only for conditional locks.
|
||||
*/
|
||||
|
||||
#define __DEFINE_CLASS_IS_CONDITIONAL(_name, _is_cond) \
|
||||
static __maybe_unused const bool class_##_name##_is_conditional = _is_cond
|
||||
|
||||
#define DEFINE_GUARD(_name, _type, _lock, _unlock) \
|
||||
__DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
|
||||
DEFINE_CLASS(_name, _type, if (_T) { _unlock; }, ({ _lock; _T; }), _type _T); \
|
||||
static inline void * class_##_name##_lock_ptr(class_##_name##_t *_T) \
|
||||
{ return (void *)(__force unsigned long)*_T; }
|
||||
|
||||
#define DEFINE_GUARD_COND(_name, _ext, _condlock) \
|
||||
__DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true); \
|
||||
EXTEND_CLASS(_name, _ext, \
|
||||
({ void *_t = _T; if (_T && !(_condlock)) _t = NULL; _t; }), \
|
||||
class_##_name##_t _T) \
|
||||
static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
|
||||
{ return class_##_name##_lock_ptr(_T); }
|
||||
|
||||
#define guard(_name) \
|
||||
CLASS(_name, __UNIQUE_ID(guard))
|
||||
|
||||
#define __guard_ptr(_name) class_##_name##_lock_ptr
|
||||
#define __is_cond_ptr(_name) class_##_name##_is_conditional
|
||||
|
||||
/*
|
||||
* Helper macro for scoped_guard().
|
||||
*
|
||||
* Note that the "!__is_cond_ptr(_name)" part of the condition ensures that
|
||||
* compiler would be sure that for the unconditional locks the body of the
|
||||
* loop (caller-provided code glued to the else clause) could not be skipped.
|
||||
* It is needed because the other part - "__guard_ptr(_name)(&scope)" - is too
|
||||
* hard to deduce (even if could be proven true for unconditional locks).
|
||||
*/
|
||||
#define __scoped_guard(_name, _label, args...) \
|
||||
for (CLASS(_name, scope)(args); \
|
||||
__guard_ptr(_name)(&scope) || !__is_cond_ptr(_name); \
|
||||
({ goto _label; })) \
|
||||
if (0) { \
|
||||
_label: \
|
||||
break; \
|
||||
} else
|
||||
|
||||
#define scoped_guard(_name, args...) \
|
||||
__scoped_guard(_name, __UNIQUE_ID(label), args)
|
||||
|
||||
#define __scoped_cond_guard(_name, _fail, _label, args...) \
|
||||
for (CLASS(_name, scope)(args); true; ({ goto _label; })) \
|
||||
if (!__guard_ptr(_name)(&scope)) { \
|
||||
BUILD_BUG_ON(!__is_cond_ptr(_name)); \
|
||||
_fail; \
|
||||
_label: \
|
||||
break; \
|
||||
} else
|
||||
|
||||
#define scoped_cond_guard(_name, _fail, args...) \
|
||||
__scoped_cond_guard(_name, _fail, __UNIQUE_ID(label), args)
|
||||
|
||||
#define __if_not_guard(_name, _id, args...) \
|
||||
BUILD_BUG_ON(!__is_cond_ptr(_name)); \
|
||||
CLASS(_name, _id)(args); \
|
||||
if (!__guard_ptr(_name)(&_id))
|
||||
|
||||
#define if_not_guard(_name, args...) \
|
||||
__if_not_guard(_name, __UNIQUE_ID(guard), args)
|
||||
|
||||
/*
|
||||
* Additional helper macros for generating lock guards with types, either for
|
||||
* locks that don't have a native type (eg. RCU, preempt) or those that need a
|
||||
* 'fat' pointer (eg. spin_lock_irqsave).
|
||||
*
|
||||
* DEFINE_LOCK_GUARD_0(name, lock, unlock, ...)
|
||||
* DEFINE_LOCK_GUARD_1(name, type, lock, unlock, ...)
|
||||
* DEFINE_LOCK_GUARD_1_COND(name, ext, condlock)
|
||||
*
|
||||
* will result in the following type:
|
||||
*
|
||||
* typedef struct {
|
||||
* type *lock; // 'type := void' for the _0 variant
|
||||
* __VA_ARGS__;
|
||||
* } class_##name##_t;
|
||||
*
|
||||
* As above, both _lock and _unlock are statements, except this time '_T' will
|
||||
* be a pointer to the above struct.
|
||||
*/
|
||||
|
||||
#define __DEFINE_UNLOCK_GUARD(_name, _type, _unlock, ...) \
|
||||
typedef struct { \
|
||||
_type *lock; \
|
||||
__VA_ARGS__; \
|
||||
} class_##_name##_t; \
|
||||
\
|
||||
static inline void class_##_name##_destructor(class_##_name##_t *_T) \
|
||||
{ \
|
||||
if (_T->lock) { _unlock; } \
|
||||
} \
|
||||
\
|
||||
static inline void *class_##_name##_lock_ptr(class_##_name##_t *_T) \
|
||||
{ \
|
||||
return (void *)(__force unsigned long)_T->lock; \
|
||||
}
|
||||
|
||||
|
||||
#define __DEFINE_LOCK_GUARD_1(_name, _type, _lock) \
|
||||
static inline class_##_name##_t class_##_name##_constructor(_type *l) \
|
||||
{ \
|
||||
class_##_name##_t _t = { .lock = l }, *_T = &_t; \
|
||||
_lock; \
|
||||
return _t; \
|
||||
}
|
||||
|
||||
#define __DEFINE_LOCK_GUARD_0(_name, _lock) \
|
||||
static inline class_##_name##_t class_##_name##_constructor(void) \
|
||||
{ \
|
||||
class_##_name##_t _t = { .lock = (void*)1 }, \
|
||||
*_T __maybe_unused = &_t; \
|
||||
_lock; \
|
||||
return _t; \
|
||||
}
|
||||
|
||||
#define DEFINE_LOCK_GUARD_1(_name, _type, _lock, _unlock, ...) \
|
||||
__DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
|
||||
__DEFINE_UNLOCK_GUARD(_name, _type, _unlock, __VA_ARGS__) \
|
||||
__DEFINE_LOCK_GUARD_1(_name, _type, _lock)
|
||||
|
||||
#define DEFINE_LOCK_GUARD_0(_name, _lock, _unlock, ...) \
|
||||
__DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
|
||||
__DEFINE_UNLOCK_GUARD(_name, void, _unlock, __VA_ARGS__) \
|
||||
__DEFINE_LOCK_GUARD_0(_name, _lock)
|
||||
|
||||
#define DEFINE_LOCK_GUARD_1_COND(_name, _ext, _condlock) \
|
||||
__DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true); \
|
||||
EXTEND_CLASS(_name, _ext, \
|
||||
({ class_##_name##_t _t = { .lock = l }, *_T = &_t;\
|
||||
if (_T->lock && !(_condlock)) _T->lock = NULL; \
|
||||
_t; }), \
|
||||
typeof_member(class_##_name##_t, lock) l) \
|
||||
static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
|
||||
{ return class_##_name##_lock_ptr(_T); }
|
||||
|
||||
|
||||
#endif /* _LINUX_CLEANUP_H */
|
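A minimal sketch of how these helpers compose (hypothetical code, not part of the patch; it assumes the guard(mutex) class added to mutex.h later in this commit, and defines its own free class to avoid clashing with any existing DEFINE_FREE(kfree)):

    #include <linux/cleanup.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>

    DEFINE_FREE(demo_kfree, void *, if (_T) kfree(_T))

    static DEFINE_MUTEX(demo_lock);

    struct demo_obj { int id; };

    static int demo_init(void)
    {
        /* mutex_unlock() runs automatically when this scope ends */
        guard(mutex)(&demo_lock);

        /* declared after the guard so it is freed BEFORE the unlock (LIFO) */
        struct demo_obj *obj __free(demo_kfree) = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (!obj)
            return -ENOMEM;    /* nothing leaks: NULL-safe free, lock dropped */

        obj->id = 1;
        return 0;              /* obj freed first, then the lock is dropped */
    }

Note the declaration order: per the DOC section above, putting the guard before the __free() variable keeps the unwind order correct.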
@@ -6,6 +6,14 @@
/* Compiler specific definitions for Clang compiler */

#define uninitialized_var(x) x = *(&(x))
+
+/*
+ * Clang prior to 17 is being silly and considers many __cleanup() variables
+ * as unused (because they are, their sole purpose is to go out of scope).
+ *
+ * https://reviews.llvm.org/D152180
+ */
+#undef __cleanup
+#define __cleanup(func) __maybe_unused __attribute__((__cleanup__(func)))

/* same as gcc, this was present in clang-2.6 so we can assume it works
 * with any version that can compile the kernel
@@ -91,6 +91,12 @@
 */
#define __cold __attribute__((__cold__))

+/*
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-cleanup-variable-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#cleanup
+ */
+#define __cleanup(func) __attribute__((__cleanup__(func)))
+
/*
 * Note the long name.
 *
@@ -27,6 +27,7 @@
#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/rh_kabi.h>
+#include <linux/cleanup.h>
#include <asm/device.h>

struct device;
@@ -1688,6 +1689,8 @@ extern int device_move(struct device *dev, struct device *new_parent,
extern int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid);
extern int device_is_dependent(struct device *dev, void *target);

+DEFINE_FREE(device_del, struct device *, if (_T) device_del(_T))
+
static inline bool device_supports_offline(struct device *dev)
{
    return dev->bus && dev->bus->offline && dev->bus->online;
@@ -1799,6 +1802,9 @@ extern int (*platform_notify_remove)(struct device *dev);
 */
extern struct device *get_device(struct device *dev);
extern void put_device(struct device *dev);

+DEFINE_FREE(put_device, struct device *, if (_T) put_device(_T))
+
extern bool kill_device(struct device *dev);

#ifdef CONFIG_DEVTMPFS
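As a usage illustration (hypothetical helper, not from the patch), the put_device free class above lets a lookup drop its reference automatically on early exit:

    static struct device *demo_get_registered(struct device *candidate)
    {
        struct device *dev __free(put_device) = get_device(candidate);

        if (!dev || !device_is_registered(dev))
            return NULL;        /* put_device() runs here (NULL-safe) */

        return_ptr(dev);        /* cleanup inhibited; caller owns the reference */
    }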
@ -9,6 +9,7 @@
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/posix_types.h>
#include <linux/cleanup.h>

struct file;

@ -81,6 +82,8 @@ static inline void fdput_pos(struct fd f)
	fdput(f);
}

DEFINE_CLASS(fd, struct fd, fdput(_T), fdget(fd), int fd)

extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
extern int replace_fd(unsigned fd, struct file *file, unsigned flags);
extern void set_close_on_exec(unsigned int fd, int flag);
@ -89,6 +92,29 @@ extern int __get_unused_fd_flags(unsigned flags, unsigned long nofile);
extern int get_unused_fd_flags(unsigned flags);
extern void put_unused_fd(unsigned int fd);

DEFINE_CLASS(get_unused_fd, int, if (_T >= 0) put_unused_fd(_T),
	     get_unused_fd_flags(flags), unsigned flags)

/*
 * take_fd() will take care to set @fd to -EBADF ensuring that
 * CLASS(get_unused_fd) won't call put_unused_fd(). This makes it
 * easier to rely on CLASS(get_unused_fd):
 *
 *	struct file *f;
 *
 *	CLASS(get_unused_fd, fd)(O_CLOEXEC);
 *	if (fd < 0)
 *		return fd;
 *
 *	f = dentry_open(&path, O_RDONLY, current_cred());
 *	if (IS_ERR(f))
 *		return PTR_ERR(f);
 *
 *	fd_install(fd, f);
 *	return take_fd(fd);
 */
#define take_fd(fd) __get_and_null(fd, -EBADF)

extern void fd_install(unsigned int fd, struct file *file);

extern void flush_delayed_fput(void);
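A hedged sketch of the fd class itself, complementing the take_fd() example in the comment above (the function body is illustrative):

	/* Sketch: CLASS(fd, f)(fd) acquires the file via fdget(fd), and
	 * fdput(f) runs automatically when the scope ends. */
	static long example_with_fd(int fd)
	{
		CLASS(fd, f)(fd);

		if (!f.file)
			return -EBADF;
		/* ... operate on f.file ... */
		return 0;
	}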
@ -13,6 +13,7 @@
#define _LINUX_TRACE_IRQFLAGS_H

#include <linux/typecheck.h>
#include <linux/cleanup.h>
#include <asm/irqflags.h>
#include <asm/percpu.h>

@ -260,4 +261,10 @@ extern void warn_bogus_irq_restore(void);

#define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags)

DEFINE_LOCK_GUARD_0(irq, local_irq_disable(), local_irq_enable())
DEFINE_LOCK_GUARD_0(irqsave,
		    local_irq_save(_T->flags),
		    local_irq_restore(_T->flags),
		    unsigned long flags)

#endif
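A minimal sketch of one of these zero-argument guards in use (the function is illustrative):

	/* Sketch: guard(irqsave)() saves and disables interrupts here and
	 * restores the saved flags automatically when the scope ends. */
	static void example_update_local_state(void)
	{
		guard(irqsave)();

		/* ... short section that must not be interrupted ... */
	}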
@ -104,9 +104,10 @@ void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg);
 * the previous list (with list_lru_del() for instance) before moving it
 * to @list_lru
 *
 * Return value: true if the list was updated, false otherwise
 * Return value: the total number of items in list_lru_node if the list was
 * updated, 0 otherwise
 */
bool list_lru_add(struct list_lru *lru, struct list_head *item);
long list_lru_add(struct list_lru *lru, struct list_head *item);

/**
 * list_lru_del: delete an element from the lru list
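A hedged sketch of a caller using the new return value (the threshold and function are illustrative; a zero return means the item was already on a list):

	/* Sketch: the per-node total returned by list_lru_add() can feed
	 * a soft-limit check, as the dentry limit in this patch does. */
	static void example_add_and_check(struct list_lru *lru,
					  struct list_head *item)
	{
		long nr = list_lru_add(lru, item);

		if (nr > 10 * 1000)	/* illustrative threshold */
			pr_debug("lru node holds %ld items\n", nr);
	}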
@ -19,6 +19,7 @@
#include <asm/processor.h>
#include <linux/osq_lock.h>
#include <linux/debug_locks.h>
#include <linux/cleanup.h>

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
@ -245,4 +246,8 @@ enum mutex_trylock_recursive_enum {
extern /* __deprecated */ __must_check enum mutex_trylock_recursive_enum
mutex_trylock_recursive(struct mutex *lock);

DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T))
DEFINE_GUARD_COND(mutex, _try, mutex_trylock(_T))
DEFINE_GUARD_COND(mutex, _intr, mutex_lock_interruptible(_T) == 0)

#endif /* __LINUX_MUTEX_H */
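A minimal sketch of the resulting mutex guard (the struct and function are illustrative):

	struct my_state {			/* illustrative */
		struct mutex lock;
		int value;
	};

	/* Sketch: guard(mutex)(&st->lock) takes the mutex here;
	 * mutex_unlock() runs on every return path. */
	static int example_set_value(struct my_state *st, int val)
	{
		guard(mutex)(&st->lock);

		if (val < 0)
			return -EINVAL;		/* unlocked automatically */
		st->value = val;
		return 0;
	}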
@ -9,6 +9,7 @@
#include <linux/printk.h>
#include <linux/pfn.h>
#include <linux/init.h>
#include <linux/cleanup.h>

#include <asm/percpu.h>

@ -134,6 +135,9 @@ extern void __init setup_per_cpu_areas(void);
extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp);
extern void __percpu *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void __percpu *__pdata);

DEFINE_FREE(free_percpu, void __percpu *, free_percpu(_T))

extern phys_addr_t per_cpu_ptr_to_phys(void *addr);

#define alloc_percpu_gfp(type, gfp) \
@ -8,6 +8,7 @@
 */

#include <linux/linkage.h>
#include <linux/cleanup.h>
#include <linux/list.h>

/*
@ -393,4 +394,6 @@ static inline void migrate_enable(void) { }

#endif /* CONFIG_SMP */

DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable())

#endif /* __LINUX_PREEMPT_H */
@ -40,6 +40,7 @@
#include <linux/preempt.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
#include <linux/cleanup.h>
#include <asm/processor.h>
#include <linux/cpumask.h>

@ -1034,4 +1035,6 @@ rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
extern int rcu_expedited;
extern int rcu_normal;

DEFINE_LOCK_GUARD_0(rcu, rcu_read_lock(), rcu_read_unlock())

#endif /* __LINUX_RCUPDATE_H */
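A hedged sketch of the RCU guard in a reader (the node type and list are illustrative):

	struct my_node {			/* illustrative */
		struct list_head link;
		int key;
	};

	/* Sketch: guard(rcu)() opens an RCU read-side critical section
	 * that closes with the scope, so early returns remain safe. */
	static bool example_lookup(struct list_head *head, int key)
	{
		struct my_node *n;

		guard(rcu)();
		list_for_each_entry_rcu(n, head, link)
			if (n->key == key)
				return true;	/* unlock runs here */
		return false;
	}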
@ -16,6 +16,7 @@
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/cleanup.h>

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname) \
@ -195,6 +196,13 @@ extern void up_read(struct rw_semaphore *sem);
 */
extern void up_write(struct rw_semaphore *sem);

DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T))
DEFINE_GUARD_COND(rwsem_read, _try, down_read_trylock(_T))
DEFINE_GUARD_COND(rwsem_read, _intr, down_read_interruptible(_T) == 0)

DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
DEFINE_GUARD_COND(rwsem_write, _try, down_write_trylock(_T))

/*
 * downgrade write lock to read lock
 */
@ -130,6 +130,8 @@ static inline void put_task_struct(struct task_struct *t)
	__put_task_struct(t);
}

DEFINE_FREE(put_task, struct task_struct *, if (_T) put_task_struct(_T))

void put_task_struct_rcu_user(struct task_struct *task);

#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
@ -17,6 +17,7 @@
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>
#include <linux/cleanup.h>


/*
@ -191,6 +192,8 @@ void kfree(const void *);
void kfree_sensitive(const void *);
size_t __ksize(const void *);

DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))

/**
 * ksize - Report actual allocation size of associated object
 *
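A hedged sketch pairing this with no_free_ptr() from linux/cleanup.h to transfer ownership on success (the context type and function are illustrative):

	struct my_ctx {				/* illustrative */
		size_t len;
	};

	/* Sketch: ctx is kfree()d automatically on the error paths; on
	 * success, no_free_ptr() disarms the cleanup and hands it out. */
	static struct my_ctx *example_alloc(size_t len)
	{
		struct my_ctx *ctx __free(kfree) =
			kzalloc(sizeof(*ctx), GFP_KERNEL);

		if (!ctx)
			return ERR_PTR(-ENOMEM);
		if (len > PAGE_SIZE)
			return ERR_PTR(-EINVAL);	/* ctx freed here */
		ctx->len = len;
		return no_free_ptr(ctx);
	}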
@ -61,6 +61,7 @@
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
#include <linux/cleanup.h>
#include <asm/barrier.h>
#include <asm/mmiowb.h>

@ -516,4 +517,49 @@ void free_bucket_spinlocks(spinlock_t *locks);
#define qwrite_unlock_irq(l)	write_unlock_irq(l)
#endif

DEFINE_LOCK_GUARD_1(raw_spinlock, raw_spinlock_t,
		    raw_spin_lock(_T->lock),
		    raw_spin_unlock(_T->lock))

DEFINE_LOCK_GUARD_1_COND(raw_spinlock, _try, raw_spin_trylock(_T->lock))

DEFINE_LOCK_GUARD_1(raw_spinlock_nested, raw_spinlock_t,
		    raw_spin_lock_nested(_T->lock, SINGLE_DEPTH_NESTING),
		    raw_spin_unlock(_T->lock))

DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t,
		    raw_spin_lock_irq(_T->lock),
		    raw_spin_unlock_irq(_T->lock))

DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irq, _try, raw_spin_trylock_irq(_T->lock))

DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t,
		    raw_spin_lock_irqsave(_T->lock, _T->flags),
		    raw_spin_unlock_irqrestore(_T->lock, _T->flags),
		    unsigned long flags)

DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irqsave, _try,
			 raw_spin_trylock_irqsave(_T->lock, _T->flags))

DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
		    spin_lock(_T->lock),
		    spin_unlock(_T->lock))

DEFINE_LOCK_GUARD_1_COND(spinlock, _try, spin_trylock(_T->lock))

DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t,
		    spin_lock_irq(_T->lock),
		    spin_unlock_irq(_T->lock))

DEFINE_LOCK_GUARD_1_COND(spinlock_irq, _try,
			 spin_trylock_irq(_T->lock))

DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
		    spin_lock_irqsave(_T->lock, _T->flags),
		    spin_unlock_irqrestore(_T->lock, _T->flags),
		    unsigned long flags)

DEFINE_LOCK_GUARD_1_COND(spinlock_irqsave, _try,
			 spin_trylock_irqsave(_T->lock, _T->flags))

#endif /* __LINUX_SPINLOCK_H */
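A hedged sketch of a one-argument lock guard confined to a block with scoped_guard() from linux/cleanup.h (the counter type is illustrative):

	struct my_counter {			/* illustrative */
		spinlock_t lock;
		u64 count;
	};

	/* Sketch: the lock is held, with interrupts saved, only for the
	 * scoped_guard() block; the unlock runs at the closing brace. */
	static u64 example_inc(struct my_counter *c)
	{
		u64 snap;

		scoped_guard(spinlock_irqsave, &c->lock) {
			c->count++;
			snap = c->count;
		}
		return snap;
	}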
@ -223,4 +223,9 @@ static inline void smp_mb__after_srcu_read_unlock(void)
	/* __srcu_read_unlock has smp_mb() internally so nothing to do here. */
}

DEFINE_LOCK_GUARD_1(srcu, struct srcu_struct,
		    _T->idx = srcu_read_lock(_T->lock),
		    srcu_read_unlock(_T->lock, _T->idx),
		    int idx)

#endif
@ -117,6 +117,9 @@ extern unsigned int sysctl_nr_open_min, sysctl_nr_open_max;
extern int sysctl_nr_trim_pages;
#endif

extern int dentry_fs_klimit_sysctl;
extern proc_handler proc_dentry_fs_klimit;

/* Constants used for minimum and maximum */
#ifdef CONFIG_LOCKUP_DETECTOR
static int sixty = 60;
@ -1974,6 +1977,13 @@ static struct ctl_table fs_table[] = {
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ONE,
	},
	{
		.procname	= "dentry-fs-klimit",
		.data		= &dentry_fs_klimit_sysctl,
		.maxlen		= sizeof(dentry_fs_klimit_sysctl),
		.mode		= 0644,
		.proc_handler	= proc_dentry_fs_klimit
	},
	{ }
};

@ -111,12 +111,13 @@ list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
}
#endif /* CONFIG_MEMCG_KMEM */

bool list_lru_add(struct list_lru *lru, struct list_head *item)
long list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct mem_cgroup *memcg;
	struct list_lru_one *l;
	long ret;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
@ -126,9 +127,9 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item)
		if (!l->nr_items++)
			set_shrinker_bit(memcg, nid,
					 lru_shrinker_id(lru));
		nlru->nr_items++;
		ret = ++nlru->nr_items;
		spin_unlock(&nlru->lock);
		return true;
		return ret;
	}
	spin_unlock(&nlru->lock);
	return false;
@ -180,6 +180,7 @@ static void
lec_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	unsigned int len = skb->len;

	ATM_SKB(skb)->vcc = vcc;
	atm_account_tx(vcc, skb);
@ -190,7 +191,7 @@ lec_send(struct atm_vcc *vcc, struct sk_buff *skb)
	}

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_bytes += len;
}

static void lec_tx_timeout(struct net_device *dev, unsigned int txqueue)
Binary file not shown.
@ -1,4 +1,4 @@
#! /usr/bin/python3.6
#! /usr/libexec/platform-python
#
# Copyright 2004 Matt Mackall <mpm@selenic.com>
#
@ -4135,7 +4135,7 @@ sub process {
			if|for|while|switch|return|case|
			volatile|__volatile__|
			__attribute__|format|__extension__|
			asm|__asm__)$/x)
			asm|__asm__|scoped_guard)$/x)
		{
			# cpp #define statements have non-optional spaces, ie
			# if there is a space between the name and the open
@ -1,4 +1,4 @@
#! /usr/bin/python3.6
#! /usr/libexec/platform-python
# SPDX-License-Identifier: GPL-2.0
#
# diffconfig - a tool to compare .config files.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@ -1,4 +1,4 @@
#! /usr/bin/python3.6
#! /usr/libexec/platform-python
#
# show_deltas: Read list of printk messages instrumented with
# time data, and format with time deltas.