Import of kernel-5.14.0-570.42.2.el9_6

parent a3bb58653c
commit 4d6c34bc1c
@@ -12,7 +12,7 @@ RHEL_MINOR = 6
 #
 # Use this spot to avoid future merge conflicts.
 # Do not trim this comment.
-RHEL_RELEASE = 570.41.1
+RHEL_RELEASE = 570.42.2
 
 #
 # ZSTREAM
@@ -35,6 +35,7 @@ extern cpumask_var_t node_to_cpumask_map[];
 #ifdef CONFIG_MEMORY_HOTPLUG
 extern unsigned long max_pfn;
 u64 memory_hotplug_max(void);
+u64 hot_add_drconf_memory_max(void);
 #else
 #define memory_hotplug_max() memblock_end_of_DRAM()
 #endif
@@ -1354,7 +1354,7 @@ int hot_add_scn_to_nid(unsigned long scn_addr)
 	return nid;
 }
 
-static u64 hot_add_drconf_memory_max(void)
+u64 hot_add_drconf_memory_max(void)
 {
 	struct device_node *memory = NULL;
 	struct device_node *dn = NULL;
@@ -52,7 +52,8 @@ enum {
 enum {
 	DDW_EXT_SIZE = 0,
 	DDW_EXT_RESET_DMA_WIN = 1,
-	DDW_EXT_QUERY_OUT_SIZE = 2
+	DDW_EXT_QUERY_OUT_SIZE = 2,
+	DDW_EXT_LIMITED_ADDR_MODE = 3
 };
 
 static struct iommu_table *iommu_pseries_alloc_table(int node)
@@ -1287,17 +1288,13 @@ static LIST_HEAD(failed_ddw_pdn_list);
 
 static phys_addr_t ddw_memory_hotplug_max(void)
 {
-	resource_size_t max_addr = memory_hotplug_max();
-	struct device_node *memory;
+	resource_size_t max_addr;
 
-	for_each_node_by_type(memory, "memory") {
-		struct resource res;
-
-		if (of_address_to_resource(memory, 0, &res))
-			continue;
-
-		max_addr = max_t(resource_size_t, max_addr, res.end + 1);
-	}
+#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
+	max_addr = hot_add_drconf_memory_max();
+#else
+	max_addr = memblock_end_of_DRAM();
+#endif
 
 	return max_addr;
 }
@@ -1334,6 +1331,54 @@ static void reset_dma_window(struct pci_dev *dev, struct device_node *par_dn)
 			 ret);
 }
 
+/*
+ * Platforms support placing PHB in limited address mode starting with LoPAR
+ * level 2.13 implement. In this mode, the DMA address returned by DDW is over
+ * 4GB but, less than 64-bits. This benefits IO adapters that don't support
+ * 64-bits for DMA addresses.
+ */
+static int limited_dma_window(struct pci_dev *dev, struct device_node *par_dn)
+{
+	int ret;
+	u32 cfg_addr, reset_dma_win, las_supported;
+	u64 buid;
+	struct device_node *dn;
+	struct pci_dn *pdn;
+
+	ret = ddw_read_ext(par_dn, DDW_EXT_RESET_DMA_WIN, &reset_dma_win);
+	if (ret)
+		goto out;
+
+	ret = ddw_read_ext(par_dn, DDW_EXT_LIMITED_ADDR_MODE, &las_supported);
+
+	/* Limited Address Space extension available on the platform but DDW in
+	 * limited addressing mode not supported
+	 */
+	if (!ret && !las_supported)
+		ret = -EPROTO;
+
+	if (ret) {
+		dev_info(&dev->dev, "Limited Address Space for DDW not Supported, err: %d", ret);
+		goto out;
+	}
+
+	dn = pci_device_to_OF_node(dev);
+	pdn = PCI_DN(dn);
+	buid = pdn->phb->buid;
+	cfg_addr = (pdn->busno << 16) | (pdn->devfn << 8);
+
+	ret = rtas_call(reset_dma_win, 4, 1, NULL, cfg_addr, BUID_HI(buid),
+			BUID_LO(buid), 1);
+	if (ret)
+		dev_info(&dev->dev,
+			 "ibm,reset-pe-dma-windows(%x) for Limited Addr Support: %x %x %x returned %d ",
+			 reset_dma_win, cfg_addr, BUID_HI(buid), BUID_LO(buid),
+			 ret);
+
+out:
+	return ret;
+}
+
 /* Return largest page shift based on "IO Page Sizes" output of ibm,query-pe-dma-window. */
 static int iommu_get_page_shift(u32 query_page_size)
 {
@@ -1401,7 +1446,7 @@ static struct property *ddw_property_create(const char *propname, u32 liobn, u64
  *
  * returns true if can map all pages (direct mapping), false otherwise..
  */
-static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
+static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn, u64 dma_mask)
 {
 	int len = 0, ret;
 	int max_ram_len = order_base_2(ddw_memory_hotplug_max());
@@ -1420,6 +1465,9 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
 	bool pmem_present;
 	struct pci_dn *pci = PCI_DN(pdn);
 	struct property *default_win = NULL;
+	bool limited_addr_req = false, limited_addr_enabled = false;
+	int dev_max_ddw;
+	int ddw_sz;
 
 	dn = of_find_node_by_type(NULL, "ibm,pmemory");
 	pmem_present = dn != NULL;
@@ -1446,7 +1494,6 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
 	 * the ibm,ddw-applicable property holds the tokens for:
 	 * ibm,query-pe-dma-window
 	 * ibm,create-pe-dma-window
-	 * ibm,remove-pe-dma-window
 	 * for the given node in that order.
 	 * the property is actually in the parent, not the PE
 	 */
@@ -1466,6 +1513,20 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
 	if (ret != 0)
 		goto out_failed;
 
+	/* DMA Limited Addressing required? This is when the driver has
+	 * requested to create DDW but supports mask which is less than 64-bits
+	 */
+	limited_addr_req = (dma_mask != DMA_BIT_MASK(64));
+
+	/* place the PHB in Limited Addressing mode */
+	if (limited_addr_req) {
+		if (limited_dma_window(dev, pdn))
+			goto out_failed;
+
+		/* PHB is in Limited address mode */
+		limited_addr_enabled = true;
+	}
+
 	/*
 	 * If there is no window available, remove the default DMA window,
 	 * if it's present. This will make all the resources available to the
@@ -1512,6 +1573,15 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
 		goto out_failed;
 	}
 
+	/* Maximum DMA window size that the device can address (in log2) */
+	dev_max_ddw = fls64(dma_mask);
+
+	/* If the device DMA mask is less than 64-bits, make sure the DMA window
+	 * size is not bigger than what the device can access
+	 */
+	ddw_sz = min(order_base_2(query.largest_available_block << page_shift),
+		     dev_max_ddw);
+
 	/*
 	 * The "ibm,pmemory" can appear anywhere in the address space.
 	 * Assuming it is still backed by page structs, try MAX_PHYSMEM_BITS
@@ -1520,23 +1590,21 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
 	 */
 	len = max_ram_len;
 	if (pmem_present) {
-		if (query.largest_available_block >=
-		    (1ULL << (MAX_PHYSMEM_BITS - page_shift)))
+		if (ddw_sz >= MAX_PHYSMEM_BITS)
 			len = MAX_PHYSMEM_BITS;
 		else
 			dev_info(&dev->dev, "Skipping ibm,pmemory");
 	}
 
 	/* check if the available block * number of ptes will map everything */
-	if (query.largest_available_block < (1ULL << (len - page_shift))) {
+	if (ddw_sz < len) {
 		dev_dbg(&dev->dev,
 			"can't map partition max 0x%llx with %llu %llu-sized pages\n",
 			1ULL << len,
 			query.largest_available_block,
 			1ULL << page_shift);
 
-		len = order_base_2(query.largest_available_block << page_shift);
-
+		len = ddw_sz;
 		dynamic_mapping = true;
 	} else {
 		direct_mapping = !default_win_removed ||
@@ -1550,8 +1618,9 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
 	 */
 	if (default_win_removed && pmem_present && !direct_mapping) {
 		/* DDW is big enough to be split */
-		if ((query.largest_available_block << page_shift) >=
-			MIN_DDW_VPMEM_DMA_WINDOW + (1ULL << max_ram_len)) {
+		if ((1ULL << ddw_sz) >=
+		    MIN_DDW_VPMEM_DMA_WINDOW + (1ULL << max_ram_len)) {
+
 			direct_mapping = true;
 
 			/* offset of the Dynamic part of DDW */
@@ -1562,8 +1631,7 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
 			dynamic_mapping = true;
 
 			/* create max size DDW possible */
-			len = order_base_2(query.largest_available_block
-					<< page_shift);
+			len = ddw_sz;
 		}
 	}
 
@@ -1603,7 +1671,7 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
 
 	if (direct_mapping) {
 		/* DDW maps the whole partition, so enable direct DMA mapping */
-		ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT,
+		ret = walk_system_ram_range(0, ddw_memory_hotplug_max() >> PAGE_SHIFT,
 				win64->value, tce_setrange_multi_pSeriesLP_walk);
 		if (ret) {
 			dev_info(&dev->dev, "failed to map DMA window for %pOF: %d\n",
@@ -1691,7 +1759,7 @@ out_remove_win:
 		__remove_dma_window(pdn, ddw_avail, create.liobn);
 
 out_failed:
-	if (default_win_removed)
+	if (default_win_removed || limited_addr_enabled)
 		reset_dma_window(dev, pdn);
 
 	fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL);
@@ -1710,6 +1778,9 @@ out_unlock:
 		dev->dev.bus_dma_limit = dev->dev.archdata.dma_offset +
 			(1ULL << max_ram_len);
 
+	dev_info(&dev->dev, "lsa_required: %x, lsa_enabled: %x, direct mapping: %x\n",
+			limited_addr_req, limited_addr_enabled, direct_mapping);
+
 	return direct_mapping;
 }
 
@@ -1835,8 +1906,11 @@ static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask)
 {
 	struct device_node *dn = pci_device_to_OF_node(pdev), *pdn;
 
-	/* only attempt to use a new window if 64-bit DMA is requested */
-	if (dma_mask < DMA_BIT_MASK(64))
+	/* For DDW, DMA mask should be more than 32-bits. For mask more then
+	 * 32-bits but less then 64-bits, DMA addressing is supported in
+	 * Limited Addressing mode.
+	 */
+	if (dma_mask <= DMA_BIT_MASK(32))
 		return false;
 
 	dev_dbg(&pdev->dev, "node is %pOF\n", dn);
@@ -1849,7 +1923,7 @@ static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask)
 	 */
 	pdn = pci_dma_find(dn, NULL);
 	if (pdn && PCI_DN(pdn))
-		return enable_ddw(pdev, pdn);
+		return enable_ddw(pdev, pdn, dma_mask);
 
 	return false;
 }
@@ -2349,11 +2423,17 @@ static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
 	struct memory_notify *arg = data;
 	int ret = 0;
 
+	/* This notifier can get called when onlining persistent memory as well.
+	 * TCEs are not pre-mapped for persistent memory. Persistent memory will
+	 * always be above ddw_memory_hotplug_max()
+	 */
+
 	switch (action) {
 	case MEM_GOING_ONLINE:
 		spin_lock(&dma_win_list_lock);
 		list_for_each_entry(window, &dma_win_list, list) {
-			if (window->direct) {
+			if (window->direct && (arg->start_pfn << PAGE_SHIFT) <
+				ddw_memory_hotplug_max()) {
 				ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn,
 						arg->nr_pages, window->prop);
 			}
@@ -2365,7 +2445,8 @@ static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
 	case MEM_OFFLINE:
 		spin_lock(&dma_win_list_lock);
 		list_for_each_entry(window, &dma_win_list, list) {
-			if (window->direct) {
+			if (window->direct && (arg->start_pfn << PAGE_SHIFT) <
+				ddw_memory_hotplug_max()) {
 				ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn,
 						arg->nr_pages, window->prop);
 			}
@@ -860,11 +860,23 @@ void drm_framebuffer_free(struct kref *kref)
 int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
 			 const struct drm_framebuffer_funcs *funcs)
 {
+	unsigned int i;
 	int ret;
+	bool exists;
 
 	if (WARN_ON_ONCE(fb->dev != dev || !fb->format))
 		return -EINVAL;
 
+	for (i = 0; i < fb->format->num_planes; i++) {
+		if (drm_WARN_ON_ONCE(dev, fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i)))
+			fb->internal_flags &= ~DRM_FRAMEBUFFER_HAS_HANDLE_REF(i);
+		if (fb->obj[i]) {
+			exists = drm_gem_object_handle_get_if_exists_unlocked(fb->obj[i]);
+			if (exists)
+				fb->internal_flags |= DRM_FRAMEBUFFER_HAS_HANDLE_REF(i);
+		}
+	}
+
 	INIT_LIST_HEAD(&fb->filp_head);
 
 	fb->funcs = funcs;
@@ -873,7 +885,7 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
 	ret = __drm_mode_object_add(dev, &fb->base, DRM_MODE_OBJECT_FB,
 				    false, drm_framebuffer_free);
 	if (ret)
-		goto out;
+		goto err;
 
 	mutex_lock(&dev->mode_config.fb_lock);
 	dev->mode_config.num_fb++;
@@ -881,7 +893,16 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
 	mutex_unlock(&dev->mode_config.fb_lock);
 
 	drm_mode_object_register(dev, &fb->base);
-out:
+
+	return 0;
+
+err:
+	for (i = 0; i < fb->format->num_planes; i++) {
+		if (fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i)) {
+			drm_gem_object_handle_put_unlocked(fb->obj[i]);
+			fb->internal_flags &= ~DRM_FRAMEBUFFER_HAS_HANDLE_REF(i);
+		}
+	}
 	return ret;
 }
 EXPORT_SYMBOL(drm_framebuffer_init);
@@ -958,6 +979,12 @@ EXPORT_SYMBOL(drm_framebuffer_unregister_private);
 void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
 {
 	struct drm_device *dev = fb->dev;
+	unsigned int i;
+
+	for (i = 0; i < fb->format->num_planes; i++) {
+		if (fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i))
+			drm_gem_object_handle_put_unlocked(fb->obj[i]);
+	}
 
 	mutex_lock(&dev->mode_config.fb_lock);
 	list_del(&fb->head);
@@ -186,6 +186,46 @@ void drm_gem_private_object_fini(struct drm_gem_object *obj)
 }
 EXPORT_SYMBOL(drm_gem_private_object_fini);
 
+static void drm_gem_object_handle_get(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+
+	drm_WARN_ON(dev, !mutex_is_locked(&dev->object_name_lock));
+
+	if (obj->handle_count++ == 0)
+		drm_gem_object_get(obj);
+}
+
+/**
+ * drm_gem_object_handle_get_if_exists_unlocked - acquire reference on user-space handle, if any
+ * @obj: GEM object
+ *
+ * Acquires a reference on the GEM buffer object's handle. Required to keep
+ * the GEM object alive. Call drm_gem_object_handle_put_if_exists_unlocked()
+ * to release the reference. Does nothing if the buffer object has no handle.
+ *
+ * Returns:
+ * True if a handle exists, or false otherwise
+ */
+bool drm_gem_object_handle_get_if_exists_unlocked(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+
+	guard(mutex)(&dev->object_name_lock);
+
+	/*
+	 * First ref taken during GEM object creation, if any. Some
+	 * drivers set up internal framebuffers with GEM objects that
+	 * do not have a GEM handle. Hence, this counter can be zero.
+	 */
+	if (!obj->handle_count)
+		return false;
+
+	drm_gem_object_handle_get(obj);
+
+	return true;
+}
+
 /**
  * drm_gem_object_handle_free - release resources bound to userspace handles
  * @obj: GEM object to clean up.
@@ -216,20 +256,26 @@ static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
 	}
 }
 
-static void
-drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
+/**
+ * drm_gem_object_handle_put_unlocked - releases reference on user-space handle
+ * @obj: GEM object
+ *
+ * Releases a reference on the GEM buffer object's handle. Possibly releases
+ * the GEM buffer object and associated dma-buf objects.
+ */
+void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	bool final = false;
 
-	if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
+	if (drm_WARN_ON(dev, READ_ONCE(obj->handle_count) == 0))
 		return;
 
 	/*
-	* Must bump handle count first as this may be the last
-	* ref, in which case the object would disappear before we
-	* checked for a name
-	*/
+	 * Must bump handle count first as this may be the last
+	 * ref, in which case the object would disappear before
+	 * we checked for a name.
+	 */
 
 	mutex_lock(&dev->object_name_lock);
 	if (--obj->handle_count == 0) {
@@ -363,8 +409,8 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
 	int ret;
 
 	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
-	if (obj->handle_count++ == 0)
-		drm_gem_object_get(obj);
+
+	drm_gem_object_handle_get(obj);
 
 	/*
 	 * Get the user-visible handle using idr. Preload and perform
@@ -153,6 +153,8 @@ void drm_sysfs_lease_event(struct drm_device *dev);
 
 /* drm_gem.c */
 int drm_gem_init(struct drm_device *dev);
+bool drm_gem_object_handle_get_if_exists_unlocked(struct drm_gem_object *obj);
+void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj);
 int drm_gem_handle_create_tail(struct drm_file *file_priv,
 			       struct drm_gem_object *obj,
 			       u32 *handlep);
@@ -245,17 +245,19 @@ static int __init vkms_init(void)
 	if (!config)
 		return -ENOMEM;
 
-	default_config = config;
-
 	config->cursor = enable_cursor;
 	config->writeback = enable_writeback;
 	config->overlay = enable_overlay;
 
 	ret = vkms_create(config);
-	if (ret)
+	if (ret) {
 		kfree(config);
+		return ret;
+	}
 
-	return ret;
+	default_config = config;
+
+	return 0;
 }
 
 static void vkms_destroy(struct vkms_config *config)
@@ -279,9 +281,10 @@ static void vkms_destroy(struct vkms_config *config)
 
 static void __exit vkms_exit(void)
 {
-	if (default_config->dev)
-		vkms_destroy(default_config);
+	if (!default_config)
+		return;
 
+	vkms_destroy(default_config);
 	kfree(default_config);
 }
 
@@ -6006,9 +6006,9 @@ lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
 	phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr);
 	phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr);
 
-	memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
-	strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
+	memcpy(phba->BIOSVersion, cntl_attr->bios_ver_str,
 		sizeof(phba->BIOSVersion));
+	phba->BIOSVersion[sizeof(phba->BIOSVersion) - 1] = '\0';
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
 			"3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, "
@@ -23,6 +23,7 @@
 #ifndef __DRM_FRAMEBUFFER_H__
 #define __DRM_FRAMEBUFFER_H__
 
+#include <linux/bits.h>
 #include <linux/ctype.h>
 #include <linux/list.h>
 #include <linux/sched.h>
@@ -100,6 +101,8 @@ struct drm_framebuffer_funcs {
 		     unsigned num_clips);
 };
 
+#define DRM_FRAMEBUFFER_HAS_HANDLE_REF(_i) BIT(0u + (_i))
+
 /**
  * struct drm_framebuffer - frame buffer object
  *
@@ -188,6 +191,10 @@ struct drm_framebuffer {
 	 * DRM_MODE_FB_MODIFIERS.
 	 */
 	int flags;
+	/**
+	 * @internal_flags: Framebuffer flags like DRM_FRAMEBUFFER_HAS_HANDLE_REF.
+	 */
+	unsigned int internal_flags;
 	/**
 	 * @filp_head: Placed on &drm_file.fbs, protected by &drm_file.fbs_lock.
 	 */
@@ -1379,6 +1379,15 @@ void run_posix_cpu_timers(void)
 
 	lockdep_assert_irqs_disabled();
 
+	/*
+	 * Ensure that release_task(tsk) can't happen while
+	 * handle_posix_cpu_timers() is running. Otherwise, a concurrent
+	 * posix_cpu_timer_del() may fail to lock_task_sighand(tsk) and
+	 * miss timer->it.cpu.firing != 0.
+	 */
+	if (tsk->exit_state)
+		return;
+
 	/*
 	 * If the actual expiry is deferred to task work context and the
 	 * work is already scheduled there is no point to do anything here.
@@ -1,3 +1,16 @@
+* Mon Sep 08 2025 Patrick Talbert <ptalbert@redhat.com> [5.14.0-570.42.2.el9_6]
+- posix-cpu-timers: fix race between handle_posix_cpu_timers() and posix_cpu_timer_del() (CKI Backport Bot) [RHEL-112780] {CVE-2025-38352}
+- powerpc/pseries/iommu: create DDW for devices with DMA mask less than 64-bits (CKI Backport Bot) [RHEL-113173]
+Resolves: RHEL-112780, RHEL-113173
+
+* Sat Aug 30 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-570.42.1.el9_6]
+- powerpc/pseries/iommu: memory notifier incorrectly adds TCEs for pmemory (Mamatha Inamdar) [RHEL-103015]
+- drm/framebuffer: Acquire internal references on GEM handles (José Expósito) [RHEL-106699] {CVE-2025-38449}
+- drm/gem: Acquire references on GEM handles for framebuffers (José Expósito) [RHEL-106699] {CVE-2025-38449}
+- drm/vkms: Fix use after free and double free on init error (CKI KWF BOT) [RHEL-99420] {CVE-2025-22097}
+- scsi: lpfc: Use memcpy() for BIOS version (Ewan D. Milne) [RHEL-105933] {CVE-2025-38332}
+Resolves: RHEL-103015, RHEL-105933, RHEL-106699, RHEL-99420
+
 * Thu Aug 28 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-570.41.1.el9_6]
 - powerpc/pseries/iommu: Fix kmemleak in TCE table userspace view (Mamatha Inamdar) [RHEL-107002]
 - net: ibmveth: make veth_pool_store stop hanging (Mamatha Inamdar) [RHEL-109494]