Import of kernel-5.14.0-570.42.2.el9_6
parent a3bb58653c
commit 4d6c34bc1c
Makefile.rhelver
@@ -12,7 +12,7 @@ RHEL_MINOR = 6
 #
 # Use this spot to avoid future merge conflicts.
 # Do not trim this comment.
-RHEL_RELEASE = 570.41.1
+RHEL_RELEASE = 570.42.2
 
 #
 # ZSTREAM

arch/powerpc/include/asm/mmzone.h
@@ -35,6 +35,7 @@ extern cpumask_var_t node_to_cpumask_map[];
 #ifdef CONFIG_MEMORY_HOTPLUG
 extern unsigned long max_pfn;
 u64 memory_hotplug_max(void);
+u64 hot_add_drconf_memory_max(void);
 #else
 #define memory_hotplug_max() memblock_end_of_DRAM()
 #endif

arch/powerpc/mm/numa.c
@@ -1354,7 +1354,7 @@ int hot_add_scn_to_nid(unsigned long scn_addr)
 	return nid;
 }
 
-static u64 hot_add_drconf_memory_max(void)
+u64 hot_add_drconf_memory_max(void)
 {
 	struct device_node *memory = NULL;
 	struct device_node *dn = NULL;

arch/powerpc/platforms/pseries/iommu.c
@@ -52,7 +52,8 @@ enum {
 enum {
 	DDW_EXT_SIZE = 0,
 	DDW_EXT_RESET_DMA_WIN = 1,
-	DDW_EXT_QUERY_OUT_SIZE = 2
+	DDW_EXT_QUERY_OUT_SIZE = 2,
+	DDW_EXT_LIMITED_ADDR_MODE = 3
 };
 
 static struct iommu_table *iommu_pseries_alloc_table(int node)
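
The new DDW_EXT_LIMITED_ADDR_MODE value is an index into the "ibm,ddw-extensions" device-tree property, a flat array of cells whose entry 0 (DDW_EXT_SIZE) holds the number of extensions present. A minimal sketch of such an indexed, bounds-checked read, assuming only the standard of_property_read_u32_index() OF helper — the ddw_read_ext() helper this file already calls appears to follow the same pattern, but this is an illustration, not the tree's implementation (ddw_ext_read_sketch is a hypothetical name):

#include <linux/errno.h>
#include <linux/of.h>

/* Sketch: fetch extension 'extnum' from "ibm,ddw-extensions", checked
 * against the element count stored at index DDW_EXT_SIZE (0). */
static int ddw_ext_read_sketch(struct device_node *np, int extnum, u32 *value)
{
        u32 count;
        int ret;

        ret = of_property_read_u32_index(np, "ibm,ddw-extensions",
                                         DDW_EXT_SIZE, &count);
        if (ret)
                return ret;

        if (count < extnum)
                return -EOVERFLOW;

        return of_property_read_u32_index(np, "ibm,ddw-extensions",
                                          extnum, value);
}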

@@ -1287,17 +1288,13 @@ static LIST_HEAD(failed_ddw_pdn_list);
 
 static phys_addr_t ddw_memory_hotplug_max(void)
 {
-	resource_size_t max_addr = memory_hotplug_max();
-	struct device_node *memory;
+	resource_size_t max_addr;
 
-	for_each_node_by_type(memory, "memory") {
-		struct resource res;
-
-		if (of_address_to_resource(memory, 0, &res))
-			continue;
-
-		max_addr = max_t(resource_size_t, max_addr, res.end + 1);
-	}
+#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
+	max_addr = hot_add_drconf_memory_max();
+#else
+	max_addr = memblock_end_of_DRAM();
+#endif
 
 	return max_addr;
 }

@@ -1334,6 +1331,54 @@ static void reset_dma_window(struct pci_dev *dev, struct device_node *par_dn)
 			ret);
 }
 
+/*
+ * Platforms support placing PHB in limited address mode starting with LoPAR
+ * level 2.13 implement. In this mode, the DMA address returned by DDW is over
+ * 4GB but, less than 64-bits. This benefits IO adapters that don't support
+ * 64-bits for DMA addresses.
+ */
+static int limited_dma_window(struct pci_dev *dev, struct device_node *par_dn)
+{
+	int ret;
+	u32 cfg_addr, reset_dma_win, las_supported;
+	u64 buid;
+	struct device_node *dn;
+	struct pci_dn *pdn;
+
+	ret = ddw_read_ext(par_dn, DDW_EXT_RESET_DMA_WIN, &reset_dma_win);
+	if (ret)
+		goto out;
+
+	ret = ddw_read_ext(par_dn, DDW_EXT_LIMITED_ADDR_MODE, &las_supported);
+
+	/* Limited Address Space extension available on the platform but DDW in
+	 * limited addressing mode not supported
+	 */
+	if (!ret && !las_supported)
+		ret = -EPROTO;
+
+	if (ret) {
+		dev_info(&dev->dev, "Limited Address Space for DDW not Supported, err: %d", ret);
+		goto out;
+	}
+
+	dn = pci_device_to_OF_node(dev);
+	pdn = PCI_DN(dn);
+	buid = pdn->phb->buid;
+	cfg_addr = (pdn->busno << 16) | (pdn->devfn << 8);
+
+	ret = rtas_call(reset_dma_win, 4, 1, NULL, cfg_addr, BUID_HI(buid),
+			BUID_LO(buid), 1);
+	if (ret)
+		dev_info(&dev->dev,
+			 "ibm,reset-pe-dma-windows(%x) for Limited Addr Support: %x %x %x returned %d ",
+			 reset_dma_win, cfg_addr, BUID_HI(buid), BUID_LO(buid),
+			 ret);
+
+out:
+	return ret;
+}
+
 /* Return largest page shift based on "IO Page Sizes" output of ibm,query-pe-dma-window. */
 static int iommu_get_page_shift(u32 query_page_size)
 {
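
The helper added above gates the feature: both extension slots are probed, and only when the platform advertises limited address mode does it invoke ibm,reset-pe-dma-windows, with the extra trailing argument apparently requesting the limited-address reset. The config address handed to RTAS packs the PE's bus and devfn in the usual way; a toy demo of that encoding (userspace, invented values):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t busno = 0x23;
        uint32_t devfn = (0x1a << 3) | 0x2;     /* device 0x1a, function 2 */
        uint32_t cfg_addr = (busno << 16) | (devfn << 8);

        printf("cfg_addr = 0x%06x\n", cfg_addr);        /* 0x23d200 */
        return 0;
}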

@@ -1401,7 +1446,7 @@ static struct property *ddw_property_create(const char *propname, u32 liobn, u64
  *
  * returns true if can map all pages (direct mapping), false otherwise..
  */
-static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
+static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn, u64 dma_mask)
 {
 	int len = 0, ret;
 	int max_ram_len = order_base_2(ddw_memory_hotplug_max());

@@ -1420,6 +1465,9 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
 	bool pmem_present;
 	struct pci_dn *pci = PCI_DN(pdn);
 	struct property *default_win = NULL;
+	bool limited_addr_req = false, limited_addr_enabled = false;
+	int dev_max_ddw;
+	int ddw_sz;
 
 	dn = of_find_node_by_type(NULL, "ibm,pmemory");
 	pmem_present = dn != NULL;

@@ -1446,7 +1494,6 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
 	 * the ibm,ddw-applicable property holds the tokens for:
 	 * ibm,query-pe-dma-window
 	 * ibm,create-pe-dma-window
-	 * ibm,remove-pe-dma-window
 	 * for the given node in that order.
 	 * the property is actually in the parent, not the PE
 	 */

@@ -1466,6 +1513,20 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
 	if (ret != 0)
 		goto out_failed;
 
+	/* DMA Limited Addressing required? This is when the driver has
+	 * requested to create DDW but supports mask which is less than 64-bits
+	 */
+	limited_addr_req = (dma_mask != DMA_BIT_MASK(64));
+
+	/* place the PHB in Limited Addressing mode */
+	if (limited_addr_req) {
+		if (limited_dma_window(dev, pdn))
+			goto out_failed;
+
+		/* PHB is in Limited address mode */
+		limited_addr_enabled = true;
+	}
+
 	/*
 	 * If there is no window available, remove the default DMA window,
 	 * if it's present. This will make all the resources available to the

@@ -1512,6 +1573,15 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
 		goto out_failed;
 	}
 
+	/* Maximum DMA window size that the device can address (in log2) */
+	dev_max_ddw = fls64(dma_mask);
+
+	/* If the device DMA mask is less than 64-bits, make sure the DMA window
+	 * size is not bigger than what the device can access
+	 */
+	ddw_sz = min(order_base_2(query.largest_available_block << page_shift),
+			dev_max_ddw);
+
 	/*
 	 * The "ibm,pmemory" can appear anywhere in the address space.
 	 * Assuming it is still backed by page structs, try MAX_PHYSMEM_BITS
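
The sizing rule introduced here: fls64(dma_mask) is the highest address bit the device can drive, order_base_2() of the queried block is the largest window firmware can back with TCEs, and the window order is the smaller of the two. A runnable toy of the arithmetic (userspace; fls64/order_base_2 reimplemented here purely for illustration, with invented values):

#include <stdint.h>
#include <stdio.h>

static unsigned int fls64_demo(uint64_t x)      /* highest set bit, 1-based */
{
        unsigned int n = 0;

        while (x) {
                n++;
                x >>= 1;
        }
        return n;
}

static unsigned int order_base_2_demo(uint64_t x)       /* ceil(log2(x)) */
{
        unsigned int n = 0;

        while ((1ULL << n) < x)
                n++;
        return n;
}

int main(void)
{
        uint64_t dma_mask = (1ULL << 48) - 1;   /* 48-bit capable device */
        uint64_t largest_block = 1ULL << 36;    /* TCEs from the DDW query */
        unsigned int page_shift = 16;           /* 64K IOMMU pages */

        unsigned int dev_max_ddw = fls64_demo(dma_mask);                /* 48 */
        unsigned int ddw_sz = order_base_2_demo(largest_block << page_shift); /* 52 */

        if (ddw_sz > dev_max_ddw)
                ddw_sz = dev_max_ddw;   /* clamp the window to the mask */

        printf("DDW order: %u (a 2^%u-byte window)\n", ddw_sz, ddw_sz);
        return 0;
}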

@@ -1520,23 +1590,21 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
 	 */
 	len = max_ram_len;
 	if (pmem_present) {
-		if (query.largest_available_block >=
-		    (1ULL << (MAX_PHYSMEM_BITS - page_shift)))
+		if (ddw_sz >= MAX_PHYSMEM_BITS)
 			len = MAX_PHYSMEM_BITS;
 		else
 			dev_info(&dev->dev, "Skipping ibm,pmemory");
 	}
 
 	/* check if the available block * number of ptes will map everything */
-	if (query.largest_available_block < (1ULL << (len - page_shift))) {
+	if (ddw_sz < len) {
 		dev_dbg(&dev->dev,
 			"can't map partition max 0x%llx with %llu %llu-sized pages\n",
 			1ULL << len,
 			query.largest_available_block,
 			1ULL << page_shift);
 
-		len = order_base_2(query.largest_available_block << page_shift);
+		len = ddw_sz;
 
 		dynamic_mapping = true;
 	} else {
 		direct_mapping = !default_win_removed ||

@@ -1550,8 +1618,9 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
 	 */
 	if (default_win_removed && pmem_present && !direct_mapping) {
 		/* DDW is big enough to be split */
-		if ((query.largest_available_block << page_shift) >=
+		if ((1ULL << ddw_sz) >=
 		    MIN_DDW_VPMEM_DMA_WINDOW + (1ULL << max_ram_len)) {
+
 			direct_mapping = true;
 
 			/* offset of the Dynamic part of DDW */

@@ -1562,8 +1631,7 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
 			dynamic_mapping = true;
 
 			/* create max size DDW possible */
-			len = order_base_2(query.largest_available_block
-					<< page_shift);
+			len = ddw_sz;
 		}
 	}
 

@@ -1603,7 +1671,7 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
 
 	if (direct_mapping) {
 		/* DDW maps the whole partition, so enable direct DMA mapping */
-		ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT,
+		ret = walk_system_ram_range(0, ddw_memory_hotplug_max() >> PAGE_SHIFT,
 				win64->value, tce_setrange_multi_pSeriesLP_walk);
 		if (ret) {
 			dev_info(&dev->dev, "failed to map DMA window for %pOF: %d\n",

@@ -1691,7 +1759,7 @@ out_remove_win:
 	__remove_dma_window(pdn, ddw_avail, create.liobn);
 
 out_failed:
-	if (default_win_removed)
+	if (default_win_removed || limited_addr_enabled)
 		reset_dma_window(dev, pdn);
 
 	fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL);

@@ -1710,6 +1778,9 @@ out_unlock:
 		dev->dev.bus_dma_limit = dev->dev.archdata.dma_offset +
 			(1ULL << max_ram_len);
 
+	dev_info(&dev->dev, "lsa_required: %x, lsa_enabled: %x, direct mapping: %x\n",
+			limited_addr_req, limited_addr_enabled, direct_mapping);
+
 	return direct_mapping;
 }
 

@@ -1835,8 +1906,11 @@ static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask)
 {
 	struct device_node *dn = pci_device_to_OF_node(pdev), *pdn;
 
-	/* only attempt to use a new window if 64-bit DMA is requested */
-	if (dma_mask < DMA_BIT_MASK(64))
+	/* For DDW, DMA mask should be more than 32-bits. For mask more then
+	 * 32-bits but less then 64-bits, DMA addressing is supported in
+	 * Limited Addressing mode.
+	 */
+	if (dma_mask <= DMA_BIT_MASK(32))
 		return false;
 
 	dev_dbg(&pdev->dev, "node is %pOF\n", dn);
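
With limited address mode available, the old 64-bit-only gate would have rejected devices that can still benefit from a DDW. A toy check of the relaxed predicate (userspace; DMA_BIT_MASK reproduced as defined in linux/dma-mapping.h):

#include <assert.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
        assert(DMA_BIT_MASK(48) > DMA_BIT_MASK(32));    /* 48-bit mask: try DDW */
        assert(DMA_BIT_MASK(64) > DMA_BIT_MASK(32));    /* 64-bit mask: try DDW */
        assert(!(DMA_BIT_MASK(32) > DMA_BIT_MASK(32))); /* 32-bit mask: skip DDW */
        return 0;
}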
@ -1849,7 +1923,7 @@ static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask)
|
|||||||
*/
|
*/
|
||||||
pdn = pci_dma_find(dn, NULL);
|
pdn = pci_dma_find(dn, NULL);
|
||||||
if (pdn && PCI_DN(pdn))
|
if (pdn && PCI_DN(pdn))
|
||||||
return enable_ddw(pdev, pdn);
|
return enable_ddw(pdev, pdn, dma_mask);
|
||||||
|
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||

@@ -2349,11 +2423,17 @@ static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
 	struct memory_notify *arg = data;
 	int ret = 0;
 
+	/* This notifier can get called when onlining persistent memory as well.
+	 * TCEs are not pre-mapped for persistent memory. Persistent memory will
+	 * always be above ddw_memory_hotplug_max()
+	 */
+
 	switch (action) {
 	case MEM_GOING_ONLINE:
 		spin_lock(&dma_win_list_lock);
 		list_for_each_entry(window, &dma_win_list, list) {
-			if (window->direct) {
+			if (window->direct && (arg->start_pfn << PAGE_SHIFT) <
+				ddw_memory_hotplug_max()) {
 				ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn,
 						arg->nr_pages, window->prop);
 			}

@@ -2365,7 +2445,8 @@ static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
 	case MEM_OFFLINE:
 		spin_lock(&dma_win_list_lock);
 		list_for_each_entry(window, &dma_win_list, list) {
-			if (window->direct) {
+			if (window->direct && (arg->start_pfn << PAGE_SHIFT) <
+				ddw_memory_hotplug_max()) {
 				ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn,
 						arg->nr_pages, window->prop);
 			}
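
Both notifier arms now share the same guard: TCEs are only added or cleared for hotplugged ranges that fall inside the direct-mapped window, which keeps persistent memory — always placed above ddw_memory_hotplug_max() — out of the table. A minimal model of the predicate (userspace; in_direct_window is a hypothetical name, values invented):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

static bool in_direct_window(uint64_t start_pfn, uint64_t window_max)
{
        return (start_pfn << PAGE_SHIFT) < window_max;
}

int main(void)
{
        uint64_t window_max = 1ULL << 40;       /* 1 TiB direct-mapped window */

        /* ordinary hotplugged RAM at 512 GiB: TCEs updated */
        printf("%d\n", in_direct_window((512ULL << 30) >> PAGE_SHIFT, window_max));
        /* persistent memory onlined at 2 TiB: skipped */
        printf("%d\n", in_direct_window((2ULL << 40) >> PAGE_SHIFT, window_max));
        return 0;
}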

drivers/gpu/drm/drm_framebuffer.c
@@ -860,11 +860,23 @@ void drm_framebuffer_free(struct kref *kref)
 int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
 			 const struct drm_framebuffer_funcs *funcs)
 {
+	unsigned int i;
 	int ret;
+	bool exists;
 
 	if (WARN_ON_ONCE(fb->dev != dev || !fb->format))
 		return -EINVAL;
 
+	for (i = 0; i < fb->format->num_planes; i++) {
+		if (drm_WARN_ON_ONCE(dev, fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i)))
+			fb->internal_flags &= ~DRM_FRAMEBUFFER_HAS_HANDLE_REF(i);
+		if (fb->obj[i]) {
+			exists = drm_gem_object_handle_get_if_exists_unlocked(fb->obj[i]);
+			if (exists)
+				fb->internal_flags |= DRM_FRAMEBUFFER_HAS_HANDLE_REF(i);
+		}
+	}
+
 	INIT_LIST_HEAD(&fb->filp_head);
 
 	fb->funcs = funcs;

@@ -873,7 +885,7 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
 	ret = __drm_mode_object_add(dev, &fb->base, DRM_MODE_OBJECT_FB,
 				    false, drm_framebuffer_free);
 	if (ret)
-		goto out;
+		goto err;
 
 	mutex_lock(&dev->mode_config.fb_lock);
 	dev->mode_config.num_fb++;

@@ -881,7 +893,16 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
 	mutex_unlock(&dev->mode_config.fb_lock);
 
 	drm_mode_object_register(dev, &fb->base);
-out:
+
+	return 0;
+
+err:
+	for (i = 0; i < fb->format->num_planes; i++) {
+		if (fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i)) {
+			drm_gem_object_handle_put_unlocked(fb->obj[i]);
+			fb->internal_flags &= ~DRM_FRAMEBUFFER_HAS_HANDLE_REF(i);
+		}
+	}
 	return ret;
 }
 EXPORT_SYMBOL(drm_framebuffer_init);

@@ -958,6 +979,12 @@ EXPORT_SYMBOL(drm_framebuffer_unregister_private);
 void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
 {
 	struct drm_device *dev = fb->dev;
+	unsigned int i;
+
+	for (i = 0; i < fb->format->num_planes; i++) {
+		if (fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i))
+			drm_gem_object_handle_put_unlocked(fb->obj[i]);
+	}
 
 	mutex_lock(&dev->mode_config.fb_lock);
 	list_del(&fb->head);
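
All three drm_framebuffer.c hunks implement one bookkeeping rule: drm_framebuffer_init() takes a handle reference per plane where one exists and records it in internal_flags, and each teardown path (the new err label, drm_framebuffer_cleanup()) puts exactly the references recorded there. A toy of the per-plane flag bookkeeping (userspace; BIT() reproduced as in linux/bits.h, plane set invented):

#include <stdio.h>

#define BIT(n)                  (1u << (n))
#define HAS_HANDLE_REF(i)       BIT(0u + (i))

int main(void)
{
        unsigned int internal_flags = 0;
        unsigned int num_planes = 3;

        /* "init": planes 0 and 2 had live handles, plane 1 did not */
        internal_flags |= HAS_HANDLE_REF(0);
        internal_flags |= HAS_HANDLE_REF(2);

        /* "cleanup": put only what was taken */
        for (unsigned int i = 0; i < num_planes; i++) {
                if (internal_flags & HAS_HANDLE_REF(i))
                        printf("put handle ref for plane %u\n", i);
        }
        return 0;
}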

drivers/gpu/drm/drm_gem.c
@@ -186,6 +186,46 @@ void drm_gem_private_object_fini(struct drm_gem_object *obj)
 }
 EXPORT_SYMBOL(drm_gem_private_object_fini);
 
+static void drm_gem_object_handle_get(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+
+	drm_WARN_ON(dev, !mutex_is_locked(&dev->object_name_lock));
+
+	if (obj->handle_count++ == 0)
+		drm_gem_object_get(obj);
+}
+
+/**
+ * drm_gem_object_handle_get_if_exists_unlocked - acquire reference on user-space handle, if any
+ * @obj: GEM object
+ *
+ * Acquires a reference on the GEM buffer object's handle. Required to keep
+ * the GEM object alive. Call drm_gem_object_handle_put_if_exists_unlocked()
+ * to release the reference. Does nothing if the buffer object has no handle.
+ *
+ * Returns:
+ * True if a handle exists, or false otherwise
+ */
+bool drm_gem_object_handle_get_if_exists_unlocked(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+
+	guard(mutex)(&dev->object_name_lock);
+
+	/*
+	 * First ref taken during GEM object creation, if any. Some
+	 * drivers set up internal framebuffers with GEM objects that
+	 * do not have a GEM handle. Hence, this counter can be zero.
+	 */
+	if (!obj->handle_count)
+		return false;
+
+	drm_gem_object_handle_get(obj);
+
+	return true;
+}
+
 /**
  * drm_gem_object_handle_free - release resources bound to userspace handles
  * @obj: GEM object to clean up.
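
The guard(mutex)(...) line comes from the kernel's linux/cleanup.h scope-based helpers: it acquires the mutex and releases it automatically on every scope exit, so both return paths drop object_name_lock without explicit unlocks. For comparison, a hand-unrolled sketch of the same body (hypothetical name, written as if it sat beside the originals in drm_gem.c):

/* Open-coded equivalent of drm_gem_object_handle_get_if_exists_unlocked(). */
static bool handle_get_if_exists_open_coded(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        bool ret = false;

        mutex_lock(&dev->object_name_lock);
        if (obj->handle_count) {
                drm_gem_object_handle_get(obj); /* count > 0, so no extra obj ref */
                ret = true;
        }
        mutex_unlock(&dev->object_name_lock);

        return ret;
}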

@@ -216,19 +256,25 @@ static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
 	}
 }
 
-static void
-drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
+/**
+ * drm_gem_object_handle_put_unlocked - releases reference on user-space handle
+ * @obj: GEM object
+ *
+ * Releases a reference on the GEM buffer object's handle. Possibly releases
+ * the GEM buffer object and associated dma-buf objects.
+ */
+void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	bool final = false;
 
-	if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
+	if (drm_WARN_ON(dev, READ_ONCE(obj->handle_count) == 0))
 		return;
 
 	/*
 	 * Must bump handle count first as this may be the last
-	 * ref, in which case the object would disappear before we
-	 * checked for a name
+	 * ref, in which case the object would disappear before
+	 * we checked for a name.
 	 */
 
 	mutex_lock(&dev->object_name_lock);

@@ -363,8 +409,8 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
 	int ret;
 
 	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
-	if (obj->handle_count++ == 0)
-		drm_gem_object_get(obj);
+
+	drm_gem_object_handle_get(obj);
 
 	/*
 	 * Get the user-visible handle using idr. Preload and perform

drivers/gpu/drm/drm_internal.h
@@ -153,6 +153,8 @@ void drm_sysfs_lease_event(struct drm_device *dev);
 
 /* drm_gem.c */
 int drm_gem_init(struct drm_device *dev);
+bool drm_gem_object_handle_get_if_exists_unlocked(struct drm_gem_object *obj);
+void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj);
 int drm_gem_handle_create_tail(struct drm_file *file_priv,
 			       struct drm_gem_object *obj,
 			       u32 *handlep);

drivers/gpu/drm/vkms/vkms_drv.c
@@ -245,17 +245,19 @@ static int __init vkms_init(void)
 	if (!config)
 		return -ENOMEM;
 
-	default_config = config;
-
 	config->cursor = enable_cursor;
 	config->writeback = enable_writeback;
 	config->overlay = enable_overlay;
 
 	ret = vkms_create(config);
-	if (ret)
+	if (ret) {
 		kfree(config);
-
-	return ret;
+		return ret;
+	}
+
+	default_config = config;
+
+	return 0;
 }
 
 static void vkms_destroy(struct vkms_config *config)
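
The fix follows the usual publish-on-success pattern: default_config is assigned only once vkms_create() has succeeded, so the error path frees the config exactly once and the exit path sees NULL instead of a dangling pointer. A toy of the pattern (userspace, all names invented):

#include <stdlib.h>

static struct cfg {
        int dummy;
} *default_cfg;

static int create(struct cfg *c)
{
        (void)c;
        return -1;      /* simulate vkms_create() failing */
}

static int init(void)
{
        struct cfg *c = calloc(1, sizeof(*c));

        if (!c)
                return -1;

        if (create(c)) {
                free(c);
                return -1;      /* default_cfg was never set */
        }

        default_cfg = c;        /* publish only after success */
        return 0;
}

static void exit_mod(void)
{
        if (!default_cfg)
                return;         /* nothing to tear down, no double free */
        free(default_cfg);
}

int main(void)
{
        init();
        exit_mod();
        return 0;
}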

@@ -279,9 +281,10 @@ static void vkms_destroy(struct vkms_config *config)
 
 static void __exit vkms_exit(void)
 {
-	if (default_config->dev)
-		vkms_destroy(default_config);
+	if (!default_config)
+		return;
 
+	vkms_destroy(default_config);
 	kfree(default_config);
 }
 

drivers/scsi/lpfc/lpfc_init.c
@@ -6006,9 +6006,9 @@ lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
 	phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr);
 	phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr);
 
-	memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
-	strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
+	memcpy(phba->BIOSVersion, cntl_attr->bios_ver_str,
 		sizeof(phba->BIOSVersion));
+	phba->BIOSVersion[sizeof(phba->BIOSVersion) - 1] = '\0';
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
 			"3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, "
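
The old memset()+strlcat() pair treated bios_ver_str as a NUL-terminated string; since the firmware field is a fixed-size byte array with no such guarantee, strlcat() could read past it. The replacement copies the fixed size and terminates explicitly. Toy demo (userspace, invented field contents):

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* fixed-size firmware field, deliberately not NUL-terminated */
        char bios_ver_str[8] = { 'V', '1', '.', '2', '3', '4', '5', '6' };
        char BIOSVersion[8];

        memcpy(BIOSVersion, bios_ver_str, sizeof(BIOSVersion));
        BIOSVersion[sizeof(BIOSVersion) - 1] = '\0';    /* "V1.2345" */

        printf("%s\n", BIOSVersion);
        return 0;
}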

include/drm/drm_framebuffer.h
@@ -23,6 +23,7 @@
 #ifndef __DRM_FRAMEBUFFER_H__
 #define __DRM_FRAMEBUFFER_H__
 
+#include <linux/bits.h>
 #include <linux/ctype.h>
 #include <linux/list.h>
 #include <linux/sched.h>

@@ -100,6 +101,8 @@ struct drm_framebuffer_funcs {
 		     unsigned num_clips);
 };
 
+#define DRM_FRAMEBUFFER_HAS_HANDLE_REF(_i)	BIT(0u + (_i))
+
 /**
  * struct drm_framebuffer - frame buffer object
  *

@@ -188,6 +191,10 @@ struct drm_framebuffer {
 	 * DRM_MODE_FB_MODIFIERS.
 	 */
 	int flags;
+	/**
+	 * @internal_flags: Framebuffer flags like DRM_FRAMEBUFFER_HAS_HANDLE_REF.
+	 */
+	unsigned int internal_flags;
 	/**
 	 * @filp_head: Placed on &drm_file.fbs, protected by &drm_file.fbs_lock.
 	 */

kernel/time/posix-cpu-timers.c
@@ -1379,6 +1379,15 @@ void run_posix_cpu_timers(void)
 
 	lockdep_assert_irqs_disabled();
 
+	/*
+	 * Ensure that release_task(tsk) can't happen while
+	 * handle_posix_cpu_timers() is running. Otherwise, a concurrent
+	 * posix_cpu_timer_del() may fail to lock_task_sighand(tsk) and
+	 * miss timer->it.cpu.firing != 0.
+	 */
+	if (tsk->exit_state)
+		return;
+
 	/*
 	 * If the actual expiry is deferred to task work context and the
 	 * work is already scheduled there is no point to do anything here.
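
Roughly, the window this check closes (per the backported CVE-2025-38352 fix): an exiting task that has passed exit_notify() can still take a timer interrupt, and once handle_posix_cpu_timers() drops the sighand lock the task may be reaped, so a concurrent posix_cpu_timer_del() fails lock_task_sighand() and never observes timer->it.cpu.firing:

    CPU 0 (exiting task, timer IRQ)        CPU 1
    run_posix_cpu_timers()
      handle_posix_cpu_timers()
        marks timer->it.cpu.firing
        unlock_task_sighand()              release_task() reaps the task
                                           posix_cpu_timer_del()
                                             lock_task_sighand() fails ->
                                             firing flag never seen, timer
                                             torn down while still firing

Bailing out when tsk->exit_state is set keeps the handler from starting on such a task at all.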

redhat/kernel.changelog-9.6
@@ -1,3 +1,16 @@
+* Mon Sep 08 2025 Patrick Talbert <ptalbert@redhat.com> [5.14.0-570.42.2.el9_6]
+- posix-cpu-timers: fix race between handle_posix_cpu_timers() and posix_cpu_timer_del() (CKI Backport Bot) [RHEL-112780] {CVE-2025-38352}
+- powerpc/pseries/iommu: create DDW for devices with DMA mask less than 64-bits (CKI Backport Bot) [RHEL-113173]
+Resolves: RHEL-112780, RHEL-113173
+
+* Sat Aug 30 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-570.42.1.el9_6]
+- powerpc/pseries/iommu: memory notifier incorrectly adds TCEs for pmemory (Mamatha Inamdar) [RHEL-103015]
+- drm/framebuffer: Acquire internal references on GEM handles (José Expósito) [RHEL-106699] {CVE-2025-38449}
+- drm/gem: Acquire references on GEM handles for framebuffers (José Expósito) [RHEL-106699] {CVE-2025-38449}
+- drm/vkms: Fix use after free and double free on init error (CKI KWF BOT) [RHEL-99420] {CVE-2025-22097}
+- scsi: lpfc: Use memcpy() for BIOS version (Ewan D. Milne) [RHEL-105933] {CVE-2025-38332}
+Resolves: RHEL-103015, RHEL-105933, RHEL-106699, RHEL-99420
+
 * Thu Aug 28 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-570.41.1.el9_6]
 - powerpc/pseries/iommu: Fix kmemleak in TCE table userspace view (Mamatha Inamdar) [RHEL-107002]
 - net: ibmveth: make veth_pool_store stop hanging (Mamatha Inamdar) [RHEL-109494]