Import of kernel-4.18.0-553.120.1.el8_10

This commit is contained in:
almalinux-bot-kernel 2026-04-22 05:02:39 +00:00
parent 79152bf4d5
commit 7242ccff16
24 changed files with 419 additions and 229 deletions

View File

@ -12,7 +12,7 @@ RHEL_MINOR = 10
#
# Use this spot to avoid future merge conflicts.
# Do not trim this comment.
RHEL_RELEASE = 553.117.1
RHEL_RELEASE = 553.120.1
#
# ZSTREAM

View File

@ -814,6 +814,13 @@ bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
}
EXPORT_SYMBOL_GPL(kvm_require_dr);
/*
 * Return true when the guest has fully enabled paravirt async page faults:
 * both KVM_ASYNC_PF_ENABLED and KVM_ASYNC_PF_DELIVERY_AS_INT must be set
 * in the cached async-PF enable MSR value (vcpu->arch.apf.msr_en_val).
 */
static bool kvm_pv_async_pf_enabled(struct kvm_vcpu *vcpu)
{
u64 mask = KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;
return (vcpu->arch.apf.msr_en_val & mask) == mask;
}
static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
{
return vcpu->arch.reserved_gpa_bits | rsvd_bits(5, 8) | rsvd_bits(1, 2);
@ -872,15 +879,20 @@ EXPORT_SYMBOL_GPL(load_pdptrs);
void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0)
{
if ((cr0 ^ old_cr0) & X86_CR0_PG) {
kvm_clear_async_pf_completion_queue(vcpu);
kvm_async_pf_hash_reset(vcpu);
/*
* Clearing CR0.PG is defined to flush the TLB from the guest's
* perspective.
*/
if (!(cr0 & X86_CR0_PG))
kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
/*
* Check for async #PF completion events when enabling paging,
* as the vCPU may have previously encountered async #PFs (it's
* entirely legal for the guest to toggle paging on/off without
* waiting for the async #PF queue to drain).
*/
else if (kvm_pv_async_pf_enabled(vcpu))
kvm_make_request(KVM_REQ_APF_READY, vcpu);
}
if ((cr0 ^ old_cr0) & KVM_MMU_CR0_ROLE_BITS)
@ -3246,13 +3258,6 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 0;
}
static inline bool kvm_pv_async_pf_enabled(struct kvm_vcpu *vcpu)
{
u64 mask = KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;
return (vcpu->arch.apf.msr_en_val & mask) == mask;
}
static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
{
gpa_t gpa = data & ~0x3f;
@ -3679,7 +3684,12 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
return 1;
if (data & 0x1) {
vcpu->arch.apf.pageready_pending = false;
/*
* Pairs with the smp_mb__after_atomic() in
* kvm_arch_async_page_present_queued().
*/
smp_store_mb(vcpu->arch.apf.pageready_pending, false);
kvm_check_async_pf_completion(vcpu);
}
break;
@ -12323,7 +12333,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
if ((work->wakeup_all || work->notpresent_injected) &&
kvm_pv_async_pf_enabled(vcpu) &&
!apf_put_user_ready(vcpu, work->arch.token)) {
vcpu->arch.apf.pageready_pending = true;
WRITE_ONCE(vcpu->arch.apf.pageready_pending, true);
kvm_apic_set_irq(vcpu, &irq, NULL);
}
@ -12334,7 +12344,11 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu)
{
kvm_make_request(KVM_REQ_APF_READY, vcpu);
if (!vcpu->arch.apf.pageready_pending)
/* Pairs with smp_store_mb() in kvm_set_msr_common(). */
smp_mb__after_atomic();
if (!READ_ONCE(vcpu->arch.apf.pageready_pending))
kvm_vcpu_kick(vcpu);
}

View File

@ -962,7 +962,7 @@ err:
lockres_free(cinfo->resync_lockres);
lockres_free(cinfo->bitmap_lockres);
if (cinfo->lockspace)
dlm_release_lockspace(cinfo->lockspace, 2);
dlm_release_lockspace(cinfo->lockspace, DLM_RELEASE_NORMAL);
mddev->cluster_info = NULL;
kfree(cinfo);
return ret;
@ -1025,7 +1025,7 @@ static int leave(struct mddev *mddev)
lockres_free(cinfo->resync_lockres);
lockres_free(cinfo->bitmap_lockres);
unlock_all_bitmaps(mddev);
dlm_release_lockspace(cinfo->lockspace, 2);
dlm_release_lockspace(cinfo->lockspace, DLM_RELEASE_NORMAL);
kfree(cinfo);
return 0;
}

View File

@ -6285,10 +6285,12 @@ static void copy_pair_set_active(struct dasd_copy_relation *copy, char *new_busi
static int dasd_eckd_copy_pair_swap(struct dasd_device *device, char *prim_busid,
char *sec_busid)
{
struct dasd_eckd_private *prim_priv, *sec_priv;
struct dasd_device *primary, *secondary;
struct dasd_copy_relation *copy;
struct dasd_block *block;
struct gendisk *gdp;
int rc;
copy = device->copy;
if (!copy)
@ -6304,6 +6306,9 @@ static int dasd_eckd_copy_pair_swap(struct dasd_device *device, char *prim_busid
if (!secondary)
return DASD_COPYPAIRSWAP_SECONDARY;
prim_priv = primary->private;
sec_priv = secondary->private;
/*
* usually the device should be quiesced for swap
* for paranoia stop device and requeue requests again
@ -6323,6 +6328,25 @@ static int dasd_eckd_copy_pair_swap(struct dasd_device *device, char *prim_busid
/* swap blocklayer device link */
gdp = block->gdp;
dasd_add_link_to_gendisk(gdp, secondary);
rc = device_move(disk_to_dev(gdp), &secondary->cdev->dev, DPM_ORDER_NONE);
if (rc) {
dev_err(&primary->cdev->dev,
"copy_pair_swap: moving blockdevice parent %s->%s failed (%d)\n",
dev_name(&primary->cdev->dev),
dev_name(&secondary->cdev->dev), rc);
}
if (primary->stopped & DASD_STOPPED_QUIESCE) {
dasd_device_set_stop_bits(secondary, DASD_STOPPED_QUIESCE);
dasd_device_remove_stop_bits(primary, DASD_STOPPED_QUIESCE);
}
/*
* The secondary device never got through format detection, but since it
* is a copy of the primary device, the format is exactly the same;
* therefore, the detected layout can simply be copied.
*/
sec_priv->uses_cdl = prim_priv->uses_cdl;
/* re-enable device */
dasd_device_remove_stop_bits(primary, DASD_STOPPED_PPRC);

View File

@ -58,6 +58,8 @@ struct virtio_ccw_device {
struct virtio_device vdev;
__u8 config[VIRTIO_CCW_CONFIG_SIZE];
struct ccw_device *cdev;
/* we make cdev->dev.dma_parms point to this */
struct device_dma_parameters dma_parms;
__u32 curr_io;
int err;
unsigned int revision; /* Transport revision */
@ -1206,6 +1208,7 @@ static int virtio_ccw_offline(struct ccw_device *cdev)
unregister_virtio_device(&vcdev->vdev);
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
dev_set_drvdata(&cdev->dev, NULL);
cdev->dev.dma_parms = NULL;
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
return 0;
}
@ -1270,6 +1273,7 @@ static int virtio_ccw_online(struct ccw_device *cdev)
}
vcdev->vdev.dev.parent = &cdev->dev;
vcdev->cdev = cdev;
cdev->dev.dma_parms = &vcdev->dma_parms;
vcdev->dma_area = ccw_device_dma_zalloc(vcdev->cdev,
sizeof(*vcdev->dma_area));
if (!vcdev->dma_area) {

View File

@ -1295,7 +1295,7 @@ void qla2xxx_process_purls_iocb(void **pkt, struct rsp_que **rsp)
a.reason = FCNVME_RJT_RC_LOGIC;
a.explanation = FCNVME_RJT_EXP_NONE;
xmt_reject = true;
kfree(item);
qla24xx_free_purex_item(item);
goto out;
}

View File

@ -25,9 +25,10 @@
#include "lowcomms.h"
/*
* /config/dlm/<cluster>/spaces/<space>/nodes/<node>/nodeid
* /config/dlm/<cluster>/spaces/<space>/nodes/<node>/nodeid (refers to <node>)
* /config/dlm/<cluster>/spaces/<space>/nodes/<node>/weight
* /config/dlm/<cluster>/comms/<comm>/nodeid
* /config/dlm/<cluster>/spaces/<space>/nodes/<node>/release_recover
* /config/dlm/<cluster>/comms/<comm>/nodeid (refers to <comm>)
* /config/dlm/<cluster>/comms/<comm>/local
* /config/dlm/<cluster>/comms/<comm>/addr (write only)
* /config/dlm/<cluster>/comms/<comm>/addr_list (read only)
@ -208,6 +209,7 @@ enum {
enum {
NODE_ATTR_NODEID = 0,
NODE_ATTR_WEIGHT,
NODE_ATTR_RELEASE_RECOVER,
};
struct dlm_clusters {
@ -221,6 +223,8 @@ struct dlm_spaces {
struct dlm_space {
struct config_group group;
struct list_head members;
struct list_head members_gone;
int members_gone_count;
struct mutex members_lock;
int members_count;
};
@ -250,6 +254,14 @@ struct dlm_node {
int weight;
int new;
int comm_seq; /* copy of cm->seq when nd->nodeid is set */
unsigned int release_recover;
};
/*
 * Record of a node removed via configfs rmdir.  Queued on
 * space->members_gone and consumed later by dlm_config_nodes(),
 * because configfs rmdir() cannot carry additional attributes
 * at removal time.
 */
struct dlm_member_gone {
int nodeid;
unsigned int release_recover;
struct list_head list; /* space->members_gone */
};
static struct configfs_group_operations clusters_ops = {
@ -430,6 +442,7 @@ static struct config_group *make_space(struct config_group *g, const char *name)
configfs_add_default_group(&nds->ns_group, &sp->group);
INIT_LIST_HEAD(&sp->members);
INIT_LIST_HEAD(&sp->members_gone);
mutex_init(&sp->members_lock);
sp->members_count = 0;
return &sp->group;
@ -459,6 +472,12 @@ static void release_space(struct config_item *i)
static struct config_item *make_comm(struct config_group *g, const char *name)
{
struct dlm_comm *cm;
unsigned int nodeid;
int rv;
rv = kstrtouint(name, 0, &nodeid);
if (rv)
return ERR_PTR(rv);
cm = kzalloc(sizeof(struct dlm_comm), GFP_NOFS);
if (!cm)
@ -470,7 +489,7 @@ static struct config_item *make_comm(struct config_group *g, const char *name)
if (!cm->seq)
cm->seq = dlm_comm_count++;
cm->nodeid = -1;
cm->nodeid = nodeid;
cm->local = 0;
cm->addr_count = 0;
cm->mark = 0;
@ -497,16 +516,25 @@ static void release_comm(struct config_item *i)
static struct config_item *make_node(struct config_group *g, const char *name)
{
struct dlm_space *sp = config_item_to_space(g->cg_item.ci_parent);
unsigned int nodeid;
struct dlm_node *nd;
uint32_t seq = 0;
int rv;
rv = kstrtouint(name, 0, &nodeid);
if (rv)
return ERR_PTR(rv);
nd = kzalloc(sizeof(struct dlm_node), GFP_NOFS);
if (!nd)
return ERR_PTR(-ENOMEM);
config_item_init_type_name(&nd->item, name, &node_type);
nd->nodeid = -1;
nd->nodeid = nodeid;
nd->weight = 1; /* default weight of 1 if none is set */
nd->new = 1; /* set to 0 once it's been read by dlm_nodeid_list() */
dlm_comm_seq(nodeid, &seq, true);
nd->comm_seq = seq;
mutex_lock(&sp->members_lock);
list_add(&nd->list, &sp->members);
@ -520,10 +548,20 @@ static void drop_node(struct config_group *g, struct config_item *i)
{
struct dlm_space *sp = config_item_to_space(g->cg_item.ci_parent);
struct dlm_node *nd = config_item_to_node(i);
struct dlm_member_gone *mb_gone;
mb_gone = kzalloc(sizeof(*mb_gone), GFP_KERNEL);
if (!mb_gone)
return;
mutex_lock(&sp->members_lock);
list_del(&nd->list);
sp->members_count--;
mb_gone->nodeid = nd->nodeid;
mb_gone->release_recover = nd->release_recover;
list_add(&mb_gone->list, &sp->members_gone);
sp->members_gone_count++;
mutex_unlock(&sp->members_lock);
config_item_put(i);
@ -564,16 +602,19 @@ void dlm_config_exit(void)
static ssize_t comm_nodeid_show(struct config_item *item, char *buf)
{
return sprintf(buf, "%d\n", config_item_to_comm(item)->nodeid);
unsigned int nodeid;
int rv;
rv = kstrtouint(config_item_name(item), 0, &nodeid);
if (WARN_ON(rv))
return rv;
return sprintf(buf, "%u\n", nodeid);
}
static ssize_t comm_nodeid_store(struct config_item *item, const char *buf,
size_t len)
{
int rc = kstrtoint(buf, 0, &config_item_to_comm(item)->nodeid);
if (rc)
return rc;
return len;
}
@ -714,20 +755,19 @@ static struct configfs_attribute *comm_attrs[] = {
static ssize_t node_nodeid_show(struct config_item *item, char *buf)
{
return sprintf(buf, "%d\n", config_item_to_node(item)->nodeid);
unsigned int nodeid;
int rv;
rv = kstrtouint(config_item_name(item), 0, &nodeid);
if (WARN_ON(rv))
return rv;
return sprintf(buf, "%u\n", nodeid);
}
static ssize_t node_nodeid_store(struct config_item *item, const char *buf,
size_t len)
{
struct dlm_node *nd = config_item_to_node(item);
uint32_t seq = 0;
int rc = kstrtoint(buf, 0, &nd->nodeid);
if (rc)
return rc;
dlm_comm_seq(nd->nodeid, &seq);
nd->comm_seq = seq;
return len;
}
@ -746,12 +786,34 @@ static ssize_t node_weight_store(struct config_item *item, const char *buf,
return len;
}
/* Show the node's release_recover value as an unsigned decimal. */
static ssize_t node_release_recover_show(struct config_item *item, char *buf)
{
struct dlm_node *n = config_item_to_node(item);
return sprintf(buf, "%u\n", n->release_recover);
}
/*
 * Store the node's release_recover value.  Input is parsed with
 * kstrtouint(); a parse error is propagated to the writer, otherwise
 * the full write length is consumed.
 */
static ssize_t node_release_recover_store(struct config_item *item,
const char *buf, size_t len)
{
struct dlm_node *n = config_item_to_node(item);
int rc;
rc = kstrtouint(buf, 0, &n->release_recover);
if (rc)
return rc;
return len;
}
CONFIGFS_ATTR(node_, nodeid);
CONFIGFS_ATTR(node_, weight);
CONFIGFS_ATTR(node_, release_recover);
static struct configfs_attribute *node_attrs[] = {
[NODE_ATTR_NODEID] = &node_attr_nodeid,
[NODE_ATTR_WEIGHT] = &node_attr_weight,
[NODE_ATTR_RELEASE_RECOVER] = &node_attr_release_recover,
NULL,
};
@ -787,7 +849,7 @@ static struct dlm_comm *get_comm(int nodeid)
if (!comm_list)
return NULL;
mutex_lock(&clusters_root.subsys.su_mutex);
WARN_ON_ONCE(!mutex_is_locked(&clusters_root.subsys.su_mutex));
list_for_each_entry(i, &comm_list->cg_children, ci_entry) {
cm = config_item_to_comm(i);
@ -798,7 +860,6 @@ static struct dlm_comm *get_comm(int nodeid)
config_item_get(i);
break;
}
mutex_unlock(&clusters_root.subsys.su_mutex);
if (!found)
cm = NULL;
@ -814,9 +875,10 @@ static void put_comm(struct dlm_comm *cm)
int dlm_config_nodes(char *lsname, struct dlm_config_node **nodes_out,
int *count_out)
{
struct dlm_member_gone *mb_gone, *mb_safe;
struct dlm_config_node *nodes, *node;
struct dlm_space *sp;
struct dlm_node *nd;
struct dlm_config_node *nodes, *node;
int rv, count;
sp = get_space(lsname);
@ -830,7 +892,7 @@ int dlm_config_nodes(char *lsname, struct dlm_config_node **nodes_out,
goto out;
}
count = sp->members_count;
count = sp->members_count + sp->members_gone_count;
nodes = kcalloc(count, sizeof(struct dlm_config_node), GFP_NOFS);
if (!nodes) {
@ -849,6 +911,20 @@ int dlm_config_nodes(char *lsname, struct dlm_config_node **nodes_out,
nd->new = 0;
}
/* we delay the remove on nodes until here as configfs does
* not support additional attributes for rmdir().
*/
list_for_each_entry_safe(mb_gone, mb_safe, &sp->members_gone, list) {
node->nodeid = mb_gone->nodeid;
node->release_recover = mb_gone->release_recover;
node->gone = true;
node++;
list_del(&mb_gone->list);
sp->members_gone_count--;
kfree(mb_gone);
}
*count_out = count;
*nodes_out = nodes;
rv = 0;
@ -858,11 +934,20 @@ int dlm_config_nodes(char *lsname, struct dlm_config_node **nodes_out,
return rv;
}
int dlm_comm_seq(int nodeid, uint32_t *seq)
int dlm_comm_seq(int nodeid, uint32_t *seq, bool locked)
{
struct dlm_comm *cm = get_comm(nodeid);
struct dlm_comm *cm;
if (locked) {
cm = get_comm(nodeid);
} else {
mutex_lock(&clusters_root.subsys.su_mutex);
cm = get_comm(nodeid);
mutex_unlock(&clusters_root.subsys.su_mutex);
}
if (!cm)
return -EEXIST;
*seq = cm->seq;
put_comm(cm);
return 0;

View File

@ -17,8 +17,10 @@
struct dlm_config_node {
int nodeid;
int weight;
bool gone;
int new;
uint32_t comm_seq;
unsigned int release_recover;
};
#define DLM_MAX_ADDR_COUNT 3
@ -47,7 +49,7 @@ int dlm_config_init(void);
void dlm_config_exit(void);
int dlm_config_nodes(char *lsname, struct dlm_config_node **nodes_out,
int *count_out);
int dlm_comm_seq(int nodeid, uint32_t *seq);
int dlm_comm_seq(int nodeid, uint32_t *seq, bool locked);
int dlm_our_nodeid(void);
int dlm_our_addr(struct sockaddr_storage *addr, int num);

View File

@ -5110,26 +5110,8 @@ void dlm_receive_buffer(union dlm_packet *p, int nodeid)
static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb,
struct dlm_message *ms_stub)
{
if (middle_conversion(lkb)) {
log_rinfo(ls, "%s %x middle convert in progress", __func__,
lkb->lkb_id);
/* We sent this lock to the new master. The new master will
* tell us when it's granted. We no longer need a reply, so
* use a fake reply to put the lkb into the right state.
*/
hold_lkb(lkb);
memset(ms_stub, 0, sizeof(struct dlm_message));
ms_stub->m_flags = DLM_IFL_STUB_MS;
ms_stub->m_type = DLM_MSG_CONVERT_REPLY;
ms_stub->m_result = -EINPROGRESS;
ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
_receive_convert_reply(lkb, ms_stub);
unhold_lkb(lkb);
} else if (lkb->lkb_rqmode >= lkb->lkb_grmode) {
if (middle_conversion(lkb) || lkb->lkb_rqmode >= lkb->lkb_grmode)
lkb->lkb_flags |= DLM_IFL_RESEND;
}
/* lkb->lkb_rqmode < lkb->lkb_grmode shouldn't happen since down
conversions are async; there's no reply from the remote master */

View File

@ -196,34 +196,29 @@ static struct kobj_type dlm_ktype = {
static struct kset *dlm_kset;
static int do_uevent(struct dlm_ls *ls, int in)
static int do_uevent(struct dlm_ls *ls, int in, unsigned int release_recover)
{
int error;
char message[512] = {};
char *envp[] = { message, NULL };
if (in)
if (in) {
kobject_uevent(&ls->ls_kobj, KOBJ_ONLINE);
else
kobject_uevent(&ls->ls_kobj, KOBJ_OFFLINE);
} else {
snprintf(message, 511, "RELEASE_RECOVER=%u", release_recover);
kobject_uevent_env(&ls->ls_kobj, KOBJ_OFFLINE, envp);
}
log_rinfo(ls, "%s the lockspace group...", in ? "joining" : "leaving");
/* dlm_controld will see the uevent, do the necessary group management
and then write to sysfs to wake us */
error = wait_event_interruptible(ls->ls_uevent_wait,
test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags));
wait_event(ls->ls_uevent_wait,
test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags));
log_rinfo(ls, "group event done %d %d", error, ls->ls_uevent_result);
log_rinfo(ls, "group event done %d", ls->ls_uevent_result);
if (error)
goto out;
error = ls->ls_uevent_result;
out:
if (error)
log_error(ls, "group %s failed %d %d", in ? "join" : "leave",
error, ls->ls_uevent_result);
return error;
return ls->ls_uevent_result;
}
static int dlm_uevent(struct kset *kset, struct kobject *kobj,
@ -649,8 +644,8 @@ static int new_lockspace(const char *name, const char *cluster,
current lockspace members are (via configfs) and then tells the
lockspace to start running (via sysfs) in dlm_ls_start(). */
error = do_uevent(ls, 1);
if (error)
error = do_uevent(ls, 1, 0);
if (error < 0)
goto out_recoverd;
wait_for_completion(&ls->ls_members_done);
@ -665,7 +660,7 @@ static int new_lockspace(const char *name, const char *cluster,
return 0;
out_members:
do_uevent(ls, 0);
do_uevent(ls, 0, 0);
dlm_clear_members(ls);
kfree(ls->ls_node_array);
out_recoverd:
@ -749,14 +744,14 @@ static int lkb_idr_free(int id, void *p, void *data)
This is because there may be LKBs queued as ASTs that have been unlinked
from their RSBs and are pending deletion once the AST has been delivered */
static int lockspace_busy(struct dlm_ls *ls, int force)
static int lockspace_busy(struct dlm_ls *ls, unsigned int release_option)
{
int rv;
spin_lock(&ls->ls_lkbidr_spin);
if (force == 0) {
if (release_option == DLM_RELEASE_NO_LOCKS) {
rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_any, ls);
} else if (force == 1) {
} else if (release_option == DLM_RELEASE_UNUSED) {
rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_local, ls);
} else {
rv = 0;
@ -765,13 +760,13 @@ static int lockspace_busy(struct dlm_ls *ls, int force)
return rv;
}
static int release_lockspace(struct dlm_ls *ls, int force)
static int release_lockspace(struct dlm_ls *ls, unsigned int release_option)
{
struct dlm_rsb *rsb;
struct rb_node *n;
int i, busy, rv;
busy = lockspace_busy(ls, force);
busy = lockspace_busy(ls, release_option);
spin_lock(&lslist_lock);
if (ls->ls_create_count == 1) {
@ -796,8 +791,9 @@ static int release_lockspace(struct dlm_ls *ls, int force)
dlm_device_deregister(ls);
if (force < 3 && dlm_user_daemon_available())
do_uevent(ls, 0);
if (release_option != DLM_RELEASE_NO_EVENT &&
dlm_user_daemon_available())
do_uevent(ls, 0, (release_option == DLM_RELEASE_RECOVER));
dlm_recoverd_stop(ls);
@ -870,25 +866,24 @@ static int release_lockspace(struct dlm_ls *ls, int force)
* lockspace must continue to function as usual, participating in recoveries,
* until this returns.
*
* Force has 4 possible values:
* 0 - don't destroy lockspace if it has any LKBs
* 1 - destroy lockspace if it has remote LKBs but not if it has local LKBs
* 2 - destroy lockspace regardless of LKBs
* 3 - destroy lockspace as part of a forced shutdown
* See DLM_RELEASE defines for release_option values and their meaning.
*/
int dlm_release_lockspace(void *lockspace, int force)
int dlm_release_lockspace(void *lockspace, unsigned int release_option)
{
struct dlm_ls *ls;
int error;
if (release_option > __DLM_RELEASE_MAX)
return -EINVAL;
ls = dlm_find_lockspace_local(lockspace);
if (!ls)
return -EINVAL;
dlm_put_lockspace(ls);
mutex_lock(&ls_lock);
error = release_lockspace(ls, force);
error = release_lockspace(ls, release_option);
if (!error)
ls_count--;
if (!ls_count)

View File

@ -454,7 +454,8 @@ static void dlm_lsop_recover_prep(struct dlm_ls *ls)
ls->ls_ops->recover_prep(ls->ls_ops_arg);
}
static void dlm_lsop_recover_slot(struct dlm_ls *ls, struct dlm_member *memb)
static void dlm_lsop_recover_slot(struct dlm_ls *ls, struct dlm_member *memb,
unsigned int release_recover)
{
struct dlm_slot slot;
uint32_t seq;
@ -469,9 +470,9 @@ static void dlm_lsop_recover_slot(struct dlm_ls *ls, struct dlm_member *memb)
we consider the node to have failed (versus
being removed due to dlm_release_lockspace) */
error = dlm_comm_seq(memb->nodeid, &seq);
error = dlm_comm_seq(memb->nodeid, &seq, false);
if (!error && seq == memb->comm_seq)
if (!release_recover && !error && seq == memb->comm_seq)
return;
slot.nodeid = memb->nodeid;
@ -528,6 +529,7 @@ int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
struct dlm_member *memb, *safe;
struct dlm_config_node *node;
int i, error, neg = 0, low = -1;
unsigned int release_recover;
/* previously removed members that we've not finished removing need to
count as a negative change so the "neg" recovery steps will happen */
@ -541,11 +543,21 @@ int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
list_for_each_entry_safe(memb, safe, &ls->ls_nodes, list) {
node = find_config_node(rv, memb->nodeid);
if (node && !node->new)
if (!node) {
log_error(ls, "remove member %d invalid",
memb->nodeid);
return -EFAULT;
}
if (!node->new && !node->gone)
continue;
if (!node) {
log_rinfo(ls, "remove member %d", memb->nodeid);
release_recover = 0;
if (node->gone) {
release_recover = node->release_recover;
log_rinfo(ls, "remove member %d%s", memb->nodeid,
release_recover ? " (release_recover)" : "");
} else {
/* removed and re-added */
log_rinfo(ls, "remove member %d comm_seq %u %u",
@ -555,13 +567,16 @@ int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
neg++;
list_move(&memb->list, &ls->ls_nodes_gone);
ls->ls_num_nodes--;
dlm_lsop_recover_slot(ls, memb);
dlm_lsop_recover_slot(ls, memb, release_recover);
}
/* add new members to ls_nodes */
for (i = 0; i < rv->nodes_count; i++) {
node = &rv->nodes[i];
if (node->gone)
continue;
if (dlm_is_member(ls, node->nodeid))
continue;
dlm_add_member(ls, node);

View File

@ -419,7 +419,7 @@ static int device_create_lockspace(struct dlm_lspace_params *params)
dlm_put_lockspace(ls);
if (error)
dlm_release_lockspace(lockspace, 0);
dlm_release_lockspace(lockspace, DLM_RELEASE_NO_LOCKS);
else
error = ls->ls_device.minor;
@ -430,7 +430,7 @@ static int device_remove_lockspace(struct dlm_lspace_params *params)
{
dlm_lockspace_t *lockspace;
struct dlm_ls *ls;
int error, force = 0;
int error, force = DLM_RELEASE_NO_LOCKS;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@ -440,7 +440,7 @@ static int device_remove_lockspace(struct dlm_lspace_params *params)
return -ENOENT;
if (params->flags & DLM_USER_LSFLG_FORCEFREE)
force = 2;
force = DLM_RELEASE_NORMAL;
lockspace = ls->ls_local_handle;
dlm_put_lockspace(ls);

View File

@ -640,7 +640,7 @@ static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
struct gfs2_bufdata *bd;
lock_buffer(bh);
gfs2_log_lock(sdp);
spin_lock(&sdp->sd_log_lock);
clear_buffer_dirty(bh);
bd = bh->b_private;
if (bd) {
@ -656,7 +656,7 @@ static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
clear_buffer_mapped(bh);
clear_buffer_req(bh);
clear_buffer_new(bh);
gfs2_log_unlock(sdp);
spin_unlock(&sdp->sd_log_lock);
unlock_buffer(bh);
}
@ -721,7 +721,7 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
* on dirty buffers like we used to here again.
*/
gfs2_log_lock(sdp);
spin_lock(&sdp->sd_log_lock);
head = bh = page_buffers(page);
do {
if (atomic_read(&bh->b_count))
@ -753,12 +753,12 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
bh = bh->b_this_page;
} while (bh != head);
gfs2_log_unlock(sdp);
spin_unlock(&sdp->sd_log_lock);
return try_to_free_buffers(page);
cannot_release:
gfs2_log_unlock(sdp);
spin_unlock(&sdp->sd_log_lock);
return 0;
}

View File

@ -67,7 +67,7 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
struct buffer_head *bh;
const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);
gfs2_log_lock(sdp);
spin_lock(&sdp->sd_log_lock);
spin_lock(&sdp->sd_ail_lock);
list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
if (nr_revokes == 0)
@ -83,7 +83,7 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
}
GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
spin_unlock(&sdp->sd_ail_lock);
gfs2_log_unlock(sdp);
spin_unlock(&sdp->sd_log_lock);
if (gfs2_withdrawing(sdp))
gfs2_withdraw(sdp);
@ -115,10 +115,10 @@ static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
* If none of these conditions are true, our revokes are all
* flushed and we can return.
*/
gfs2_log_lock(sdp);
spin_lock(&sdp->sd_log_lock);
have_revokes = !list_empty(&sdp->sd_log_revokes);
log_in_flight = atomic_read(&sdp->sd_log_in_flight);
gfs2_log_unlock(sdp);
spin_unlock(&sdp->sd_log_lock);
if (have_revokes)
goto flush;
if (log_in_flight)

View File

@ -1391,7 +1391,7 @@ static int gdlm_mount(struct gfs2_sbd *sdp, const char *table)
return 0;
fail_release:
dlm_release_lockspace(ls->ls_dlm, 2);
dlm_release_lockspace(ls->ls_dlm, DLM_RELEASE_NORMAL);
fail_free:
free_recover_size(ls);
fail:
@ -1429,7 +1429,7 @@ static void gdlm_unmount(struct gfs2_sbd *sdp)
release:
down_write(&ls->ls_sem);
if (ls->ls_dlm) {
dlm_release_lockspace(ls->ls_dlm, 2);
dlm_release_lockspace(ls->ls_dlm, DLM_RELEASE_NORMAL);
ls->ls_dlm = NULL;
}
up_write(&ls->ls_sem);

View File

@ -773,9 +773,9 @@ void gfs2_flush_revokes(struct gfs2_sbd *sdp)
/* number of revokes we still have room for */
unsigned int max_revokes = atomic_read(&sdp->sd_log_revokes_available);
gfs2_log_lock(sdp);
spin_lock(&sdp->sd_log_lock);
gfs2_ail1_empty(sdp, max_revokes);
gfs2_log_unlock(sdp);
spin_unlock(&sdp->sd_log_lock);
}
/**
@ -1055,7 +1055,7 @@ repeat:
goto out_withdraw;
lops_after_commit(sdp, tr);
gfs2_log_lock(sdp);
spin_lock(&sdp->sd_log_lock);
sdp->sd_log_blks_reserved = 0;
spin_lock(&sdp->sd_ail_lock);
@ -1064,7 +1064,7 @@ repeat:
tr = NULL;
}
spin_unlock(&sdp->sd_ail_lock);
gfs2_log_unlock(sdp);
spin_unlock(&sdp->sd_log_lock);
if (!(flags & GFS2_LOG_HEAD_FLUSH_NORMAL)) {
if (!sdp->sd_log_idle) {
@ -1152,7 +1152,7 @@ static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
unsigned int unused;
unsigned int maxres;
gfs2_log_lock(sdp);
spin_lock(&sdp->sd_log_lock);
if (sdp->sd_log_tr) {
gfs2_merge_trans(sdp, tr);
@ -1170,7 +1170,7 @@ static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
gfs2_log_release(sdp, unused);
sdp->sd_log_blks_reserved = reserved;
gfs2_log_unlock(sdp);
spin_unlock(&sdp->sd_log_lock);
}
/**

View File

@ -23,30 +23,6 @@
*/
#define GFS2_LOG_FLUSH_MIN_BLOCKS 4
/**
* gfs2_log_lock - acquire the right to mess with the log manager
* @sdp: the filesystem
*
*/
static inline void gfs2_log_lock(struct gfs2_sbd *sdp)
__acquires(&sdp->sd_log_lock)
{
spin_lock(&sdp->sd_log_lock);
}
/**
* gfs2_log_unlock - release the right to mess with the log manager
* @sdp: the filesystem
*
*/
static inline void gfs2_log_unlock(struct gfs2_sbd *sdp)
__releases(&sdp->sd_log_lock)
{
spin_unlock(&sdp->sd_log_lock);
}
static inline void gfs2_ordered_add_inode(struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

View File

@ -659,19 +659,19 @@ static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
unsigned n;
__be64 *ptr;
gfs2_log_lock(sdp);
spin_lock(&sdp->sd_log_lock);
list_sort(NULL, blist, blocknr_cmp);
bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
while(total) {
num = total;
if (total > limit)
num = limit;
gfs2_log_unlock(sdp);
spin_unlock(&sdp->sd_log_lock);
page = gfs2_get_log_desc(sdp,
is_databuf ? GFS2_LOG_DESC_JDATA :
GFS2_LOG_DESC_METADATA, num + 1, num);
ld = page_address(page);
gfs2_log_lock(sdp);
spin_lock(&sdp->sd_log_lock);
ptr = (__be64 *)(ld + 1);
n = 0;
@ -685,14 +685,14 @@ static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
break;
}
gfs2_log_unlock(sdp);
spin_unlock(&sdp->sd_log_lock);
gfs2_log_write_page(sdp, page);
gfs2_log_lock(sdp);
spin_lock(&sdp->sd_log_lock);
n = 0;
list_for_each_entry_continue(bd2, blist, bd_list) {
get_bh(bd2->bd_bh);
gfs2_log_unlock(sdp);
spin_unlock(&sdp->sd_log_lock);
lock_buffer(bd2->bd_bh);
if (buffer_escaped(bd2->bd_bh)) {
@ -711,7 +711,7 @@ static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
} else {
gfs2_log_write_bh(sdp, bd2->bd_bh);
}
gfs2_log_lock(sdp);
spin_lock(&sdp->sd_log_lock);
if (++n >= num)
break;
}
@ -719,7 +719,7 @@ static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
BUG_ON(total < num);
total -= num;
}
gfs2_log_unlock(sdp);
spin_unlock(&sdp->sd_log_lock);
}
static void buf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)

View File

@ -386,7 +386,7 @@ static void gfs2_ail1_wipe(struct gfs2_sbd *sdp, u64 bstart, u32 blen)
struct buffer_head *bh;
u64 end = bstart + blen;
gfs2_log_lock(sdp);
spin_lock(&sdp->sd_log_lock);
spin_lock(&sdp->sd_ail_lock);
list_for_each_entry_safe(tr, s, &sdp->sd_ail1_list, tr_list) {
list_for_each_entry_safe(bd, bs, &tr->tr_ail1_list,
@ -399,7 +399,7 @@ static void gfs2_ail1_wipe(struct gfs2_sbd *sdp, u64 bstart, u32 blen)
}
}
spin_unlock(&sdp->sd_ail_lock);
gfs2_log_unlock(sdp);
spin_unlock(&sdp->sd_log_lock);
}
static struct buffer_head *gfs2_getjdatabuf(struct gfs2_inode *ip, u64 blkno)
@ -459,11 +459,11 @@ void gfs2_journal_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
}
if (bh) {
lock_buffer(bh);
gfs2_log_lock(sdp);
spin_lock(&sdp->sd_log_lock);
spin_lock(&sdp->sd_ail_lock);
gfs2_remove_from_journal(bh, ty);
spin_unlock(&sdp->sd_ail_lock);
gfs2_log_unlock(sdp);
spin_unlock(&sdp->sd_log_lock);
unlock_buffer(bh);
brelse(bh);
}

View File

@ -1374,6 +1374,35 @@ out:
return ret;
}
/*
 * gfs2_truncate_inode_pages - truncate an inode's page cache, wrapping the
 * truncate in a transaction when the inode is journaled-data (jdata) and
 * has cached pages, since invalidation may generate revokes.
 * @inode: the inode whose address space is to be truncated
 *
 * Returns 0 on success or the error from gfs2_trans_begin().
 */
static int gfs2_truncate_inode_pages(struct inode *inode)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct address_space *mapping = &inode->i_data;
/* only jdata inodes with cached pages need a transaction */
bool need_trans = gfs2_is_jdata(ip) && mapping->nrpages;
int ret;
/*
 * Truncating a jdata inode address space may create revokes in
 * truncate_inode_pages() -> gfs2_invalidate_folio() -> ... ->
 * gfs2_remove_from_journal(), so we need a transaction here.
 *
 * FIXME: During a withdraw, no new transactions can be created.
 * In that case, we skip the truncate, but that doesn't help because
 * truncate_inode_pages_final() will then call gfs2_invalidate_folio()
 * again, and outside of a transaction.
 */
if (need_trans) {
ret = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
if (ret)
return ret;
}
truncate_inode_pages(mapping, 0);
if (need_trans)
gfs2_trans_end(sdp);
return 0;
}
/*
* evict_linked_inode - evict an inode whose dinode has not been unlinked
* @inode: The inode to evict
@ -1409,7 +1438,9 @@ static int evict_linked_inode(struct inode *inode, struct gfs2_holder *gh)
gfs2_ail_flush(gl, 0);
clean:
truncate_inode_pages(&inode->i_data, 0);
ret = gfs2_truncate_inode_pages(inode);
if (ret)
return ret;
truncate_inode_pages(metamapping, 0);
return 0;
}

View File

@ -174,7 +174,6 @@ static struct gfs2_bufdata *gfs2_alloc_bufdata(struct gfs2_glock *gl,
INIT_LIST_HEAD(&bd->bd_list);
INIT_LIST_HEAD(&bd->bd_ail_st_list);
INIT_LIST_HEAD(&bd->bd_ail_gl_list);
bh->b_private = bd;
return bd;
}
@ -203,17 +202,20 @@ void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh)
set_bit(TR_TOUCHED, &tr->tr_flags);
goto out;
}
gfs2_log_lock(sdp);
spin_lock(&sdp->sd_log_lock);
bd = bh->b_private;
if (bd == NULL) {
gfs2_log_unlock(sdp);
spin_unlock(&sdp->sd_log_lock);
unlock_buffer(bh);
if (bh->b_private == NULL)
bd = gfs2_alloc_bufdata(gl, bh);
else
bd = bh->b_private;
bd = gfs2_alloc_bufdata(gl, bh);
lock_buffer(bh);
gfs2_log_lock(sdp);
spin_lock(&sdp->sd_log_lock);
if (bh->b_private) {
kmem_cache_free(gfs2_bufdata_cachep, bd);
bd = bh->b_private;
} else {
bh->b_private = bd;
}
}
gfs2_assert(sdp, bd->bd_gl == gl);
set_bit(TR_TOUCHED, &tr->tr_flags);
@ -224,7 +226,7 @@ void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh)
tr->tr_num_databuf_new++;
list_add_tail(&bd->bd_list, &tr->tr_databuf);
}
gfs2_log_unlock(sdp);
spin_unlock(&sdp->sd_log_lock);
out:
unlock_buffer(bh);
}
@ -244,19 +246,20 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
set_bit(TR_TOUCHED, &tr->tr_flags);
goto out;
}
gfs2_log_lock(sdp);
spin_lock(&sdp->sd_log_lock);
bd = bh->b_private;
if (bd == NULL) {
gfs2_log_unlock(sdp);
spin_unlock(&sdp->sd_log_lock);
unlock_buffer(bh);
lock_page(bh->b_page);
if (bh->b_private == NULL)
bd = gfs2_alloc_bufdata(gl, bh);
else
bd = bh->b_private;
unlock_page(bh->b_page);
bd = gfs2_alloc_bufdata(gl, bh);
lock_buffer(bh);
gfs2_log_lock(sdp);
spin_lock(&sdp->sd_log_lock);
if (bh->b_private) {
kmem_cache_free(gfs2_bufdata_cachep, bd);
bd = bh->b_private;
} else {
bh->b_private = bd;
}
}
gfs2_assert(sdp, bd->bd_gl == gl);
set_bit(TR_TOUCHED, &tr->tr_flags);
@ -287,7 +290,7 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
list_add(&bd->bd_list, &tr->tr_buf);
tr->tr_num_buf_new++;
out_unlock:
gfs2_log_unlock(sdp);
spin_unlock(&sdp->sd_log_lock);
if (withdraw)
gfs2_assert_withdraw(sdp, 0);
out:
@ -309,7 +312,7 @@ void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len)
struct gfs2_bufdata *bd, *tmp;
unsigned int n = len;
gfs2_log_lock(sdp);
spin_lock(&sdp->sd_log_lock);
list_for_each_entry_safe(bd, tmp, &sdp->sd_log_revokes, bd_list) {
if ((bd->bd_blkno >= blkno) && (bd->bd_blkno < (blkno + len))) {
list_del_init(&bd->bd_list);
@ -323,7 +326,7 @@ void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len)
break;
}
}
gfs2_log_unlock(sdp);
spin_unlock(&sdp->sd_log_lock);
}
void gfs2_trans_free(struct gfs2_sbd *sdp, struct gfs2_trans *tr)

View File

@ -981,7 +981,7 @@ static const struct dlm_lockspace_ops ocfs2_ls_ops = {
static int user_cluster_disconnect(struct ocfs2_cluster_connection *conn)
{
version_unlock(conn);
dlm_release_lockspace(conn->cc_lockspace, 2);
dlm_release_lockspace(conn->cc_lockspace, DLM_RELEASE_NORMAL);
conn->cc_lockspace = NULL;
ocfs2_live_connection_drop(conn->cc_private);
conn->cc_private = NULL;

View File

@ -87,13 +87,44 @@ int dlm_new_lockspace(const char *name, const char *cluster,
const struct dlm_lockspace_ops *ops, void *ops_arg,
int *ops_result, dlm_lockspace_t **lockspace);
/*
* dlm_release_lockspace() release_option values:
*
* DLM_RELEASE_NO_LOCKS returns -EBUSY if any locks (lkb's)
* exist in the local lockspace.
*
* DLM_RELEASE_UNUSED previous value that is no longer used.
*
* DLM_RELEASE_NORMAL releases the lockspace regardless of any
* locks managed in the local lockspace.
*
* DLM_RELEASE_NO_EVENT releases the lockspace regardless of any
* locks managed in the local lockspace, and does not submit
* a leave event to the cluster manager, so other nodes will
* not be notified that the node should be removed from the
* list of lockspace members.
*
* DLM_RELEASE_RECOVER like DLM_RELEASE_NORMAL, but the remaining
* nodes will handle the removal of the node as if the node
* had failed, e.g. the recover_slot() callback would be used.
*/
#define DLM_RELEASE_NO_LOCKS 0
#define DLM_RELEASE_UNUSED 1
#define DLM_RELEASE_NORMAL 2
#define DLM_RELEASE_NO_EVENT 3
#define DLM_RELEASE_RECOVER 4
#define __DLM_RELEASE_MAX DLM_RELEASE_RECOVER
/*
* dlm_release_lockspace
*
* Stop a lockspace.
*
* release_option: see DLM_RELEASE values above.
*/
int dlm_release_lockspace(dlm_lockspace_t *lockspace, int force);
int dlm_release_lockspace(dlm_lockspace_t *lockspace,
unsigned int release_option);
/*
* dlm_lock

View File

@ -322,60 +322,90 @@ static int loopback_snd_timer_close_cable(struct loopback_pcm *dpcm)
return 0;
}
/*
 * is_access_interleaved - classify a PCM access mode
 * @access: PCM access type to test
 *
 * Returns true for the interleaved access types (MMAP_INTERLEAVED and
 * RW_INTERLEAVED), false for every other access type.
 */
static bool is_access_interleaved(snd_pcm_access_t access)
{
	switch (access) {
	case SNDRV_PCM_ACCESS_MMAP_INTERLEAVED:
	case SNDRV_PCM_ACCESS_RW_INTERLEAVED:
		return true;
	default:
		return false;
	}
}
static int loopback_check_format(struct loopback_cable *cable, int stream)
{
struct loopback_pcm *dpcm_play, *dpcm_capt;
struct snd_pcm_runtime *runtime, *cruntime;
struct loopback_setup *setup;
struct snd_card *card;
int check;
bool stop_capture = false;
unsigned long flags;
int ret = 0, check;
spin_lock_irqsave(&cable->lock, flags);
dpcm_play = cable->streams[SNDRV_PCM_STREAM_PLAYBACK];
dpcm_capt = cable->streams[SNDRV_PCM_STREAM_CAPTURE];
if (cable->valid != CABLE_VALID_BOTH) {
if (stream == SNDRV_PCM_STREAM_PLAYBACK)
goto __notify;
return 0;
}
runtime = cable->streams[SNDRV_PCM_STREAM_PLAYBACK]->
substream->runtime;
cruntime = cable->streams[SNDRV_PCM_STREAM_CAPTURE]->
substream->runtime;
check = runtime->format != cruntime->format ||
if (stream == SNDRV_PCM_STREAM_CAPTURE || !dpcm_play)
goto unlock;
} else {
if (!dpcm_play || !dpcm_capt)
goto unlock_eio;
runtime = dpcm_play->substream->runtime;
cruntime = dpcm_capt->substream->runtime;
if (!runtime || !cruntime)
goto unlock_eio;
check = runtime->format != cruntime->format ||
runtime->rate != cruntime->rate ||
runtime->channels != cruntime->channels ||
runtime->access != cruntime->access;
if (!check)
return 0;
if (stream == SNDRV_PCM_STREAM_CAPTURE) {
return -EIO;
} else {
snd_pcm_stop(cable->streams[SNDRV_PCM_STREAM_CAPTURE]->
substream, SNDRV_PCM_STATE_DRAINING);
__notify:
runtime = cable->streams[SNDRV_PCM_STREAM_PLAYBACK]->
substream->runtime;
setup = get_setup(cable->streams[SNDRV_PCM_STREAM_PLAYBACK]);
card = cable->streams[SNDRV_PCM_STREAM_PLAYBACK]->loopback->card;
if (setup->format != runtime->format) {
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
&setup->format_id);
setup->format = runtime->format;
}
if (setup->rate != runtime->rate) {
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
&setup->rate_id);
setup->rate = runtime->rate;
}
if (setup->channels != runtime->channels) {
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
&setup->channels_id);
setup->channels = runtime->channels;
}
if (setup->access != runtime->access) {
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
&setup->access_id);
setup->access = runtime->access;
}
is_access_interleaved(runtime->access) !=
is_access_interleaved(cruntime->access);
if (!check)
goto unlock;
if (stream == SNDRV_PCM_STREAM_CAPTURE)
goto unlock_eio;
else if (cruntime->state == SNDRV_PCM_STATE_RUNNING)
stop_capture = true;
}
setup = get_setup(dpcm_play);
card = dpcm_play->loopback->card;
runtime = dpcm_play->substream->runtime;
if (setup->format != runtime->format) {
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
&setup->format_id);
setup->format = runtime->format;
}
if (setup->rate != runtime->rate) {
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
&setup->rate_id);
setup->rate = runtime->rate;
}
if (setup->channels != runtime->channels) {
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
&setup->channels_id);
setup->channels = runtime->channels;
}
if (is_access_interleaved(setup->access) !=
is_access_interleaved(runtime->access)) {
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
&setup->access_id);
setup->access = runtime->access;
}
spin_unlock_irqrestore(&cable->lock, flags);
if (stop_capture)
snd_pcm_stop(dpcm_capt->substream, SNDRV_PCM_STATE_DRAINING);
return 0;
unlock_eio:
ret = -EIO;
unlock:
spin_unlock_irqrestore(&cable->lock, flags);
return ret;
}
static void loopback_active_notify(struct loopback_pcm *dpcm)
@ -584,8 +614,7 @@ static void copy_play_buf(struct loopback_pcm *play,
size = play->pcm_buffer_size - src_off;
if (dst_off + size > capt->pcm_buffer_size)
size = capt->pcm_buffer_size - dst_off;
if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED ||
runtime->access == SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED)
if (!is_access_interleaved(runtime->access))
copy_play_buf_part_n(play, capt, size, src_off, dst_off);
else
memcpy(dst + dst_off, src + src_off, size);
@ -1544,8 +1573,7 @@ static int loopback_access_get(struct snd_kcontrol *kcontrol,
mutex_lock(&loopback->cable_lock);
access = loopback->setup[kcontrol->id.subdevice][kcontrol->id.device].access;
ucontrol->value.enumerated.item[0] = access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED ||
access == SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED;
ucontrol->value.enumerated.item[0] = !is_access_interleaved(access);
mutex_unlock(&loopback->cable_lock);
return 0;