Import of kernel-4.18.0-553.105.1.el8_10
parent 0936fb7ecc
commit b2073ebb54
@@ -12,7 +12,7 @@ RHEL_MINOR = 10
 #
 # Use this spot to avoid future merge conflicts.
 # Do not trim this comment.
-RHEL_RELEASE = 553.104.1
+RHEL_RELEASE = 553.105.1
 
 #
 # ZSTREAM
@@ -15,6 +15,7 @@ struct ipl_pl_hdr {
 #define IPL_PL_FLAG_IPLPS	0x80
 #define IPL_PL_FLAG_SIPL	0x40
 #define IPL_PL_FLAG_IPLSR	0x20
+#define IPL_PL_FLAG_SBP		0x10
 
 /* IPL Parameter Block header */
 struct ipl_pb_hdr {
@@ -255,6 +255,24 @@ static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
 		sys_##_prefix##_##_name##_show,				\
 		sys_##_prefix##_##_name##_store)
 
+#define DEFINE_IPL_ATTR_BOOTPROG_RW(_prefix, _name, _fmt_out, _fmt_in, _hdr, _value) \
+IPL_ATTR_SHOW_FN(_prefix, _name, _fmt_out, (unsigned long long) _value)	\
+static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj,	\
+		struct kobj_attribute *attr,				\
+		const char *buf, size_t len)				\
+{									\
+	unsigned long long value;					\
+	if (sscanf(buf, _fmt_in, &value) != 1)				\
+		return -EINVAL;						\
+	(_value) = value;						\
+	(_hdr).flags &= ~IPL_PL_FLAG_SBP;				\
+	return len;							\
+}									\
+static struct kobj_attribute sys_##_prefix##_##_name##_attr =		\
+	__ATTR(_name, 0644,						\
+		sys_##_prefix##_##_name##_show,				\
+		sys_##_prefix##_##_name##_store)
+
 #define DEFINE_IPL_ATTR_STR_RW(_prefix, _name, _fmt_out, _fmt_in, _value)\
 IPL_ATTR_SHOW_FN(_prefix, _name, _fmt_out, _value)			\
 static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj,	\
@@ -835,12 +853,13 @@ DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%llx\n",
 		   reipl_block_fcp->fcp.wwpn);
 DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%llx\n",
 		   reipl_block_fcp->fcp.lun);
-DEFINE_IPL_ATTR_RW(reipl_fcp, bootprog, "%lld\n", "%lld\n",
-		   reipl_block_fcp->fcp.bootprog);
 DEFINE_IPL_ATTR_RW(reipl_fcp, br_lba, "%lld\n", "%lld\n",
 		   reipl_block_fcp->fcp.br_lba);
 DEFINE_IPL_ATTR_RW(reipl_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
 		   reipl_block_fcp->fcp.devno);
+DEFINE_IPL_ATTR_BOOTPROG_RW(reipl_fcp, bootprog, "%lld\n", "%lld\n",
+			    reipl_block_fcp->hdr,
+			    reipl_block_fcp->fcp.bootprog);
 
 static void reipl_get_ascii_loadparm(char *loadparm,
 				     struct ipl_parameter_block *ibp)
@@ -993,10 +1012,11 @@ DEFINE_IPL_ATTR_RW(reipl_nvme, fid, "0x%08llx\n", "%llx\n",
 		   reipl_block_nvme->nvme.fid);
 DEFINE_IPL_ATTR_RW(reipl_nvme, nsid, "0x%08llx\n", "%llx\n",
 		   reipl_block_nvme->nvme.nsid);
-DEFINE_IPL_ATTR_RW(reipl_nvme, bootprog, "%lld\n", "%lld\n",
-		   reipl_block_nvme->nvme.bootprog);
 DEFINE_IPL_ATTR_RW(reipl_nvme, br_lba, "%lld\n", "%lld\n",
 		   reipl_block_nvme->nvme.br_lba);
+DEFINE_IPL_ATTR_BOOTPROG_RW(reipl_nvme, bootprog, "%lld\n", "%lld\n",
+			    reipl_block_nvme->hdr,
+			    reipl_block_nvme->nvme.bootprog);
 
 static struct attribute *reipl_nvme_attrs[] = {
 	&sys_reipl_nvme_fid_attr.attr,
@@ -1123,8 +1143,9 @@ static struct bin_attribute *reipl_eckd_bin_attrs[] = {
 };
 
 DEFINE_IPL_CCW_ATTR_RW(reipl_eckd, device, reipl_block_eckd->eckd);
-DEFINE_IPL_ATTR_RW(reipl_eckd, bootprog, "%lld\n", "%lld\n",
-		   reipl_block_eckd->eckd.bootprog);
+DEFINE_IPL_ATTR_BOOTPROG_RW(reipl_eckd, bootprog, "%lld\n", "%lld\n",
+			    reipl_block_eckd->hdr,
+			    reipl_block_eckd->eckd.bootprog);
 
 static struct attribute *reipl_eckd_attrs[] = {
 	&sys_reipl_eckd_device_attr.attr,
@@ -1652,12 +1673,13 @@ DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%llx\n",
 		   dump_block_fcp->fcp.wwpn);
 DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%llx\n",
 		   dump_block_fcp->fcp.lun);
-DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n",
-		   dump_block_fcp->fcp.bootprog);
 DEFINE_IPL_ATTR_RW(dump_fcp, br_lba, "%lld\n", "%lld\n",
 		   dump_block_fcp->fcp.br_lba);
 DEFINE_IPL_ATTR_RW(dump_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
 		   dump_block_fcp->fcp.devno);
+DEFINE_IPL_ATTR_BOOTPROG_RW(dump_fcp, bootprog, "%lld\n", "%lld\n",
+			    dump_block_fcp->hdr,
+			    dump_block_fcp->fcp.bootprog);
 
 static struct attribute *dump_fcp_attrs[] = {
 	&sys_dump_fcp_device_attr.attr,
@@ -1678,10 +1700,11 @@ DEFINE_IPL_ATTR_RW(dump_nvme, fid, "0x%08llx\n", "%llx\n",
 		   dump_block_nvme->nvme.fid);
 DEFINE_IPL_ATTR_RW(dump_nvme, nsid, "0x%08llx\n", "%llx\n",
 		   dump_block_nvme->nvme.nsid);
-DEFINE_IPL_ATTR_RW(dump_nvme, bootprog, "%lld\n", "%llx\n",
-		   dump_block_nvme->nvme.bootprog);
 DEFINE_IPL_ATTR_RW(dump_nvme, br_lba, "%lld\n", "%llx\n",
 		   dump_block_nvme->nvme.br_lba);
+DEFINE_IPL_ATTR_BOOTPROG_RW(dump_nvme, bootprog, "%lld\n", "%llx\n",
+			    dump_block_nvme->hdr,
+			    dump_block_nvme->nvme.bootprog);
 
 static struct attribute *dump_nvme_attrs[] = {
 	&sys_dump_nvme_fid_attr.attr,
@@ -1698,8 +1721,9 @@ static struct attribute_group dump_nvme_attr_group = {
 
 /* ECKD dump device attributes */
 DEFINE_IPL_CCW_ATTR_RW(dump_eckd, device, dump_block_eckd->eckd);
-DEFINE_IPL_ATTR_RW(dump_eckd, bootprog, "%lld\n", "%llx\n",
-		   dump_block_eckd->eckd.bootprog);
+DEFINE_IPL_ATTR_BOOTPROG_RW(dump_eckd, bootprog, "%lld\n", "%llx\n",
+			    dump_block_eckd->hdr,
+			    dump_block_eckd->eckd.bootprog);
 
 IPL_ATTR_BR_CHR_SHOW_FN(dump, dump_block_eckd->eckd);
 IPL_ATTR_BR_CHR_STORE_FN(dump, dump_block_eckd->eckd);
@@ -169,6 +169,11 @@ static void bit_putcs(struct vc_data *vc, struct fb_info *info,
 	image.height = vc->vc_font.height;
 	image.depth = 1;
 
+	if (image.dy >= info->var.yres)
+		return;
+
+	image.height = min(image.height, info->var.yres - image.dy);
+
 	if (attribute) {
 		buf = kmalloc(cellsize, GFP_ATOMIC);
 		if (!buf)
@@ -182,6 +187,18 @@ static void bit_putcs(struct vc_data *vc, struct fb_info *info,
 		cnt = count;
 
 		image.width = vc->vc_font.width * cnt;
+
+		if (image.dx >= info->var.xres)
+			break;
+
+		if (image.dx + image.width > info->var.xres) {
+			image.width = info->var.xres - image.dx;
+			cnt = image.width / vc->vc_font.width;
+			if (cnt == 0)
+				break;
+			image.width = cnt * vc->vc_font.width;
+		}
+
 		pitch = DIV_ROUND_UP(image.width, 8) + scan_align;
 		pitch &= ~scan_align;
 		size = pitch * image.height + buf_align;
@@ -1009,8 +1009,8 @@ static ssize_t gfs2_file_buffered_write(struct kiocb *iocb,
 	}
 
 	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, gh);
-retry:
 	if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
+retry:
 		window_size -= fault_in_iov_iter_readable(from, window_size);
 		if (!window_size) {
 			ret = -EFAULT;
@@ -568,6 +568,7 @@ enum {
 #define HCI_ERROR_CONNECTION_TIMEOUT	0x08
 #define HCI_ERROR_REJ_LIMITED_RESOURCES	0x0d
 #define HCI_ERROR_REJ_BAD_ADDR		0x0f
+#define HCI_ERROR_INVALID_PARAMETERS	0x12
 #define HCI_ERROR_REMOTE_USER_TERM	0x13
 #define HCI_ERROR_REMOTE_LOW_RESOURCES	0x14
 #define HCI_ERROR_REMOTE_POWER_OFF	0x15
@@ -295,6 +295,9 @@ struct adv_monitor {
 
 #define HCI_MAX_SHORT_NAME_LENGTH	10
 
+#define HCI_CONN_HANDLE_UNSET		0xffff
+#define HCI_CONN_HANDLE_MAX		0x0eff
+
 /* Min encryption key size to match with SMP */
 #define HCI_MIN_ENC_KEY_SIZE		7
 
@@ -693,6 +696,7 @@ struct hci_conn {
 	unsigned long	flags;
 
 	enum conn_reasons conn_reason;
+	__u8		abort_reason;
 
 	__u32		clock;
 	__u16		clock_accuracy;
@@ -712,7 +716,6 @@ struct hci_conn {
 	struct delayed_work auto_accept_work;
 	struct delayed_work idle_work;
 	struct delayed_work le_conn_timeout;
-	struct work_struct  le_scan_cleanup;
 
 	struct device	dev;
 	struct dentry	*debugfs;
@@ -895,6 +898,7 @@ enum {
 	HCI_CONN_STK_ENCRYPT,
 	HCI_CONN_AUTH_INITIATOR,
 	HCI_CONN_DROP,
+	HCI_CONN_CANCEL,
 	HCI_CONN_PARAM_REMOVAL_PEND,
 	HCI_CONN_NEW_LINK_KEY,
 	HCI_CONN_SCANNING,
@@ -1147,7 +1151,7 @@ int hci_conn_switch_role(struct hci_conn *conn, __u8 role);
 
 void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active);
 
-void hci_le_conn_failed(struct hci_conn *conn, u8 status);
+void hci_conn_failed(struct hci_conn *conn, u8 status);
 
 /*
  * hci_conn_get() and hci_conn_put() are used to control the life-time of an
@@ -1860,6 +1864,7 @@ int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip);
 int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status);
 int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status);
 
+int hci_abort_conn(struct hci_conn *conn, u8 reason);
 u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
 		      u16 to_multiplier);
 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
@@ -107,4 +107,6 @@ int hci_resume_sync(struct hci_dev *hdev);
 
 struct hci_conn;
 
+int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason);
+
 int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn);
@@ -60,7 +60,7 @@ static const struct sco_param esco_param_msbc[] = {
 };
 
 /* This function requires the caller holds hdev->lock */
-static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
+static void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
 {
 	struct hci_conn_params *params;
 	struct hci_dev *hdev = conn->hdev;
@@ -80,9 +80,28 @@ static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
 
 	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
 					   bdaddr_type);
-	if (!params || !params->explicit_connect)
+	if (!params)
 		return;
 
+	if (params->conn) {
+		hci_conn_drop(params->conn);
+		hci_conn_put(params->conn);
+		params->conn = NULL;
+	}
+
+	if (!params->explicit_connect)
+		return;
+
+	/* If the status indicates successful cancellation of
+	 * the attempt (i.e. Unknown Connection Id) there's no point of
+	 * notifying failure since we'll go back to keep trying to
+	 * connect. The only exception is explicit connect requests
+	 * where a timeout + cancel does indicate an actual failure.
+	 */
+	if (status && status != HCI_ERROR_UNKNOWN_CONN_ID)
+		mgmt_connect_failed(hdev, &conn->dst, conn->type,
+				    conn->dst_type, status);
+
 	/* The connection attempt was doing scan for new RPA, and is
 	 * in scan phase. If params are not associated with any other
 	 * autoconnect action, remove them completely. If they are, just unmark
@@ -142,57 +161,6 @@ static void hci_conn_cleanup(struct hci_conn *conn)
 	hci_dev_put(hdev);
 }
 
-static void le_scan_cleanup(struct work_struct *work)
-{
-	struct hci_conn *conn = container_of(work, struct hci_conn,
-					     le_scan_cleanup);
-	struct hci_dev *hdev = conn->hdev;
-	struct hci_conn *c = NULL;
-
-	BT_DBG("%s hcon %p", hdev->name, conn);
-
-	hci_dev_lock(hdev);
-
-	/* Check that the hci_conn is still around */
-	rcu_read_lock();
-	list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) {
-		if (c == conn)
-			break;
-	}
-	rcu_read_unlock();
-
-	if (c == conn) {
-		hci_connect_le_scan_cleanup(conn);
-		hci_conn_cleanup(conn);
-	}
-
-	hci_dev_unlock(hdev);
-	hci_dev_put(hdev);
-	hci_conn_put(conn);
-}
-
-static void hci_connect_le_scan_remove(struct hci_conn *conn)
-{
-	BT_DBG("%s hcon %p", conn->hdev->name, conn);
-
-	/* We can't call hci_conn_del/hci_conn_cleanup here since that
-	 * could deadlock with another hci_conn_del() call that's holding
-	 * hci_dev_lock and doing cancel_delayed_work_sync(&conn->disc_work).
-	 * Instead, grab temporary extra references to the hci_dev and
-	 * hci_conn and perform the necessary cleanup in a separate work
-	 * callback.
-	 */
-
-	hci_dev_hold(conn->hdev);
-	hci_conn_get(conn);
-
-	/* Even though we hold a reference to the hdev, many other
-	 * things might get cleaned up meanwhile, including the hdev's
-	 * own workqueue, so we can't use that for scheduling.
-	 */
-	schedule_work(&conn->le_scan_cleanup);
-}
-
 static void hci_acl_create_connection(struct hci_conn *conn)
 {
 	struct hci_dev *hdev = conn->hdev;
@@ -580,13 +548,6 @@ static void hci_conn_timeout(struct work_struct *work)
 	if (refcnt > 0)
 		return;
 
-	/* LE connections in scanning state need special handling */
-	if (conn->state == BT_CONNECT && conn->type == LE_LINK &&
-	    test_bit(HCI_CONN_SCANNING, &conn->flags)) {
-		hci_connect_le_scan_remove(conn);
-		return;
-	}
-
 	hci_abort_conn(conn, hci_proto_disconn_ind(conn));
 }
 
@@ -667,7 +628,9 @@ static void le_conn_timeout(struct work_struct *work)
 	if (conn->role == HCI_ROLE_SLAVE) {
 		/* Disable LE Advertising */
 		le_disable_advertising(hdev);
-		hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
+		hci_dev_lock(hdev);
+		hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
+		hci_dev_unlock(hdev);
 		return;
 	}
 
@@ -687,6 +650,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
 
 	bacpy(&conn->dst, dst);
 	bacpy(&conn->src, &hdev->bdaddr);
+	conn->handle = HCI_CONN_HANDLE_UNSET;
 	conn->hdev = hdev;
 	conn->type = type;
 	conn->role = role;
@@ -737,7 +701,6 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
 	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
 	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
 	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
-	INIT_WORK(&conn->le_scan_cleanup, le_scan_cleanup);
 
 	atomic_set(&conn->refcnt, 0);
 
@@ -868,40 +831,11 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
 EXPORT_SYMBOL(hci_get_route);
 
 /* This function requires the caller holds hdev->lock */
-void hci_le_conn_failed(struct hci_conn *conn, u8 status)
+static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
 {
 	struct hci_dev *hdev = conn->hdev;
-	struct hci_conn_params *params;
 
-	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
-					   conn->dst_type);
-	if (params && params->conn) {
-		hci_conn_drop(params->conn);
-		hci_conn_put(params->conn);
-		params->conn = NULL;
-	}
-
-	conn->state = BT_CLOSED;
-
-	/* If the status indicates successful cancellation of
-	 * the attempt (i.e. Unknown Connection Id) there's no point of
-	 * notifying failure since we'll go back to keep trying to
-	 * connect. The only exception is explicit connect requests
-	 * where a timeout + cancel does indicate an actual failure.
-	 */
-	if (status != HCI_ERROR_UNKNOWN_CONN_ID ||
-	    (params && params->explicit_connect))
-		mgmt_connect_failed(hdev, &conn->dst, conn->type,
-				    conn->dst_type, status);
-
-	hci_connect_cfm(conn, status);
-
-	hci_conn_del(conn);
-
-	/* Since we may have temporarily stopped the background scanning in
-	 * favor of connection establishment, we should restart it.
-	 */
-	hci_update_passive_scan(hdev);
+	hci_connect_le_scan_cleanup(conn, status);
 
 	/* Enable advertising in case this was a failed connection
 	 * attempt as a peripheral.
@@ -909,23 +843,46 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status)
 	hci_enable_advertising(hdev);
 }
 
+/* This function requires the caller holds hdev->lock */
+void hci_conn_failed(struct hci_conn *conn, u8 status)
+{
+	struct hci_dev *hdev = conn->hdev;
+
+	bt_dev_dbg(hdev, "status 0x%2.2x", status);
+
+	switch (conn->type) {
+	case LE_LINK:
+		hci_le_conn_failed(conn, status);
+		break;
+	case ACL_LINK:
+		mgmt_connect_failed(hdev, &conn->dst, conn->type,
+				    conn->dst_type, status);
+		break;
+	}
+
+	conn->state = BT_CLOSED;
+	hci_connect_cfm(conn, status);
+	hci_conn_del(conn);
+}
+
 static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
 {
 	struct hci_conn *conn = data;
 
 	bt_dev_dbg(hdev, "err %d", err);
 
 	hci_dev_lock(hdev);
 
 	if (!err) {
-		hci_connect_le_scan_cleanup(conn);
+		hci_connect_le_scan_cleanup(conn, 0x00);
 		goto done;
 	}
 
 	bt_dev_err(hdev, "request failed to create LE connection: err %d", err);
 
-	if (!conn)
+	/* Check if connection is still pending */
+	if (conn != hci_lookup_le_connect(hdev))
 		goto done;
 
-	hci_le_conn_failed(conn, err);
+	hci_conn_failed(conn, err);
 
 done:
 	hci_dev_unlock(hdev);
@@ -1783,3 +1740,47 @@ u32 hci_conn_get_phy(struct hci_conn *conn)
 
 	return phys;
 }
+
+static int abort_conn_sync(struct hci_dev *hdev, void *data)
+{
+	struct hci_conn *conn;
+	u16 handle = PTR_ERR(data);
+
+	conn = hci_conn_hash_lookup_handle(hdev, handle);
+	if (!conn)
+		return 0;
+
+	return hci_abort_conn_sync(hdev, conn, conn->abort_reason);
+}
+
+int hci_abort_conn(struct hci_conn *conn, u8 reason)
+{
+	struct hci_dev *hdev = conn->hdev;
+
+	/* If abort_reason has already been set it means the connection is
+	 * already being aborted so don't attempt to overwrite it.
+	 */
+	if (conn->abort_reason)
+		return 0;
+
+	bt_dev_dbg(hdev, "handle 0x%2.2x reason 0x%2.2x", conn->handle, reason);
+
+	conn->abort_reason = reason;
+
+	/* If the connection is pending check the command opcode since that
+	 * might be blocking on hci_cmd_sync_work while waiting its respective
+	 * event so we need to hci_cmd_sync_cancel to cancel it.
+	 */
+	if (conn->state == BT_CONNECT && hdev->req_status == HCI_REQ_PEND) {
+		switch (hci_skb_event(hdev->sent_cmd)) {
+		case HCI_EV_LE_CONN_COMPLETE:
+		case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
+		case HCI_EVT_LE_CIS_ESTABLISHED:
+			hci_cmd_sync_cancel(hdev, -ECANCELED);
+			break;
+		}
+	}
+
+	return hci_cmd_sync_queue(hdev, abort_conn_sync, ERR_PTR(conn->handle),
+				  NULL);
+}
@@ -2873,16 +2873,6 @@ static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
 
 	conn->resp_addr_type = peer_addr_type;
 	bacpy(&conn->resp_addr, peer_addr);
-
-	/* We don't want the connection attempt to stick around
-	 * indefinitely since LE doesn't have a page timeout concept
-	 * like BR/EDR. Set a timer for any connection that doesn't use
-	 * the accept list for connecting.
-	 */
-	if (filter_policy == HCI_LE_USE_PEER_ADDR)
-		queue_delayed_work(conn->hdev->workqueue,
-				   &conn->le_conn_timeout,
-				   conn->conn_timeout);
 }
 
 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
@@ -2892,7 +2882,7 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
 
 	/* All connection failure handling is taken care of by the
-	 * hci_le_conn_failed function which is triggered by the HCI
+	 * hci_conn_failed function which is triggered by the HCI
 	 * request completion callbacks used for connecting.
 	 */
 	if (status)
@@ -2917,7 +2907,7 @@ static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
 
 	/* All connection failure handling is taken care of by the
-	 * hci_le_conn_failed function which is triggered by the HCI
+	 * hci_conn_failed function which is triggered by the HCI
 	 * request completion callbacks used for connecting.
 	 */
 	if (status)
@@ -3125,8 +3115,9 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
 {
 	struct hci_ev_conn_complete *ev = data;
 	struct hci_conn *conn;
+	u8 status = ev->status;
 
-	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
+	bt_dev_dbg(hdev, "status 0x%2.2x", status);
 
 	hci_dev_lock(hdev);
 
@@ -3164,8 +3155,25 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
 		}
 	}
 
-	if (!ev->status) {
+	/* The HCI_Connection_Complete event is only sent once per connection.
+	 * Processing it more than once per connection can corrupt kernel memory.
+	 *
+	 * As the connection handle is set here for the first time, it indicates
+	 * whether the connection is already set up.
+	 */
+	if (conn->handle != HCI_CONN_HANDLE_UNSET) {
+		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
+		goto unlock;
+	}
+
+	if (!status) {
 		conn->handle = __le16_to_cpu(ev->handle);
+		if (conn->handle > HCI_CONN_HANDLE_MAX) {
+			bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
+				   conn->handle, HCI_CONN_HANDLE_MAX);
+			status = HCI_ERROR_INVALID_PARAMETERS;
+			goto done;
+		}
 
 		if (conn->type == ACL_LINK) {
 			conn->state = BT_CONFIG;
@ -3206,19 +3214,14 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
|
||||
hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
|
||||
&cp);
|
||||
}
|
||||
} else {
|
||||
conn->state = BT_CLOSED;
|
||||
if (conn->type == ACL_LINK)
|
||||
mgmt_connect_failed(hdev, &conn->dst, conn->type,
|
||||
conn->dst_type, ev->status);
|
||||
}
|
||||
|
||||
if (conn->type == ACL_LINK)
|
||||
hci_sco_setup(conn, ev->status);
|
||||
|
||||
if (ev->status) {
|
||||
hci_connect_cfm(conn, ev->status);
|
||||
hci_conn_del(conn);
|
||||
done:
|
||||
if (status) {
|
||||
hci_conn_failed(conn, status);
|
||||
} else if (ev->link_type == SCO_LINK) {
|
||||
switch (conn->setting & SCO_AIRMODE_MASK) {
|
||||
case SCO_AIRMODE_CVSD:
|
||||
@ -3227,7 +3230,7 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
|
||||
break;
|
||||
}
|
||||
|
||||
hci_connect_cfm(conn, ev->status);
|
||||
hci_connect_cfm(conn, status);
|
||||
}
|
||||
|
||||
unlock:
|
@@ -4698,8 +4701,22 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
 {
 	struct hci_ev_sync_conn_complete *ev = data;
 	struct hci_conn *conn;
+	u8 status = ev->status;
 
-	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
+	switch (ev->link_type) {
+	case SCO_LINK:
+	case ESCO_LINK:
+		break;
+	default:
+		/* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
+		 * for HCI_Synchronous_Connection_Complete is limited to
+		 * either SCO or eSCO
+		 */
+		bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
+		return;
+	}
+
+	bt_dev_dbg(hdev, "status 0x%2.2x", status);
 
 	hci_dev_lock(hdev);
 
@@ -4722,24 +4739,28 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
 		goto unlock;
 	}
 
-	switch (ev->status) {
+	/* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
+	 * Processing it more than once per connection can corrupt kernel memory.
+	 *
+	 * As the connection handle is set here for the first time, it indicates
+	 * whether the connection is already set up.
+	 */
+	if (conn->handle != HCI_CONN_HANDLE_UNSET) {
+		bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
+		goto unlock;
+	}
+
+	switch (status) {
 	case 0x00:
-		/* The synchronous connection complete event should only be
-		 * sent once per new connection. Receiving a successful
-		 * complete event when the connection status is already
-		 * BT_CONNECTED means that the device is misbehaving and sent
-		 * multiple complete event packets for the same new connection.
-		 *
-		 * Registering the device more than once can corrupt kernel
-		 * memory, hence upon detecting this invalid event, we report
-		 * an error and ignore the packet.
-		 */
-		if (conn->state == BT_CONNECTED) {
-			bt_dev_err(hdev, "Ignoring connect complete event for existing connection");
-			goto unlock;
+		conn->handle = __le16_to_cpu(ev->handle);
+		if (conn->handle > HCI_CONN_HANDLE_MAX) {
+			bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
+				   conn->handle, HCI_CONN_HANDLE_MAX);
+			status = HCI_ERROR_INVALID_PARAMETERS;
+			conn->state = BT_CLOSED;
+			break;
 		}
 
-		conn->handle = __le16_to_cpu(ev->handle);
 		conn->state = BT_CONNECTED;
 		conn->type = ev->link_type;
 
@@ -4783,8 +4804,8 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
 		}
 	}
 
-	hci_connect_cfm(conn, ev->status);
-	if (ev->status)
+	hci_connect_cfm(conn, status);
+	if (status)
 		hci_conn_del(conn);
 
 unlock:
@@ -5542,7 +5563,7 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
 	 */
 	hci_dev_clear_flag(hdev, HCI_LE_ADV);
 
-	conn = hci_lookup_le_connect(hdev);
+	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
 	if (!conn) {
 		conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
 		if (!conn) {
@@ -5576,6 +5597,17 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
 		cancel_delayed_work(&conn->le_conn_timeout);
 	}
 
+	/* The HCI_LE_Connection_Complete event is only sent once per connection.
+	 * Processing it more than once per connection can corrupt kernel memory.
+	 *
+	 * As the connection handle is set here for the first time, it indicates
+	 * whether the connection is already set up.
+	 */
+	if (conn->handle != HCI_CONN_HANDLE_UNSET) {
+		bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
+		goto unlock;
+	}
+
 	le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
 
 	/* Lookup the identity address from the stored connection
@@ -5595,8 +5627,22 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
 
 	conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);
 
-	if (status) {
-		hci_le_conn_failed(conn, status);
+	if (handle > HCI_CONN_HANDLE_MAX) {
+		bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", handle,
+			   HCI_CONN_HANDLE_MAX);
+		status = HCI_ERROR_INVALID_PARAMETERS;
+	}
+
+	/* All connection failure handling is taken care of by the
+	 * hci_conn_failed function which is triggered by the HCI
+	 * request completion callbacks used for connecting.
+	 */
+	if (status)
 		goto unlock;
-	}
+
+	/* Drop the connection if it has been aborted */
+	if (test_bit(HCI_CONN_CANCEL, &conn->flags)) {
+		hci_conn_drop(conn);
+		goto unlock;
 	}
 
@@ -1953,99 +1953,6 @@ static int discoverable_update(struct hci_request *req, unsigned long opt)
 	return 0;
 }
 
-void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
-		      u8 reason)
-{
-	switch (conn->state) {
-	case BT_CONNECTED:
-	case BT_CONFIG:
-		if (conn->type == AMP_LINK) {
-			struct hci_cp_disconn_phy_link cp;
-
-			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
-			cp.reason = reason;
-			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
-				    &cp);
-		} else {
-			struct hci_cp_disconnect dc;
-
-			dc.handle = cpu_to_le16(conn->handle);
-			dc.reason = reason;
-			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
-		}
-
-		conn->state = BT_DISCONN;
-
-		break;
-	case BT_CONNECT:
-		if (conn->type == LE_LINK) {
-			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
-				break;
-			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
-				    0, NULL);
-		} else if (conn->type == ACL_LINK) {
-			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
-				break;
-			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
-				    6, &conn->dst);
-		}
-		break;
-	case BT_CONNECT2:
-		if (conn->type == ACL_LINK) {
-			struct hci_cp_reject_conn_req rej;
-
-			bacpy(&rej.bdaddr, &conn->dst);
-			rej.reason = reason;
-
-			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
-				    sizeof(rej), &rej);
-		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
-			struct hci_cp_reject_sync_conn_req rej;
-
-			bacpy(&rej.bdaddr, &conn->dst);
-
-			/* SCO rejection has its own limited set of
-			 * allowed error values (0x0D-0x0F) which isn't
-			 * compatible with most values passed to this
-			 * function. To be safe hard-code one of the
-			 * values that's suitable for SCO.
-			 */
-			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
-
-			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
-				    sizeof(rej), &rej);
-		}
-		break;
-	default:
-		conn->state = BT_CLOSED;
-		break;
-	}
-}
-
-static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
-{
-	if (status)
-		bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status);
-}
-
-int hci_abort_conn(struct hci_conn *conn, u8 reason)
-{
-	struct hci_request req;
-	int err;
-
-	hci_req_init(&req, conn->hdev);
-
-	__hci_abort_conn(&req, conn, reason);
-
-	err = hci_req_run(&req, abort_conn_complete);
-	if (err && err != -ENODATA) {
-		bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
-		return err;
-	}
-
-	return 0;
-}
-
 static int le_scan_disable(struct hci_request *req, unsigned long opt)
 {
 	hci_req_add_le_scan_disable(req, false);
 
@@ -120,7 +120,6 @@ void __hci_req_update_scan(struct hci_request *req);
 int hci_update_random_address(struct hci_request *req, bool require_privacy,
 			      bool use_rpa, u8 *own_addr_type);
 
-int hci_abort_conn(struct hci_conn *conn, u8 reason);
 void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
 		      u8 reason);
 
@@ -245,8 +245,9 @@ int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
 
 	skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);
 	if (IS_ERR(skb)) {
-		bt_dev_err(hdev, "Opcode 0x%4x failed: %ld", opcode,
-			   PTR_ERR(skb));
+		if (!event)
+			bt_dev_err(hdev, "Opcode 0x%4x failed: %ld", opcode,
+				   PTR_ERR(skb));
 		return PTR_ERR(skb);
 	}
 
@@ -4368,19 +4369,27 @@ static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn,
 }
 
 static int hci_le_connect_cancel_sync(struct hci_dev *hdev,
-				      struct hci_conn *conn)
+				      struct hci_conn *conn, u8 reason)
 {
+	/* Return reason if scanning since the connection shall probably be
+	 * cleanup directly.
+	 */
 	if (test_bit(HCI_CONN_SCANNING, &conn->flags))
+		return reason;
+
+	if (conn->role == HCI_ROLE_SLAVE ||
+	    test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
 		return 0;
 
 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL,
-				     6, &conn->dst, HCI_CMD_TIMEOUT);
+				     0, NULL, HCI_CMD_TIMEOUT);
 }
 
-static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn)
+static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn,
+				   u8 reason)
 {
 	if (conn->type == LE_LINK)
-		return hci_le_connect_cancel_sync(hdev, conn);
+		return hci_le_connect_cancel_sync(hdev, conn, reason);
 
 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
 		return 0;
@@ -4424,37 +4433,77 @@ static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
 }
 
-static int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
-			       u8 reason)
+int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
 {
+	int err = 0;
+	u16 handle = conn->handle;
+	struct hci_conn *c;
+
 	switch (conn->state) {
 	case BT_CONNECTED:
 	case BT_CONFIG:
-		return hci_disconnect_sync(hdev, conn, reason);
-	case BT_CONNECT:
-		return hci_connect_cancel_sync(hdev, conn);
-	case BT_CONNECT2:
-		return hci_reject_conn_sync(hdev, conn, reason);
-	default:
-		conn->state = BT_CLOSED;
+		err = hci_disconnect_sync(hdev, conn, reason);
+		break;
+	case BT_CONNECT:
+		err = hci_connect_cancel_sync(hdev, conn, reason);
+		break;
+	case BT_CONNECT2:
+		err = hci_reject_conn_sync(hdev, conn, reason);
+		break;
+	default:
+		hci_dev_lock(hdev);
+		conn->state = BT_CLOSED;
+		hci_disconn_cfm(conn, reason);
+		hci_conn_del(conn);
+		hci_dev_unlock(hdev);
+		return 0;
 	}
 
-	return 0;
+	hci_dev_lock(hdev);
+
+	/* Check if the connection hasn't been cleanup while waiting
+	 * commands to complete.
+	 */
+	c = hci_conn_hash_lookup_handle(hdev, handle);
+	if (!c || c != conn) {
+		err = 0;
+		goto unlock;
+	}
+
+	/* Cleanup hci_conn object if it cannot be cancelled as it
+	 * likelly means the controller and host stack are out of sync
+	 * or in case of LE it was still scanning so it can be cleanup
+	 * safely.
+	 */
+	hci_conn_failed(conn, reason);
+
+unlock:
+	hci_dev_unlock(hdev);
+	return err;
 }
 
 static int hci_disconnect_all_sync(struct hci_dev *hdev, u8 reason)
 {
-	struct hci_conn *conn, *tmp;
-	int err;
+	struct list_head *head = &hdev->conn_hash.list;
+	struct hci_conn *conn;
 
-	list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) {
-		err = hci_abort_conn_sync(hdev, conn, reason);
-		if (err)
-			return err;
+	rcu_read_lock();
+	while ((conn = list_first_or_null_rcu(head, struct hci_conn, list))) {
+		/* Make sure the connection is not freed while unlocking */
+		conn = hci_conn_get(conn);
+		rcu_read_unlock();
+		/* Disregard possible errors since hci_conn_del shall have been
+		 * called even in case of errors had occurred since it would
+		 * then cause hci_conn_failed to be called which calls
+		 * hci_conn_del internally.
+		 */
+		hci_abort_conn_sync(hdev, conn, reason);
+		hci_conn_put(conn);
+		rcu_read_lock();
 	}
+	rcu_read_unlock();
 
-	return err;
+	return 0;
 }
 
 /* This function perform power off HCI command sequence as follows:
@@ -5316,6 +5365,9 @@ int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn)
 				       conn->conn_timeout, NULL);
 
 done:
+	if (err == -ETIMEDOUT)
+		hci_le_connect_cancel_sync(hdev, conn, 0x00);
+
 	/* Re-enable advertising after the connection attempt is finished. */
 	hci_resume_advertising_sync(hdev);
 	return err;
@@ -2542,6 +2542,37 @@ static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
 			  skip_sk);
 }
 
+static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
+{
+	struct mgmt_pending_cmd *cmd = data;
+	struct mgmt_cp_unpair_device *cp = cmd->param;
+
+	if (!err)
+		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
+
+	cmd->cmd_complete(cmd, err);
+	mgmt_pending_free(cmd);
+}
+
+static int unpair_device_sync(struct hci_dev *hdev, void *data)
+{
+	struct mgmt_pending_cmd *cmd = data;
+	struct mgmt_cp_unpair_device *cp = cmd->param;
+	struct hci_conn *conn;
+
+	if (cp->addr.type == BDADDR_BREDR)
+		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
+					       &cp->addr.bdaddr);
+	else
+		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
+					       le_addr_type(cp->addr.type));
+
+	if (!conn)
+		return 0;
+
+	return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
+}
+
 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
 			 u16 len)
 {
@@ -2652,7 +2683,7 @@ done:
 		goto unlock;
 	}
 
-	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
+	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
 			       sizeof(*cp));
 	if (!cmd) {
 		err = -ENOMEM;
@@ -2661,9 +2692,10 @@ done:
 
 	cmd->cmd_complete = addr_cmd_complete;
 
-	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
+	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
+				 unpair_device_complete);
 	if (err < 0)
-		mgmt_pending_remove(cmd);
+		mgmt_pending_free(cmd);
 
 unlock:
 	hci_dev_unlock(hdev);
@@ -621,26 +621,26 @@ static int smc_clc_prfx_match6_rcu(struct net_device *dev,
 int smc_clc_prfx_match(struct socket *clcsock,
 		       struct smc_clc_msg_proposal_prefix *prop)
 {
-	struct dst_entry *dst = sk_dst_get(clcsock->sk);
+	struct net_device *dev;
+	struct dst_entry *dst;
 	int rc;
 
-	if (!dst) {
-		rc = -ENOTCONN;
+	rcu_read_lock();
+
+	dst = __sk_dst_get(clcsock->sk);
+	dev = dst ? dst_dev_rcu(dst) : NULL;
+	if (!dev) {
+		rc = -ENODEV;
 		goto out;
 	}
-	if (!dst->dev) {
-		rc = -ENODEV;
-		goto out_rel;
-	}
-	rcu_read_lock();
+
 	if (!prop->ipv6_prefixes_cnt)
-		rc = smc_clc_prfx_match4_rcu(dst->dev, prop);
+		rc = smc_clc_prfx_match4_rcu(dev, prop);
 	else
-		rc = smc_clc_prfx_match6_rcu(dst->dev, prop);
-	rcu_read_unlock();
-out_rel:
-	dst_release(dst);
+		rc = smc_clc_prfx_match6_rcu(dev, prop);
 out:
+	rcu_read_unlock();
+
 	return rc;
 }