Import of kernel-4.18.0-553.85.1.el8_10
parent 641c751d02
commit b8906f1e90
@@ -12,7 +12,7 @@ RHEL_MINOR = 10
 #
 # Use this spot to avoid future merge conflicts.
 # Do not trim this comment.
-RHEL_RELEASE = 553.84.1
+RHEL_RELEASE = 553.85.1
 
 #
 # ZSTREAM
@@ -1274,6 +1274,13 @@ nouveau_connector_create(struct drm_device *dev,
 	int type, ret = 0;
 	bool dummy;
 
+	// Don't attempt to create the connector if we wouldn't be able to actually use it.
+	if (!(disp->disp.conn_mask & BIT(index))) {
+		NV_DEBUG(drm, "conn %02x:%02x%02x: disabled by disp conn_mask (%08lx), skipping\n",
+			 index, dcbe->location, dcbe->type, disp->disp.conn_mask);
+		return ERR_PTR(-EBUSY);
+	}
+
 	drm_connector_list_iter_begin(dev, &conn_iter);
 	nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
 		nv_connector = nouveau_connector(connector);
@@ -1404,7 +1411,7 @@ nouveau_connector_create(struct drm_device *dev,
 	drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
 	connector->polled = DRM_CONNECTOR_POLL_CONNECT;
 
-	if (nv_connector->dcb && (disp->disp.conn_mask & BIT(nv_connector->index))) {
+	if (nv_connector->dcb) {
 		ret = nvif_conn_ctor(&disp->disp, nv_connector->base.name, nv_connector->index,
 				     &nv_connector->conn);
 		if (ret) {
@@ -51,6 +51,7 @@
 #define I40E_MAX_VEB 16
 
 #define I40E_MAX_NUM_DESCRIPTORS 4096
+#define I40E_MAX_NUM_DESCRIPTORS_XL710 8160
 #define I40E_MAX_CSR_SPACE (4 * 1024 * 1024 - 64 * 1024)
 #define I40E_DEFAULT_NUM_DESCRIPTORS 512
 #define I40E_REQ_DESCRIPTOR_MULTIPLE 32
@@ -1,7 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright(c) 2013 - 2018 Intel Corporation. */
 
-#include "i40e_status.h"
 #include "i40e_type.h"
 #include "i40e_register.h"
 #include "i40e_adminq.h"
@@ -284,7 +283,7 @@ static int i40e_config_asq_regs(struct i40e_hw *hw)
 	/* Check one register to verify that config was applied */
 	reg = rd32(hw, hw->aq.asq.bal);
 	if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
-		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+		ret_code = -EIO;
 
 	return ret_code;
 }
@@ -316,7 +315,7 @@ static int i40e_config_arq_regs(struct i40e_hw *hw)
 	/* Check one register to verify that config was applied */
 	reg = rd32(hw, hw->aq.arq.bal);
 	if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
-		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+		ret_code = -EIO;
 
 	return ret_code;
 }
@@ -340,14 +339,14 @@ static int i40e_init_asq(struct i40e_hw *hw)
 
 	if (hw->aq.asq.count > 0) {
 		/* queue already initialized */
-		ret_code = I40E_ERR_NOT_READY;
+		ret_code = -EBUSY;
 		goto init_adminq_exit;
 	}
 
 	/* verify input for valid configuration */
 	if ((hw->aq.num_asq_entries == 0) ||
 	    (hw->aq.asq_buf_size == 0)) {
-		ret_code = I40E_ERR_CONFIG;
+		ret_code = -EIO;
 		goto init_adminq_exit;
 	}
 
@@ -399,14 +398,14 @@ static int i40e_init_arq(struct i40e_hw *hw)
 
 	if (hw->aq.arq.count > 0) {
 		/* queue already initialized */
-		ret_code = I40E_ERR_NOT_READY;
+		ret_code = -EBUSY;
 		goto init_adminq_exit;
 	}
 
 	/* verify input for valid configuration */
 	if ((hw->aq.num_arq_entries == 0) ||
 	    (hw->aq.arq_buf_size == 0)) {
-		ret_code = I40E_ERR_CONFIG;
+		ret_code = -EIO;
 		goto init_adminq_exit;
 	}
 
@@ -452,7 +451,7 @@ static int i40e_shutdown_asq(struct i40e_hw *hw)
 	mutex_lock(&hw->aq.asq_mutex);
 
 	if (hw->aq.asq.count == 0) {
-		ret_code = I40E_ERR_NOT_READY;
+		ret_code = -EBUSY;
 		goto shutdown_asq_out;
 	}
 
@@ -486,7 +485,7 @@ static int i40e_shutdown_arq(struct i40e_hw *hw)
 	mutex_lock(&hw->aq.arq_mutex);
 
 	if (hw->aq.arq.count == 0) {
-		ret_code = I40E_ERR_NOT_READY;
+		ret_code = -EBUSY;
 		goto shutdown_arq_out;
 	}
 
@@ -594,7 +593,7 @@ int i40e_init_adminq(struct i40e_hw *hw)
 	    (hw->aq.num_asq_entries == 0) ||
 	    (hw->aq.arq_buf_size == 0) ||
 	    (hw->aq.asq_buf_size == 0)) {
-		ret_code = I40E_ERR_CONFIG;
+		ret_code = -EIO;
 		goto init_adminq_exit;
 	}
 
@@ -626,13 +625,13 @@ int i40e_init_adminq(struct i40e_hw *hw)
 					   &hw->aq.api_maj_ver,
 					   &hw->aq.api_min_ver,
 					   NULL);
-		if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
+		if (ret_code != -EIO)
 			break;
 		retry++;
 		msleep(100);
 		i40e_resume_aq(hw);
 	} while (retry < 10);
-	if (ret_code != I40E_SUCCESS)
+	if (ret_code != 0)
 		goto init_adminq_free_arq;
 
 	/* Some features were introduced in different FW API version
@@ -672,7 +671,7 @@ int i40e_init_adminq(struct i40e_hw *hw)
 		hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
 
 	if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
-		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
+		ret_code = -EIO;
 		goto init_adminq_free_arq;
 	}
 
@@ -799,7 +798,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
 	if (hw->aq.asq.count == 0) {
 		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
 			   "AQTX: Admin queue not initialized.\n");
-		status = I40E_ERR_QUEUE_EMPTY;
+		status = -EIO;
 		goto asq_send_command_error;
 	}
 
@@ -809,7 +808,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
 	if (val >= hw->aq.num_asq_entries) {
 		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
 			   "AQTX: head overrun at %d\n", val);
-		status = I40E_ERR_ADMIN_QUEUE_FULL;
+		status = -ENOSPC;
 		goto asq_send_command_error;
 	}
 
@@ -840,7 +839,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
 				   I40E_DEBUG_AQ_MESSAGE,
 				   "AQTX: Invalid buffer size: %d.\n",
 				   buff_size);
-			status = I40E_ERR_INVALID_SIZE;
+			status = -EINVAL;
 			goto asq_send_command_error;
 		}
 
@@ -848,7 +847,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
 		i40e_debug(hw,
 			   I40E_DEBUG_AQ_MESSAGE,
 			   "AQTX: Async flag not set along with postpone flag");
-		status = I40E_ERR_PARAM;
+		status = -EINVAL;
 		goto asq_send_command_error;
 	}
 
@@ -863,7 +862,7 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
 		i40e_debug(hw,
 			   I40E_DEBUG_AQ_MESSAGE,
 			   "AQTX: Error queue is full.\n");
-		status = I40E_ERR_ADMIN_QUEUE_FULL;
+		status = -ENOSPC;
 		goto asq_send_command_error;
 	}
 
@@ -940,9 +939,9 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
 		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
 			status = 0;
 		else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
-			status = I40E_ERR_NOT_READY;
+			status = -EBUSY;
 		else
-			status = I40E_ERR_ADMIN_QUEUE_ERROR;
+			status = -EIO;
 		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
 	}
 
@@ -960,11 +959,11 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
 		if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
 			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
 				   "AQTX: AQ Critical error.\n");
-			status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
+			status = -EIO;
 		} else {
 			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
 				   "AQTX: Writeback timeout.\n");
-			status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+			status = -EIO;
 		}
 	}
 
@@ -1106,7 +1105,7 @@ int i40e_clean_arq_element(struct i40e_hw *hw,
 	if (hw->aq.arq.count == 0) {
 		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
 			   "AQRX: Admin queue not initialized.\n");
-		ret_code = I40E_ERR_QUEUE_EMPTY;
+		ret_code = -EIO;
 		goto clean_arq_element_err;
 	}
 
@@ -1114,7 +1113,7 @@ int i40e_clean_arq_element(struct i40e_hw *hw,
 	ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
 	if (ntu == ntc) {
 		/* nothing to do - shouldn't need to update ring's values */
-		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
+		ret_code = -EALREADY;
 		goto clean_arq_element_out;
 	}
 
@@ -1126,7 +1125,7 @@ int i40e_clean_arq_element(struct i40e_hw *hw,
 		(enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
 	flags = le16_to_cpu(desc->flags);
 	if (flags & I40E_AQ_FLAG_ERR) {
-		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+		ret_code = -EIO;
 		i40e_debug(hw,
 			   I40E_DEBUG_AQ_MESSAGE,
 			   "AQRX: Event received with error 0x%X.\n",
@@ -5,7 +5,6 @@
 #define _I40E_ADMINQ_H_
 
 #include "i40e_osdep.h"
-#include "i40e_status.h"
 #include "i40e_adminq_cmd.h"
 
 #define I40E_ADMINQ_DESC(R, i) \
@@ -116,10 +115,6 @@ static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
 		-EFBIG, /* I40E_AQ_RC_EFBIG */
 	};
 
-	/* aq_rc is invalid if AQ timed out */
-	if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT)
-		return -EAGAIN;
-
 	if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0]))))
 		return -ERANGE;
 
@@ -56,7 +56,7 @@ int i40e_set_mac_type(struct i40e_hw *hw)
 			break;
 		}
 	} else {
-		status = I40E_ERR_DEVICE_NOT_SUPPORTED;
+		status = -ENODEV;
 	}
 
 	hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
@@ -660,7 +660,7 @@ int i40e_init_shared_code(struct i40e_hw *hw)
 	case I40E_MAC_X722:
 		break;
 	default:
-		return I40E_ERR_DEVICE_NOT_SUPPORTED;
+		return -ENODEV;
 	}
 
 	hw->phy.get_link_info = true;
@@ -780,7 +780,7 @@ int i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
 	if (flags & I40E_AQC_PORT_ADDR_VALID)
 		ether_addr_copy(mac_addr, addrs.port_mac);
 	else
-		status = I40E_ERR_INVALID_MAC_ADDR;
+		status = -EINVAL;
 
 	return status;
 }
@@ -858,7 +858,7 @@ int i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
 	pba_size--;
 	if (pba_num_size < (((u32)pba_size * 2) + 1)) {
 		hw_dbg(hw, "Buffer too small for PBA data.\n");
-		return I40E_ERR_PARAM;
+		return -EINVAL;
 	}
 
 	for (i = 0; i < pba_size; i++) {
@@ -955,7 +955,7 @@ static int i40e_poll_globr(struct i40e_hw *hw,
 	hw_dbg(hw, "Global reset failed.\n");
 	hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg);
 
-	return I40E_ERR_RESET_FAILED;
+	return -EIO;
 }
 
 #define I40E_PF_RESET_WAIT_COUNT_A0 200
@@ -995,7 +995,7 @@ int i40e_pf_reset(struct i40e_hw *hw)
 		}
 		if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
 			hw_dbg(hw, "Global reset polling failed to complete.\n");
-			return I40E_ERR_RESET_FAILED;
+			return -EIO;
 		}
 
 		/* Now Wait for the FW to be ready */
@@ -1014,7 +1014,7 @@ int i40e_pf_reset(struct i40e_hw *hw)
 		      I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
 			hw_dbg(hw, "wait for FW Reset complete timedout\n");
 			hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
-			return I40E_ERR_RESET_FAILED;
+			return -EIO;
 		}
 
 		/* If there was a Global Reset in progress when we got here,
@@ -1040,10 +1040,10 @@ int i40e_pf_reset(struct i40e_hw *hw)
 		}
 		if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
 			if (i40e_poll_globr(hw, grst_del))
-				return I40E_ERR_RESET_FAILED;
+				return -EIO;
 		} else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
 			hw_dbg(hw, "PF reset polling failed to complete.\n");
-			return I40E_ERR_RESET_FAILED;
+			return -EIO;
 		}
 	}
 
@@ -1319,7 +1319,7 @@ i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
 	int status;
 
 	if (!abilities)
-		return I40E_ERR_PARAM;
+		return -EINVAL;
 
 	do {
 		i40e_fill_default_direct_cmd_desc(&desc,
@@ -1342,12 +1342,12 @@ i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
 
 		switch (hw->aq.asq_last_status) {
 		case I40E_AQ_RC_EIO:
-			status = I40E_ERR_UNKNOWN_PHY;
+			status = -EIO;
 			break;
 		case I40E_AQ_RC_EAGAIN:
 			usleep_range(1000, 2000);
 			total_delay++;
-			status = I40E_ERR_TIMEOUT;
+			status = -EIO;
 			break;
 		/* also covers I40E_AQ_RC_OK */
 		default:
@@ -1397,7 +1397,7 @@ int i40e_aq_set_phy_config(struct i40e_hw *hw,
 	int status;
 
 	if (!config)
-		return I40E_ERR_PARAM;
+		return -EINVAL;
 
 	i40e_fill_default_direct_cmd_desc(&desc,
 					  i40e_aqc_opc_set_phy_config);
@@ -2347,7 +2347,7 @@ int i40e_aq_send_driver_version(struct i40e_hw *hw,
 	u16 len;
 
 	if (dv == NULL)
-		return I40E_ERR_PARAM;
+		return -EINVAL;
 
 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
 
@@ -2465,7 +2465,7 @@ int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
 
 	/* SEIDs need to either both be set or both be 0 for floating VEB */
 	if (!!uplink_seid != !!downlink_seid)
-		return I40E_ERR_PARAM;
+		return -EINVAL;
 
 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);
 
@@ -2520,7 +2520,7 @@ int i40e_aq_get_veb_parameters(struct i40e_hw *hw,
 	int status;
 
 	if (veb_seid == 0)
-		return I40E_ERR_PARAM;
+		return -EINVAL;
 
 	i40e_fill_default_direct_cmd_desc(&desc,
 					  i40e_aqc_opc_get_veb_parameters);
@@ -2610,7 +2610,7 @@ i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
 	u16 buf_size;
 
 	if (count == 0 || !mv_list || !hw)
-		return I40E_ERR_PARAM;
+		return -EINVAL;
 
 	buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid);
 
@@ -2643,7 +2643,7 @@ i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
 	u16 buf_size;
 
 	if (count == 0 || !mv_list || !hw)
-		return I40E_ERR_PARAM;
+		return -EINVAL;
 
 	buf_size = i40e_prepare_add_macvlan(mv_list, &desc, count, seid);
 
@@ -2673,7 +2673,7 @@ i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
 	int status;
 
 	if (count == 0 || !mv_list || !hw)
-		return I40E_ERR_PARAM;
+		return -EINVAL;
 
 	buf_size = count * sizeof(*mv_list);
 
@@ -2720,7 +2720,7 @@ i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
 	u16 buf_size;
 
 	if (count == 0 || !mv_list || !hw)
-		return I40E_ERR_PARAM;
+		return -EINVAL;
 
 	buf_size = count * sizeof(*mv_list);
 
@@ -2826,7 +2826,7 @@ int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
 	if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS ||
 	      rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) {
 		if (count == 0 || !mr_list)
-			return I40E_ERR_PARAM;
+			return -EINVAL;
 	}
 
 	return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid,
@@ -2862,7 +2862,7 @@ int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
 		 * not matter.
 		 */
 		if (count == 0 || !mr_list)
-			return I40E_ERR_PARAM;
+			return -EINVAL;
 	}
 
 	return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid,
@@ -2927,7 +2927,7 @@ int i40e_aq_debug_read_register(struct i40e_hw *hw,
 	int status;
 
 	if (reg_val == NULL)
-		return I40E_ERR_PARAM;
+		return -EINVAL;
 
 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
 
@@ -3066,7 +3066,7 @@ int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
 
 	/* In offset the highest byte must be zeroed. */
 	if (offset & 0xFF000000) {
-		status = I40E_ERR_PARAM;
+		status = -EINVAL;
 		goto i40e_aq_read_nvm_exit;
 	}
 
@@ -3111,7 +3111,7 @@ int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
 
 	/* In offset the highest byte must be zeroed. */
 	if (offset & 0xFF000000) {
-		status = I40E_ERR_PARAM;
+		status = -EINVAL;
 		goto i40e_aq_erase_nvm_exit;
 	}
 
@@ -3403,7 +3403,7 @@ int i40e_aq_discover_capabilities(struct i40e_hw *hw,
 
 	if (list_type_opc != i40e_aqc_opc_list_func_capabilities &&
 	    list_type_opc != i40e_aqc_opc_list_dev_capabilities) {
-		status = I40E_ERR_PARAM;
+		status = -EINVAL;
 		goto exit;
 	}
 
@@ -3451,7 +3451,7 @@ int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
 
 	/* In offset the highest byte must be zeroed. */
 	if (offset & 0xFF000000) {
-		status = I40E_ERR_PARAM;
+		status = -EINVAL;
 		goto i40e_aq_update_nvm_exit;
 	}
 
@@ -3508,7 +3508,7 @@ int i40e_aq_rearrange_nvm(struct i40e_hw *hw,
 			  I40E_AQ_NVM_REARRANGE_TO_STRUCT);
 
 	if (!rearrange_nvm) {
-		status = I40E_ERR_PARAM;
+		status = -EINVAL;
 		goto i40e_aq_rearrange_nvm_exit;
 	}
 
@@ -3545,7 +3545,7 @@ int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
 	int status;
 
 	if (buff_size == 0 || !buff)
-		return I40E_ERR_PARAM;
+		return -EINVAL;
 
 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
 	/* Indirect Command */
@@ -3593,7 +3593,7 @@ i40e_aq_set_lldp_mib(struct i40e_hw *hw,
 
 	cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw;
 	if (buff_size == 0 || !buff)
-		return I40E_ERR_PARAM;
+		return -EINVAL;
 
 	i40e_fill_default_direct_cmd_desc(&desc,
 					  i40e_aqc_opc_lldp_set_local_mib);
@@ -3662,7 +3662,7 @@ i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
 	if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {
 		i40e_debug(hw, I40E_DEBUG_ALL,
 			   "Restore LLDP not supported by current FW version.\n");
-		return I40E_ERR_DEVICE_NOT_SUPPORTED;
+		return -ENODEV;
 	}
 
 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore);
@@ -3764,7 +3764,7 @@ i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
 	int status;
 
 	if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
-		return I40E_ERR_DEVICE_NOT_SUPPORTED;
+		return -ENODEV;
 
 	i40e_fill_default_direct_cmd_desc(&desc,
 					  i40e_aqc_opc_set_dcb_parameters);
@@ -3795,7 +3795,7 @@ int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
 	int status;
 
 	if (buff_size == 0 || !buff)
-		return I40E_ERR_PARAM;
+		return -EINVAL;
 
 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg);
 
@@ -3883,7 +3883,7 @@ int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
 	int status;
 
 	if (seid == 0)
-		return I40E_ERR_PARAM;
+		return -EINVAL;
 
 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
 
@@ -3957,7 +3957,7 @@ static int i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
 		cmd_param_flag = false;
 		break;
 	default:
-		return I40E_ERR_PARAM;
+		return -EINVAL;
 	}
 
 	i40e_fill_default_direct_cmd_desc(&desc, opcode);
@@ -4183,7 +4183,7 @@ i40e_validate_filter_settings(struct i40e_hw *hw,
 		fcoe_filt_size <<= (u32)settings->fcoe_filt_num;
 		break;
 	default:
-		return I40E_ERR_PARAM;
+		return -EINVAL;
 	}
 
 	switch (settings->fcoe_cntx_num) {
@@ -4195,7 +4195,7 @@ i40e_validate_filter_settings(struct i40e_hw *hw,
 		fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num;
 		break;
 	default:
-		return I40E_ERR_PARAM;
+		return -EINVAL;
 	}
 
 	/* Validate PE settings passed */
@@ -4213,7 +4213,7 @@ i40e_validate_filter_settings(struct i40e_hw *hw,
 	case I40E_HASH_FILTER_SIZE_1M:
 		break;
 	default:
-		return I40E_ERR_PARAM;
+		return -EINVAL;
 	}
 
 	switch (settings->pe_cntx_num) {
@@ -4229,7 +4229,7 @@ i40e_validate_filter_settings(struct i40e_hw *hw,
 	case I40E_DMA_CNTX_SIZE_256K:
 		break;
 	default:
-		return I40E_ERR_PARAM;
+		return -EINVAL;
 	}
 
 	/* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */
@@ -4237,7 +4237,7 @@ i40e_validate_filter_settings(struct i40e_hw *hw,
 	fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK)
 		     >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT;
 	if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
-		return I40E_ERR_INVALID_SIZE;
+		return -EINVAL;
 
 	return 0;
 }
@@ -4259,7 +4259,7 @@ int i40e_set_filter_control(struct i40e_hw *hw,
 	u32 val;
 
 	if (!settings)
-		return I40E_ERR_PARAM;
+		return -EINVAL;
 
 	/* Validate the input settings */
 	ret = i40e_validate_filter_settings(hw, settings);
@@ -4341,7 +4341,7 @@ int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
 	int status;
 
 	if (vsi_seid == 0)
-		return I40E_ERR_PARAM;
+		return -EINVAL;
 
 	if (is_add) {
 		i40e_fill_default_direct_cmd_desc(&desc,
@@ -4416,7 +4416,7 @@ static int i40e_aq_alternate_read(struct i40e_hw *hw,
 	int status;
 
 	if (!reg_val0)
-		return I40E_ERR_PARAM;
+		return -EINVAL;
 
 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
 	cmd_resp->address0 = cpu_to_le32(reg_addr0);
@@ -4552,7 +4552,7 @@ int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
 	int status;
 
 	if (buff_size == 0 || !buff)
-		return I40E_ERR_PARAM;
+		return -EINVAL;
 
 	i40e_fill_default_direct_cmd_desc(&desc,
 					  i40e_aqc_opc_debug_dump_internals);
@@ -4670,7 +4670,7 @@ int i40e_read_phy_register_clause22(struct i40e_hw *hw,
 				    u16 reg, u8 phy_addr, u16 *value)
 {
 	u8 port_num = (u8)hw->func_caps.mdio_port_num;
-	int status = I40E_ERR_TIMEOUT;
+	int status = -EIO;
 	u32 command = 0;
 	u16 retry = 1000;
 
@@ -4715,7 +4715,7 @@ int i40e_write_phy_register_clause22(struct i40e_hw *hw,
 				     u16 reg, u8 phy_addr, u16 value)
 {
 	u8 port_num = (u8)hw->func_caps.mdio_port_num;
-	int status = I40E_ERR_TIMEOUT;
+	int status = -EIO;
 	u32 command = 0;
 	u16 retry = 1000;
 
@@ -4756,7 +4756,7 @@ int i40e_read_phy_register_clause45(struct i40e_hw *hw,
 				    u8 page, u16 reg, u8 phy_addr, u16 *value)
 {
 	u8 port_num = hw->func_caps.mdio_port_num;
-	int status = I40E_ERR_TIMEOUT;
+	int status = -EIO;
 	u32 command = 0;
 	u16 retry = 1000;
 
@@ -4790,7 +4790,7 @@ int i40e_read_phy_register_clause45(struct i40e_hw *hw,
 		  (I40E_MDIO_CLAUSE45_STCODE_MASK) |
 		  (I40E_GLGEN_MSCA_MDICMD_MASK) |
 		  (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
-	status = I40E_ERR_TIMEOUT;
+	status = -EIO;
 	retry = 1000;
 	wr32(hw, I40E_GLGEN_MSCA(port_num), command);
 	do {
@@ -4830,7 +4830,7 @@ int i40e_write_phy_register_clause45(struct i40e_hw *hw,
 				     u8 page, u16 reg, u8 phy_addr, u16 value)
 {
 	u8 port_num = hw->func_caps.mdio_port_num;
-	int status = I40E_ERR_TIMEOUT;
+	int status = -EIO;
 	u16 retry = 1000;
 	u32 command = 0;
 
@@ -4866,7 +4866,7 @@ int i40e_write_phy_register_clause45(struct i40e_hw *hw,
 		  (I40E_MDIO_CLAUSE45_STCODE_MASK) |
 		  (I40E_GLGEN_MSCA_MDICMD_MASK) |
 		  (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
-	status = I40E_ERR_TIMEOUT;
+	status = -EIO;
 	retry = 1000;
 	wr32(hw, I40E_GLGEN_MSCA(port_num), command);
 	do {
@@ -4915,7 +4915,7 @@ int i40e_write_phy_register(struct i40e_hw *hw,
 						    phy_addr, value);
 		break;
 	default:
-		status = I40E_ERR_UNKNOWN_PHY;
+		status = -EIO;
 		break;
 	}
 
@@ -4954,7 +4954,7 @@ int i40e_read_phy_register(struct i40e_hw *hw,
 						   phy_addr, value);
 		break;
 	default:
-		status = I40E_ERR_UNKNOWN_PHY;
+		status = -EIO;
 		break;
 	}
 
@@ -5144,7 +5144,7 @@ int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
 					      I40E_PHY_COM_REG_PAGE, true,
 					      I40E_PHY_LED_PROV_REG_1,
 					      &reg_val_aq, NULL);
-		if (status == I40E_SUCCESS)
+		if (status == 0)
 			*val = (u16)reg_val_aq;
 		return status;
 	}
@@ -5239,7 +5239,7 @@ int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
 	int status;
 
 	if (!reg_val)
-		return I40E_ERR_PARAM;
+		return -EINVAL;
 
 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read);
 
@@ -5679,7 +5679,7 @@ i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
 
 	if (track_id == I40E_DDP_TRACKID_INVALID) {
 		i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
-		return I40E_NOT_SUPPORTED;
+		return -EOPNOTSUPP;
 	}
 
 	dev_cnt = profile->device_table_count;
@@ -5692,7 +5692,7 @@ i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
 	if (dev_cnt && i == dev_cnt) {
 		i40e_debug(hw, I40E_DEBUG_PACKAGE,
 			   "Device doesn't support DDP\n");
-		return I40E_ERR_DEVICE_NOT_SUPPORTED;
+		return -ENODEV;
 	}
 
 	I40E_SECTION_TABLE(profile, sec_tbl);
@@ -5707,14 +5707,14 @@ i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
 			    sec->section.type == SECTION_TYPE_RB_AQ) {
 				i40e_debug(hw, I40E_DEBUG_PACKAGE,
 					   "Not a roll-back package\n");
-				return I40E_NOT_SUPPORTED;
+				return -EOPNOTSUPP;
 			}
 		} else {
 			if (sec->section.type == SECTION_TYPE_RB_AQ ||
 			    sec->section.type == SECTION_TYPE_RB_MMIO) {
 				i40e_debug(hw, I40E_DEBUG_PACKAGE,
 					   "Not an original package\n");
-				return I40E_NOT_SUPPORTED;
+				return -EOPNOTSUPP;
 			}
 		}
 	}
@@ -17,7 +17,7 @@ int i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)
 	u32 reg;
 
 	if (!status)
-		return I40E_ERR_PARAM;
+		return -EINVAL;
 
 	reg = rd32(hw, I40E_PRTDCB_GENS);
 	*status = (u16)((reg & I40E_PRTDCB_GENS_DCBX_STATUS_MASK) >>
@@ -508,7 +508,7 @@ int i40e_lldp_to_dcb_config(u8 *lldpmib,
 	u16 type;
 
 	if (!lldpmib || !dcbcfg)
-		return I40E_ERR_PARAM;
+		return -EINVAL;
 
 	/* set to the start of LLDPDU */
 	lldpmib += ETH_HLEN;
@@ -874,7 +874,7 @@ int i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
 	int ret = 0;
 
 	if (!hw->func_caps.dcb)
-		return I40E_NOT_SUPPORTED;
+		return -EOPNOTSUPP;
 
 	/* Read LLDP NVM area */
 	if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) {
@@ -885,7 +885,7 @@ int i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
 		else if (hw->mac.type == I40E_MAC_X722)
 			offset = I40E_LLDP_CURRENT_STATUS_X722_OFFSET;
 		else
-			return I40E_NOT_SUPPORTED;
+			return -EOPNOTSUPP;
 
 		ret = i40e_read_nvm_module_data(hw,
 						I40E_SR_EMP_SR_SETTINGS_PTR,
@@ -897,7 +897,7 @@ int i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
 		ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
 	}
 	if (ret)
-		return I40E_ERR_NOT_READY;
+		return -EBUSY;
 
 	/* Get the LLDP AdminStatus for the current port */
 	adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
@@ -906,7 +906,7 @@ int i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
 	/* LLDP agent disabled */
 	if (!adminstatus) {
 		hw->dcbx_status = I40E_DCBX_STATUS_DISABLED;
-		return I40E_ERR_NOT_READY;
+		return -EBUSY;
 	}
 
 	/* Get DCBX status */
@@ -922,7 +922,7 @@ int i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
 		if (ret)
 			return ret;
 	} else if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
-		return I40E_ERR_NOT_READY;
+		return -EBUSY;
 	}
 
 	/* Configure the LLDP MIB change event */
@@ -949,7 +949,7 @@ i40e_get_fw_lldp_status(struct i40e_hw *hw,
 	int ret;
 
 	if (!lldp_status)
-		return I40E_ERR_PARAM;
+		return -EINVAL;
 
 	/* Allocate buffer for the LLDPDU */
 	ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
@@ -1299,7 +1299,7 @@ int i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
 			   sizeof(tlv->typelength) + length);
 	} while (tlvid < I40E_TLV_ID_END_OF_LLDPPDU);
 	*miblen = offset;
-	return I40E_SUCCESS;
+	return 0;
 }
 
 /**
@@ -1957,7 +1957,7 @@ int i40e_read_lldp_cfg(struct i40e_hw *hw,
 	u32 mem;
 
 	if (!lldp_cfg)
-		return I40E_ERR_PARAM;
+		return -EINVAL;
 
 	ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
 	if (ret)
@@ -344,7 +344,7 @@ int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size,
 	if (is_add) {
 		status = i40e_write_profile(&pf->hw, profile_hdr, track_id);
 		if (status) {
-			if (status == I40E_ERR_DEVICE_NOT_SUPPORTED) {
+			if (status == -ENODEV) {
 				netdev_err(netdev,
 					   "Profile is not supported by the device.");
 				return -EPERM;
@@ -1310,7 +1310,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 		ret = i40e_asq_send_command(&pf->hw, desc, NULL, 0, NULL);
 		if (!ret) {
 			dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
-		} else if (ret == I40E_ERR_ADMIN_QUEUE_ERROR) {
+		} else if (ret == -EIO) {
 			dev_info(&pf->pdev->dev,
 				 "AQ command send failed Opcode %x AQ Error: %d\n",
 				 desc->opcode, pf->hw.aq.asq_last_status);
@@ -1371,7 +1371,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 					    buffer_len, NULL);
 		if (!ret) {
 			dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n");
-		} else if (ret == I40E_ERR_ADMIN_QUEUE_ERROR) {
+		} else if (ret == -EIO) {
 			dev_info(&pf->pdev->dev,
 				 "AQ command send failed Opcode %x AQ Error: %d\n",
 				 desc->opcode, pf->hw.aq.asq_last_status);
@@ -28,7 +28,7 @@ static int i40e_diag_reg_pattern_test(struct i40e_hw *hw,
 			i40e_debug(hw, I40E_DEBUG_DIAG,
 				   "%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n",
 				   __func__, reg, pat, val);
-			return I40E_ERR_DIAG_TEST_FAILED;
+			return -EIO;
 		}
 	}
 
@@ -38,7 +38,7 @@ static int i40e_diag_reg_pattern_test(struct i40e_hw *hw,
 		i40e_debug(hw, I40E_DEBUG_DIAG,
 			   "%s: reg restore test failed - reg 0x%08x orig_val 0x%08x val 0x%08x\n",
 			   __func__, reg, orig_val, val);
-		return I40E_ERR_DIAG_TEST_FAILED;
+		return -EIO;
 	}
 
 	return 0;
@@ -126,5 +126,5 @@ int i40e_diag_eeprom_test(struct i40e_hw *hw)
 	    BIT(I40E_SR_CONTROL_WORD_1_SHIFT)))
 		return i40e_validate_nvm_checksum(hw, NULL);
 	else
-		return I40E_ERR_DIAG_TEST_FAILED;
+		return -EIO;
 }
@@ -2013,6 +2013,18 @@ static void i40e_get_drvinfo(struct net_device *netdev,
 		drvinfo->n_priv_flags += I40E_GL_PRIV_FLAGS_STR_LEN;
 }
 
+static u32 i40e_get_max_num_descriptors(struct i40e_pf *pf)
+{
+	struct i40e_hw *hw = &pf->hw;
+
+	switch (hw->mac.type) {
+	case I40E_MAC_XL710:
+		return I40E_MAX_NUM_DESCRIPTORS_XL710;
+	default:
+		return I40E_MAX_NUM_DESCRIPTORS;
+	}
+}
+
 static void i40e_get_ringparam(struct net_device *netdev,
 			       struct ethtool_ringparam *ring,
 			       struct kernel_ethtool_ringparam *kernel_ring,
@@ -2022,8 +2034,8 @@ static void i40e_get_ringparam(struct net_device *netdev,
 	struct i40e_pf *pf = np->vsi->back;
 	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
 
-	ring->rx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
-	ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
+	ring->rx_max_pending = i40e_get_max_num_descriptors(pf);
+	ring->tx_max_pending = i40e_get_max_num_descriptors(pf);
 	ring->rx_mini_max_pending = 0;
 	ring->rx_jumbo_max_pending = 0;
 	ring->rx_pending = vsi->rx_rings[0]->count;
@@ -2048,12 +2060,12 @@ static int i40e_set_ringparam(struct net_device *netdev,
 			      struct kernel_ethtool_ringparam *kernel_ring,
 			      struct netlink_ext_ack *extack)
 {
+	u32 new_rx_count, new_tx_count, max_num_descriptors;
 	struct i40e_ring *tx_rings = NULL, *rx_rings = NULL;
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_hw *hw = &np->vsi->back->hw;
 	struct i40e_vsi *vsi = np->vsi;
 	struct i40e_pf *pf = vsi->back;
-	u32 new_rx_count, new_tx_count;
 	u16 tx_alloc_queue_pairs;
 	int timeout = 50;
 	int i, err = 0;
@@ -2061,14 +2073,15 @@ static int i40e_set_ringparam(struct net_device *netdev,
 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
 		return -EINVAL;
 
-	if (ring->tx_pending > I40E_MAX_NUM_DESCRIPTORS ||
+	max_num_descriptors = i40e_get_max_num_descriptors(pf);
+	if (ring->tx_pending > max_num_descriptors ||
 	    ring->tx_pending < I40E_MIN_NUM_DESCRIPTORS ||
-	    ring->rx_pending > I40E_MAX_NUM_DESCRIPTORS ||
+	    ring->rx_pending > max_num_descriptors ||
 	    ring->rx_pending < I40E_MIN_NUM_DESCRIPTORS) {
 		netdev_info(netdev,
 			    "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n",
 			    ring->tx_pending, ring->rx_pending,
-			    I40E_MIN_NUM_DESCRIPTORS, I40E_MAX_NUM_DESCRIPTORS);
+			    I40E_MIN_NUM_DESCRIPTORS, max_num_descriptors);
 		return -EINVAL;
 	}
 
@@ -5692,8 +5705,8 @@ static int i40e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
 	struct i40e_vsi *vsi = np->vsi;
 	struct i40e_pf *pf = vsi->back;
 	struct i40e_hw *hw = &pf->hw;
-	int status = I40E_SUCCESS;
 	__le16 eee_capability;
+	int status = 0;
 
 	/* Deny parameters we don't support */
 	if (i40e_is_eee_param_supported(netdev, edata))
@@ -4,7 +4,6 @@
 #include "i40e.h"
 #include "i40e_osdep.h"
 #include "i40e_register.h"
-#include "i40e_status.h"
 #include "i40e_alloc.h"
 #include "i40e_hmc.h"
 #include "i40e_type.h"
@@ -26,18 +25,18 @@ int i40e_add_sd_table_entry(struct i40e_hw *hw,
 	enum i40e_memory_type mem_type __attribute__((unused));
 	struct i40e_hmc_sd_entry *sd_entry;
 	bool dma_mem_alloc_done = false;
-	int ret_code = I40E_SUCCESS;
 	struct i40e_dma_mem mem;
+	int ret_code = 0;
 	u64 alloc_len;
 
 	if (NULL == hmc_info->sd_table.sd_entry) {
-		ret_code = I40E_ERR_BAD_PTR;
+		ret_code = -EINVAL;
 		hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_entry\n");
 		goto exit;
 	}
 
 	if (sd_index >= hmc_info->sd_table.sd_cnt) {
-		ret_code = I40E_ERR_INVALID_SD_INDEX;
+		ret_code = -EINVAL;
 		hw_dbg(hw, "i40e_add_sd_table_entry: bad sd_index\n");
 		goto exit;
 	}
@@ -121,7 +120,7 @@ int i40e_add_pd_table_entry(struct i40e_hw *hw,
 	u64 *pd_addr;
 
 	if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) {
-		ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
+		ret_code = -EINVAL;
 		hw_dbg(hw, "i40e_add_pd_table_entry: bad pd_index\n");
 		goto exit;
 	}
@@ -200,13 +199,13 @@ int i40e_remove_pd_bp(struct i40e_hw *hw,
 	sd_idx = idx / I40E_HMC_PD_CNT_IN_SD;
 	rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD;
 	if (sd_idx >= hmc_info->sd_table.sd_cnt) {
-		ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
+		ret_code = -EINVAL;
 		hw_dbg(hw, "i40e_remove_pd_bp: bad idx\n");
 		goto exit;
 	}
 	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
 	if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) {
-		ret_code = I40E_ERR_INVALID_SD_TYPE;
+		ret_code = -EINVAL;
 		hw_dbg(hw, "i40e_remove_pd_bp: wrong sd_entry type\n");
 		goto exit;
 	}
@@ -251,7 +250,7 @@ int i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
 	sd_entry = &hmc_info->sd_table.sd_entry[idx];
 	I40E_DEC_BP_REFCNT(&sd_entry->u.bp);
 	if (sd_entry->u.bp.ref_cnt) {
-		ret_code = I40E_ERR_NOT_READY;
+		ret_code = -EBUSY;
 		goto exit;
 	}
 	I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
@@ -276,7 +275,7 @@ int i40e_remove_sd_bp_new(struct i40e_hw *hw,
 	struct i40e_hmc_sd_entry *sd_entry;
 
 	if (!is_pf)
-		return I40E_NOT_SUPPORTED;
+		return -EOPNOTSUPP;
 
 	/* get the entry and decrease its ref counter */
 	sd_entry = &hmc_info->sd_table.sd_entry[idx];
@@ -299,7 +298,7 @@ int i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
 	sd_entry = &hmc_info->sd_table.sd_entry[idx];
 
 	if (sd_entry->u.pd_table.ref_cnt) {
-		ret_code = I40E_ERR_NOT_READY;
+		ret_code = -EBUSY;
 		goto exit;
 	}
 
@@ -325,7 +324,7 @@ int i40e_remove_pd_page_new(struct i40e_hw *hw,
 	struct i40e_hmc_sd_entry *sd_entry;
 
 	if (!is_pf)
-		return I40E_NOT_SUPPORTED;
+		return -EOPNOTSUPP;
 
 	sd_entry = &hmc_info->sd_table.sd_entry[idx];
 	I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
@ -111,7 +111,7 @@ int i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
|
|||||||
|
|
||||||
/* validate values requested by driver don't exceed HMC capacity */
|
/* validate values requested by driver don't exceed HMC capacity */
|
||||||
if (txq_num > obj->max_cnt) {
|
if (txq_num > obj->max_cnt) {
|
||||||
ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
|
ret_code = -EINVAL;
|
||||||
hw_dbg(hw, "i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
|
hw_dbg(hw, "i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
|
||||||
txq_num, obj->max_cnt, ret_code);
|
txq_num, obj->max_cnt, ret_code);
|
||||||
goto init_lan_hmc_out;
|
goto init_lan_hmc_out;
|
||||||
@ -134,7 +134,7 @@ int i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
|
|||||||
|
|
||||||
/* validate values requested by driver don't exceed HMC capacity */
|
/* validate values requested by driver don't exceed HMC capacity */
|
||||||
if (rxq_num > obj->max_cnt) {
|
if (rxq_num > obj->max_cnt) {
|
||||||
ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
|
ret_code = -EINVAL;
|
||||||
hw_dbg(hw, "i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
|
hw_dbg(hw, "i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
|
||||||
rxq_num, obj->max_cnt, ret_code);
|
rxq_num, obj->max_cnt, ret_code);
|
||||||
goto init_lan_hmc_out;
|
goto init_lan_hmc_out;
|
||||||
@ -157,7 +157,7 @@ int i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
|
|||||||
|
|
||||||
/* validate values requested by driver don't exceed HMC capacity */
|
/* validate values requested by driver don't exceed HMC capacity */
|
||||||
if (fcoe_cntx_num > obj->max_cnt) {
|
if (fcoe_cntx_num > obj->max_cnt) {
|
||||||
ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
|
ret_code = -EINVAL;
|
||||||
hw_dbg(hw, "i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
|
hw_dbg(hw, "i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
|
||||||
fcoe_cntx_num, obj->max_cnt, ret_code);
|
fcoe_cntx_num, obj->max_cnt, ret_code);
|
||||||
goto init_lan_hmc_out;
|
goto init_lan_hmc_out;
|
||||||
@ -180,7 +180,7 @@ int i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
|
|||||||
|
|
||||||
/* validate values requested by driver don't exceed HMC capacity */
|
/* validate values requested by driver don't exceed HMC capacity */
|
||||||
if (fcoe_filt_num > obj->max_cnt) {
|
if (fcoe_filt_num > obj->max_cnt) {
|
||||||
ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
|
ret_code = -EINVAL;
|
||||||
hw_dbg(hw, "i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
|
hw_dbg(hw, "i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
|
||||||
fcoe_filt_num, obj->max_cnt, ret_code);
|
fcoe_filt_num, obj->max_cnt, ret_code);
|
||||||
goto init_lan_hmc_out;
|
goto init_lan_hmc_out;
|
||||||
@ -289,30 +289,30 @@ static int i40e_create_lan_hmc_object(struct i40e_hw *hw,
|
|||||||
u32 i, j;
|
u32 i, j;
|
||||||
|
|
||||||
if (NULL == info) {
|
if (NULL == info) {
|
||||||
ret_code = I40E_ERR_BAD_PTR;
|
ret_code = -EINVAL;
|
||||||
hw_dbg(hw, "i40e_create_lan_hmc_object: bad info ptr\n");
|
hw_dbg(hw, "i40e_create_lan_hmc_object: bad info ptr\n");
|
||||||
goto exit;
|
goto exit;
|
||||||
}
|
}
|
||||||
if (NULL == info->hmc_info) {
|
if (NULL == info->hmc_info) {
|
||||||
ret_code = I40E_ERR_BAD_PTR;
|
ret_code = -EINVAL;
|
||||||
hw_dbg(hw, "i40e_create_lan_hmc_object: bad hmc_info ptr\n");
|
hw_dbg(hw, "i40e_create_lan_hmc_object: bad hmc_info ptr\n");
|
||||||
goto exit;
|
goto exit;
|
||||||
}
|
}
|
||||||
if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
|
if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
|
||||||
ret_code = I40E_ERR_BAD_PTR;
|
ret_code = -EINVAL;
|
||||||
hw_dbg(hw, "i40e_create_lan_hmc_object: bad signature\n");
|
hw_dbg(hw, "i40e_create_lan_hmc_object: bad signature\n");
|
||||||
goto exit;
|
goto exit;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
|
if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
|
||||||
ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
|
ret_code = -EINVAL;
|
||||||
hw_dbg(hw, "i40e_create_lan_hmc_object: returns error %d\n",
|
hw_dbg(hw, "i40e_create_lan_hmc_object: returns error %d\n",
|
||||||
ret_code);
|
ret_code);
|
||||||
goto exit;
|
goto exit;
|
||||||
}
|
}
|
||||||
if ((info->start_idx + info->count) >
|
if ((info->start_idx + info->count) >
|
||||||
info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
|
info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
|
||||||
ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
|
ret_code = -EINVAL;
|
||||||
hw_dbg(hw, "i40e_create_lan_hmc_object: returns error %d\n",
|
hw_dbg(hw, "i40e_create_lan_hmc_object: returns error %d\n",
|
||||||
ret_code);
|
ret_code);
|
||||||
goto exit;
|
goto exit;
|
||||||
@ -324,7 +324,7 @@ static int i40e_create_lan_hmc_object(struct i40e_hw *hw,
|
|||||||
&sd_idx, &sd_lmt);
|
&sd_idx, &sd_lmt);
|
||||||
if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
|
if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
|
||||||
sd_lmt > info->hmc_info->sd_table.sd_cnt) {
|
sd_lmt > info->hmc_info->sd_table.sd_cnt) {
|
||||||
ret_code = I40E_ERR_INVALID_SD_INDEX;
|
ret_code = -EINVAL;
|
||||||
goto exit;
|
goto exit;
|
||||||
}
|
}
|
||||||
/* find pd index */
|
/* find pd index */
|
||||||
@ -393,7 +393,7 @@ static int i40e_create_lan_hmc_object(struct i40e_hw *hw,
|
|||||||
j, sd_entry->entry_type);
|
j, sd_entry->entry_type);
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
ret_code = I40E_ERR_INVALID_SD_TYPE;
|
ret_code = -EINVAL;
|
||||||
goto exit;
|
goto exit;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -417,7 +417,7 @@ exit_sd_error:
|
|||||||
i40e_remove_sd_bp(hw, info->hmc_info, (j - 1));
|
i40e_remove_sd_bp(hw, info->hmc_info, (j - 1));
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
ret_code = I40E_ERR_INVALID_SD_TYPE;
|
ret_code = -EINVAL;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
j--;
|
j--;
|
||||||
@ -474,7 +474,7 @@ try_type_paged:
|
|||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
/* unsupported type */
|
/* unsupported type */
|
||||||
ret_code = I40E_ERR_INVALID_SD_TYPE;
|
ret_code = -EINVAL;
|
||||||
hw_dbg(hw, "i40e_configure_lan_hmc: Unknown SD type: %d\n",
|
hw_dbg(hw, "i40e_configure_lan_hmc: Unknown SD type: %d\n",
|
||||||
ret_code);
|
ret_code);
|
||||||
goto configure_lan_hmc_out;
|
goto configure_lan_hmc_out;
|
||||||
@ -530,34 +530,34 @@ static int i40e_delete_lan_hmc_object(struct i40e_hw *hw,
|
|||||||
u32 i, j;
|
u32 i, j;
|
||||||
|
|
||||||
if (NULL == info) {
|
if (NULL == info) {
|
||||||
ret_code = I40E_ERR_BAD_PTR;
|
ret_code = -EINVAL;
|
||||||
hw_dbg(hw, "i40e_delete_hmc_object: bad info ptr\n");
|
hw_dbg(hw, "i40e_delete_hmc_object: bad info ptr\n");
|
||||||
goto exit;
|
goto exit;
|
||||||
}
|
}
|
||||||
if (NULL == info->hmc_info) {
|
if (NULL == info->hmc_info) {
|
||||||
ret_code = I40E_ERR_BAD_PTR;
|
ret_code = -EINVAL;
|
||||||
hw_dbg(hw, "i40e_delete_hmc_object: bad info->hmc_info ptr\n");
|
hw_dbg(hw, "i40e_delete_hmc_object: bad info->hmc_info ptr\n");
|
||||||
goto exit;
|
goto exit;
|
||||||
}
|
}
|
||||||
if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
|
if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
|
||||||
ret_code = I40E_ERR_BAD_PTR;
|
ret_code = -EINVAL;
|
||||||
hw_dbg(hw, "i40e_delete_hmc_object: bad hmc_info->signature\n");
|
hw_dbg(hw, "i40e_delete_hmc_object: bad hmc_info->signature\n");
|
||||||
goto exit;
|
goto exit;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (NULL == info->hmc_info->sd_table.sd_entry) {
|
if (NULL == info->hmc_info->sd_table.sd_entry) {
|
||||||
ret_code = I40E_ERR_BAD_PTR;
|
ret_code = -EINVAL;
|
||||||
hw_dbg(hw, "i40e_delete_hmc_object: bad sd_entry\n");
|
hw_dbg(hw, "i40e_delete_hmc_object: bad sd_entry\n");
|
||||||
goto exit;
|
goto exit;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (NULL == info->hmc_info->hmc_obj) {
|
if (NULL == info->hmc_info->hmc_obj) {
|
||||||
ret_code = I40E_ERR_BAD_PTR;
|
ret_code = -EINVAL;
|
||||||
hw_dbg(hw, "i40e_delete_hmc_object: bad hmc_info->hmc_obj\n");
|
hw_dbg(hw, "i40e_delete_hmc_object: bad hmc_info->hmc_obj\n");
|
||||||
goto exit;
|
goto exit;
|
||||||
}
|
}
|
||||||
if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
|
if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
|
||||||
ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
|
ret_code = -EINVAL;
|
||||||
hw_dbg(hw, "i40e_delete_hmc_object: returns error %d\n",
|
hw_dbg(hw, "i40e_delete_hmc_object: returns error %d\n",
|
||||||
ret_code);
|
ret_code);
|
||||||
goto exit;
|
goto exit;
|
||||||
@ -565,7 +565,7 @@ static int i40e_delete_lan_hmc_object(struct i40e_hw *hw,
|
|||||||
|
|
||||||
if ((info->start_idx + info->count) >
|
if ((info->start_idx + info->count) >
|
||||||
info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
|
info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
|
||||||
ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
|
ret_code = -EINVAL;
|
||||||
hw_dbg(hw, "i40e_delete_hmc_object: returns error %d\n",
|
hw_dbg(hw, "i40e_delete_hmc_object: returns error %d\n",
|
||||||
ret_code);
|
ret_code);
|
||||||
goto exit;
|
goto exit;
|
||||||
@ -599,7 +599,7 @@ static int i40e_delete_lan_hmc_object(struct i40e_hw *hw,
|
|||||||
&sd_idx, &sd_lmt);
|
&sd_idx, &sd_lmt);
|
||||||
if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
|
if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
|
||||||
sd_lmt > info->hmc_info->sd_table.sd_cnt) {
|
sd_lmt > info->hmc_info->sd_table.sd_cnt) {
|
||||||
ret_code = I40E_ERR_INVALID_SD_INDEX;
|
ret_code = -EINVAL;
|
||||||
goto exit;
|
goto exit;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -987,29 +987,29 @@ int i40e_hmc_get_object_va(struct i40e_hw *hw, u8 **object_base,
|
|||||||
int ret_code = 0;
|
int ret_code = 0;
|
||||||
|
|
||||||
if (NULL == hmc_info) {
|
if (NULL == hmc_info) {
|
||||||
ret_code = I40E_ERR_BAD_PTR;
|
ret_code = -EINVAL;
|
||||||
hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info ptr\n");
|
hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info ptr\n");
|
||||||
goto exit;
|
goto exit;
|
||||||
}
|
}
|
||||||
if (NULL == hmc_info->hmc_obj) {
|
if (NULL == hmc_info->hmc_obj) {
|
||||||
ret_code = I40E_ERR_BAD_PTR;
|
ret_code = -EINVAL;
|
||||||
hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
|
hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
|
||||||
goto exit;
|
goto exit;
|
||||||
}
|
}
|
||||||
if (NULL == object_base) {
|
if (NULL == object_base) {
|
||||||
ret_code = I40E_ERR_BAD_PTR;
|
ret_code = -EINVAL;
|
||||||
hw_dbg(hw, "i40e_hmc_get_object_va: bad object_base ptr\n");
|
hw_dbg(hw, "i40e_hmc_get_object_va: bad object_base ptr\n");
|
||||||
goto exit;
|
goto exit;
|
||||||
}
|
}
|
||||||
if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) {
|
if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) {
|
||||||
ret_code = I40E_ERR_BAD_PTR;
|
ret_code = -EINVAL;
|
||||||
hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->signature\n");
|
hw_dbg(hw, "i40e_hmc_get_object_va: bad hmc_info->signature\n");
|
||||||
goto exit;
|
goto exit;
|
||||||
}
|
}
|
||||||
if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) {
|
if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) {
|
||||||
hw_dbg(hw, "i40e_hmc_get_object_va: returns error %d\n",
|
hw_dbg(hw, "i40e_hmc_get_object_va: returns error %d\n",
|
||||||
ret_code);
|
ret_code);
|
||||||
ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
|
ret_code = -EINVAL;
|
||||||
goto exit;
|
goto exit;
|
||||||
}
|
}
|
||||||
/* find sd index and limit */
|
/* find sd index and limit */
|
||||||
|
|||||||
@ -5725,7 +5725,7 @@ int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset)
|
|||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
if (!vsi)
|
if (!vsi)
|
||||||
return I40E_ERR_PARAM;
|
return -EINVAL;
|
||||||
pf = vsi->back;
|
pf = vsi->back;
|
||||||
hw = &pf->hw;
|
hw = &pf->hw;
|
||||||
|
|
||||||
@ -7169,7 +7169,7 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
|
|||||||
*/
|
*/
|
||||||
if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT) {
|
if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT) {
|
||||||
dev_info(&pf->pdev->dev, "DCB is not supported.\n");
|
dev_info(&pf->pdev->dev, "DCB is not supported.\n");
|
||||||
err = I40E_NOT_SUPPORTED;
|
err = -EOPNOTSUPP;
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) {
|
if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) {
|
||||||
@ -7479,7 +7479,7 @@ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
|
|||||||
if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)
|
if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)
|
||||||
non_zero_phy_type = true;
|
non_zero_phy_type = true;
|
||||||
else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
|
else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
|
||||||
return I40E_SUCCESS;
|
return 0;
|
||||||
|
|
||||||
/* To force link we need to set bits for all supported PHY types,
|
/* To force link we need to set bits for all supported PHY types,
|
||||||
* but there are now more than 32, so we need to split the bitmap
|
* but there are now more than 32, so we need to split the bitmap
|
||||||
@ -7530,7 +7530,7 @@ static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
|
|||||||
|
|
||||||
i40e_aq_set_link_restart_an(hw, is_up, NULL);
|
i40e_aq_set_link_restart_an(hw, is_up, NULL);
|
||||||
|
|
||||||
return I40E_SUCCESS;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -8377,7 +8377,7 @@ int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
|
|||||||
};
|
};
|
||||||
|
|
||||||
if (filter->flags >= ARRAY_SIZE(flag_table))
|
if (filter->flags >= ARRAY_SIZE(flag_table))
|
||||||
return I40E_ERR_CONFIG;
|
return -EIO;
|
||||||
|
|
||||||
memset(&cld_filter, 0, sizeof(cld_filter));
|
memset(&cld_filter, 0, sizeof(cld_filter));
|
||||||
|
|
||||||
@ -8591,7 +8591,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
|
|||||||
} else {
|
} else {
|
||||||
dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
|
dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
|
||||||
match.mask->dst);
|
match.mask->dst);
|
||||||
return I40E_ERR_CONFIG;
|
return -EIO;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -8601,7 +8601,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
|
|||||||
} else {
|
} else {
|
||||||
dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
|
dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
|
||||||
match.mask->src);
|
match.mask->src);
|
||||||
return I40E_ERR_CONFIG;
|
return -EIO;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
ether_addr_copy(filter->dst_mac, match.key->dst);
|
ether_addr_copy(filter->dst_mac, match.key->dst);
|
||||||
@ -8619,7 +8619,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
|
|||||||
} else {
|
} else {
|
||||||
dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
|
dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
|
||||||
match.mask->vlan_id);
|
match.mask->vlan_id);
|
||||||
return I40E_ERR_CONFIG;
|
return -EIO;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -8643,7 +8643,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
|
|||||||
} else {
|
} else {
|
||||||
dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
|
dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
|
||||||
&match.mask->dst);
|
&match.mask->dst);
|
||||||
return I40E_ERR_CONFIG;
|
return -EIO;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -8653,13 +8653,13 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
|
|||||||
} else {
|
} else {
|
||||||
dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
|
dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
|
||||||
&match.mask->src);
|
&match.mask->src);
|
||||||
return I40E_ERR_CONFIG;
|
return -EIO;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
|
if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
|
||||||
dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
|
dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
|
||||||
return I40E_ERR_CONFIG;
|
return -EIO;
|
||||||
}
|
}
|
||||||
filter->dst_ipv4 = match.key->dst;
|
filter->dst_ipv4 = match.key->dst;
|
||||||
filter->src_ipv4 = match.key->src;
|
filter->src_ipv4 = match.key->src;
|
||||||
@ -8677,7 +8677,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
|
|||||||
ipv6_addr_loopback(&match.key->src)) {
|
ipv6_addr_loopback(&match.key->src)) {
|
||||||
dev_err(&pf->pdev->dev,
|
dev_err(&pf->pdev->dev,
|
||||||
"Bad ipv6, addr is LOOPBACK\n");
|
"Bad ipv6, addr is LOOPBACK\n");
|
||||||
return I40E_ERR_CONFIG;
|
return -EIO;
|
||||||
}
|
}
|
||||||
if (!ipv6_addr_any(&match.mask->dst) ||
|
if (!ipv6_addr_any(&match.mask->dst) ||
|
||||||
!ipv6_addr_any(&match.mask->src))
|
!ipv6_addr_any(&match.mask->src))
|
||||||
@ -8699,7 +8699,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
|
|||||||
} else {
|
} else {
|
||||||
dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
|
dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
|
||||||
be16_to_cpu(match.mask->src));
|
be16_to_cpu(match.mask->src));
|
||||||
return I40E_ERR_CONFIG;
|
return -EIO;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -8709,7 +8709,7 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
|
|||||||
} else {
|
} else {
|
||||||
dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
|
dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
|
||||||
be16_to_cpu(match.mask->dst));
|
be16_to_cpu(match.mask->dst));
|
||||||
return I40E_ERR_CONFIG;
|
return -EIO;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -9917,11 +9917,11 @@ static void i40e_link_event(struct i40e_pf *pf)
|
|||||||
status = i40e_get_link_status(&pf->hw, &new_link);
|
status = i40e_get_link_status(&pf->hw, &new_link);
|
||||||
|
|
||||||
/* On success, disable temp link polling */
|
/* On success, disable temp link polling */
|
||||||
if (status == I40E_SUCCESS) {
|
if (status == 0) {
|
||||||
clear_bit(__I40E_TEMP_LINK_POLLING, pf->state);
|
clear_bit(__I40E_TEMP_LINK_POLLING, pf->state);
|
||||||
} else {
|
} else {
|
||||||
/* Enable link polling temporarily until i40e_get_link_status
|
/* Enable link polling temporarily until i40e_get_link_status
|
||||||
* returns I40E_SUCCESS
|
* returns 0
|
||||||
*/
|
*/
|
||||||
set_bit(__I40E_TEMP_LINK_POLLING, pf->state);
|
set_bit(__I40E_TEMP_LINK_POLLING, pf->state);
|
||||||
dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
|
dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
|
||||||
@ -10175,7 +10175,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
|
|||||||
|
|
||||||
do {
|
do {
|
||||||
ret = i40e_clean_arq_element(hw, &event, &pending);
|
ret = i40e_clean_arq_element(hw, &event, &pending);
|
||||||
if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
|
if (ret == -EALREADY)
|
||||||
break;
|
break;
|
||||||
else if (ret) {
|
else if (ret) {
|
||||||
dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
|
dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
|
||||||
@ -12582,7 +12582,7 @@ int i40e_commit_partition_bw_setting(struct i40e_pf *pf)
|
|||||||
dev_info(&pf->pdev->dev,
|
dev_info(&pf->pdev->dev,
|
||||||
"Commit BW only works on partition 1! This is partition %d",
|
"Commit BW only works on partition 1! This is partition %d",
|
||||||
pf->hw.partition_id);
|
pf->hw.partition_id);
|
||||||
ret = I40E_NOT_SUPPORTED;
|
ret = -EOPNOTSUPP;
|
||||||
goto bw_commit_out;
|
goto bw_commit_out;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -12664,10 +12664,10 @@ static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
|
|||||||
#define I40E_LINK_BEHAVIOR_WORD_LENGTH 0x1
|
#define I40E_LINK_BEHAVIOR_WORD_LENGTH 0x1
|
||||||
#define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED BIT(0)
|
#define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED BIT(0)
|
||||||
#define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH 4
|
#define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH 4
|
||||||
int read_status = I40E_SUCCESS;
|
|
||||||
u16 sr_emp_sr_settings_ptr = 0;
|
u16 sr_emp_sr_settings_ptr = 0;
|
||||||
u16 features_enable = 0;
|
u16 features_enable = 0;
|
||||||
u16 link_behavior = 0;
|
u16 link_behavior = 0;
|
||||||
|
int read_status = 0;
|
||||||
bool ret = false;
|
bool ret = false;
|
||||||
|
|
||||||
read_status = i40e_read_nvm_word(&pf->hw,
|
read_status = i40e_read_nvm_word(&pf->hw,
|
||||||
@ -15467,12 +15467,12 @@ static int i40e_pf_loop_reset(struct i40e_pf *pf)
|
|||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
ret = i40e_pf_reset(hw);
|
ret = i40e_pf_reset(hw);
|
||||||
while (ret != I40E_SUCCESS && time_before(jiffies, time_end)) {
|
while (ret != 0 && time_before(jiffies, time_end)) {
|
||||||
usleep_range(10000, 20000);
|
usleep_range(10000, 20000);
|
||||||
ret = i40e_pf_reset(hw);
|
ret = i40e_pf_reset(hw);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (ret == I40E_SUCCESS)
|
if (ret == 0)
|
||||||
pf->pfr_count++;
|
pf->pfr_count++;
|
||||||
else
|
else
|
||||||
dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret);
|
dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret);
|
||||||
@ -15515,10 +15515,10 @@ static int i40e_handle_resets(struct i40e_pf *pf)
|
|||||||
const int pfr = i40e_pf_loop_reset(pf);
|
const int pfr = i40e_pf_loop_reset(pf);
|
||||||
const bool is_empr = i40e_check_fw_empr(pf);
|
const bool is_empr = i40e_check_fw_empr(pf);
|
||||||
|
|
||||||
if (is_empr || pfr != I40E_SUCCESS)
|
if (is_empr || pfr != 0)
|
||||||
dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
|
dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
|
||||||
|
|
||||||
return is_empr ? I40E_ERR_RESET_FAILED : pfr;
|
return is_empr ? -EIO : pfr;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -15811,7 +15811,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||||||
|
|
||||||
err = i40e_init_adminq(hw);
|
err = i40e_init_adminq(hw);
|
||||||
if (err) {
|
if (err) {
|
||||||
if (err == I40E_ERR_FIRMWARE_API_VERSION)
|
if (err == -EIO)
|
||||||
dev_info(&pdev->dev,
|
dev_info(&pdev->dev,
|
||||||
"The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n",
|
"The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n",
|
||||||
hw->aq.api_maj_ver,
|
hw->aq.api_maj_ver,
|
||||||
|
|||||||
@ -37,7 +37,7 @@ int i40e_init_nvm(struct i40e_hw *hw)
|
|||||||
nvm->blank_nvm_mode = false;
|
nvm->blank_nvm_mode = false;
|
||||||
} else { /* Blank programming mode */
|
} else { /* Blank programming mode */
|
||||||
nvm->blank_nvm_mode = true;
|
nvm->blank_nvm_mode = true;
|
||||||
ret_code = I40E_ERR_NVM_BLANK_MODE;
|
ret_code = -EIO;
|
||||||
i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
|
i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -111,8 +111,8 @@ i40e_i40e_acquire_nvm_exit:
|
|||||||
**/
|
**/
|
||||||
void i40e_release_nvm(struct i40e_hw *hw)
|
void i40e_release_nvm(struct i40e_hw *hw)
|
||||||
{
|
{
|
||||||
int ret_code = I40E_SUCCESS;
|
|
||||||
u32 total_delay = 0;
|
u32 total_delay = 0;
|
||||||
|
int ret_code = 0;
|
||||||
|
|
||||||
if (hw->nvm.blank_nvm_mode)
|
if (hw->nvm.blank_nvm_mode)
|
||||||
return;
|
return;
|
||||||
@ -122,7 +122,7 @@ void i40e_release_nvm(struct i40e_hw *hw)
|
|||||||
/* there are some rare cases when trying to release the resource
|
/* there are some rare cases when trying to release the resource
|
||||||
* results in an admin Q timeout, so handle them correctly
|
* results in an admin Q timeout, so handle them correctly
|
||||||
*/
|
*/
|
||||||
while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
|
while ((ret_code == -EIO) &&
|
||||||
(total_delay < hw->aq.asq_cmd_timeout)) {
|
(total_delay < hw->aq.asq_cmd_timeout)) {
|
||||||
usleep_range(1000, 2000);
|
usleep_range(1000, 2000);
|
||||||
ret_code = i40e_aq_release_resource(hw,
|
ret_code = i40e_aq_release_resource(hw,
|
||||||
@ -140,7 +140,7 @@ void i40e_release_nvm(struct i40e_hw *hw)
|
|||||||
**/
|
**/
|
||||||
static int i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
|
static int i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
|
||||||
{
|
{
|
||||||
int ret_code = I40E_ERR_TIMEOUT;
|
int ret_code = -EIO;
|
||||||
u32 srctl, wait_cnt;
|
u32 srctl, wait_cnt;
|
||||||
|
|
||||||
/* Poll the I40E_GLNVM_SRCTL until the done bit is set */
|
/* Poll the I40E_GLNVM_SRCTL until the done bit is set */
|
||||||
@ -152,7 +152,7 @@ static int i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
|
|||||||
}
|
}
|
||||||
udelay(5);
|
udelay(5);
|
||||||
}
|
}
|
||||||
if (ret_code == I40E_ERR_TIMEOUT)
|
if (ret_code == -EIO)
|
||||||
i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
|
i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
|
||||||
return ret_code;
|
return ret_code;
|
||||||
}
|
}
|
||||||
@ -168,14 +168,14 @@ static int i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
|
|||||||
static int i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
|
static int i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
|
||||||
u16 *data)
|
u16 *data)
|
||||||
{
|
{
|
||||||
int ret_code = I40E_ERR_TIMEOUT;
|
int ret_code = -EIO;
|
||||||
u32 sr_reg;
|
u32 sr_reg;
|
||||||
|
|
||||||
if (offset >= hw->nvm.sr_size) {
|
if (offset >= hw->nvm.sr_size) {
|
||||||
i40e_debug(hw, I40E_DEBUG_NVM,
|
i40e_debug(hw, I40E_DEBUG_NVM,
|
||||||
"NVM read error: offset %d beyond Shadow RAM limit %d\n",
|
"NVM read error: offset %d beyond Shadow RAM limit %d\n",
|
||||||
offset, hw->nvm.sr_size);
|
offset, hw->nvm.sr_size);
|
||||||
ret_code = I40E_ERR_PARAM;
|
ret_code = -EINVAL;
|
||||||
goto read_nvm_exit;
|
goto read_nvm_exit;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -222,7 +222,7 @@ static int i40e_read_nvm_aq(struct i40e_hw *hw,
|
|||||||
bool last_command)
|
bool last_command)
|
||||||
{
|
{
|
||||||
struct i40e_asq_cmd_details cmd_details;
|
struct i40e_asq_cmd_details cmd_details;
|
||||||
int ret_code = I40E_ERR_NVM;
|
int ret_code = -EIO;
|
||||||
|
|
||||||
memset(&cmd_details, 0, sizeof(cmd_details));
|
memset(&cmd_details, 0, sizeof(cmd_details));
|
||||||
cmd_details.wb_desc = &hw->nvm_wb_desc;
|
cmd_details.wb_desc = &hw->nvm_wb_desc;
|
||||||
@ -267,7 +267,7 @@ static int i40e_read_nvm_aq(struct i40e_hw *hw,
|
|||||||
static int i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
|
static int i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
|
||||||
u16 *data)
|
u16 *data)
|
||||||
{
|
{
|
||||||
int ret_code = I40E_ERR_TIMEOUT;
|
int ret_code = -EIO;
|
||||||
|
|
||||||
ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
|
ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
|
||||||
*data = le16_to_cpu(*(__le16 *)data);
|
*data = le16_to_cpu(*(__le16 *)data);
|
||||||
@ -348,7 +348,7 @@ int i40e_read_nvm_module_data(struct i40e_hw *hw,
|
|||||||
i40e_debug(hw, I40E_DEBUG_ALL,
|
i40e_debug(hw, I40E_DEBUG_ALL,
|
||||||
"Reading nvm word failed.Error code: %d.\n",
|
"Reading nvm word failed.Error code: %d.\n",
|
||||||
status);
|
status);
|
||||||
return I40E_ERR_NVM;
|
return -EIO;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
#define I40E_NVM_INVALID_PTR_VAL 0x7FFF
|
#define I40E_NVM_INVALID_PTR_VAL 0x7FFF
|
||||||
@ -358,7 +358,7 @@ int i40e_read_nvm_module_data(struct i40e_hw *hw,
|
|||||||
if (ptr_value == I40E_NVM_INVALID_PTR_VAL ||
|
if (ptr_value == I40E_NVM_INVALID_PTR_VAL ||
|
||||||
ptr_value == I40E_NVM_INVALID_VAL) {
|
ptr_value == I40E_NVM_INVALID_VAL) {
|
||||||
i40e_debug(hw, I40E_DEBUG_ALL, "Pointer not initialized.\n");
|
i40e_debug(hw, I40E_DEBUG_ALL, "Pointer not initialized.\n");
|
||||||
return I40E_ERR_BAD_PTR;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Check whether the module is in SR mapped area or outside */
|
/* Check whether the module is in SR mapped area or outside */
|
||||||
@ -367,7 +367,7 @@ int i40e_read_nvm_module_data(struct i40e_hw *hw,
|
|||||||
i40e_debug(hw, I40E_DEBUG_ALL,
|
i40e_debug(hw, I40E_DEBUG_ALL,
|
||||||
"Reading nvm data failed. Pointer points outside of the Shared RAM mapped area.\n");
|
"Reading nvm data failed. Pointer points outside of the Shared RAM mapped area.\n");
|
||||||
|
|
||||||
return I40E_ERR_PARAM;
|
return -EINVAL;
|
||||||
} else {
|
} else {
|
||||||
/* Read from the Shadow RAM */
|
/* Read from the Shadow RAM */
|
||||||
|
|
||||||
@ -377,7 +377,7 @@ int i40e_read_nvm_module_data(struct i40e_hw *hw,
|
|||||||
i40e_debug(hw, I40E_DEBUG_ALL,
|
i40e_debug(hw, I40E_DEBUG_ALL,
|
||||||
"Reading nvm word failed.Error code: %d.\n",
|
"Reading nvm word failed.Error code: %d.\n",
|
||||||
status);
|
status);
|
||||||
return I40E_ERR_NVM;
|
return -EIO;
|
||||||
}
|
}
|
||||||
|
|
||||||
offset = ptr_value + module_offset + specific_ptr +
|
offset = ptr_value + module_offset + specific_ptr +
|
||||||
@ -549,7 +549,7 @@ static int i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
|
|||||||
bool last_command)
|
bool last_command)
|
||||||
{
|
{
|
||||||
struct i40e_asq_cmd_details cmd_details;
|
struct i40e_asq_cmd_details cmd_details;
|
||||||
int ret_code = I40E_ERR_NVM;
|
int ret_code = -EIO;
|
||||||
|
|
||||||
memset(&cmd_details, 0, sizeof(cmd_details));
|
memset(&cmd_details, 0, sizeof(cmd_details));
|
||||||
cmd_details.wb_desc = &hw->nvm_wb_desc;
|
cmd_details.wb_desc = &hw->nvm_wb_desc;
|
||||||
@ -614,7 +614,7 @@ static int i40e_calc_nvm_checksum(struct i40e_hw *hw,
|
|||||||
/* read pointer to VPD area */
|
/* read pointer to VPD area */
|
||||||
ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
|
ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
|
||||||
if (ret_code) {
|
if (ret_code) {
|
||||||
ret_code = I40E_ERR_NVM_CHECKSUM;
|
ret_code = -EIO;
|
||||||
goto i40e_calc_nvm_checksum_exit;
|
goto i40e_calc_nvm_checksum_exit;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -622,7 +622,7 @@ static int i40e_calc_nvm_checksum(struct i40e_hw *hw,
|
|||||||
ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
|
ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
|
||||||
&pcie_alt_module);
|
&pcie_alt_module);
|
||||||
if (ret_code) {
|
if (ret_code) {
|
||||||
ret_code = I40E_ERR_NVM_CHECKSUM;
|
ret_code = -EIO;
|
||||||
goto i40e_calc_nvm_checksum_exit;
|
goto i40e_calc_nvm_checksum_exit;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -636,7 +636,7 @@ static int i40e_calc_nvm_checksum(struct i40e_hw *hw,
|
|||||||
|
|
||||||
ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
|
ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
|
||||||
if (ret_code) {
|
if (ret_code) {
|
||||||
ret_code = I40E_ERR_NVM_CHECKSUM;
|
ret_code = -EIO;
|
||||||
goto i40e_calc_nvm_checksum_exit;
|
goto i40e_calc_nvm_checksum_exit;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -724,7 +724,7 @@ int i40e_validate_nvm_checksum(struct i40e_hw *hw,
|
|||||||
* calculated checksum
|
* calculated checksum
|
||||||
*/
|
*/
|
||||||
if (checksum_local != checksum_sr)
|
if (checksum_local != checksum_sr)
|
||||||
ret_code = I40E_ERR_NVM_CHECKSUM;
|
ret_code = -EIO;
|
||||||
|
|
||||||
/* If the user cares, return the calculated checksum */
|
/* If the user cares, return the calculated checksum */
|
||||||
if (checksum)
|
if (checksum)
|
||||||
@ -839,7 +839,7 @@ int i40e_nvmupd_command(struct i40e_hw *hw,
|
|||||||
if (upd_cmd == I40E_NVMUPD_STATUS) {
|
if (upd_cmd == I40E_NVMUPD_STATUS) {
|
||||||
if (!cmd->data_size) {
|
if (!cmd->data_size) {
|
||||||
*perrno = -EFAULT;
|
*perrno = -EFAULT;
|
||||||
return I40E_ERR_BUF_TOO_SHORT;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
bytes[0] = hw->nvmupd_state;
|
bytes[0] = hw->nvmupd_state;
|
||||||
@ -896,7 +896,7 @@ int i40e_nvmupd_command(struct i40e_hw *hw,
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
status = I40E_ERR_NOT_READY;
|
status = -EBUSY;
|
||||||
*perrno = -EBUSY;
|
*perrno = -EBUSY;
|
||||||
break;
|
break;
|
||||||
|
|
||||||
@ -904,7 +904,7 @@ int i40e_nvmupd_command(struct i40e_hw *hw,
|
|||||||
/* invalid state, should never happen */
|
/* invalid state, should never happen */
|
||||||
i40e_debug(hw, I40E_DEBUG_NVM,
|
i40e_debug(hw, I40E_DEBUG_NVM,
|
||||||
"NVMUPD: no such state %d\n", hw->nvmupd_state);
|
"NVMUPD: no such state %d\n", hw->nvmupd_state);
|
||||||
status = I40E_NOT_SUPPORTED;
|
status = -EOPNOTSUPP;
|
||||||
*perrno = -ESRCH;
|
*perrno = -ESRCH;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
@ -1045,7 +1045,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw,
|
|||||||
i40e_debug(hw, I40E_DEBUG_NVM,
|
i40e_debug(hw, I40E_DEBUG_NVM,
|
||||||
"NVMUPD: bad cmd %s in init state\n",
|
"NVMUPD: bad cmd %s in init state\n",
|
||||||
i40e_nvm_update_state_str[upd_cmd]);
|
i40e_nvm_update_state_str[upd_cmd]);
|
||||||
status = I40E_ERR_NVM;
|
status = -EIO;
|
||||||
*perrno = -ESRCH;
|
*perrno = -ESRCH;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
@ -1087,7 +1087,7 @@ static int i40e_nvmupd_state_reading(struct i40e_hw *hw,
|
|||||||
i40e_debug(hw, I40E_DEBUG_NVM,
|
i40e_debug(hw, I40E_DEBUG_NVM,
|
||||||
"NVMUPD: bad cmd %s in reading state.\n",
|
"NVMUPD: bad cmd %s in reading state.\n",
|
||||||
i40e_nvm_update_state_str[upd_cmd]);
|
i40e_nvm_update_state_str[upd_cmd]);
|
||||||
status = I40E_NOT_SUPPORTED;
|
status = -EOPNOTSUPP;
|
||||||
*perrno = -ESRCH;
|
*perrno = -ESRCH;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
@ -1174,7 +1174,7 @@ retry:
|
|||||||
i40e_debug(hw, I40E_DEBUG_NVM,
|
i40e_debug(hw, I40E_DEBUG_NVM,
|
||||||
"NVMUPD: bad cmd %s in writing state.\n",
|
"NVMUPD: bad cmd %s in writing state.\n",
|
||||||
i40e_nvm_update_state_str[upd_cmd]);
|
i40e_nvm_update_state_str[upd_cmd]);
|
||||||
status = I40E_NOT_SUPPORTED;
|
status = -EOPNOTSUPP;
|
||||||
*perrno = -ESRCH;
|
*perrno = -ESRCH;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
@ -1398,7 +1398,7 @@ static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
|
|||||||
"NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
|
"NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
|
||||||
cmd->data_size, aq_desc_len);
|
cmd->data_size, aq_desc_len);
|
||||||
*perrno = -EINVAL;
|
*perrno = -EINVAL;
|
||||||
return I40E_ERR_PARAM;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
aq_desc = (struct i40e_aq_desc *)bytes;
|
aq_desc = (struct i40e_aq_desc *)bytes;
|
||||||
|
|
||||||
@ -1473,7 +1473,7 @@ static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
|
|||||||
i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
|
i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
|
||||||
__func__, cmd->offset, aq_total_len);
|
__func__, cmd->offset, aq_total_len);
|
||||||
*perrno = -EINVAL;
|
*perrno = -EINVAL;
|
||||||
return I40E_ERR_PARAM;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* check copylength range */
|
/* check copylength range */
|
||||||
|
|||||||
@ -1132,7 +1132,7 @@ int i40e_ptp_alloc_pins(struct i40e_pf *pf)
|
|||||||
|
|
||||||
if (!pf->ptp_pins) {
|
if (!pf->ptp_pins) {
|
||||||
dev_warn(&pf->pdev->dev, "Cannot allocate memory for PTP pins structure.\n");
|
dev_warn(&pf->pdev->dev, "Cannot allocate memory for PTP pins structure.\n");
|
||||||
return -I40E_ERR_NO_MEMORY;
|
return -ENOMEM;
|
||||||
}
|
}
|
||||||
|
|
||||||
pf->ptp_pins->sdp3_2 = off;
|
pf->ptp_pins->sdp3_2 = off;
|
||||||
|
|||||||
@ -1,43 +0,0 @@
|
|||||||
/* SPDX-License-Identifier: GPL-2.0 */
|
|
||||||
/* Copyright(c) 2013 - 2018 Intel Corporation. */
|
|
||||||
|
|
||||||
#ifndef _I40E_STATUS_H_
|
|
||||||
#define _I40E_STATUS_H_
|
|
||||||
|
|
||||||
/* Error Codes */
|
|
||||||
enum i40e_status_code {
|
|
||||||
I40E_SUCCESS = 0,
|
|
||||||
I40E_ERR_NVM = -1,
|
|
||||||
I40E_ERR_NVM_CHECKSUM = -2,
|
|
||||||
I40E_ERR_CONFIG = -4,
|
|
||||||
I40E_ERR_PARAM = -5,
|
|
||||||
I40E_ERR_UNKNOWN_PHY = -7,
|
|
||||||
I40E_ERR_INVALID_MAC_ADDR = -10,
|
|
||||||
I40E_ERR_DEVICE_NOT_SUPPORTED = -11,
|
|
||||||
I40E_ERR_RESET_FAILED = -15,
|
|
||||||
I40E_ERR_NO_AVAILABLE_VSI = -17,
|
|
||||||
I40E_ERR_NO_MEMORY = -18,
|
|
||||||
I40E_ERR_BAD_PTR = -19,
|
|
||||||
I40E_ERR_INVALID_SIZE = -26,
|
|
||||||
I40E_ERR_QUEUE_EMPTY = -32,
|
|
||||||
I40E_ERR_TIMEOUT = -37,
|
|
||||||
I40E_ERR_INVALID_SD_INDEX = -45,
|
|
||||||
I40E_ERR_INVALID_PAGE_DESC_INDEX = -46,
|
|
||||||
I40E_ERR_INVALID_SD_TYPE = -47,
|
|
||||||
I40E_ERR_INVALID_HMC_OBJ_INDEX = -49,
|
|
||||||
I40E_ERR_INVALID_HMC_OBJ_COUNT = -50,
|
|
||||||
I40E_ERR_ADMIN_QUEUE_ERROR = -53,
|
|
||||||
I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54,
|
|
||||||
I40E_ERR_BUF_TOO_SHORT = -55,
|
|
||||||
I40E_ERR_ADMIN_QUEUE_FULL = -56,
|
|
||||||
I40E_ERR_ADMIN_QUEUE_NO_WORK = -57,
|
|
||||||
I40E_ERR_NVM_BLANK_MODE = -59,
|
|
||||||
I40E_ERR_NOT_IMPLEMENTED = -60,
|
|
||||||
I40E_ERR_DIAG_TEST_FAILED = -62,
|
|
||||||
I40E_ERR_NOT_READY = -63,
|
|
||||||
I40E_NOT_SUPPORTED = -64,
|
|
||||||
I40E_ERR_FIRMWARE_API_VERSION = -65,
|
|
||||||
I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66,
|
|
||||||
};
|
|
||||||
|
|
||||||
#endif /* _I40E_STATUS_H_ */
|
|
||||||
@ -4,7 +4,6 @@
|
|||||||
#ifndef _I40E_TYPE_H_
|
#ifndef _I40E_TYPE_H_
|
||||||
#define _I40E_TYPE_H_
|
#define _I40E_TYPE_H_
|
||||||
|
|
||||||
#include "i40e_status.h"
|
|
||||||
#include "i40e_osdep.h"
|
#include "i40e_osdep.h"
|
||||||
#include "i40e_register.h"
|
#include "i40e_register.h"
|
||||||
#include "i40e_adminq.h"
|
#include "i40e_adminq.h"
|
||||||
|
|||||||
@ -420,7 +420,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
|
|||||||
(qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
|
(qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
|
||||||
(pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
|
(pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
|
||||||
BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
|
BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
|
||||||
(itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
|
FIELD_PREP(I40E_QINT_RQCTL_ITR_INDX_MASK, itr_idx);
|
||||||
wr32(hw, reg_idx, reg);
|
wr32(hw, reg_idx, reg);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -628,6 +628,13 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
|
|||||||
|
|
||||||
/* only set the required fields */
|
/* only set the required fields */
|
||||||
tx_ctx.base = info->dma_ring_addr / 128;
|
tx_ctx.base = info->dma_ring_addr / 128;
|
||||||
|
|
||||||
|
/* ring_len has to be multiple of 8 */
|
||||||
|
if (!IS_ALIGNED(info->ring_len, 8) ||
|
||||||
|
info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) {
|
||||||
|
ret = -EINVAL;
|
||||||
|
goto error_context;
|
||||||
|
}
|
||||||
tx_ctx.qlen = info->ring_len;
|
tx_ctx.qlen = info->ring_len;
|
||||||
tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
|
tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
|
||||||
tx_ctx.rdylist_act = 0;
|
tx_ctx.rdylist_act = 0;
|
||||||
@ -693,6 +700,13 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
|
|||||||
|
|
||||||
/* only set the required fields */
|
/* only set the required fields */
|
||||||
rx_ctx.base = info->dma_ring_addr / 128;
|
rx_ctx.base = info->dma_ring_addr / 128;
|
||||||
|
|
||||||
|
/* ring_len has to be multiple of 32 */
|
||||||
|
if (!IS_ALIGNED(info->ring_len, 32) ||
|
||||||
|
info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) {
|
||||||
|
ret = -EINVAL;
|
||||||
|
goto error_param;
|
||||||
|
}
|
||||||
rx_ctx.qlen = info->ring_len;
|
rx_ctx.qlen = info->ring_len;
|
||||||
|
|
||||||
if (info->splithdr_enabled) {
|
if (info->splithdr_enabled) {
|
||||||
@ -1346,14 +1360,14 @@ static int i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
|
|||||||
bool alluni)
|
bool alluni)
|
||||||
{
|
{
|
||||||
struct i40e_pf *pf = vf->pf;
|
struct i40e_pf *pf = vf->pf;
|
||||||
int aq_ret = I40E_SUCCESS;
|
|
||||||
struct i40e_vsi *vsi;
|
struct i40e_vsi *vsi;
|
||||||
|
int aq_ret = 0;
|
||||||
u16 num_vlans;
|
u16 num_vlans;
|
||||||
s16 *vl;
|
s16 *vl;
|
||||||
|
|
||||||
vsi = i40e_find_vsi_from_id(pf, vsi_id);
|
vsi = i40e_find_vsi_from_id(pf, vsi_id);
|
||||||
if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
|
if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
|
||||||
return I40E_ERR_PARAM;
|
return -EINVAL;
|
||||||
|
|
||||||
if (vf->port_vlan_id) {
|
if (vf->port_vlan_id) {
|
||||||
aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti,
|
aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti,
|
||||||
@ -1363,7 +1377,7 @@ static int i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
|
|||||||
i40e_get_vlan_list_sync(vsi, &num_vlans, &vl);
|
i40e_get_vlan_list_sync(vsi, &num_vlans, &vl);
|
||||||
|
|
||||||
if (!vl)
|
if (!vl)
|
||||||
return I40E_ERR_NO_MEMORY;
|
return -ENOMEM;
|
||||||
|
|
||||||
aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
|
aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
|
||||||
vl, num_vlans);
|
vl, num_vlans);
|
||||||
@ -1430,6 +1444,7 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
|
|||||||
* functions that may still be running at this point.
|
* functions that may still be running at this point.
|
||||||
*/
|
*/
|
||||||
clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
|
clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
|
||||||
|
clear_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states);
|
||||||
|
|
||||||
/* In the case of a VFLR, the HW has already reset the VF and we
|
/* In the case of a VFLR, the HW has already reset the VF and we
|
||||||
* just need to clean up, so don't hit the VFRTRIG register.
|
* just need to clean up, so don't hit the VFRTRIG register.
|
||||||
@ -2035,7 +2050,7 @@ static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
|
|||||||
if (VF_IS_V10(&vf->vf_ver))
|
if (VF_IS_V10(&vf->vf_ver))
|
||||||
info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
|
info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
|
||||||
return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
|
return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
|
||||||
I40E_SUCCESS, (u8 *)&info,
|
0, (u8 *)&info,
|
||||||
sizeof(struct virtchnl_version_info));
|
sizeof(struct virtchnl_version_info));
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2096,15 +2111,18 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
|
|||||||
size_t len = 0;
|
size_t len = 0;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) {
|
i40e_sync_vf_state(vf, I40E_VF_STATE_INIT);
|
||||||
aq_ret = I40E_ERR_PARAM;
|
|
||||||
|
if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) ||
|
||||||
|
test_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states)) {
|
||||||
|
aq_ret = -EINVAL;
|
||||||
goto err;
|
goto err;
|
||||||
}
|
}
|
||||||
|
|
||||||
len = struct_size(vfres, vsi_res, num_vsis);
|
len = struct_size(vfres, vsi_res, num_vsis);
|
||||||
vfres = kzalloc(len, GFP_KERNEL);
|
vfres = kzalloc(len, GFP_KERNEL);
|
||||||
if (!vfres) {
|
if (!vfres) {
|
||||||
aq_ret = I40E_ERR_NO_MEMORY;
|
aq_ret = -ENOMEM;
|
||||||
len = 0;
|
len = 0;
|
||||||
goto err;
|
goto err;
|
||||||
}
|
}
|
||||||
@ -2157,7 +2175,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
|
|||||||
dev_err(&pf->pdev->dev,
|
dev_err(&pf->pdev->dev,
|
||||||
"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
|
"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
|
||||||
vf->vf_id);
|
vf->vf_id);
|
||||||
aq_ret = I40E_ERR_PARAM;
|
aq_ret = -EINVAL;
|
||||||
goto err;
|
goto err;
|
||||||
}
|
}
|
||||||
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
|
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
|
||||||
@ -2197,6 +2215,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
|
|||||||
vf->default_lan_addr.addr);
|
vf->default_lan_addr.addr);
|
||||||
}
|
}
|
||||||
set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
|
set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
|
||||||
|
set_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states);
|
||||||
|
|
||||||
err:
|
err:
|
||||||
/* send the response back to the VF */
|
/* send the response back to the VF */
|
||||||
@ -2225,7 +2244,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
|
|||||||
int aq_ret = 0;
|
int aq_ret = 0;
|
||||||
|
|
||||||
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
|
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
|
||||||
aq_ret = I40E_ERR_PARAM;
|
aq_ret = -EINVAL;
|
||||||
goto err_out;
|
goto err_out;
|
||||||
}
|
}
|
||||||
if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
|
if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
|
||||||
@ -2241,12 +2260,12 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) {
|
if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) {
|
||||||
aq_ret = I40E_ERR_PARAM;
|
aq_ret = -EINVAL;
|
||||||
goto err_out;
|
goto err_out;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
|
if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
|
||||||
aq_ret = I40E_ERR_PARAM;
|
aq_ret = -EINVAL;
|
||||||
goto err_out;
|
goto err_out;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2313,17 +2332,17 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
|
|||||||
int aq_ret = 0;
|
int aq_ret = 0;
|
||||||
|
|
||||||
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
|
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
|
||||||
aq_ret = I40E_ERR_PARAM;
|
aq_ret = -EINVAL;
|
||||||
goto error_param;
|
goto error_param;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
|
if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
|
||||||
aq_ret = I40E_ERR_PARAM;
|
aq_ret = -EINVAL;
|
||||||
goto error_param;
|
goto error_param;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) {
|
if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) {
|
||||||
aq_ret = I40E_ERR_PARAM;
|
aq_ret = -EINVAL;
|
||||||
goto error_param;
|
goto error_param;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2331,7 +2350,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
|
|||||||
for (i = 0; i < vf->num_tc; i++)
|
for (i = 0; i < vf->num_tc; i++)
|
||||||
num_qps_all += vf->ch[i].num_qps;
|
num_qps_all += vf->ch[i].num_qps;
|
||||||
if (num_qps_all != qci->num_queue_pairs) {
|
if (num_qps_all != qci->num_queue_pairs) {
|
||||||
aq_ret = I40E_ERR_PARAM;
|
aq_ret = -EINVAL;
|
||||||
goto error_param;
|
goto error_param;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -2344,7 +2363,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
|
|||||||
if (!vf->adq_enabled) {
|
if (!vf->adq_enabled) {
|
||||||
if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
|
if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
|
||||||
qpi->txq.queue_id)) {
|
qpi->txq.queue_id)) {
|
||||||
aq_ret = I40E_ERR_PARAM;
|
aq_ret = -EINVAL;
|
||||||
goto error_param;
|
goto error_param;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2353,14 +2372,14 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
|
|||||||
if (qpi->txq.vsi_id != qci->vsi_id ||
|
if (qpi->txq.vsi_id != qci->vsi_id ||
|
||||||
qpi->rxq.vsi_id != qci->vsi_id ||
|
qpi->rxq.vsi_id != qci->vsi_id ||
|
||||||
qpi->rxq.queue_id != vsi_queue_id) {
|
qpi->rxq.queue_id != vsi_queue_id) {
|
||||||
aq_ret = I40E_ERR_PARAM;
|
aq_ret = -EINVAL;
|
||||||
goto error_param;
|
goto error_param;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (vf->adq_enabled) {
|
if (vf->adq_enabled) {
|
||||||
if (idx >= ARRAY_SIZE(vf->ch)) {
|
if (idx >= vf->num_tc) {
|
||||||
aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
|
aq_ret = -ENODEV;
|
||||||
goto error_param;
|
goto error_param;
|
||||||
}
|
}
|
||||||
vsi_id = vf->ch[idx].vsi_id;
|
vsi_id = vf->ch[idx].vsi_id;
|
||||||
@ -2370,7 +2389,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
|
|||||||
&qpi->rxq) ||
|
&qpi->rxq) ||
|
||||||
i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
|
i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
|
||||||
&qpi->txq)) {
|
&qpi->txq)) {
|
||||||
aq_ret = I40E_ERR_PARAM;
|
aq_ret = -EINVAL;
|
||||||
goto error_param;
|
goto error_param;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2380,8 +2399,8 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
|
|||||||
* to its appropriate VSIs based on TC mapping
|
* to its appropriate VSIs based on TC mapping
|
||||||
*/
|
*/
|
||||||
if (vf->adq_enabled) {
|
if (vf->adq_enabled) {
|
||||||
if (idx >= ARRAY_SIZE(vf->ch)) {
|
if (idx >= vf->num_tc) {
|
||||||
aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
|
aq_ret = -ENODEV;
|
||||||
goto error_param;
|
goto error_param;
|
||||||
}
|
}
|
||||||
if (j == (vf->ch[idx].num_qps - 1)) {
|
if (j == (vf->ch[idx].num_qps - 1)) {
|
||||||
@ -2404,7 +2423,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
|
|||||||
vsi->num_queue_pairs = vf->ch[i].num_qps;
|
vsi->num_queue_pairs = vf->ch[i].num_qps;
|
||||||
|
|
||||||
if (i40e_update_adq_vsi_queues(vsi, i)) {
|
if (i40e_update_adq_vsi_queues(vsi, i)) {
|
||||||
aq_ret = I40E_ERR_CONFIG;
|
aq_ret = -EIO;
|
||||||
goto error_param;
|
goto error_param;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -2430,8 +2449,10 @@ static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
|
|||||||
u16 vsi_queue_id, queue_id;
|
u16 vsi_queue_id, queue_id;
|
||||||
|
|
||||||
for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
|
for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
|
||||||
if (vf->adq_enabled) {
|
u16 idx = vsi_queue_id / I40E_MAX_VF_VSI;
|
||||||
vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
|
|
||||||
|
if (vf->adq_enabled && idx < vf->num_tc) {
|
||||||
|
vsi_id = vf->ch[idx].vsi_id;
|
||||||
queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
|
queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
|
||||||
} else {
|
} else {
|
||||||
queue_id = vsi_queue_id;
|
queue_id = vsi_queue_id;
|
||||||
@ -2462,13 +2483,13 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
|
|||||||
int i;
|
int i;
|
||||||
|
|
||||||
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
|
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
|
||||||
aq_ret = I40E_ERR_PARAM;
|
aq_ret = -EINVAL;
|
||||||
goto error_param;
|
goto error_param;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (irqmap_info->num_vectors >
|
if (irqmap_info->num_vectors >
|
||||||
vf->pf->hw.func_caps.num_msix_vectors_vf) {
|
vf->pf->hw.func_caps.num_msix_vectors_vf) {
|
||||||
aq_ret = I40E_ERR_PARAM;
|
aq_ret = -EINVAL;
|
||||||
goto error_param;
|
goto error_param;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2477,18 +2498,18 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
|
|||||||
/* validate msg params */
|
/* validate msg params */
|
||||||
if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) ||
|
if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) ||
|
||||||
!i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) {
|
!i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) {
|
||||||
aq_ret = I40E_ERR_PARAM;
|
aq_ret = -EINVAL;
|
||||||
goto error_param;
|
goto error_param;
|
||||||
}
|
}
|
||||||
vsi_id = map->vsi_id;
|
vsi_id = map->vsi_id;
|
||||||
|
|
||||||
if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
|
if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
|
||||||
aq_ret = I40E_ERR_PARAM;
|
aq_ret = -EINVAL;
|
||||||
goto error_param;
|
goto error_param;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
|
if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
|
||||||
aq_ret = I40E_ERR_PARAM;
|
aq_ret = -EINVAL;
|
||||||
goto error_param;
|
goto error_param;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2577,29 +2598,29 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
|
|||||||
int i;
|
int i;
|
||||||
|
|
||||||
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
|
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
|
||||||
aq_ret = I40E_ERR_PARAM;
|
aq_ret = -EINVAL;
|
||||||
goto error_param;
|
goto error_param;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
|
if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
|
||||||
aq_ret = I40E_ERR_PARAM;
|
aq_ret = -EINVAL;
|
||||||
goto error_param;
|
goto error_param;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
|
if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
|
||||||
aq_ret = I40E_ERR_PARAM;
|
aq_ret = -EINVAL;
|
||||||
goto error_param;
|
goto error_param;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Use the queue bit map sent by the VF */
|
/* Use the queue bit map sent by the VF */
|
||||||
     if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
                               true)) {
-        aq_ret = I40E_ERR_TIMEOUT;
+        aq_ret = -EIO;
         goto error_param;
     }
     if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
                               true)) {
-        aq_ret = I40E_ERR_TIMEOUT;
+        aq_ret = -EIO;
         goto error_param;
     }

@@ -2608,7 +2629,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
     /* zero belongs to LAN VSI */
     for (i = 1; i < vf->num_tc; i++) {
         if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
-            aq_ret = I40E_ERR_TIMEOUT;
+            aq_ret = -EIO;
     }
 }

@@ -2634,29 +2655,29 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
     int aq_ret = 0;

     if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
-        aq_ret = I40E_ERR_PARAM;
+        aq_ret = -EINVAL;
         goto error_param;
     }

     if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
-        aq_ret = I40E_ERR_PARAM;
+        aq_ret = -EINVAL;
         goto error_param;
     }

     if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
-        aq_ret = I40E_ERR_PARAM;
+        aq_ret = -EINVAL;
         goto error_param;
     }

     /* Use the queue bit map sent by the VF */
     if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
                               false)) {
-        aq_ret = I40E_ERR_TIMEOUT;
+        aq_ret = -EIO;
         goto error_param;
     }
     if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
                               false)) {
-        aq_ret = I40E_ERR_TIMEOUT;
+        aq_ret = -EIO;
         goto error_param;
     }
 error_param:
@@ -2788,18 +2809,18 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
     memset(&stats, 0, sizeof(struct i40e_eth_stats));

     if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
-        aq_ret = I40E_ERR_PARAM;
+        aq_ret = -EINVAL;
         goto error_param;
     }

     if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
-        aq_ret = I40E_ERR_PARAM;
+        aq_ret = -EINVAL;
         goto error_param;
     }

     vsi = pf->vsi[vf->lan_vsi_idx];
     if (!vsi) {
-        aq_ret = I40E_ERR_PARAM;
+        aq_ret = -EINVAL;
         goto error_param;
     }
     i40e_update_eth_stats(vsi);
@@ -2878,7 +2899,7 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
         is_zero_ether_addr(addr)) {
         dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
                 addr);
-        return I40E_ERR_INVALID_MAC_ADDR;
+        return -EINVAL;
     }

     /* If the host VMM administrator has set the VF MAC address
@@ -3014,7 +3035,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)

     if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
         !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
-        ret = I40E_ERR_PARAM;
+        ret = -EINVAL;
         goto error_param;
     }

@@ -3043,7 +3064,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
             dev_err(&pf->pdev->dev,
                     "Unable to add MAC filter %pM for VF %d\n",
                     al->list[i].addr, vf->vf_id);
-            ret = I40E_ERR_PARAM;
+            ret = -EINVAL;
             spin_unlock_bh(&vsi->mac_filter_hash_lock);
             goto error_param;
         }
@@ -3083,7 +3104,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)

     if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
         !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
-        ret = I40E_ERR_PARAM;
+        ret = -EINVAL;
         goto error_param;
     }

@@ -3092,7 +3113,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
             is_zero_ether_addr(al->list[i].addr)) {
             dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
                     al->list[i].addr, vf->vf_id);
-            ret = I40E_ERR_INVALID_MAC_ADDR;
+            ret = -EINVAL;
             goto error_param;
         }
     }
@@ -3114,7 +3135,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
         }

         if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
-            ret = I40E_ERR_INVALID_MAC_ADDR;
+            ret = -EINVAL;
             spin_unlock_bh(&vsi->mac_filter_hash_lock);
             goto error_param;
         }
@@ -3176,13 +3197,13 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
     }
     if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
         !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
-        aq_ret = I40E_ERR_PARAM;
+        aq_ret = -EINVAL;
         goto error_param;
     }

     for (i = 0; i < vfl->num_elements; i++) {
         if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
-            aq_ret = I40E_ERR_PARAM;
+            aq_ret = -EINVAL;
             dev_err(&pf->pdev->dev,
                     "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
             goto error_param;
@@ -3190,7 +3211,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
     }
     vsi = pf->vsi[vf->lan_vsi_idx];
     if (vsi->info.pvid) {
-        aq_ret = I40E_ERR_PARAM;
+        aq_ret = -EINVAL;
         goto error_param;
     }

@@ -3241,13 +3262,13 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)

     if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
         !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
-        aq_ret = I40E_ERR_PARAM;
+        aq_ret = -EINVAL;
         goto error_param;
     }

     for (i = 0; i < vfl->num_elements; i++) {
         if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
-            aq_ret = I40E_ERR_PARAM;
+            aq_ret = -EINVAL;
             goto error_param;
         }
     }
@@ -3255,7 +3276,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
     vsi = pf->vsi[vf->lan_vsi_idx];
     if (vsi->info.pvid) {
         if (vfl->num_elements > 1 || vfl->vlan_id[0])
-            aq_ret = I40E_ERR_PARAM;
+            aq_ret = -EINVAL;
         goto error_param;
     }

@@ -3296,7 +3317,7 @@ static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)

     if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
         !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {
-        aq_ret = I40E_ERR_PARAM;
+        aq_ret = -EINVAL;
         goto error_param;
     }

@@ -3325,13 +3346,13 @@ static int i40e_vc_rdma_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)

     if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
         !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {
-        aq_ret = I40E_ERR_PARAM;
+        aq_ret = -EINVAL;
         goto error_param;
     }

     if (config) {
         if (i40e_config_rdma_qvlist(vf, qvlist_info))
-            aq_ret = I40E_ERR_PARAM;
+            aq_ret = -EINVAL;
     } else {
         i40e_release_rdma_qvlist(vf);
     }
@@ -3362,7 +3383,7 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
     if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
         !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
         vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
-        aq_ret = I40E_ERR_PARAM;
+        aq_ret = -EINVAL;
         goto err;
     }

@@ -3393,13 +3414,13 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
     if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
         !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
         vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
-        aq_ret = I40E_ERR_PARAM;
+        aq_ret = -EINVAL;
         goto err;
     }

     for (i = 0; i < vrl->lut_entries; i++)
         if (vrl->lut[i] >= vf->num_queue_pairs) {
-            aq_ret = I40E_ERR_PARAM;
+            aq_ret = -EINVAL;
             goto err;
         }

@@ -3426,14 +3447,14 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
     int len = 0;

     if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
-        aq_ret = I40E_ERR_PARAM;
+        aq_ret = -EINVAL;
         goto err;
     }
     len = sizeof(struct virtchnl_rss_hena);

     vrh = kzalloc(len, GFP_KERNEL);
     if (!vrh) {
-        aq_ret = I40E_ERR_NO_MEMORY;
+        aq_ret = -ENOMEM;
         len = 0;
         goto err;
     }
@@ -3462,7 +3483,7 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
     int aq_ret = 0;

     if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
-        aq_ret = I40E_ERR_PARAM;
+        aq_ret = -EINVAL;
         goto err;
     }
     i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
@@ -3487,7 +3508,7 @@ static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
     int aq_ret = 0;

     if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
-        aq_ret = I40E_ERR_PARAM;
+        aq_ret = -EINVAL;
         goto err;
     }

@@ -3513,7 +3534,7 @@ static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
     int aq_ret = 0;

     if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
-        aq_ret = I40E_ERR_PARAM;
+        aq_ret = -EINVAL;
         goto err;
     }

@@ -3545,16 +3566,16 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf,
     bool found = false;
     int bkt;

-    if (!tc_filter->action) {
+    if (tc_filter->action != VIRTCHNL_ACTION_TC_REDIRECT) {
         dev_info(&pf->pdev->dev,
-                 "VF %d: Currently ADq doesn't support Drop Action\n",
-                 vf->vf_id);
+                 "VF %d: ADQ doesn't support this action (%d)\n",
+                 vf->vf_id, tc_filter->action);
         goto err;
     }

     /* action_meta is TC number here to which the filter is applied */
     if (!tc_filter->action_meta ||
-        tc_filter->action_meta > I40E_MAX_VF_VSI) {
+        tc_filter->action_meta >= vf->num_tc) {
         dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
                  vf->vf_id, tc_filter->action_meta);
         goto err;
@@ -3601,7 +3622,7 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf,
             dev_err(&pf->pdev->dev,
                     "VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
                     vf->vf_id);
-            return I40E_ERR_CONFIG;
+            return -EIO;
         }
     }

@@ -3654,9 +3675,9 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf,
         }
     }

-    return I40E_SUCCESS;
+    return 0;
 err:
-    return I40E_ERR_CONFIG;
+    return -EIO;
 }

 /**
@@ -3740,7 +3761,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
     int i, ret;

     if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
-        aq_ret = I40E_ERR_PARAM;
+        aq_ret = -EINVAL;
         goto err;
     }

@@ -3748,7 +3769,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
         dev_info(&pf->pdev->dev,
                  "VF %d: ADq not enabled, can't apply cloud filter\n",
                  vf->vf_id);
-        aq_ret = I40E_ERR_PARAM;
+        aq_ret = -EINVAL;
         goto err;
     }

@@ -3756,7 +3777,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
         dev_info(&pf->pdev->dev,
                  "VF %d: Invalid input, can't apply cloud filter\n",
                  vf->vf_id);
-        aq_ret = I40E_ERR_PARAM;
+        aq_ret = -EINVAL;
         goto err;
     }

@@ -3852,6 +3873,8 @@ err:
                           aq_ret);
 }

+#define I40E_MAX_VF_CLOUD_FILTER 0xFF00
+
 /**
  * i40e_vc_add_cloud_filter
  * @vf: pointer to the VF info
@@ -3871,7 +3894,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
     int i, ret;

     if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
-        aq_ret = I40E_ERR_PARAM;
+        aq_ret = -EINVAL;
         goto err_out;
     }

@@ -3879,7 +3902,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
         dev_info(&pf->pdev->dev,
                  "VF %d: ADq is not enabled, can't apply cloud filter\n",
                  vf->vf_id);
-        aq_ret = I40E_ERR_PARAM;
+        aq_ret = -EINVAL;
         goto err_out;
     }

@@ -3887,7 +3910,15 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
         dev_info(&pf->pdev->dev,
                  "VF %d: Invalid input/s, can't apply cloud filter\n",
                  vf->vf_id);
-        aq_ret = I40E_ERR_PARAM;
+        aq_ret = -EINVAL;
+        goto err_out;
+    }
+
+    if (vf->num_cloud_filters >= I40E_MAX_VF_CLOUD_FILTER) {
+        dev_warn(&pf->pdev->dev,
+                 "VF %d: Max number of filters reached, can't apply cloud filter\n",
+                 vf->vf_id);
+        aq_ret = -ENOSPC;
         goto err_out;
     }

@@ -3980,7 +4011,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
     u64 speed = 0;

     if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
-        aq_ret = I40E_ERR_PARAM;
+        aq_ret = -EINVAL;
         goto err;
     }

@@ -3988,7 +4019,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
     if (vf->spoofchk) {
         dev_err(&pf->pdev->dev,
                 "Spoof check is ON, turn it OFF to enable ADq\n");
-        aq_ret = I40E_ERR_PARAM;
+        aq_ret = -EINVAL;
         goto err;
     }

@@ -3996,7 +4027,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
         dev_err(&pf->pdev->dev,
                 "VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
                 vf->vf_id);
-        aq_ret = I40E_ERR_PARAM;
+        aq_ret = -EINVAL;
         goto err;
     }

@@ -4005,7 +4036,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
         dev_err(&pf->pdev->dev,
                 "VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n",
                 vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI);
-        aq_ret = I40E_ERR_PARAM;
+        aq_ret = -EINVAL;
         goto err;
     }

@@ -4017,7 +4048,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
                     "VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n",
                     vf->vf_id, i, tci->list[i].count,
                     I40E_DEFAULT_QUEUES_PER_VF);
-            aq_ret = I40E_ERR_PARAM;
+            aq_ret = -EINVAL;
             goto err;
         }

@@ -4028,7 +4059,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
         dev_err(&pf->pdev->dev,
                 "No queues left to allocate to VF %d\n",
                 vf->vf_id);
-        aq_ret = I40E_ERR_PARAM;
+        aq_ret = -EINVAL;
         goto err;
     } else {
         /* we need to allocate max VF queues to enable ADq so as to
@@ -4043,7 +4074,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
     if (speed == SPEED_UNKNOWN) {
         dev_err(&pf->pdev->dev,
                 "Cannot detect link speed\n");
-        aq_ret = I40E_ERR_PARAM;
+        aq_ret = -EINVAL;
         goto err;
     }

@@ -4056,7 +4087,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
                     "Invalid max tx rate %llu specified for VF %d.",
                     tci->list[i].max_tx_rate,
                     vf->vf_id);
-            aq_ret = I40E_ERR_PARAM;
+            aq_ret = -EINVAL;
             goto err;
         } else {
             vf->ch[i].max_tx_rate =
@@ -4072,7 +4103,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
     /* reset the VF in order to allocate resources */
     i40e_vc_reset_vf(vf, true);

-    return I40E_SUCCESS;
+    return 0;

     /* send the response to the VF */
 err:
@@ -4091,7 +4122,7 @@ static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
     int aq_ret = 0;

     if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
-        aq_ret = I40E_ERR_PARAM;
+        aq_ret = -EINVAL;
         goto err;
     }

@@ -4106,13 +4137,13 @@ static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
     } else {
         dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
                  vf->vf_id);
-        aq_ret = I40E_ERR_PARAM;
+        aq_ret = -EINVAL;
     }

     /* reset the VF in order to allocate resources */
     i40e_vc_reset_vf(vf, true);

-    return I40E_SUCCESS;
+    return 0;

 err:
     return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
@@ -4146,21 +4177,16 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,

     /* Check if VF is disabled. */
     if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
-        return I40E_ERR_PARAM;
+        return -EINVAL;

     /* perform basic checks on the msg */
     ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);

     if (ret) {
-        i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
+        i40e_vc_send_resp_to_vf(vf, v_opcode, -EINVAL);
         dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
                 local_vf_id, v_opcode, msglen);
-        switch (ret) {
-        case VIRTCHNL_STATUS_ERR_PARAM:
-            return -EPERM;
-        default:
-            return -EINVAL;
-        }
+        return ret;
     }

     switch (v_opcode) {
@@ -4253,7 +4279,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
         dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
                 v_opcode, local_vf_id);
         ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
-                                      I40E_ERR_NOT_IMPLEMENTED);
+                                      -EOPNOTSUPP);
         break;
     }

@@ -39,7 +39,8 @@ enum i40e_vf_states {
     I40E_VF_STATE_MC_PROMISC,
     I40E_VF_STATE_UC_PROMISC,
     I40E_VF_STATE_PRE_ENABLE,
-    I40E_VF_STATE_RESETTING
+    I40E_VF_STATE_RESETTING,
+    I40E_VF_STATE_RESOURCES_LOADED,
 };

 /* VF capabilities */
@@ -166,83 +166,6 @@ nfs_page_group_lock_head(struct nfs_page *req)
     return head;
 }

-/*
- * nfs_unroll_locks - unlock all newly locked reqs and wait on @req
- * @head: head request of page group, must be holding head lock
- * @req: request that couldn't lock and needs to wait on the req bit lock
- *
- * This is a helper function for nfs_lock_and_join_requests
- * returns 0 on success, < 0 on error.
- */
-static void
-nfs_unroll_locks(struct nfs_page *head, struct nfs_page *req)
-{
-    struct nfs_page *tmp;
-
-    /* relinquish all the locks successfully grabbed this run */
-    for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page) {
-        if (!kref_read(&tmp->wb_kref))
-            continue;
-        nfs_unlock_and_release_request(tmp);
-    }
-}
-
-/*
- * nfs_page_group_lock_subreq - try to lock a subrequest
- * @head: head request of page group
- * @subreq: request to lock
- *
- * This is a helper function for nfs_lock_and_join_requests which
- * must be called with the head request and page group both locked.
- * On error, it returns with the page group unlocked.
- */
-static int
-nfs_page_group_lock_subreq(struct nfs_page *head, struct nfs_page *subreq)
-{
-    int ret;
-
-    if (!kref_get_unless_zero(&subreq->wb_kref))
-        return 0;
-    while (!nfs_lock_request(subreq)) {
-        nfs_page_group_unlock(head);
-        ret = nfs_wait_on_request(subreq);
-        if (!ret)
-            ret = nfs_page_group_lock(head);
-        if (ret < 0) {
-            nfs_unroll_locks(head, subreq);
-            nfs_release_request(subreq);
-            return ret;
-        }
-    }
-    return 0;
-}
-
-/*
- * nfs_page_group_lock_subrequests - try to lock the subrequests
- * @head: head request of page group
- *
- * This is a helper function for nfs_lock_and_join_requests which
- * must be called with the head request locked.
- */
-int nfs_page_group_lock_subrequests(struct nfs_page *head)
-{
-    struct nfs_page *subreq;
-    int ret;
-
-    ret = nfs_page_group_lock(head);
-    if (ret < 0)
-        return ret;
-    /* lock each request in the page group */
-    for (subreq = head->wb_this_page; subreq != head;
-            subreq = subreq->wb_this_page) {
-        ret = nfs_page_group_lock_subreq(head, subreq);
-        if (ret < 0)
-            return ret;
-    }
-    nfs_page_group_unlock(head);
-    return 0;
-}
-
 /*
  * nfs_page_set_headlock - set the request PG_HEADLOCK
  * @req: request that is to be locked
@@ -309,13 +232,14 @@ nfs_page_group_unlock(struct nfs_page *req)
     nfs_page_clear_headlock(req);
 }

-/*
- * nfs_page_group_sync_on_bit_locked
+/**
+ * nfs_page_group_sync_on_bit_locked - Test if all requests have @bit set
+ * @req: request in page group
+ * @bit: PG_* bit that is used to sync page group
  *
  * must be called with page group lock held
  */
-static bool
-nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
+bool nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
 {
     struct nfs_page *head = req->wb_head;
     struct nfs_page *tmp;
138  fs/nfs/write.c
@@ -147,20 +147,10 @@ nfs_page_set_inode_ref(struct nfs_page *req, struct inode *inode)
     }
 }

-static int
-nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode)
+static void nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode)
 {
-    int ret;
-
-    if (!test_bit(PG_REMOVE, &req->wb_flags))
-        return 0;
-    ret = nfs_page_group_lock(req);
-    if (ret)
-        return ret;
     if (test_and_clear_bit(PG_REMOVE, &req->wb_flags))
         nfs_page_set_inode_ref(req, inode);
-    nfs_page_group_unlock(req);
-    return 0;
 }

 static struct nfs_page *
@@ -232,36 +222,6 @@ static struct nfs_page *nfs_page_find_head_request(struct page *page)
     return req;
 }

-static struct nfs_page *nfs_find_and_lock_page_request(struct page *page)
-{
-    struct inode *inode = page_file_mapping(page)->host;
-    struct nfs_page *req, *head;
-    int ret;
-
-    for (;;) {
-        req = nfs_page_find_head_request(page);
-        if (!req)
-            return req;
-        head = nfs_page_group_lock_head(req);
-        if (head != req)
-            nfs_release_request(req);
-        if (IS_ERR(head))
-            return head;
-        ret = nfs_cancel_remove_inode(head, inode);
-        if (ret < 0) {
-            nfs_unlock_and_release_request(head);
-            return ERR_PTR(ret);
-        }
-        /* Ensure that nobody removed the request before we locked it */
-        if (head == nfs_page_private_request(page))
-            break;
-        if (PageSwapCache(page))
-            break;
-        nfs_unlock_and_release_request(head);
-    }
-    return head;
-}
-
 /* Adjust the file length if we're writing beyond the end */
 static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
 {
@@ -536,6 +496,57 @@ nfs_join_page_group(struct nfs_page *head, struct inode *inode)
     nfs_destroy_unlinked_subrequests(destroy_list, head, inode);
 }

+/*
+ * nfs_unroll_locks - unlock all newly locked reqs and wait on @req
+ * @head: head request of page group, must be holding head lock
+ * @req: request that couldn't lock and needs to wait on the req bit lock
+ *
+ * This is a helper function for nfs_lock_and_join_requests
+ * returns 0 on success, < 0 on error.
+ */
+static void
+nfs_unroll_locks(struct nfs_page *head, struct nfs_page *req)
+{
+    struct nfs_page *tmp;
+
+    /* relinquish all the locks successfully grabbed this run */
+    for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page) {
+        if (!kref_read(&tmp->wb_kref))
+            continue;
+        nfs_unlock_and_release_request(tmp);
+    }
+}
+
+/*
+ * nfs_page_group_lock_subreq - try to lock a subrequest
+ * @head: head request of page group
+ * @subreq: request to lock
+ *
+ * This is a helper function for nfs_lock_and_join_requests which
+ * must be called with the head request and page group both locked.
+ * On error, it returns with the page group unlocked.
+ */
+static int
+nfs_page_group_lock_subreq(struct nfs_page *head, struct nfs_page *subreq)
+{
+    int ret;
+
+    if (!kref_get_unless_zero(&subreq->wb_kref))
+        return 0;
+    while (!nfs_lock_request(subreq)) {
+        nfs_page_group_unlock(head);
+        ret = nfs_wait_on_request(subreq);
+        if (!ret)
+            ret = nfs_page_group_lock(head);
+        if (ret < 0) {
+            nfs_unroll_locks(head, subreq);
+            nfs_release_request(subreq);
+            return ret;
+        }
+    }
+    return 0;
+}
+
 /*
  * nfs_lock_and_join_requests - join all subreqs to the head req
  * @page: the page used to lookup the "page group" of nfs_page structures
@@ -555,7 +566,7 @@ static struct nfs_page *
 nfs_lock_and_join_requests(struct page *page)
 {
     struct inode *inode = page_file_mapping(page)->host;
-    struct nfs_page *head;
+    struct nfs_page *head, *subreq;
     int ret;

     /*
@@ -563,20 +574,49 @@ nfs_lock_and_join_requests(struct page *page)
      * reference to the whole page group - the group will not be destroyed
      * until the head reference is released.
      */
-    head = nfs_find_and_lock_page_request(page);
+retry:
+    head = nfs_page_find_head_request(page);
     if (IS_ERR_OR_NULL(head))
         return head;

-    /* lock each request in the page group */
-    ret = nfs_page_group_lock_subrequests(head);
-    if (ret < 0) {
-        nfs_unlock_and_release_request(head);
-        return ERR_PTR(ret);
+    while (!nfs_lock_request(head)) {
+        ret = nfs_wait_on_request(head);
+        if (ret < 0) {
+            nfs_release_request(head);
+            return ERR_PTR(ret);
+        }
     }

+    ret = nfs_page_group_lock(head);
+    if (ret < 0)
+        goto out_unlock;
+
+    /* Ensure that nobody removed the request before we locked it */
+    if (head != nfs_page_private_request(page) && !PageSwapCache(page)) {
+        nfs_page_group_unlock(head);
+        nfs_unlock_and_release_request(head);
+        goto retry;
+    }
+
+    nfs_cancel_remove_inode(head, inode);
+
+    /* lock each request in the page group */
+    for (subreq = head->wb_this_page;
+         subreq != head;
+         subreq = subreq->wb_this_page) {
+        ret = nfs_page_group_lock_subreq(head, subreq);
+        if (ret < 0)
+            goto out_unlock;
+    }
+
+    nfs_page_group_unlock(head);
+
     nfs_join_page_group(head, inode);

     return head;
+
+out_unlock:
+    nfs_unlock_and_release_request(head);
+    return ERR_PTR(ret);
 }

 static void nfs_write_error(struct nfs_page *req, int error)
@@ -769,7 +809,8 @@ static void nfs_inode_remove_request(struct nfs_page *req)
     struct nfs_inode *nfsi = NFS_I(inode);
     struct nfs_page *head;

-    if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
+    nfs_page_group_lock(req);
+    if (nfs_page_group_sync_on_bit_locked(req, PG_REMOVE)) {
         head = req->wb_head;

         spin_lock(&mapping->private_lock);
@@ -780,6 +821,7 @@ static void nfs_inode_remove_request(struct nfs_page *req)
         }
         spin_unlock(&mapping->private_lock);
     }
+    nfs_page_group_unlock(req);

     if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
         nfs_release_request(req);
@@ -144,11 +144,11 @@ extern int nfs_wait_on_request(struct nfs_page *);
 extern void nfs_unlock_request(struct nfs_page *req);
 extern void nfs_unlock_and_release_request(struct nfs_page *);
 extern struct nfs_page *nfs_page_group_lock_head(struct nfs_page *req);
-extern int nfs_page_group_lock_subrequests(struct nfs_page *head);
 extern void nfs_join_page_group(struct nfs_page *head, struct inode *inode);
 extern int nfs_page_group_lock(struct nfs_page *);
 extern void nfs_page_group_unlock(struct nfs_page *);
 extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
+extern bool nfs_page_group_sync_on_bit_locked(struct nfs_page *, unsigned int);
 extern int nfs_page_set_headlock(struct nfs_page *req);
 extern void nfs_page_clear_headlock(struct nfs_page *req);
 extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *);