Import of kernel-5.14.0-570.41.1.el9_6
parent 51bcb7f7c2
commit a3bb58653c
@@ -12,7 +12,7 @@ RHEL_MINOR = 6
#
# Use this spot to avoid future merge conflicts.
# Do not trim this comment.
RHEL_RELEASE = 570.39.1
RHEL_RELEASE = 570.41.1
#
# ZSTREAM
@@ -196,7 +196,7 @@ static void tce_iommu_userspace_view_free(struct iommu_table *tbl)
static void tce_free_pSeries(struct iommu_table *tbl)
{
if (!tbl->it_userspace)
if (tbl->it_userspace)
tce_iommu_userspace_view_free(tbl);
}
@@ -41,9 +41,6 @@ static void crypto_finalize_request(struct crypto_engine *engine,
struct crypto_async_request *req, int err)
{
unsigned long flags;
bool finalize_req = false;
int ret;
struct crypto_engine_ctx *enginectx;
/*
* If hardware cannot enqueue more requests
@@ -53,21 +50,11 @@ static void crypto_finalize_request(struct crypto_engine *engine,
if (!engine->retry_support) {
spin_lock_irqsave(&engine->queue_lock, flags);
if (engine->cur_req == req) {
finalize_req = true;
engine->cur_req = NULL;
}
spin_unlock_irqrestore(&engine->queue_lock, flags);
}
if (finalize_req || engine->retry_support) {
enginectx = crypto_tfm_ctx(req->tfm);
if (enginectx->op.prepare_request &&
enginectx->op.unprepare_request) {
ret = enginectx->op.unprepare_request(engine, req);
if (ret)
dev_err(engine->dev, "failed to unprepare request\n");
}
}
req->complete(req, err);
kthread_queue_work(engine->kworker, &engine->pump_requests);
@@ -160,7 +147,7 @@ start_request:
ret = engine->prepare_crypt_hardware(engine);
if (ret) {
dev_err(engine->dev, "failed to prepare crypt hardware\n");
goto req_err_2;
goto req_err_1;
}
}
@@ -170,16 +157,7 @@ start_request:
op = &alg->op;
} else {
enginectx = crypto_tfm_ctx(async_req->tfm);
op = &enginectx->op;
if (op->prepare_request) {
ret = op->prepare_request(engine, async_req);
if (ret) {
dev_err(engine->dev, "failed to prepare request: %d\n",
ret);
goto req_err_2;
}
}
if (!op->do_one_request) {
dev_err(engine->dev, "failed to do request\n");
ret = -EINVAL;
@@ -203,18 +181,6 @@ start_request:
ret);
goto req_err_1;
}
/*
* If retry mechanism is supported,
* unprepare current request and
* enqueue it back into crypto-engine queue.
*/
if (enginectx->op.unprepare_request) {
ret = enginectx->op.unprepare_request(engine,
async_req);
if (ret)
dev_err(engine->dev,
"failed to unprepare request\n");
}
spin_lock_irqsave(&engine->queue_lock, flags);
/*
* If hardware was unable to execute request, enqueue it
@@ -230,13 +196,6 @@ start_request:
goto retry;
req_err_1:
if (enginectx->op.unprepare_request) {
ret = enginectx->op.unprepare_request(engine, async_req);
if (ret)
dev_err(engine->dev, "failed to unprepare request\n");
}
req_err_2:
async_req->complete(async_req, ret);
retry:
@@ -28,6 +28,9 @@ struct tegra_aes_ctx {
u32 ivsize;
u32 key1_id;
u32 key2_id;
u32 keylen;
u8 key1[AES_MAX_KEY_SIZE];
u8 key2[AES_MAX_KEY_SIZE];
};
struct tegra_aes_reqctx {
@@ -43,8 +46,9 @@ struct tegra_aead_ctx {
struct tegra_se *se;
unsigned int authsize;
u32 alg;
u32 keylen;
u32 key_id;
u32 keylen;
u8 key[AES_MAX_KEY_SIZE];
};
struct tegra_aead_reqctx {
@@ -56,8 +60,8 @@ struct tegra_aead_reqctx {
unsigned int cryptlen;
unsigned int authsize;
bool encrypt;
u32 config;
u32 crypto_config;
u32 config;
u32 key_id;
u32 iv[4];
u8 authdata[16];
@@ -67,6 +71,8 @@ struct tegra_cmac_ctx {
struct tegra_se *se;
unsigned int alg;
u32 key_id;
u32 keylen;
u8 key[AES_MAX_KEY_SIZE];
struct crypto_shash *fallback_tfm;
};
@@ -260,17 +266,13 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
struct tegra_aes_reqctx *rctx = skcipher_request_ctx(req);
struct tegra_se *se = ctx->se;
unsigned int cmdlen;
unsigned int cmdlen, key1_id, key2_id;
int ret;
rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_AES_BUFLEN,
&rctx->datbuf.addr, GFP_KERNEL);
if (!rctx->datbuf.buf)
return -ENOMEM;
rctx->datbuf.size = SE_AES_BUFLEN;
rctx->iv = (u32 *)req->iv;
rctx->iv = (ctx->alg == SE_ALG_ECB) ? NULL : (u32 *)req->iv;
rctx->len = req->cryptlen;
key1_id = ctx->key1_id;
key2_id = ctx->key2_id;
/* Pad input to AES Block size */
if (ctx->alg != SE_ALG_XTS) {
@@ -278,20 +280,59 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
rctx->len += AES_BLOCK_SIZE - (rctx->len % AES_BLOCK_SIZE);
}
rctx->datbuf.size = rctx->len;
rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
&rctx->datbuf.addr, GFP_KERNEL);
if (!rctx->datbuf.buf) {
ret = -ENOMEM;
goto out_finalize;
}
scatterwalk_map_and_copy(rctx->datbuf.buf, req->src, 0, req->cryptlen, 0);
rctx->config = tegra234_aes_cfg(ctx->alg, rctx->encrypt);
rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, rctx->encrypt);
if (!key1_id) {
ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key1,
ctx->keylen, ctx->alg, &key1_id);
if (ret)
goto out;
}
rctx->crypto_config |= SE_AES_KEY_INDEX(key1_id);
if (ctx->alg == SE_ALG_XTS) {
if (!key2_id) {
ret = tegra_key_submit_reserved_xts(ctx->se, ctx->key2,
ctx->keylen, ctx->alg, &key2_id);
if (ret)
goto out;
}
rctx->crypto_config |= SE_AES_KEY2_INDEX(key2_id);
}
/* Prepare the command and submit for execution */
cmdlen = tegra_aes_prep_cmd(ctx, rctx);
ret = tegra_se_host1x_submit(se, cmdlen);
ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
/* Copy the result */
tegra_aes_update_iv(req, ctx);
scatterwalk_map_and_copy(rctx->datbuf.buf, req->dst, 0, req->cryptlen, 1);
out:
/* Free the buffer */
dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
dma_free_coherent(ctx->se->dev, rctx->datbuf.size,
rctx->datbuf.buf, rctx->datbuf.addr);
if (tegra_key_is_reserved(key1_id))
tegra_key_invalidate_reserved(ctx->se, key1_id, ctx->alg);
if (tegra_key_is_reserved(key2_id))
tegra_key_invalidate_reserved(ctx->se, key2_id, ctx->alg);
out_finalize:
crypto_finalize_skcipher_request(se->engine, req, ret);
return 0;
@@ -313,6 +354,7 @@ static int tegra_aes_cra_init(struct crypto_skcipher *tfm)
ctx->se = se_alg->se_dev;
ctx->key1_id = 0;
ctx->key2_id = 0;
ctx->keylen = 0;
algname = crypto_tfm_alg_name(&tfm->base);
ret = se_algname_to_algid(algname);
@@ -341,13 +383,20 @@ static int tegra_aes_setkey(struct crypto_skcipher *tfm,
const u8 *key, u32 keylen)
{
struct tegra_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
int ret;
if (aes_check_keylen(keylen)) {
dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
return -EINVAL;
}
return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id);
ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key1_id);
if (ret) {
ctx->keylen = keylen;
memcpy(ctx->key1, key, keylen);
}
return 0;
}
static int tegra_xts_setkey(struct crypto_skcipher *tfm,
@@ -365,11 +414,17 @@ static int tegra_xts_setkey(struct crypto_skcipher *tfm,
ret = tegra_key_submit(ctx->se, key, len,
ctx->alg, &ctx->key1_id);
if (ret)
return ret;
if (ret) {
ctx->keylen = len;
memcpy(ctx->key1, key, len);
}
return tegra_key_submit(ctx->se, key + len, len,
ret = tegra_key_submit(ctx->se, key + len, len,
ctx->alg, &ctx->key2_id);
if (ret) {
ctx->keylen = len;
memcpy(ctx->key2, key + len, len);
}
return 0;
}
@@ -444,12 +499,6 @@ static int tegra_aes_crypt(struct skcipher_request *req, bool encrypt)
return 0;
rctx->encrypt = encrypt;
rctx->config = tegra234_aes_cfg(ctx->alg, encrypt);
rctx->crypto_config = tegra234_aes_crypto_cfg(ctx->alg, encrypt);
rctx->crypto_config |= SE_AES_KEY_INDEX(ctx->key1_id);
if (ctx->key2_id)
rctx->crypto_config |= SE_AES_KEY2_INDEX(ctx->key2_id);
return crypto_transfer_skcipher_request_to_engine(ctx->se->engine, req);
}
@@ -715,11 +764,11 @@ static int tegra_gcm_do_gmac(struct tegra_aead_ctx *ctx, struct tegra_aead_reqct
rctx->config = tegra234_aes_cfg(SE_ALG_GMAC, rctx->encrypt);
rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GMAC, rctx->encrypt) |
SE_AES_KEY_INDEX(ctx->key_id);
SE_AES_KEY_INDEX(rctx->key_id);
cmdlen = tegra_gmac_prep_cmd(ctx, rctx);
return tegra_se_host1x_submit(se, cmdlen);
return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
}
static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx *rctx)
@@ -732,11 +781,11 @@ static int tegra_gcm_do_crypt(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc
rctx->config = tegra234_aes_cfg(SE_ALG_GCM, rctx->encrypt);
rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM, rctx->encrypt) |
SE_AES_KEY_INDEX(ctx->key_id);
SE_AES_KEY_INDEX(rctx->key_id);
/* Prepare command and submit */
cmdlen = tegra_gcm_crypt_prep_cmd(ctx, rctx);
ret = tegra_se_host1x_submit(se, cmdlen);
ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
if (ret)
return ret;
@@ -755,11 +804,11 @@ static int tegra_gcm_do_final(struct tegra_aead_ctx *ctx, struct tegra_aead_reqc
rctx->config = tegra234_aes_cfg(SE_ALG_GCM_FINAL, rctx->encrypt);
rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_GCM_FINAL, rctx->encrypt) |
SE_AES_KEY_INDEX(ctx->key_id);
SE_AES_KEY_INDEX(rctx->key_id);
/* Prepare command and submit */
cmdlen = tegra_gcm_prep_final_cmd(se, cpuvaddr, rctx);
ret = tegra_se_host1x_submit(se, cmdlen);
ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
if (ret)
return ret;
@@ -886,12 +935,12 @@ static int tegra_ccm_do_cbcmac(struct tegra_aead_ctx *ctx, struct tegra_aead_req
rctx->config = tegra234_aes_cfg(SE_ALG_CBC_MAC, rctx->encrypt);
rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CBC_MAC,
rctx->encrypt) |
SE_AES_KEY_INDEX(ctx->key_id);
SE_AES_KEY_INDEX(rctx->key_id);
/* Prepare command and submit */
cmdlen = tegra_cbcmac_prep_cmd(ctx, rctx);
return tegra_se_host1x_submit(se, cmdlen);
return tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
}
static int tegra_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
@@ -1073,7 +1122,7 @@ static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx
rctx->config = tegra234_aes_cfg(SE_ALG_CTR, rctx->encrypt);
rctx->crypto_config = tegra234_aes_crypto_cfg(SE_ALG_CTR, rctx->encrypt) |
SE_AES_KEY_INDEX(ctx->key_id);
SE_AES_KEY_INDEX(rctx->key_id);
/* Copy authdata in the top of buffer for encryption/decryption */
if (rctx->encrypt)
@@ -1098,7 +1147,7 @@ static int tegra_ccm_do_ctr(struct tegra_aead_ctx *ctx, struct tegra_aead_reqctx
/* Prepare command and submit */
cmdlen = tegra_ctr_prep_cmd(ctx, rctx);
ret = tegra_se_host1x_submit(se, cmdlen);
ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
if (ret)
return ret;
@@ -1117,6 +1166,11 @@ static int tegra_ccm_crypt_init(struct aead_request *req, struct tegra_se *se,
rctx->assoclen = req->assoclen;
rctx->authsize = crypto_aead_authsize(tfm);
if (rctx->encrypt)
rctx->cryptlen = req->cryptlen;
else
rctx->cryptlen = req->cryptlen - rctx->authsize;
memcpy(iv, req->iv, 16);
ret = tegra_ccm_check_iv(iv);
@@ -1145,30 +1199,35 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
struct tegra_se *se = ctx->se;
int ret;
ret = tegra_ccm_crypt_init(req, se, rctx);
if (ret)
goto out_finalize;
rctx->key_id = ctx->key_id;
/* Allocate buffers required */
rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100;
rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size,
&rctx->inbuf.addr, GFP_KERNEL);
if (!rctx->inbuf.buf)
return -ENOMEM;
goto out_finalize;
rctx->inbuf.size = SE_AES_BUFLEN;
rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100;
rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size,
&rctx->outbuf.addr, GFP_KERNEL);
if (!rctx->outbuf.buf) {
ret = -ENOMEM;
goto outbuf_err;
goto out_free_inbuf;
}
rctx->outbuf.size = SE_AES_BUFLEN;
ret = tegra_ccm_crypt_init(req, se, rctx);
if (ret)
goto out;
if (!ctx->key_id) {
ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key,
ctx->keylen, ctx->alg, &rctx->key_id);
if (ret)
goto out;
}
if (rctx->encrypt) {
rctx->cryptlen = req->cryptlen;
/* CBC MAC Operation */
ret = tegra_ccm_compute_auth(ctx, rctx);
if (ret)
@@ -1179,10 +1238,6 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
if (ret)
goto out;
} else {
rctx->cryptlen = req->cryptlen - ctx->authsize;
if (ret)
goto out;
/* CTR operation */
ret = tegra_ccm_do_ctr(ctx, rctx);
if (ret)
@@ -1195,13 +1250,17 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
}
out:
dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
dma_free_coherent(ctx->se->dev, rctx->inbuf.size,
rctx->outbuf.buf, rctx->outbuf.addr);
outbuf_err:
dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
out_free_inbuf:
dma_free_coherent(ctx->se->dev, rctx->outbuf.size,
rctx->inbuf.buf, rctx->inbuf.addr);
if (tegra_key_is_reserved(rctx->key_id))
tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
out_finalize:
crypto_finalize_aead_request(ctx->se->engine, req, ret);
return 0;
@@ -1215,23 +1274,6 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
struct tegra_aead_reqctx *rctx = aead_request_ctx(req);
int ret;
/* Allocate buffers required */
rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
&rctx->inbuf.addr, GFP_KERNEL);
if (!rctx->inbuf.buf)
return -ENOMEM;
rctx->inbuf.size = SE_AES_BUFLEN;
rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, SE_AES_BUFLEN,
&rctx->outbuf.addr, GFP_KERNEL);
if (!rctx->outbuf.buf) {
ret = -ENOMEM;
goto outbuf_err;
}
rctx->outbuf.size = SE_AES_BUFLEN;
rctx->src_sg = req->src;
rctx->dst_sg = req->dst;
rctx->assoclen = req->assoclen;
@@ -1245,6 +1287,32 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
rctx->iv[3] = (1 << 24);
rctx->key_id = ctx->key_id;
/* Allocate buffers required */
rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen;
rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size,
&rctx->inbuf.addr, GFP_KERNEL);
if (!rctx->inbuf.buf) {
ret = -ENOMEM;
goto out_finalize;
}
rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen;
rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size,
&rctx->outbuf.addr, GFP_KERNEL);
if (!rctx->outbuf.buf) {
ret = -ENOMEM;
goto out_free_inbuf;
}
if (!ctx->key_id) {
ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key,
ctx->keylen, ctx->alg, &rctx->key_id);
if (ret)
goto out;
}
/* If there is associated data perform GMAC operation */
if (rctx->assoclen) {
ret = tegra_gcm_do_gmac(ctx, rctx);
@@ -1268,14 +1336,17 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
ret = tegra_gcm_do_verify(ctx->se, rctx);
out:
dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
dma_free_coherent(ctx->se->dev, rctx->outbuf.size,
rctx->outbuf.buf, rctx->outbuf.addr);
outbuf_err:
dma_free_coherent(ctx->se->dev, SE_AES_BUFLEN,
out_free_inbuf:
dma_free_coherent(ctx->se->dev, rctx->inbuf.size,
rctx->inbuf.buf, rctx->inbuf.addr);
/* Finalize the request if there are no errors */
if (tegra_key_is_reserved(rctx->key_id))
tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
out_finalize:
crypto_finalize_aead_request(ctx->se->engine, req, ret);
return 0;
@@ -1297,6 +1368,7 @@ static int tegra_aead_cra_init(struct crypto_aead *tfm)
ctx->se = se_alg->se_dev;
ctx->key_id = 0;
ctx->keylen = 0;
ret = se_algname_to_algid(algname);
if (ret < 0) {
@@ -1378,13 +1450,20 @@ static int tegra_aead_setkey(struct crypto_aead *tfm,
const u8 *key, u32 keylen)
{
struct tegra_aead_ctx *ctx = crypto_aead_ctx(tfm);
int ret;
if (aes_check_keylen(keylen)) {
dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
return -EINVAL;
}
return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
if (ret) {
ctx->keylen = keylen;
memcpy(ctx->key, key, keylen);
}
return 0;
}
static unsigned int tegra_cmac_prep_cmd(struct tegra_cmac_ctx *ctx,
@@ -1458,6 +1537,35 @@ static void tegra_cmac_paste_result(struct tegra_se *se, struct tegra_cmac_reqct
se->base + se->hw->regs->result + (i * 4));
}
static int tegra_cmac_do_init(struct ahash_request *req)
{
struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_se *se = ctx->se;
int i;
rctx->total_len = 0;
rctx->datbuf.size = 0;
rctx->residue.size = 0;
rctx->key_id = ctx->key_id;
rctx->task |= SHA_FIRST;
rctx->blk_size = crypto_ahash_blocksize(tfm);
rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2,
&rctx->residue.addr, GFP_KERNEL);
if (!rctx->residue.buf)
return -ENOMEM;
rctx->residue.size = 0;
/* Clear any previous result */
for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
writel(0, se->base + se->hw->regs->result + (i * 4));
return 0;
}
static int tegra_cmac_do_update(struct ahash_request *req)
{
struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
@@ -1485,7 +1593,7 @@ static int tegra_cmac_do_update(struct ahash_request *req)
rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue;
rctx->total_len += rctx->datbuf.size;
rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
rctx->crypto_config = SE_AES_KEY_INDEX(ctx->key_id);
rctx->crypto_config = SE_AES_KEY_INDEX(rctx->key_id);
/*
* Keep one block and residue bytes in residue and
@@ -1499,6 +1607,11 @@ static int tegra_cmac_do_update(struct ahash_request *req)
return 0;
}
rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
&rctx->datbuf.addr, GFP_KERNEL);
if (!rctx->datbuf.buf)
return -ENOMEM;
/* Copy the previous residue first */
if (rctx->residue.size)
memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
@@ -1513,23 +1626,19 @@ static int tegra_cmac_do_update(struct ahash_request *req)
rctx->residue.size = nresidue;
/*
* If this is not the first 'update' call, paste the previous copied
* If this is not the first task, paste the previous copied
* intermediate results to the registers so that it gets picked up.
* This is to support the import/export functionality.
*/
if (!(rctx->task & SHA_FIRST))
tegra_cmac_paste_result(ctx->se, rctx);
cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
ret = tegra_se_host1x_submit(se, cmdlen);
/*
* If this is not the final update, copy the intermediate results
* from the registers so that it can be used in the next 'update'
* call. This is to support the import/export functionality.
*/
if (!(rctx->task & SHA_FINAL))
tegra_cmac_copy_result(ctx->se, rctx);
tegra_cmac_copy_result(ctx->se, rctx);
dma_free_coherent(ctx->se->dev, rctx->datbuf.size,
rctx->datbuf.buf, rctx->datbuf.addr);
return ret;
}
@@ -1545,17 +1654,34 @@ static int tegra_cmac_do_final(struct ahash_request *req)
if (!req->nbytes && !rctx->total_len && ctx->fallback_tfm) {
return crypto_shash_tfm_digest(ctx->fallback_tfm,
rctx->datbuf.buf, 0, req->result);
NULL, 0, req->result);
}
if (rctx->residue.size) {
rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size,
&rctx->datbuf.addr, GFP_KERNEL);
if (!rctx->datbuf.buf) {
ret = -ENOMEM;
goto out_free;
}
memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
}
memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
rctx->datbuf.size = rctx->residue.size;
rctx->total_len += rctx->residue.size;
rctx->config = tegra234_aes_cfg(SE_ALG_CMAC, 0);
/*
* If this is not the first task, paste the previous copied
* intermediate results to the registers so that it gets picked up.
*/
if (!(rctx->task & SHA_FIRST))
tegra_cmac_paste_result(ctx->se, rctx);
/* Prepare command and submit */
cmdlen = tegra_cmac_prep_cmd(ctx, rctx);
ret = tegra_se_host1x_submit(se, cmdlen);
ret = tegra_se_host1x_submit(se, se->cmdbuf, cmdlen);
if (ret)
goto out;
@@ -1567,8 +1693,10 @@ static int tegra_cmac_do_final(struct ahash_request *req)
writel(0, se->base + se->hw->regs->result + (i * 4));
out:
dma_free_coherent(se->dev, SE_SHA_BUFLEN,
rctx->datbuf.buf, rctx->datbuf.addr);
if (rctx->residue.size)
dma_free_coherent(se->dev, rctx->datbuf.size,
rctx->datbuf.buf, rctx->datbuf.addr);
out_free:
dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm) * 2,
rctx->residue.buf, rctx->residue.addr);
return ret;
@@ -1581,17 +1709,41 @@ static int tegra_cmac_do_one_req(struct crypto_engine *engine, void *areq)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_se *se = ctx->se;
int ret;
int ret = 0;
if (rctx->task & SHA_INIT) {
ret = tegra_cmac_do_init(req);
if (ret)
goto out;
rctx->task &= ~SHA_INIT;
}
if (!ctx->key_id) {
ret = tegra_key_submit_reserved_aes(ctx->se, ctx->key,
ctx->keylen, ctx->alg, &rctx->key_id);
if (ret)
goto out;
}
if (rctx->task & SHA_UPDATE) {
ret = tegra_cmac_do_update(req);
if (ret)
goto out;
rctx->task &= ~SHA_UPDATE;
}
if (rctx->task & SHA_FINAL) {
ret = tegra_cmac_do_final(req);
if (ret)
goto out;
rctx->task &= ~SHA_FINAL;
}
out:
if (tegra_key_is_reserved(rctx->key_id))
tegra_key_invalidate_reserved(ctx->se, rctx->key_id, ctx->alg);
crypto_finalize_hash_request(se->engine, req, ret);
@@ -1633,6 +1785,7 @@ static int tegra_cmac_cra_init(struct crypto_tfm *tfm)
ctx->se = se_alg->se_dev;
ctx->key_id = 0;
ctx->keylen = 0;
ret = se_algname_to_algid(algname);
if (ret < 0) {
@@ -1657,51 +1810,11 @@ static void tegra_cmac_cra_exit(struct crypto_tfm *tfm)
tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
}
static int tegra_cmac_init(struct ahash_request *req)
{
struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_se *se = ctx->se;
int i;
rctx->total_len = 0;
rctx->datbuf.size = 0;
rctx->residue.size = 0;
rctx->task = SHA_FIRST;
rctx->blk_size = crypto_ahash_blocksize(tfm);
rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2,
&rctx->residue.addr, GFP_KERNEL);
if (!rctx->residue.buf)
goto resbuf_fail;
rctx->residue.size = 0;
rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN,
&rctx->datbuf.addr, GFP_KERNEL);
if (!rctx->datbuf.buf)
goto datbuf_fail;
rctx->datbuf.size = 0;
/* Clear any previous result */
for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
writel(0, se->base + se->hw->regs->result + (i * 4));
return 0;
datbuf_fail:
dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf,
rctx->residue.addr);
resbuf_fail:
return -ENOMEM;
}
static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
int ret;
if (aes_check_keylen(keylen)) {
dev_dbg(ctx->se->dev, "invalid key length (%d)\n", keylen);
@@ -1711,7 +1824,24 @@ static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
if (ctx->fallback_tfm)
crypto_shash_setkey(ctx->fallback_tfm, key, keylen);
return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
if (ret) {
ctx->keylen = keylen;
memcpy(ctx->key, key, keylen);
}
return 0;
}
static int tegra_cmac_init(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
rctx->task = SHA_INIT;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
static int tegra_cmac_update(struct ahash_request *req)
@@ -1753,8 +1883,7 @@ static int tegra_cmac_digest(struct ahash_request *req)
struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
tegra_cmac_init(req);
rctx->task |= SHA_UPDATE | SHA_FINAL;
rctx->task |= SHA_INIT | SHA_UPDATE | SHA_FINAL;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
@@ -34,6 +34,7 @@ struct tegra_sha_reqctx {
struct tegra_se_datbuf datbuf;
struct tegra_se_datbuf residue;
struct tegra_se_datbuf digest;
struct tegra_se_datbuf intr_res;
unsigned int alg;
unsigned int config;
unsigned int total_len;
@@ -211,9 +212,62 @@ static int tegra_sha_fallback_export(struct ahash_request *req, void *out)
return crypto_ahash_export(&rctx->fallback_req, out);
}
static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr,
static int tegra_se_insert_hash_result(struct tegra_sha_ctx *ctx, u32 *cpuvaddr,
struct tegra_sha_reqctx *rctx)
{
__be32 *res_be = (__be32 *)rctx->intr_res.buf;
u32 *res = (u32 *)rctx->intr_res.buf;
int i = 0, j;
cpuvaddr[i++] = 0;
cpuvaddr[i++] = host1x_opcode_setpayload(HASH_RESULT_REG_COUNT);
cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_HASH_RESULT);
for (j = 0; j < HASH_RESULT_REG_COUNT; j++) {
int idx = j;
/*
* The initial, intermediate and final hash value of SHA-384, SHA-512
* in SHA_HASH_RESULT registers follow the below layout of bytes.
*
* +---------------+------------+
* | HASH_RESULT_0 | B4...B7 |
* +---------------+------------+
* | HASH_RESULT_1 | B0...B3 |
* +---------------+------------+
* | HASH_RESULT_2 | B12...B15 |
* +---------------+------------+
* | HASH_RESULT_3 | B8...B11 |
* +---------------+------------+
* | ...... |
* +---------------+------------+
* | HASH_RESULT_14| B60...B63 |
* +---------------+------------+
* | HASH_RESULT_15| B56...B59 |
* +---------------+------------+
*
*/
if (ctx->alg == SE_ALG_SHA384 || ctx->alg == SE_ALG_SHA512)
idx = (j % 2) ? j - 1 : j + 1;
/* For SHA-1, SHA-224, SHA-256, SHA-384, SHA-512 the initial
* intermediate and final hash value when stored in
* SHA_HASH_RESULT registers, the byte order is NOT in
* little-endian.
*/
if (ctx->alg <= SE_ALG_SHA512)
cpuvaddr[i++] = be32_to_cpu(res_be[idx]);
else
cpuvaddr[i++] = res[idx];
}
return i;
}
static int tegra_sha_prep_cmd(struct tegra_sha_ctx *ctx, u32 *cpuvaddr,
struct tegra_sha_reqctx *rctx)
{
struct tegra_se *se = ctx->se;
u64 msg_len, msg_left;
int i = 0;
@@ -241,7 +295,7 @@ static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr,
cpuvaddr[i++] = upper_32_bits(msg_left);
cpuvaddr[i++] = 0;
cpuvaddr[i++] = 0;
cpuvaddr[i++] = host1x_opcode_setpayload(6);
cpuvaddr[i++] = host1x_opcode_setpayload(2);
cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_CFG);
cpuvaddr[i++] = rctx->config;
||||
@ -249,15 +303,29 @@ static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr,
|
||||
cpuvaddr[i++] = SE_SHA_TASK_HASH_INIT;
|
||||
rctx->task &= ~SHA_FIRST;
|
||||
} else {
|
||||
cpuvaddr[i++] = 0;
|
||||
/*
|
||||
* If it isn't the first task, program the HASH_RESULT register
|
||||
* with the intermediate result from the previous task
|
||||
*/
|
||||
i += tegra_se_insert_hash_result(ctx, cpuvaddr + i, rctx);
|
||||
}
|
||||
|
||||
cpuvaddr[i++] = host1x_opcode_setpayload(4);
|
||||
cpuvaddr[i++] = se_host1x_opcode_incr_w(SE_SHA_IN_ADDR);
|
||||
cpuvaddr[i++] = rctx->datbuf.addr;
|
||||
cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->datbuf.addr)) |
|
||||
SE_ADDR_HI_SZ(rctx->datbuf.size));
|
||||
cpuvaddr[i++] = rctx->digest.addr;
|
||||
cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->digest.addr)) |
|
||||
SE_ADDR_HI_SZ(rctx->digest.size));
|
||||
|
||||
if (rctx->task & SHA_UPDATE) {
|
||||
cpuvaddr[i++] = rctx->intr_res.addr;
|
||||
cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->intr_res.addr)) |
|
||||
SE_ADDR_HI_SZ(rctx->intr_res.size));
|
||||
} else {
|
||||
cpuvaddr[i++] = rctx->digest.addr;
|
||||
cpuvaddr[i++] = (u32)(SE_ADDR_HI_MSB(upper_32_bits(rctx->digest.addr)) |
|
||||
SE_ADDR_HI_SZ(rctx->digest.size));
|
||||
}
|
||||
|
||||
if (rctx->key_id) {
|
||||
cpuvaddr[i++] = host1x_opcode_setpayload(1);
|
||||
cpuvaddr[i++] = se_host1x_opcode_nonincr_w(SE_SHA_CRYPTO_CFG);
|
||||
@@ -266,42 +334,72 @@ static int tegra_sha_prep_cmd(struct tegra_se *se, u32 *cpuvaddr,
cpuvaddr[i++] = host1x_opcode_setpayload(1);
cpuvaddr[i++] = se_host1x_opcode_nonincr_w(SE_SHA_OPERATION);
cpuvaddr[i++] = SE_SHA_OP_WRSTALL |
SE_SHA_OP_START |
cpuvaddr[i++] = SE_SHA_OP_WRSTALL | SE_SHA_OP_START |
SE_SHA_OP_LASTBUF;
cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
dev_dbg(se->dev, "msg len %llu msg left %llu cfg %#x",
msg_len, msg_left, rctx->config);
dev_dbg(se->dev, "msg len %llu msg left %llu sz %zd cfg %#x",
msg_len, msg_left, rctx->datbuf.size, rctx->config);
return i;
}
static void tegra_sha_copy_hash_result(struct tegra_se *se, struct tegra_sha_reqctx *rctx)
static int tegra_sha_do_init(struct ahash_request *req)
{
int i;
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_se *se = ctx->se;
for (i = 0; i < HASH_RESULT_REG_COUNT; i++)
rctx->result[i] = readl(se->base + se->hw->regs->result + (i * 4));
}
if (ctx->fallback)
return tegra_sha_fallback_init(req);
static void tegra_sha_paste_hash_result(struct tegra_se *se, struct tegra_sha_reqctx *rctx)
{
int i;
rctx->total_len = 0;
rctx->datbuf.size = 0;
rctx->residue.size = 0;
rctx->key_id = ctx->key_id;
rctx->task |= SHA_FIRST;
rctx->alg = ctx->alg;
rctx->blk_size = crypto_ahash_blocksize(tfm);
rctx->digest.size = crypto_ahash_digestsize(tfm);
for (i = 0; i < HASH_RESULT_REG_COUNT; i++)
writel(rctx->result[i],
se->base + se->hw->regs->result + (i * 4));
rctx->digest.buf = dma_alloc_coherent(se->dev, rctx->digest.size,
&rctx->digest.addr, GFP_KERNEL);
if (!rctx->digest.buf)
goto digbuf_fail;
rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size,
&rctx->residue.addr, GFP_KERNEL);
if (!rctx->residue.buf)
goto resbuf_fail;
rctx->intr_res.size = HASH_RESULT_REG_COUNT * 4;
rctx->intr_res.buf = dma_alloc_coherent(se->dev, rctx->intr_res.size,
&rctx->intr_res.addr, GFP_KERNEL);
if (!rctx->intr_res.buf)
goto intr_res_fail;
return 0;
intr_res_fail:
dma_free_coherent(se->dev, rctx->residue.size, rctx->residue.buf,
rctx->residue.addr);
resbuf_fail:
dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf,
rctx->digest.addr);
digbuf_fail:
return -ENOMEM;
}
static int tegra_sha_do_update(struct ahash_request *req)
{
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
struct tegra_se *se = ctx->se;
unsigned int nblks, nresidue, size, ret;
u32 *cpuvaddr = ctx->se->cmdbuf->addr;
u32 *cpuvaddr = se->cmdbuf->addr;
nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size;
nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size;
@@ -317,7 +415,6 @@ static int tegra_sha_do_update(struct ahash_request *req)
rctx->src_sg = req->src;
rctx->datbuf.size = (req->nbytes + rctx->residue.size) - nresidue;
rctx->total_len += rctx->datbuf.size;
/*
* If nbytes are less than a block size, copy it residue and
@@ -326,11 +423,16 @@ static int tegra_sha_do_update(struct ahash_request *req)
if (nblks < 1) {
scatterwalk_map_and_copy(rctx->residue.buf + rctx->residue.size,
rctx->src_sg, 0, req->nbytes, 0);
rctx->residue.size += req->nbytes;
return 0;
}
rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
&rctx->datbuf.addr, GFP_KERNEL);
if (!rctx->datbuf.buf)
return -ENOMEM;
/* Copy the previous residue first */
if (rctx->residue.size)
memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
@@ -343,29 +445,16 @@ static int tegra_sha_do_update(struct ahash_request *req)
/* Update residue value with the residue after current block */
rctx->residue.size = nresidue;
rctx->total_len += rctx->datbuf.size;
rctx->config = tegra_sha_get_config(rctx->alg) |
SE_SHA_DST_HASH_REG;
SE_SHA_DST_MEMORY;
/*
* If this is not the first 'update' call, paste the previous copied
* intermediate results to the registers so that it gets picked up.
* This is to support the import/export functionality.
*/
if (!(rctx->task & SHA_FIRST))
tegra_sha_paste_hash_result(ctx->se, rctx);
size = tegra_sha_prep_cmd(ctx, cpuvaddr, rctx);
ret = tegra_se_host1x_submit(se, se->cmdbuf, size);
size = tegra_sha_prep_cmd(ctx->se, cpuvaddr, rctx);
ret = tegra_se_host1x_submit(ctx->se, size);
/*
* If this is not the final update, copy the intermediate results
* from the registers so that it can be used in the next 'update'
* call. This is to support the import/export functionality.
*/
if (!(rctx->task & SHA_FINAL))
tegra_sha_copy_hash_result(ctx->se, rctx);
dma_free_coherent(se->dev, rctx->datbuf.size,
rctx->datbuf.buf, rctx->datbuf.addr);
return ret;
}
@@ -379,16 +468,25 @@ static int tegra_sha_do_final(struct ahash_request *req)
u32 *cpuvaddr = se->cmdbuf->addr;
int size, ret = 0;
memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
if (rctx->residue.size) {
rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->residue.size,
&rctx->datbuf.addr, GFP_KERNEL);
if (!rctx->datbuf.buf) {
ret = -ENOMEM;
goto out_free;
}
memcpy(rctx->datbuf.buf, rctx->residue.buf, rctx->residue.size);
}
rctx->datbuf.size = rctx->residue.size;
rctx->total_len += rctx->residue.size;
rctx->config = tegra_sha_get_config(rctx->alg) |
SE_SHA_DST_MEMORY;
size = tegra_sha_prep_cmd(se, cpuvaddr, rctx);
ret = tegra_se_host1x_submit(se, size);
size = tegra_sha_prep_cmd(ctx, cpuvaddr, rctx);
ret = tegra_se_host1x_submit(se, se->cmdbuf, size);
if (ret)
goto out;
@@ -396,12 +494,18 @@ static int tegra_sha_do_final(struct ahash_request *req)
memcpy(req->result, rctx->digest.buf, rctx->digest.size);
out:
dma_free_coherent(se->dev, SE_SHA_BUFLEN,
rctx->datbuf.buf, rctx->datbuf.addr);
if (rctx->residue.size)
dma_free_coherent(se->dev, rctx->datbuf.size,
rctx->datbuf.buf, rctx->datbuf.addr);
out_free:
dma_free_coherent(se->dev, crypto_ahash_blocksize(tfm),
rctx->residue.buf, rctx->residue.addr);
dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf,
rctx->digest.addr);
dma_free_coherent(se->dev, rctx->intr_res.size, rctx->intr_res.buf,
rctx->intr_res.addr);
return ret;
}
@@ -414,16 +518,31 @@ static int tegra_sha_do_one_req(struct crypto_engine *engine, void *areq)
struct tegra_se *se = ctx->se;
int ret = 0;
if (rctx->task & SHA_INIT) {
ret = tegra_sha_do_init(req);
if (ret)
goto out;
rctx->task &= ~SHA_INIT;
}
if (rctx->task & SHA_UPDATE) {
ret = tegra_sha_do_update(req);
if (ret)
goto out;
rctx->task &= ~SHA_UPDATE;
}
if (rctx->task & SHA_FINAL) {
ret = tegra_sha_do_final(req);
if (ret)
goto out;
rctx->task &= ~SHA_FINAL;
}
out:
crypto_finalize_hash_request(se->engine, req, ret);
return 0;
@@ -497,52 +616,6 @@ static void tegra_sha_cra_exit(struct crypto_tfm *tfm)
tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
}
static int tegra_sha_init(struct ahash_request *req)
{
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_se *se = ctx->se;
if (ctx->fallback)
return tegra_sha_fallback_init(req);
rctx->total_len = 0;
rctx->datbuf.size = 0;
rctx->residue.size = 0;
rctx->key_id = ctx->key_id;
rctx->task = SHA_FIRST;
rctx->alg = ctx->alg;
rctx->blk_size = crypto_ahash_blocksize(tfm);
rctx->digest.size = crypto_ahash_digestsize(tfm);
rctx->digest.buf = dma_alloc_coherent(se->dev, rctx->digest.size,
&rctx->digest.addr, GFP_KERNEL);
if (!rctx->digest.buf)
goto digbuf_fail;
rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size,
&rctx->residue.addr, GFP_KERNEL);
if (!rctx->residue.buf)
goto resbuf_fail;
rctx->datbuf.buf = dma_alloc_coherent(se->dev, SE_SHA_BUFLEN,
&rctx->datbuf.addr, GFP_KERNEL);
if (!rctx->datbuf.buf)
goto datbuf_fail;
return 0;
datbuf_fail:
dma_free_coherent(se->dev, rctx->blk_size, rctx->residue.buf,
rctx->residue.addr);
resbuf_fail:
dma_free_coherent(se->dev, SE_SHA_BUFLEN, rctx->datbuf.buf,
rctx->datbuf.addr);
digbuf_fail:
return -ENOMEM;
}
static int tegra_hmac_fallback_setkey(struct tegra_sha_ctx *ctx, const u8 *key,
unsigned int keylen)
{
@@ -559,13 +632,29 @@ static int tegra_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
int ret;
if (aes_check_keylen(keylen))
return tegra_hmac_fallback_setkey(ctx, key, keylen);
ret = tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
if (ret)
return tegra_hmac_fallback_setkey(ctx, key, keylen);
ctx->fallback = false;
return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
return 0;
}
static int tegra_sha_init(struct ahash_request *req)
{
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
rctx->task = SHA_INIT;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
static int tegra_sha_update(struct ahash_request *req)
@@ -619,8 +708,7 @@ static int tegra_sha_digest(struct ahash_request *req)
if (ctx->fallback)
return tegra_sha_fallback_digest(req);
tegra_sha_init(req);
rctx->task |= SHA_UPDATE | SHA_FINAL;
rctx->task |= SHA_INIT | SHA_UPDATE | SHA_FINAL;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
@@ -115,11 +115,17 @@ static int tegra_key_insert(struct tegra_se *se, const u8 *key,
u32 keylen, u16 slot, u32 alg)
{
const u32 *keyval = (u32 *)key;
u32 *addr = se->cmdbuf->addr, size;
u32 *addr = se->keybuf->addr, size;
int ret;
mutex_lock(&kslt_lock);
size = tegra_key_prep_ins_cmd(se, addr, keyval, keylen, slot, alg);
ret = tegra_se_host1x_submit(se, se->keybuf, size);
return tegra_se_host1x_submit(se, size);
mutex_unlock(&kslt_lock);
return ret;
}
void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg)
@@ -135,6 +141,23 @@ void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg)
tegra_keyslot_free(keyid);
}
void tegra_key_invalidate_reserved(struct tegra_se *se, u32 keyid, u32 alg)
{
u8 zkey[AES_MAX_KEY_SIZE] = {0};
if (!keyid)
return;
/* Overwrite the key with 0s */
tegra_key_insert(se, zkey, AES_MAX_KEY_SIZE, keyid, alg);
}
inline int tegra_key_submit_reserved(struct tegra_se *se, const u8 *key,
u32 keylen, u32 alg, u32 *keyid)
{
return tegra_key_insert(se, key, keylen, *keyid, alg);
}
int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u32 *keyid)
{
int ret;
@@ -143,7 +166,7 @@ int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u3
if (!tegra_key_in_kslt(*keyid)) {
*keyid = tegra_keyslot_alloc();
if (!(*keyid)) {
dev_err(se->dev, "failed to allocate key slot\n");
dev_dbg(se->dev, "failed to allocate key slot\n");
return -ENOMEM;
}
}
@@ -141,7 +141,7 @@ static struct tegra_se_cmdbuf *tegra_se_host1x_bo_alloc(struct tegra_se *se, ssi
return cmdbuf;
}
int tegra_se_host1x_submit(struct tegra_se *se, u32 size)
int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size)
{
struct host1x_job *job;
int ret;
@@ -160,9 +160,9 @@ int tegra_se_host1x_submit(struct tegra_se *se, u32 size)
job->engine_fallback_streamid = se->stream_id;
job->engine_streamid_offset = SE_STREAM_ID;
se->cmdbuf->words = size;
cmdbuf->words = size;
host1x_job_add_gather(job, &se->cmdbuf->bo, size, 0);
host1x_job_add_gather(job, &cmdbuf->bo, size, 0);
ret = host1x_job_pin(job, se->dev);
if (ret) {
@@ -220,14 +220,22 @@ static int tegra_se_client_init(struct host1x_client *client)
goto syncpt_put;
}
se->keybuf = tegra_se_host1x_bo_alloc(se, SZ_4K);
if (!se->keybuf) {
ret = -ENOMEM;
goto cmdbuf_put;
}
ret = se->hw->init_alg(se);
if (ret) {
dev_err(se->dev, "failed to register algorithms\n");
goto cmdbuf_put;
goto keybuf_put;
}
return 0;
keybuf_put:
tegra_se_cmdbuf_put(&se->keybuf->bo);
cmdbuf_put:
tegra_se_cmdbuf_put(&se->cmdbuf->bo);
syncpt_put:
@@ -312,7 +320,6 @@ static int tegra_se_probe(struct platform_device *pdev)
ret = tegra_se_host1x_register(se);
if (ret) {
crypto_engine_stop(se->engine);
crypto_engine_exit(se->engine);
return dev_err_probe(dev, ret, "failed to init host1x params\n");
}
@@ -324,7 +331,6 @@ static void tegra_se_remove(struct platform_device *pdev)
{
struct tegra_se *se = platform_get_drvdata(pdev);
crypto_engine_stop(se->engine);
crypto_engine_exit(se->engine);
host1x_client_unregister(&se->client);
}
@@ -24,6 +24,7 @@
#define SE_STREAM_ID 0x90
#define SE_SHA_CFG 0x4004
#define SE_SHA_IN_ADDR 0x400c
#define SE_SHA_KEY_ADDR 0x4094
#define SE_SHA_KEY_DATA 0x4098
#define SE_SHA_KEYMANIFEST 0x409c
@@ -340,12 +341,14 @@
#define SE_CRYPTO_CTR_REG_COUNT 4
#define SE_MAX_KEYSLOT 15
#define SE_MAX_MEM_ALLOC SZ_4M
#define SE_AES_BUFLEN 0x8000
#define SE_SHA_BUFLEN 0x2000
#define TEGRA_AES_RESERVED_KSLT 14
#define TEGRA_XTS_RESERVED_KSLT 15
#define SHA_FIRST BIT(0)
#define SHA_UPDATE BIT(1)
#define SHA_FINAL BIT(2)
#define SHA_INIT BIT(1)
#define SHA_UPDATE BIT(2)
#define SHA_FINAL BIT(3)
/* Security Engine operation modes */
enum se_aes_alg {
@@ -420,6 +423,7 @@ struct tegra_se {
struct host1x_client client;
struct host1x_channel *channel;
struct tegra_se_cmdbuf *cmdbuf;
struct tegra_se_cmdbuf *keybuf;
struct crypto_engine *engine;
struct host1x_syncpt *syncpt;
struct device *dev;
@@ -501,8 +505,33 @@ void tegra_deinit_aes(struct tegra_se *se);
void tegra_deinit_hash(struct tegra_se *se);
int tegra_key_submit(struct tegra_se *se, const u8 *key,
u32 keylen, u32 alg, u32 *keyid);
int tegra_key_submit_reserved(struct tegra_se *se, const u8 *key,
u32 keylen, u32 alg, u32 *keyid);
void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg);
int tegra_se_host1x_submit(struct tegra_se *se, u32 size);
void tegra_key_invalidate_reserved(struct tegra_se *se, u32 keyid, u32 alg);
int tegra_se_host1x_submit(struct tegra_se *se, struct tegra_se_cmdbuf *cmdbuf, u32 size);
static inline int tegra_key_submit_reserved_aes(struct tegra_se *se, const u8 *key,
u32 keylen, u32 alg, u32 *keyid)
{
*keyid = TEGRA_AES_RESERVED_KSLT;
return tegra_key_submit_reserved(se, key, keylen, alg, keyid);
}
static inline int tegra_key_submit_reserved_xts(struct tegra_se *se, const u8 *key,
u32 keylen, u32 alg, u32 *keyid)
{
*keyid = TEGRA_XTS_RESERVED_KSLT;
return tegra_key_submit_reserved(se, key, keylen, alg, keyid);
}
static inline bool tegra_key_is_reserved(u32 keyid)
{
return ((keyid == TEGRA_AES_RESERVED_KSLT) ||
(keyid == TEGRA_XTS_RESERVED_KSLT));
}
/* HOST1x OPCODES */
static inline u32 host1x_opcode_setpayload(unsigned int payload)
@@ -334,7 +334,7 @@ static long udmabuf_create(struct miscdevice *device,
if (!ubuf)
return -ENOMEM;
pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
pglimit = ((u64)size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
for (i = 0; i < head->count; i++) {
if (!IS_ALIGNED(list[i].offset, PAGE_SIZE))
goto err;
@@ -667,8 +667,7 @@ static int ibmveth_close(struct net_device *netdev)
napi_disable(&adapter->napi);
if (!adapter->pool_config)
netif_tx_stop_all_queues(netdev);
netif_tx_stop_all_queues(netdev);
h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
@@ -776,9 +775,7 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
if (netif_running(dev)) {
restart = 1;
adapter->pool_config = 1;
ibmveth_close(dev);
adapter->pool_config = 0;
}
set_attr = 0;
@@ -860,9 +857,7 @@ static int ibmveth_set_tso(struct net_device *dev, u32 data)
if (netif_running(dev)) {
restart = 1;
adapter->pool_config = 1;
ibmveth_close(dev);
adapter->pool_config = 0;
}
set_attr = 0;
@@ -1512,9 +1507,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
only the buffer pools necessary to hold the new MTU */
if (netif_running(adapter->netdev)) {
need_restart = 1;
adapter->pool_config = 1;
ibmveth_close(adapter->netdev);
adapter->pool_config = 0;
}
/* Look for an active buffer pool that can hold the new MTU */
@@ -1678,7 +1671,6 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
adapter->vdev = dev;
adapter->netdev = netdev;
adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
adapter->pool_config = 0;
ibmveth_init_link_settings(netdev);
netif_napi_add_weight(netdev, &adapter->napi, ibmveth_poll, 16);
@@ -1810,20 +1802,22 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
long value = simple_strtol(buf, NULL, 10);
long rc;
rtnl_lock();
if (attr == &veth_active_attr) {
if (value && !pool->active) {
if (netif_running(netdev)) {
if (ibmveth_alloc_buffer_pool(pool)) {
netdev_err(netdev,
"unable to alloc pool\n");
return -ENOMEM;
rc = -ENOMEM;
goto unlock_err;
}
pool->active = 1;
adapter->pool_config = 1;
ibmveth_close(netdev);
adapter->pool_config = 0;
if ((rc = ibmveth_open(netdev)))
return rc;
rc = ibmveth_open(netdev);
if (rc)
goto unlock_err;
} else {
pool->active = 1;
}
@@ -1843,54 +1837,59 @@ static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
if (i == IBMVETH_NUM_BUFF_POOLS) {
netdev_err(netdev, "no active pool >= MTU\n");
return -EPERM;
rc = -EPERM;
goto unlock_err;
}
if (netif_running(netdev)) {
adapter->pool_config = 1;
ibmveth_close(netdev);
pool->active = 0;
adapter->pool_config = 0;
if ((rc = ibmveth_open(netdev)))
return rc;
rc = ibmveth_open(netdev);
if (rc)
goto unlock_err;
}
pool->active = 0;
}
} else if (attr == &veth_num_attr) {
if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
return -EINVAL;
rc = -EINVAL;
goto unlock_err;
} else {
if (netif_running(netdev)) {
adapter->pool_config = 1;
ibmveth_close(netdev);
adapter->pool_config = 0;
pool->size = value;
if ((rc = ibmveth_open(netdev)))
return rc;
rc = ibmveth_open(netdev);
if (rc)
goto unlock_err;
} else {
pool->size = value;
}
}
} else if (attr == &veth_size_attr) {
if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
return -EINVAL;
rc = -EINVAL;
goto unlock_err;
} else {
if (netif_running(netdev)) {
adapter->pool_config = 1;
ibmveth_close(netdev);
adapter->pool_config = 0;
pool->buff_size = value;
if ((rc = ibmveth_open(netdev)))
return rc;
rc = ibmveth_open(netdev);
if (rc)
goto unlock_err;
} else {
pool->buff_size = value;
}
}
}
rtnl_unlock();
/* kick the interrupt handler to allocate/deallocate pools */
ibmveth_interrupt(netdev->irq, netdev);
return count;
unlock_err:
rtnl_unlock();
return rc;
}
@ -147,7 +147,6 @@ struct ibmveth_adapter {
|
||||
dma_addr_t filter_list_dma;
|
||||
struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
|
||||
struct ibmveth_rx_q rx_queue;
|
||||
int pool_config;
|
||||
int rx_csum;
|
||||
int large_send;
|
||||
bool is_active_trunk;
|
||||
|
@ -96,7 +96,7 @@ static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
 */
static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	mutex_lock(&cq->cq_lock);
	spin_lock(&cq->cq_lock);

	/* free ring buffers and the ring itself */
	idpf_ctlq_dealloc_ring_res(hw, cq);
@ -104,8 +104,7 @@ static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
	/* Set ring_size to 0 to indicate uninitialized queue */
	cq->ring_size = 0;

	mutex_unlock(&cq->cq_lock);
	mutex_destroy(&cq->cq_lock);
	spin_unlock(&cq->cq_lock);
}

/**
@ -173,7 +172,7 @@ int idpf_ctlq_add(struct idpf_hw *hw,

	idpf_ctlq_init_regs(hw, cq, is_rxq);

	mutex_init(&cq->cq_lock);
	spin_lock_init(&cq->cq_lock);

	list_add(&cq->cq_list, &hw->cq_list_head);

@ -272,7 +271,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
	int err = 0;
	int i;

	mutex_lock(&cq->cq_lock);
	spin_lock(&cq->cq_lock);

	/* Ensure there are enough descriptors to send all messages */
	num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
@ -332,7 +331,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
	wr32(hw, cq->reg.tail, cq->next_to_use);

err_unlock:
	mutex_unlock(&cq->cq_lock);
	spin_unlock(&cq->cq_lock);

	return err;
}
@ -364,7 +363,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
	if (*clean_count > cq->ring_size)
		return -EBADR;

	mutex_lock(&cq->cq_lock);
	spin_lock(&cq->cq_lock);

	ntc = cq->next_to_clean;

@ -394,7 +393,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,

	cq->next_to_clean = ntc;

	mutex_unlock(&cq->cq_lock);
	spin_unlock(&cq->cq_lock);

	/* Return number of descriptors actually cleaned */
	*clean_count = i;
@ -432,7 +431,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
	if (*buff_count > 0)
		buffs_avail = true;

	mutex_lock(&cq->cq_lock);
	spin_lock(&cq->cq_lock);

	if (tbp >= cq->ring_size)
		tbp = 0;
@ -521,7 +520,7 @@ post_buffs_out:
		wr32(hw, cq->reg.tail, cq->next_to_post);
	}

	mutex_unlock(&cq->cq_lock);
	spin_unlock(&cq->cq_lock);

	/* return the number of buffers that were not posted */
	*buff_count = *buff_count - i;
@ -549,7 +548,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
	u16 i;

	/* take the lock before we start messing with the ring */
	mutex_lock(&cq->cq_lock);
	spin_lock(&cq->cq_lock);

	ntc = cq->next_to_clean;

@ -608,7 +607,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,

	cq->next_to_clean = ntc;

	mutex_unlock(&cq->cq_lock);
	spin_unlock(&cq->cq_lock);

	*num_q_msg = i;
	if (*num_q_msg == 0)

@ -99,7 +99,7 @@ struct idpf_ctlq_info {

	enum idpf_ctlq_type cq_type;
	int q_id;
	struct mutex cq_lock;	/* control queue lock */
	spinlock_t cq_lock;	/* control queue lock */
	/* used for interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;

@ -2321,8 +2321,12 @@ void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem, u64 size)
	struct idpf_adapter *adapter = hw->back;
	size_t sz = ALIGN(size, 4096);

	mem->va = dma_alloc_coherent(&adapter->pdev->dev, sz,
				     &mem->pa, GFP_KERNEL);
	/* The control queue resources are freed under a spinlock, contiguous
	 * pages will avoid IOMMU remapping and the use vmap (and vunmap in
	 * dma_free_*() path.
	 */
	mem->va = dma_alloc_attrs(&adapter->pdev->dev, sz, &mem->pa,
				  GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS);
	mem->size = sz;

	return mem->va;
@ -2337,8 +2341,8 @@ void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem)
{
	struct idpf_adapter *adapter = hw->back;

	dma_free_coherent(&adapter->pdev->dev, mem->size,
			  mem->va, mem->pa);
	dma_free_attrs(&adapter->pdev->dev, mem->size,
		       mem->va, mem->pa, DMA_ATTR_FORCE_CONTIGUOUS);
	mem->size = 0;
	mem->va = NULL;
	mem->pa = 0;
@ -1066,7 +1066,7 @@ int cifs_close(struct inode *inode, struct file *file)
		if ((cfile->status_file_deleted == false) &&
		    (smb2_can_defer_close(inode, dclose))) {
			if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
				inode->i_ctime = inode->i_mtime = current_time(inode);
				inode->i_mtime = inode_set_ctime_current(inode);
			}
			spin_lock(&cinode->deferred_lock);
			cifs_add_deferred_close(cfile, dclose);
@ -2634,7 +2634,7 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)

@ -50,12 +50,13 @@ void cifs_fscache_fill_coherency(struct inode *inode,
					 struct cifs_fscache_inode_coherency_data *cd)
{
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct timespec64 ctime = inode_get_ctime(inode);

	memset(cd, 0, sizeof(*cd));
	cd->last_write_time_sec = cpu_to_le64(cifsi->netfs.inode.i_mtime.tv_sec);
	cd->last_write_time_nsec = cpu_to_le32(cifsi->netfs.inode.i_mtime.tv_nsec);
	cd->last_change_time_sec = cpu_to_le64(cifsi->netfs.inode.i_ctime.tv_sec);
	cd->last_change_time_nsec = cpu_to_le32(cifsi->netfs.inode.i_ctime.tv_nsec);
	cd->last_change_time_sec = cpu_to_le64(ctime.tv_sec);
	cd->last_change_time_nsec = cpu_to_le32(ctime.tv_nsec);
}

@ -170,7 +170,7 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr,
	else
		inode->i_atime = fattr->cf_atime;
	inode->i_mtime = fattr->cf_mtime;
	inode->i_ctime = fattr->cf_ctime;
	inode_set_ctime_to_ts(inode, fattr->cf_ctime);
	inode->i_rdev = fattr->cf_rdev;
	cifs_nlink_fattr_to_inode(inode, fattr);
	inode->i_uid = fattr->cf_uid;
@ -1902,15 +1902,24 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;
	__u32 dosattr = 0, origattr = 0;
	struct TCP_Server_Info *server;
	struct iattr *attrs = NULL;
	__u32 dosattr = 0, origattr = 0;
	bool rehash = false;

	cifs_dbg(FYI, "cifs_unlink, dir=0x%p, dentry=0x%p\n", dir, dentry);

	if (unlikely(cifs_forced_shutdown(cifs_sb)))
		return -EIO;

	/* Unhash dentry in advance to prevent any concurrent opens */
	spin_lock(&dentry->d_lock);
	if (!d_unhashed(dentry)) {
		__d_drop(dentry);
		rehash = true;
	}
	spin_unlock(&dentry->d_lock);

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		return PTR_ERR(tlink);
@ -1961,7 +1970,8 @@ psx_del_no_retry:
			cifs_drop_nlink(inode);
		}
	} else if (rc == -ENOENT) {
		d_drop(dentry);
		if (simple_positive(dentry))
			d_delete(dentry);
	} else if (rc == -EBUSY) {
		if (server->ops->rename_pending_delete) {
			rc = server->ops->rename_pending_delete(full_path,
@ -2004,9 +2014,9 @@ out_reval:
		cifs_inode = CIFS_I(inode);
		cifs_inode->time = 0;	/* will force revalidate to get info
					   when needed */
		inode->i_ctime = current_time(inode);
		inode_set_ctime_current(inode);
	}
	dir->i_ctime = dir->i_mtime = current_time(dir);
	dir->i_mtime = inode_set_ctime_current(dir);
	cifs_inode = CIFS_I(dir);
	CIFS_I(dir)->time = 0;	/* force revalidate of dir as well */
unlink_out:
@ -2014,6 +2024,8 @@ unlink_out:
	kfree(attrs);
	free_xid(xid);
	cifs_put_tlink(tlink);
	if (rehash)
		d_rehash(dentry);
	return rc;
}

@ -2322,8 +2334,8 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
	 */
	cifsInode->time = 0;

	d_inode(direntry)->i_ctime = inode->i_ctime = inode->i_mtime =
		current_time(inode);
	inode_set_ctime_current(d_inode(direntry));
	inode->i_mtime = inode_set_ctime_current(inode);

rmdir_exit:
	free_dentry_path(page);
@ -2420,6 +2432,7 @@ cifs_rename2(struct mnt_idmap *idmap, struct inode *source_dir,
	struct cifs_sb_info *cifs_sb;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;
	bool rehash = false;
	unsigned int xid;
	int rc, tmprc;
	int retry_count = 0;
@ -2435,6 +2448,17 @@ cifs_rename2(struct mnt_idmap *idmap, struct inode *source_dir,
	if (unlikely(cifs_forced_shutdown(cifs_sb)))
		return -EIO;

	/*
	 * Prevent any concurrent opens on the target by unhashing the dentry.
	 * VFS already unhashes the target when renaming directories.
	 */
	if (d_is_positive(target_dentry) && !d_is_dir(target_dentry)) {
		if (!d_unhashed(target_dentry)) {
			d_drop(target_dentry);
			rehash = true;
		}
	}

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		return PTR_ERR(tlink);
@ -2474,6 +2498,8 @@ cifs_rename2(struct mnt_idmap *idmap, struct inode *source_dir,
		}
	}

	if (!rc)
		rehash = false;
	/*
	 * No-replace is the natural behavior for CIFS, so skip unlink hacks.
	 */
@ -2532,15 +2558,19 @@ unlink_target:
			goto cifs_rename_exit;
		rc = cifs_do_rename(xid, source_dentry, from_name,
				    target_dentry, to_name);
		if (!rc)
			rehash = false;
	}

	/* force revalidate to go get info when needed */
	CIFS_I(source_dir)->time = CIFS_I(target_dir)->time = 0;

	source_dir->i_ctime = source_dir->i_mtime = target_dir->i_ctime =
		target_dir->i_mtime = current_time(source_dir);
	source_dir->i_mtime = target_dir->i_mtime = inode_set_ctime_to_ts(source_dir,
									   inode_set_ctime_current(target_dir));

cifs_rename_exit:
	if (rehash)
		d_rehash(target_dentry);
	kfree(info_buf_source);
	free_dentry_path(page2);
	free_dentry_path(page1);

@ -1443,7 +1443,8 @@ smb2_close_getattr(const unsigned int xid, struct cifs_tcon *tcon,
	if (file_inf.LastWriteTime)
		inode->i_mtime = cifs_NTtimeToUnix(file_inf.LastWriteTime);
	if (file_inf.ChangeTime)
		inode->i_ctime = cifs_NTtimeToUnix(file_inf.ChangeTime);
		inode_set_ctime_to_ts(inode,
				      cifs_NTtimeToUnix(file_inf.ChangeTime));
	if (file_inf.LastAccessTime)
		inode->i_atime = cifs_NTtimeToUnix(file_inf.LastAccessTime);
@ -19,15 +19,9 @@ struct device;

/*
 * struct crypto_engine_op - crypto hardware engine operations
 * @prepare__request: do some prepare if need before handle the current request
 * @unprepare_request: undo any work done by prepare_request()
 * @do_one_request: do encryption for current request
 */
struct crypto_engine_op {
	int (*prepare_request)(struct crypto_engine *engine,
			       void *areq);
	int (*unprepare_request)(struct crypto_engine *engine,
				 void *areq);
	int (*do_one_request)(struct crypto_engine *engine,
			      void *areq);
};
@ -1,3 +1,33 @@
* Thu Aug 28 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-570.41.1.el9_6]
- powerpc/pseries/iommu: Fix kmemleak in TCE table userspace view (Mamatha Inamdar) [RHEL-107002]
- net: ibmveth: make veth_pool_store stop hanging (Mamatha Inamdar) [RHEL-109494]
- ibmveth: Always stop tx queues during close (Mamatha Inamdar) [RHEL-109494]
- smb: client: fix race with concurrent opens in rename(2) (Paulo Alcantara) [RHEL-109723]
- smb: client: fix race with concurrent opens in unlink(2) (Paulo Alcantara) [RHEL-109723]
- smb: convert to ctime accessor functions (Paulo Alcantara) [RHEL-109723]
- crypto: tegra - Fix IV usage for AES ECB (Nirmala Dalvi) [RHEL-107286]
- crypto: tegra - Fix format specifier in tegra_sha_prep_cmd() (Nirmala Dalvi) [RHEL-107286]
- crypto: tegra - Use HMAC fallback when keyslots are full (Nirmala Dalvi) [RHEL-107286]
- crypto: tegra - Reserve keyslots to allocate dynamically (Nirmala Dalvi) [RHEL-107286]
- crypto: tegra - Set IV to NULL explicitly for AES ECB (Nirmala Dalvi) [RHEL-107286]
- crypto: tegra - Fix CMAC intermediate result handling (Nirmala Dalvi) [RHEL-107286]
- crypto: tegra - Fix HASH intermediate result handling (Nirmala Dalvi) [RHEL-107286]
- crypto: tegra - Transfer HASH init function to crypto engine (Nirmala Dalvi) [RHEL-107286]
- crypto: tegra - check return value for hash do_one_req (Nirmala Dalvi) [RHEL-107286]
- crypto: tegra - finalize crypto req on error (Nirmala Dalvi) [RHEL-107286]
- crypto: tegra - Do not use fixed size buffers (Nirmala Dalvi) [RHEL-107286]
- crypto: tegra - Use separate buffer for setkey (Nirmala Dalvi) [RHEL-107286]
- crypto: tegra - remove unneeded crypto_engine_stop() call (Nirmala Dalvi) [RHEL-107286]
- crypto: tegra - remove redundant error check on ret (Nirmala Dalvi) [RHEL-107286]
- crypto: tegra - do not transfer req when tegra init fails (Nirmala Dalvi) [RHEL-107286]
- crypto: engine - Remove prepare/unprepare request (Nirmala Dalvi) [RHEL-107286]
- udmabuf: fix a buf size overflow issue during udmabuf creation (CKI Backport Bot) [RHEL-99746] {CVE-2025-37803}
Resolves: RHEL-107002, RHEL-107286, RHEL-109494, RHEL-109723, RHEL-99746

* Wed Aug 27 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-570.40.1.el9_6]
- idpf: convert control queue mutex to a spinlock (CKI Backport Bot) [RHEL-106054] {CVE-2025-38392}
Resolves: RHEL-106054

* Sat Aug 23 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-570.39.1.el9_6]
- xfrm: interface: fix use-after-free after changing collect_md xfrm interface (CKI Backport Bot) [RHEL-109529] {CVE-2025-38500}
- Merge: net: mana: Fix race of mana_hwc_post_rx_wqe and new hwc response [rhel-9.6.z] (Maxim Levitsky) [RHEL-58904]