Mirror of https://github.com/torvalds/linux.git (synced 2024-11-22 04:38:03 +00:00)
crypto: ahash - remove support for nonzero alignmask
Currently, the ahash API checks the alignment of all key and result buffers
against the algorithm's declared alignmask, and for any unaligned buffers it
falls back to manually aligned temporary buffers.

This is virtually useless, however. First, since it does not apply to the
message, its effect is much more limited than e.g. is the case for the
alignmask for "skcipher". Second, the key and result buffers are given as
virtual addresses and cannot (in general) be DMA'ed into, so drivers end up
having to copy to/from them in software anyway. As a result it's easy to use
memcpy() or the unaligned access helpers.

The crypto_hash_walk_*() helper functions do use the alignmask to align the
message. But with one exception those are only used for shash algorithms
being exposed via the ahash API, not for native ahashes, and aligning the
message is not required in this case, especially now that alignmask support
has been removed from shash. The exception is the n2_core driver, which
doesn't set an alignmask.

In any case, no ahash algorithms actually set a nonzero alignmask anymore.
Therefore, remove support for it from ahash. The benefit is that all the
code to handle "misaligned" buffers in the ahash API goes away, reducing the
overhead of the ahash API. This follows the same change that was made to
shash.

Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 54eea8e290
commit c626910f3f
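To illustrate the point above about memcpy() and the unaligned access helpers, here is a minimal sketch of how a driver can handle the key and result buffers without relying on any alignmask; the myhash_* names and the 256-bit key/digest sizes are made up for illustration and are not part of this patch:

#include <linux/errno.h>
#include <linux/string.h>
#include <asm/unaligned.h>
#include <crypto/internal/hash.h>

struct myhash_ctx {
	u8 key[32];			/* hypothetical 256-bit key */
};

static int myhash_setkey(struct crypto_ahash *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct myhash_ctx *ctx = crypto_ahash_ctx(tfm);

	if (keylen != sizeof(ctx->key))
		return -EINVAL;
	memcpy(ctx->key, key, keylen);	/* works for any key address */
	return 0;
}

static void myhash_write_digest(struct ahash_request *req, const u32 state[8])
{
	int i;

	/* req->result may be at any address; avoid casting it to u32 *. */
	for (i = 0; i < 8; i++)
		put_unaligned_be32(state[i], req->result + 4 * i);
}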
Documentation/crypto/devel-algos.rst

@@ -235,6 +235,4 @@ Specifics Of Asynchronous HASH Transformation
 
 Some of the drivers will want to use the Generic ScatterWalk in case the
 implementation needs to be fed separate chunks of the scatterlist which
-contains the input data. The buffer containing the resulting hash will
-always be properly aligned to .cra_alignmask so there is no need to
-worry about this.
+contains the input data.
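As a companion to the documentation text above, the walk loop a driver would use looks roughly like the sketch below; myhash_update() and myhash_process_block() are hypothetical stand-ins, and with this patch the walk simply returns each scatterlist chunk bounded by page boundaries, with no alignmask trimming:

#include <crypto/internal/hash.h>

/* Hypothetical per-chunk handler: feed the data to hardware or a
 * software state machine. */
static void myhash_process_block(struct ahash_request *req,
				 const void *data, unsigned int len)
{
	/* driver-specific work would go here */
}

static int myhash_update(struct ahash_request *req)
{
	struct crypto_hash_walk walk;
	int nbytes;

	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
	     nbytes = crypto_hash_walk_done(&walk, 0))
		myhash_process_block(req, walk.data, nbytes);

	return nbytes;	/* 0 on success, negative error from the walk */
}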
crypto/ahash.c (117 changed lines)
@@ -35,21 +35,12 @@ struct ahash_request_priv {
 
 static int hash_walk_next(struct crypto_hash_walk *walk)
 {
-	unsigned int alignmask = walk->alignmask;
 	unsigned int offset = walk->offset;
 	unsigned int nbytes = min(walk->entrylen,
 				  ((unsigned int)(PAGE_SIZE)) - offset);
 
 	walk->data = kmap_local_page(walk->pg);
 	walk->data += offset;
-
-	if (offset & alignmask) {
-		unsigned int unaligned = alignmask + 1 - (offset & alignmask);
-
-		if (nbytes > unaligned)
-			nbytes = unaligned;
-	}
-
 	walk->entrylen -= nbytes;
 	return nbytes;
 }
@@ -73,23 +64,8 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
 
 int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
 {
-	unsigned int alignmask = walk->alignmask;
-
 	walk->data -= walk->offset;
 
-	if (walk->entrylen && (walk->offset & alignmask) && !err) {
-		unsigned int nbytes;
-
-		walk->offset = ALIGN(walk->offset, alignmask + 1);
-		nbytes = min(walk->entrylen,
-			     (unsigned int)(PAGE_SIZE - walk->offset));
-
-		if (nbytes) {
-			walk->entrylen -= nbytes;
-			walk->data += walk->offset;
-			return nbytes;
-		}
-	}
-
 	kunmap_local(walk->data);
 	crypto_yield(walk->flags);
 
@@ -121,7 +97,6 @@ int crypto_hash_walk_first(struct ahash_request *req,
 		return 0;
 	}
 
-	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
 	walk->sg = req->src;
 	walk->flags = req->base.flags;
 
@@ -129,26 +104,6 @@ int crypto_hash_walk_first(struct ahash_request *req,
 }
 EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
 
-static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
-				  unsigned int keylen)
-{
-	unsigned long alignmask = crypto_ahash_alignmask(tfm);
-	int ret;
-	u8 *buffer, *alignbuffer;
-	unsigned long absize;
-
-	absize = keylen + alignmask;
-	buffer = kmalloc(absize, GFP_KERNEL);
-	if (!buffer)
-		return -ENOMEM;
-
-	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
-	memcpy(alignbuffer, key, keylen);
-	ret = tfm->setkey(tfm, alignbuffer, keylen);
-	kfree_sensitive(buffer);
-	return ret;
-}
-
 static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
 			  unsigned int keylen)
 {
@@ -167,13 +122,7 @@ static void ahash_set_needkey(struct crypto_ahash *tfm)
 int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
 			unsigned int keylen)
 {
-	unsigned long alignmask = crypto_ahash_alignmask(tfm);
-	int err;
-
-	if ((unsigned long)key & alignmask)
-		err = ahash_setkey_unaligned(tfm, key, keylen);
-	else
-		err = tfm->setkey(tfm, key, keylen);
+	int err = tfm->setkey(tfm, key, keylen);
 
 	if (unlikely(err)) {
 		ahash_set_needkey(tfm);
@@ -189,7 +138,6 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt,
 			  bool has_state)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	unsigned long alignmask = crypto_ahash_alignmask(tfm);
 	unsigned int ds = crypto_ahash_digestsize(tfm);
 	struct ahash_request *subreq;
 	unsigned int subreq_size;
@@ -203,7 +151,6 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt,
 	reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment());
 	subreq_size += reqsize;
 	subreq_size += ds;
-	subreq_size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
 
 	flags = ahash_request_flags(req);
 	gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC;
@@ -215,7 +162,6 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt,
 	ahash_request_set_callback(subreq, flags, cplt, req);
 
 	result = (u8 *)(subreq + 1) + reqsize;
-	result = PTR_ALIGN(result, alignmask + 1);
 
 	ahash_request_set_crypt(subreq, req->src, result, req->nbytes);
 
@@ -251,56 +197,6 @@ static void ahash_restore_req(struct ahash_request *req, int err)
 	kfree_sensitive(subreq);
 }
 
-static void ahash_op_unaligned_done(void *data, int err)
-{
-	struct ahash_request *areq = data;
-
-	if (err == -EINPROGRESS)
-		goto out;
-
-	/* First copy req->result into req->priv.result */
-	ahash_restore_req(areq, err);
-
-out:
-	/* Complete the ORIGINAL request. */
-	ahash_request_complete(areq, err);
-}
-
-static int ahash_op_unaligned(struct ahash_request *req,
-			      int (*op)(struct ahash_request *),
-			      bool has_state)
-{
-	int err;
-
-	err = ahash_save_req(req, ahash_op_unaligned_done, has_state);
-	if (err)
-		return err;
-
-	err = op(req->priv);
-	if (err == -EINPROGRESS || err == -EBUSY)
-		return err;
-
-	ahash_restore_req(req, err);
-
-	return err;
-}
-
-static int crypto_ahash_op(struct ahash_request *req,
-			   int (*op)(struct ahash_request *),
-			   bool has_state)
-{
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	unsigned long alignmask = crypto_ahash_alignmask(tfm);
-	int err;
-
-	if ((unsigned long)req->result & alignmask)
-		err = ahash_op_unaligned(req, op, has_state);
-	else
-		err = op(req);
-
-	return crypto_hash_errstat(crypto_hash_alg_common(tfm), err);
-}
-
 int crypto_ahash_final(struct ahash_request *req)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -309,7 +205,7 @@ int crypto_ahash_final(struct ahash_request *req)
 	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
 		atomic64_inc(&hash_get_stat(alg)->hash_cnt);
 
-	return crypto_ahash_op(req, tfm->final, true);
+	return crypto_hash_errstat(alg, tfm->final(req));
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_final);
 
@@ -325,7 +221,7 @@ int crypto_ahash_finup(struct ahash_request *req)
 		atomic64_add(req->nbytes, &istat->hash_tlen);
 	}
 
-	return crypto_ahash_op(req, tfm->finup, true);
+	return crypto_hash_errstat(alg, tfm->finup(req));
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_finup);
 
@@ -333,6 +229,7 @@ int crypto_ahash_digest(struct ahash_request *req)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
+	int err;
 
 	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
 		struct crypto_istat_hash *istat = hash_get_stat(alg);
@@ -342,9 +239,11 @@ int crypto_ahash_digest(struct ahash_request *req)
 	}
 
 	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
-		return crypto_hash_errstat(alg, -ENOKEY);
+		err = -ENOKEY;
+	else
+		err = tfm->digest(req);
 
-	return crypto_ahash_op(req, tfm->digest, false);
+	return crypto_hash_errstat(alg, err);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_digest);
 
crypto/shash.c

@@ -541,6 +541,10 @@ int hash_prepare_alg(struct hash_alg_common *alg)
 	if (alg->digestsize > HASH_MAX_DIGESTSIZE)
 		return -EINVAL;
 
+	/* alignmask is not useful for hashes, so it is not supported. */
+	if (base->cra_alignmask)
+		return -EINVAL;
+
 	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
 
 	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
@@ -557,10 +561,6 @@ static int shash_prepare_alg(struct shash_alg *alg)
 	if (alg->descsize > HASH_MAX_DESCSIZE)
 		return -EINVAL;
 
-	/* alignmask is not useful for shash, so it is not supported. */
-	if (base->cra_alignmask)
-		return -EINVAL;
-
 	if ((alg->export && !alg->import) || (alg->import && !alg->export))
 		return -EINVAL;
 
include/crypto/internal/hash.h

@@ -18,15 +18,13 @@ struct crypto_hash_walk {
 	char *data;
 
 	unsigned int offset;
-	unsigned int alignmask;
+	unsigned int flags;
 
 	struct page *pg;
 	unsigned int entrylen;
 
 	unsigned int total;
 	struct scatterlist *sg;
-
-	unsigned int flags;
 };
 
 struct ahash_instance {
include/linux/crypto.h

@@ -110,7 +110,6 @@
  * crypto_aead_walksize() (with the remainder going at the end), no chunk
  * can cross a page boundary or a scatterlist element boundary.
  *  ahash:
- *  - The result buffer must be aligned to the algorithm's alignmask.
  *  - crypto_ahash_finup() must not be used unless the algorithm implements
  *    ->finup() natively.
  */
@@ -278,18 +277,20 @@ struct compress_alg {
  * @cra_ctxsize: Size of the operational context of the transformation. This
  *		 value informs the kernel crypto API about the memory size
  *		 needed to be allocated for the transformation context.
- * @cra_alignmask: Alignment mask for the input and output data buffer. The data
- *		   buffer containing the input data for the algorithm must be
- *		   aligned to this alignment mask. The data buffer for the
- *		   output data must be aligned to this alignment mask. Note that
- *		   the Crypto API will do the re-alignment in software, but
- *		   only under special conditions and there is a performance hit.
- *		   The re-alignment happens at these occasions for different
- *		   @cra_u types: cipher -- For both input data and output data
- *		   buffer; ahash -- For output hash destination buf; shash --
- *		   For output hash destination buf.
- *		   This is needed on hardware which is flawed by design and
- *		   cannot pick data from arbitrary addresses.
+ * @cra_alignmask: For cipher, skcipher, lskcipher, and aead algorithms this is
+ *		   1 less than the alignment, in bytes, that the algorithm
+ *		   implementation requires for input and output buffers. When
+ *		   the crypto API is invoked with buffers that are not aligned
+ *		   to this alignment, the crypto API automatically utilizes
+ *		   appropriately aligned temporary buffers to comply with what
+ *		   the algorithm needs. (For scatterlists this happens only if
+ *		   the algorithm uses the skcipher_walk helper functions.) This
+ *		   misalignment handling carries a performance penalty, so it is
+ *		   preferred that algorithms do not set a nonzero alignmask.
+ *		   Also, crypto API users may wish to allocate buffers aligned
+ *		   to the alignmask of the algorithm being used, in order to
+ *		   avoid the API having to realign them. Note: the alignmask is
+ *		   not supported for hash algorithms and is always 0 for them.
  * @cra_priority: Priority of this transformation implementation. In case
  *		  multiple transformations with same @cra_name are available to
  *		  the Crypto API, the kernel will use the one with highest
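One practical consequence of the reworded @cra_alignmask documentation: for the algorithm types that still honor an alignmask (cipher, skcipher, lskcipher, aead), a caller can avoid the API's internal realignment by over-allocating and aligning its own buffer up front. A minimal sketch follows; alloc_aligned_buf() is an illustrative helper, not an existing kernel API:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <crypto/skcipher.h>

/* Returns a pointer aligned for the given tfm; *to_free receives the raw
 * allocation so the caller can kfree() it when done. */
static u8 *alloc_aligned_buf(struct crypto_skcipher *tfm, unsigned int len,
			     void **to_free)
{
	unsigned int mask = crypto_skcipher_alignmask(tfm);
	u8 *buf = kmalloc(len + mask, GFP_KERNEL);

	if (!buf)
		return NULL;
	*to_free = buf;
	return PTR_ALIGN(buf, mask + 1);
}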