[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20210514201508.27967-9-chang.seok.bae@intel.com>
Date: Fri, 14 May 2021 13:15:05 -0700
From: "Chang S. Bae" <chang.seok.bae@...el.com>
To: tglx@...utronix.de, mingo@...nel.org, bp@...e.de, luto@...nel.org,
x86@...nel.org, herbert@...dor.apana.org.au
Cc: dan.j.williams@...el.com, dave.hansen@...el.com,
ravi.v.shankar@...el.com, linux-crypto@...r.kernel.org,
linux-kernel@...r.kernel.org, chang.seok.bae@...el.com
Subject: [RFC PATCH v2 08/11] crypto: x86/aes-ni - Improve error handling
Some error cases in the glue code are possibly ignored and thus not handled
correctly. Ensure that an earlier error code is never overwritten.
Signed-off-by: Chang S. Bae <chang.seok.bae@...el.com>
Cc: x86@...nel.org
Cc: linux-crypto@...r.kernel.org
Cc: linux-kernel@...r.kernel.org
---
Changes from RFC v1:
* Added as a new patch. This change prepares to address Ard's feedback.
---
arch/x86/crypto/aesni-intel_glue.c | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 2144e54a6c89..685943f0e5a3 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -291,7 +291,7 @@ static int ecb_encrypt(struct skcipher_request *req)
nbytes & AES_BLOCK_MASK);
kernel_fpu_end();
nbytes &= AES_BLOCK_SIZE - 1;
- err = skcipher_walk_done(&walk, nbytes);
+ err |= skcipher_walk_done(&walk, nbytes);
}
return err;
@@ -313,7 +313,7 @@ static int ecb_decrypt(struct skcipher_request *req)
nbytes & AES_BLOCK_MASK);
kernel_fpu_end();
nbytes &= AES_BLOCK_SIZE - 1;
- err = skcipher_walk_done(&walk, nbytes);
+ err |= skcipher_walk_done(&walk, nbytes);
}
return err;
@@ -335,7 +335,7 @@ static int cbc_encrypt(struct skcipher_request *req)
nbytes & AES_BLOCK_MASK, walk.iv);
kernel_fpu_end();
nbytes &= AES_BLOCK_SIZE - 1;
- err = skcipher_walk_done(&walk, nbytes);
+ err |= skcipher_walk_done(&walk, nbytes);
}
return err;
@@ -357,7 +357,7 @@ static int cbc_decrypt(struct skcipher_request *req)
nbytes & AES_BLOCK_MASK, walk.iv);
kernel_fpu_end();
nbytes &= AES_BLOCK_SIZE - 1;
- err = skcipher_walk_done(&walk, nbytes);
+ err |= skcipher_walk_done(&walk, nbytes);
}
return err;
@@ -522,7 +522,7 @@ static int ctr_crypt(struct skcipher_request *req)
nbytes = 0;
}
kernel_fpu_end();
- err = skcipher_walk_done(&walk, nbytes);
+ err |= skcipher_walk_done(&walk, nbytes);
}
return err;
}
@@ -691,7 +691,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
}
kernel_fpu_end();
- err = skcipher_walk_done(&walk, 0);
+ err |= skcipher_walk_done(&walk, 0);
}
if (err)
@@ -862,7 +862,7 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
skcipher_request_set_crypt(&subreq, req->src, req->dst,
blocks * AES_BLOCK_SIZE, req->iv);
req = &subreq;
- err = skcipher_walk_virt(&walk, req, false);
+ err |= skcipher_walk_virt(&walk, req, false);
} else {
tail = 0;
}
@@ -888,7 +888,7 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
nbytes, walk.iv);
kernel_fpu_end();
- err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+ err |= skcipher_walk_done(&walk, walk.nbytes - nbytes);
if (walk.nbytes > 0)
kernel_fpu_begin();
@@ -905,7 +905,7 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
req->iv);
- err = skcipher_walk_virt(&walk, &subreq, false);
+ err |= skcipher_walk_virt(&walk, &subreq, false);
if (err)
return err;
--
2.17.1
Powered by blists - more mailing lists