Diffstat (limited to 'arch/powerpc')
-rw-r--r-- | arch/powerpc/crypto/aes-spe-glue.c           | 454
-rw-r--r-- | arch/powerpc/include/asm/asm-prototypes.h    |   3
-rw-r--r-- | arch/powerpc/include/asm/local.h             |   2
-rw-r--r-- | arch/powerpc/include/asm/security_features.h |   3
-rw-r--r-- | arch/powerpc/kernel/entry_64.S               |   6
-rw-r--r-- | arch/powerpc/kernel/security.c               |  57
-rw-r--r-- | arch/powerpc/kernel/time.c                   |   6
-rw-r--r-- | arch/powerpc/kernel/vmlinux.lds.S            |  37
-rw-r--r-- | arch/powerpc/kvm/book3s_hv_rmhandlers.S      |  30
-rw-r--r-- | arch/powerpc/perf/core-book3s.c              |  18
-rw-r--r-- | arch/powerpc/platforms/pseries/dtl.c         |  38
-rw-r--r-- | arch/powerpc/platforms/pseries/hvCall_inst.c |  12
-rw-r--r-- | arch/powerpc/platforms/pseries/lpar.c        |  15
13 files changed, 360 insertions, 321 deletions
diff --git a/arch/powerpc/crypto/aes-spe-glue.c b/arch/powerpc/crypto/aes-spe-glue.c
index 3a4ca7d32477..1fad5d4c658d 100644
--- a/arch/powerpc/crypto/aes-spe-glue.c
+++ b/arch/powerpc/crypto/aes-spe-glue.c
@@ -17,7 +17,10 @@
 #include <asm/byteorder.h>
 #include <asm/switch_to.h>
 #include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
 #include <crypto/xts.h>
+#include <crypto/gf128mul.h>
+#include <crypto/scatterwalk.h>
 
 /*
  * MAX_BYTES defines the number of bytes that are allowed to be processed
@@ -118,13 +121,19 @@ static int ppc_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key,
 	return 0;
 }
 
-static int ppc_xts_setkey(struct crypto_tfm *tfm, const u8 *in_key,
+static int ppc_aes_setkey_skcipher(struct crypto_skcipher *tfm,
+				   const u8 *in_key, unsigned int key_len)
+{
+	return ppc_aes_setkey(crypto_skcipher_tfm(tfm), in_key, key_len);
+}
+
+static int ppc_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
 		   unsigned int key_len)
 {
-	struct ppc_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct ppc_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int err;
 
-	err = xts_check_key(tfm, in_key, key_len);
+	err = xts_verify_key(tfm, in_key, key_len);
 	if (err)
 		return err;
 
@@ -133,7 +142,7 @@ static int ppc_xts_setkey(struct crypto_tfm *tfm, const u8 *in_key,
 	if (key_len != AES_KEYSIZE_128 &&
 	    key_len != AES_KEYSIZE_192 &&
 	    key_len != AES_KEYSIZE_256) {
-		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 		return -EINVAL;
 	}
 
@@ -178,208 +187,229 @@ static void ppc_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 	spe_end();
 }
 
-static int ppc_ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-			   struct scatterlist *src, unsigned int nbytes)
+static int ppc_ecb_crypt(struct skcipher_request *req, bool enc)
 {
-	struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
-	unsigned int ubytes;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct ppc_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
+	unsigned int nbytes;
 	int err;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, false);
 
-	while ((nbytes = walk.nbytes)) {
-		ubytes = nbytes > MAX_BYTES ?
-			 nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
-		nbytes -= ubytes;
+	while ((nbytes = walk.nbytes) != 0) {
+		nbytes = min_t(unsigned int, nbytes, MAX_BYTES);
+		nbytes = round_down(nbytes, AES_BLOCK_SIZE);
 
 		spe_begin();
-		ppc_encrypt_ecb(walk.dst.virt.addr, walk.src.virt.addr,
-				ctx->key_enc, ctx->rounds, nbytes);
+		if (enc)
+			ppc_encrypt_ecb(walk.dst.virt.addr, walk.src.virt.addr,
+					ctx->key_enc, ctx->rounds, nbytes);
+		else
+			ppc_decrypt_ecb(walk.dst.virt.addr, walk.src.virt.addr,
+					ctx->key_dec, ctx->rounds, nbytes);
 		spe_end();
 
-		err = blkcipher_walk_done(desc, &walk, ubytes);
+		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 
 	return err;
 }
 
-static int ppc_ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-			   struct scatterlist *src, unsigned int nbytes)
+static int ppc_ecb_encrypt(struct skcipher_request *req)
 {
-	struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
-	unsigned int ubytes;
-	int err;
-
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
-
-	while ((nbytes = walk.nbytes)) {
-		ubytes = nbytes > MAX_BYTES ?
-			 nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
-		nbytes -= ubytes;
-
-		spe_begin();
-		ppc_decrypt_ecb(walk.dst.virt.addr, walk.src.virt.addr,
-				ctx->key_dec, ctx->rounds, nbytes);
-		spe_end();
-
-		err = blkcipher_walk_done(desc, &walk, ubytes);
-	}
+	return ppc_ecb_crypt(req, true);
+}
 
-	return err;
+static int ppc_ecb_decrypt(struct skcipher_request *req)
+{
+	return ppc_ecb_crypt(req, false);
 }
 
-static int ppc_cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-			   struct scatterlist *src, unsigned int nbytes)
+static int ppc_cbc_crypt(struct skcipher_request *req, bool enc)
 {
-	struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
-	unsigned int ubytes;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct ppc_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
+	unsigned int nbytes;
 	int err;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, false);
 
-	while ((nbytes = walk.nbytes)) {
-		ubytes = nbytes > MAX_BYTES ?
-			 nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
-		nbytes -= ubytes;
+	while ((nbytes = walk.nbytes) != 0) {
+		nbytes = min_t(unsigned int, nbytes, MAX_BYTES);
+		nbytes = round_down(nbytes, AES_BLOCK_SIZE);
 
 		spe_begin();
-		ppc_encrypt_cbc(walk.dst.virt.addr, walk.src.virt.addr,
-				ctx->key_enc, ctx->rounds, nbytes, walk.iv);
+		if (enc)
+			ppc_encrypt_cbc(walk.dst.virt.addr, walk.src.virt.addr,
+					ctx->key_enc, ctx->rounds, nbytes,
+					walk.iv);
+		else
+			ppc_decrypt_cbc(walk.dst.virt.addr, walk.src.virt.addr,
+					ctx->key_dec, ctx->rounds, nbytes,
+					walk.iv);
 		spe_end();
 
-		err = blkcipher_walk_done(desc, &walk, ubytes);
+		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 
 	return err;
 }
 
-static int ppc_cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-			   struct scatterlist *src, unsigned int nbytes)
+static int ppc_cbc_encrypt(struct skcipher_request *req)
 {
-	struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
-	unsigned int ubytes;
-	int err;
-
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
-
-	while ((nbytes = walk.nbytes)) {
-		ubytes = nbytes > MAX_BYTES ?
-			 nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
-		nbytes -= ubytes;
-
-		spe_begin();
-		ppc_decrypt_cbc(walk.dst.virt.addr, walk.src.virt.addr,
-				ctx->key_dec, ctx->rounds, nbytes, walk.iv);
-		spe_end();
-
-		err = blkcipher_walk_done(desc, &walk, ubytes);
-	}
+	return ppc_cbc_crypt(req, true);
+}
 
-	return err;
+static int ppc_cbc_decrypt(struct skcipher_request *req)
+{
+	return ppc_cbc_crypt(req, false);
 }
 
-static int ppc_ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-			 struct scatterlist *src, unsigned int nbytes)
+static int ppc_ctr_crypt(struct skcipher_request *req)
 {
-	struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
-	unsigned int pbytes, ubytes;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct ppc_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
+	unsigned int nbytes;
 	int err;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
+	err = skcipher_walk_virt(&walk, req, false);
 
-	while ((pbytes = walk.nbytes)) {
-		pbytes = pbytes > MAX_BYTES ?
-			 MAX_BYTES : pbytes;
-		pbytes = pbytes == nbytes ?
-			 nbytes : pbytes & ~(AES_BLOCK_SIZE - 1);
-		ubytes = walk.nbytes - pbytes;
+	while ((nbytes = walk.nbytes) != 0) {
+		nbytes = min_t(unsigned int, nbytes, MAX_BYTES);
+		if (nbytes < walk.total)
+			nbytes = round_down(nbytes, AES_BLOCK_SIZE);
 
 		spe_begin();
 		ppc_crypt_ctr(walk.dst.virt.addr, walk.src.virt.addr,
-			      ctx->key_enc, ctx->rounds, pbytes , walk.iv);
+			      ctx->key_enc, ctx->rounds, nbytes, walk.iv);
 		spe_end();
 
-		nbytes -= pbytes;
-		err = blkcipher_walk_done(desc, &walk, ubytes);
+		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 
 	return err;
 }
 
-static int ppc_xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-			   struct scatterlist *src, unsigned int nbytes)
+static int ppc_xts_crypt(struct skcipher_request *req, bool enc)
 {
-	struct ppc_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
-	unsigned int ubytes;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct ppc_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
+	unsigned int nbytes;
 	int err;
 	u32 *twk;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, false);
 	twk = ctx->key_twk;
 
-	while ((nbytes = walk.nbytes)) {
-		ubytes = nbytes > MAX_BYTES ?
-			 nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
-		nbytes -= ubytes;
+	while ((nbytes = walk.nbytes) != 0) {
+		nbytes = min_t(unsigned int, nbytes, MAX_BYTES);
+		nbytes = round_down(nbytes, AES_BLOCK_SIZE);
 
 		spe_begin();
-		ppc_encrypt_xts(walk.dst.virt.addr, walk.src.virt.addr,
-				ctx->key_enc, ctx->rounds, nbytes, walk.iv, twk);
+		if (enc)
+			ppc_encrypt_xts(walk.dst.virt.addr, walk.src.virt.addr,
+					ctx->key_enc, ctx->rounds, nbytes,
+					walk.iv, twk);
+		else
+			ppc_decrypt_xts(walk.dst.virt.addr, walk.src.virt.addr,
+					ctx->key_dec, ctx->rounds, nbytes,
+					walk.iv, twk);
 		spe_end();
 
 		twk = NULL;
-		err = blkcipher_walk_done(desc, &walk, ubytes);
+		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 
 	return err;
 }
 
-static int ppc_xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-			   struct scatterlist *src, unsigned int nbytes)
+static int ppc_xts_encrypt(struct skcipher_request *req)
 {
-	struct ppc_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
-	unsigned int ubytes;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct ppc_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+	int tail = req->cryptlen % AES_BLOCK_SIZE;
+	int offset = req->cryptlen - tail - AES_BLOCK_SIZE;
+	struct skcipher_request subreq;
+	u8 b[2][AES_BLOCK_SIZE];
 	int err;
-	u32 *twk;
 
-	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
-	twk = ctx->key_twk;
+	if (req->cryptlen < AES_BLOCK_SIZE)
+		return -EINVAL;
+
+	if (tail) {
+		subreq = *req;
+		skcipher_request_set_crypt(&subreq, req->src, req->dst,
+					   req->cryptlen - tail, req->iv);
+		req = &subreq;
+	}
 
-	while ((nbytes = walk.nbytes)) {
-		ubytes = nbytes > MAX_BYTES ?
-			 nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
-		nbytes -= ubytes;
+	err = ppc_xts_crypt(req, true);
+	if (err || !tail)
+		return err;
 
-		spe_begin();
-		ppc_decrypt_xts(walk.dst.virt.addr, walk.src.virt.addr,
-				ctx->key_dec, ctx->rounds, nbytes, walk.iv, twk);
-		spe_end();
+	scatterwalk_map_and_copy(b[0], req->dst, offset, AES_BLOCK_SIZE, 0);
+	memcpy(b[1], b[0], tail);
+	scatterwalk_map_and_copy(b[0], req->src, offset + AES_BLOCK_SIZE, tail, 0);
 
-		twk = NULL;
-		err = blkcipher_walk_done(desc, &walk, ubytes);
+	spe_begin();
+	ppc_encrypt_xts(b[0], b[0], ctx->key_enc, ctx->rounds, AES_BLOCK_SIZE,
+			req->iv, NULL);
+	spe_end();
+
+	scatterwalk_map_and_copy(b[0], req->dst, offset, AES_BLOCK_SIZE + tail, 1);
+
+	return 0;
+}
+
+static int ppc_xts_decrypt(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct ppc_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+	int tail = req->cryptlen % AES_BLOCK_SIZE;
+	int offset = req->cryptlen - tail - AES_BLOCK_SIZE;
+	struct skcipher_request subreq;
+	u8 b[3][AES_BLOCK_SIZE];
+	le128 twk;
+	int err;
+
+	if (req->cryptlen < AES_BLOCK_SIZE)
+		return -EINVAL;
+
+	if (tail) {
+		subreq = *req;
+		skcipher_request_set_crypt(&subreq, req->src, req->dst,
+					   offset, req->iv);
+		req = &subreq;
 	}
 
-	return err;
+	err = ppc_xts_crypt(req, false);
+	if (err || !tail)
+		return err;
+
+	scatterwalk_map_and_copy(b[1], req->src, offset, AES_BLOCK_SIZE + tail, 0);
+
+	spe_begin();
+	if (!offset)
+		ppc_encrypt_ecb(req->iv, req->iv, ctx->key_twk, ctx->rounds,
+				AES_BLOCK_SIZE);
+
+	gf128mul_x_ble(&twk, (le128 *)req->iv);
+
+	ppc_decrypt_xts(b[1], b[1], ctx->key_dec, ctx->rounds, AES_BLOCK_SIZE,
+			(u8 *)&twk, NULL);
+	memcpy(b[0], b[2], tail);
+	memcpy(b[0] + tail, b[1] + tail, AES_BLOCK_SIZE - tail);
+	ppc_decrypt_xts(b[0], b[0], ctx->key_dec, ctx->rounds, AES_BLOCK_SIZE,
+			req->iv, NULL);
+	spe_end();
+
+	scatterwalk_map_and_copy(b[0], req->dst, offset, AES_BLOCK_SIZE + tail, 1);
+
+	return 0;
 }
 
 /*
@@ -388,9 +418,9 @@ static int ppc_xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
  * This improves IPsec thoughput by another few percent. Additionally we assume
  * that AES context is always aligned to at least 8 bytes because it is created
  * with kmalloc() in the crypto infrastructure
- *
  */
-static struct crypto_alg aes_algs[] = { {
+
+static struct crypto_alg aes_cipher_alg = {
 	.cra_name = "aes",
 	.cra_driver_name = "aes-ppc-spe",
 	.cra_priority = 300,
@@ -408,96 +438,84 @@ static struct crypto_alg aes_algs[] = { {
 			.cia_decrypt = ppc_aes_decrypt
 		}
 	}
-}, {
-	.cra_name = "ecb(aes)",
-	.cra_driver_name = "ecb-ppc-spe",
-	.cra_priority = 300,
-	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
-	.cra_blocksize = AES_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct ppc_aes_ctx),
-	.cra_alignmask = 0,
-	.cra_type = &crypto_blkcipher_type,
-	.cra_module = THIS_MODULE,
-	.cra_u = {
-		.blkcipher = {
-			.min_keysize = AES_MIN_KEY_SIZE,
-			.max_keysize = AES_MAX_KEY_SIZE,
-			.ivsize = AES_BLOCK_SIZE,
-			.setkey = ppc_aes_setkey,
-			.encrypt = ppc_ecb_encrypt,
-			.decrypt = ppc_ecb_decrypt,
-		}
-	}
-}, {
-	.cra_name = "cbc(aes)",
-	.cra_driver_name = "cbc-ppc-spe",
-	.cra_priority = 300,
-	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
-	.cra_blocksize = AES_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct ppc_aes_ctx),
-	.cra_alignmask = 0,
-	.cra_type = &crypto_blkcipher_type,
-	.cra_module = THIS_MODULE,
-	.cra_u = {
-		.blkcipher = {
-			.min_keysize = AES_MIN_KEY_SIZE,
-			.max_keysize = AES_MAX_KEY_SIZE,
-			.ivsize = AES_BLOCK_SIZE,
-			.setkey = ppc_aes_setkey,
-			.encrypt = ppc_cbc_encrypt,
-			.decrypt = ppc_cbc_decrypt,
-		}
-	}
-}, {
-	.cra_name = "ctr(aes)",
-	.cra_driver_name = "ctr-ppc-spe",
-	.cra_priority = 300,
-	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
-	.cra_blocksize = 1,
-	.cra_ctxsize = sizeof(struct ppc_aes_ctx),
-	.cra_alignmask = 0,
-	.cra_type = &crypto_blkcipher_type,
-	.cra_module = THIS_MODULE,
-	.cra_u = {
-		.blkcipher = {
-			.min_keysize = AES_MIN_KEY_SIZE,
-			.max_keysize = AES_MAX_KEY_SIZE,
-			.ivsize = AES_BLOCK_SIZE,
-			.setkey = ppc_aes_setkey,
-			.encrypt = ppc_ctr_crypt,
-			.decrypt = ppc_ctr_crypt,
-		}
-	}
-}, {
-	.cra_name = "xts(aes)",
-	.cra_driver_name = "xts-ppc-spe",
-	.cra_priority = 300,
-	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
-	.cra_blocksize = AES_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct ppc_xts_ctx),
-	.cra_alignmask = 0,
-	.cra_type = &crypto_blkcipher_type,
-	.cra_module = THIS_MODULE,
-	.cra_u = {
-		.blkcipher = {
-			.min_keysize = AES_MIN_KEY_SIZE * 2,
-			.max_keysize = AES_MAX_KEY_SIZE * 2,
-			.ivsize = AES_BLOCK_SIZE,
-			.setkey = ppc_xts_setkey,
-			.encrypt = ppc_xts_encrypt,
-			.decrypt = ppc_xts_decrypt,
-		}
+};
+
+static struct skcipher_alg aes_skcipher_algs[] = {
+	{
+		.base.cra_name = "ecb(aes)",
+		.base.cra_driver_name = "ecb-ppc-spe",
+		.base.cra_priority = 300,
+		.base.cra_blocksize = AES_BLOCK_SIZE,
+		.base.cra_ctxsize = sizeof(struct ppc_aes_ctx),
+		.base.cra_module = THIS_MODULE,
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.setkey = ppc_aes_setkey_skcipher,
+		.encrypt = ppc_ecb_encrypt,
+		.decrypt = ppc_ecb_decrypt,
+	}, {
+		.base.cra_name = "cbc(aes)",
+		.base.cra_driver_name = "cbc-ppc-spe",
+		.base.cra_priority = 300,
+		.base.cra_blocksize = AES_BLOCK_SIZE,
+		.base.cra_ctxsize = sizeof(struct ppc_aes_ctx),
+		.base.cra_module = THIS_MODULE,
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.ivsize = AES_BLOCK_SIZE,
+		.setkey = ppc_aes_setkey_skcipher,
+		.encrypt = ppc_cbc_encrypt,
+		.decrypt = ppc_cbc_decrypt,
+	}, {
+		.base.cra_name = "ctr(aes)",
+		.base.cra_driver_name = "ctr-ppc-spe",
+		.base.cra_priority = 300,
+		.base.cra_blocksize = 1,
+		.base.cra_ctxsize = sizeof(struct ppc_aes_ctx),
+		.base.cra_module = THIS_MODULE,
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.ivsize = AES_BLOCK_SIZE,
+		.setkey = ppc_aes_setkey_skcipher,
+		.encrypt = ppc_ctr_crypt,
+		.decrypt = ppc_ctr_crypt,
+		.chunksize = AES_BLOCK_SIZE,
+	}, {
+		.base.cra_name = "xts(aes)",
+		.base.cra_driver_name = "xts-ppc-spe",
+		.base.cra_priority = 300,
+		.base.cra_blocksize = AES_BLOCK_SIZE,
+		.base.cra_ctxsize = sizeof(struct ppc_xts_ctx),
+		.base.cra_module = THIS_MODULE,
+		.min_keysize = AES_MIN_KEY_SIZE * 2,
+		.max_keysize = AES_MAX_KEY_SIZE * 2,
+		.ivsize = AES_BLOCK_SIZE,
+		.setkey = ppc_xts_setkey,
+		.encrypt = ppc_xts_encrypt,
+		.decrypt = ppc_xts_decrypt,
 	}
-} };
+};
 
 static int __init ppc_aes_mod_init(void)
 {
-	return crypto_register_algs(aes_algs, ARRAY_SIZE(aes_algs));
+	int err;
+
+	err = crypto_register_alg(&aes_cipher_alg);
+	if (err)
+		return err;
+
+	err = crypto_register_skciphers(aes_skcipher_algs,
+					ARRAY_SIZE(aes_skcipher_algs));
+	if (err)
+		crypto_unregister_alg(&aes_cipher_alg);
+	return err;
 }
 
 static void __exit ppc_aes_mod_fini(void)
 {
-	crypto_unregister_algs(aes_algs, ARRAY_SIZE(aes_algs));
+	crypto_unregister_alg(&aes_cipher_alg);
+	crypto_unregister_skciphers(aes_skcipher_algs,
+				    ARRAY_SIZE(aes_skcipher_algs));
 }
 
 module_init(ppc_aes_mod_init);
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index 8561498e653c..d84d1417ddb6 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -152,9 +152,12 @@ void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
 /* Patch sites */
 extern s32 patch__call_flush_count_cache;
 extern s32 patch__flush_count_cache_return;
+extern s32 patch__flush_link_stack_return;
+extern s32 patch__call_kvm_flush_link_stack;
 extern s32 patch__memset_nocache, patch__memcpy_nocache;
 
 extern long flush_count_cache;
+extern long kvm_flush_link_stack;
 
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv);
diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
index fdd00939270b..bc4bd19b7fc2 100644
--- a/arch/powerpc/include/asm/local.h
+++ b/arch/powerpc/include/asm/local.h
@@ -17,7 +17,7 @@ typedef struct
 
 #define LOCAL_INIT(i) { (i) }
 
-static __inline__ long local_read(local_t *l)
+static __inline__ long local_read(const local_t *l)
 {
 	return READ_ONCE(l->v);
 }
diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
index 759597bf0fd8..ccf44c135389 100644
--- a/arch/powerpc/include/asm/security_features.h
+++ b/arch/powerpc/include/asm/security_features.h
@@ -81,6 +81,9 @@ static inline bool security_ftr_enabled(unsigned long feature)
 // Software required to flush count cache on context switch
 #define SEC_FTR_FLUSH_COUNT_CACHE	0x0000000000000400ull
 
+// Software required to flush link stack on context switch
+#define SEC_FTR_FLUSH_LINK_STACK	0x0000000000001000ull
+
 
 // Features enabled by default
 #define SEC_FTR_DEFAULT \
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 6467bdab8d40..3fd3ef352e3f 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -537,6 +537,7 @@ flush_count_cache:
 	/* Save LR into r9 */
 	mflr	r9
 
+	// Flush the link stack
 	.rept 64
 	bl	.+4
 	.endr
@@ -546,6 +547,11 @@ flush_count_cache:
 	.balign 32
 	/* Restore LR */
 1:	mtlr	r9
+
+	// If we're just flushing the link stack, return here
+3:	nop
+	patch_site 3b patch__flush_link_stack_return
+
 	li	r9,0x7fff
 	mtctr	r9
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index 7cfcb294b11c..bd91dceb7010 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -24,6 +24,7 @@ enum count_cache_flush_type {
 	COUNT_CACHE_FLUSH_HW	= 0x4,
 };
 static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
+static bool link_stack_flush_enabled;
 
 bool barrier_nospec_enabled;
 static bool no_nospec;
@@ -212,11 +213,19 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
 
 		if (ccd)
 			seq_buf_printf(&s, "Indirect branch cache disabled");
+
+		if (link_stack_flush_enabled)
+			seq_buf_printf(&s, ", Software link stack flush");
+
 	} else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
 		seq_buf_printf(&s, "Mitigation: Software count cache flush");
 
 		if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
 			seq_buf_printf(&s, " (hardware accelerated)");
+
+		if (link_stack_flush_enabled)
+			seq_buf_printf(&s, ", Software link stack flush");
+
 	} else if (btb_flush_enabled) {
 		seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
 	} else {
@@ -377,18 +386,49 @@ static __init int stf_barrier_debugfs_init(void)
 device_initcall(stf_barrier_debugfs_init);
 #endif /* CONFIG_DEBUG_FS */
 
+static void no_count_cache_flush(void)
+{
+	count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
+	pr_info("count-cache-flush: software flush disabled.\n");
+}
+
 static void toggle_count_cache_flush(bool enable)
 {
-	if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
+	if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE) &&
+	    !security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK))
+		enable = false;
+
+	if (!enable) {
 		patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP);
-		count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
-		pr_info("count-cache-flush: software flush disabled.\n");
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+		patch_instruction_site(&patch__call_kvm_flush_link_stack, PPC_INST_NOP);
+#endif
+		pr_info("link-stack-flush: software flush disabled.\n");
+		link_stack_flush_enabled = false;
+		no_count_cache_flush();
 		return;
 	}
 
+	// This enables the branch from _switch to flush_count_cache
 	patch_branch_site(&patch__call_flush_count_cache,
 			  (u64)&flush_count_cache, BRANCH_SET_LINK);
 
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+	// This enables the branch from guest_exit_cont to kvm_flush_link_stack
+	patch_branch_site(&patch__call_kvm_flush_link_stack,
+			  (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
+#endif
+
+	pr_info("link-stack-flush: software flush enabled.\n");
+	link_stack_flush_enabled = true;
+
+	// If we just need to flush the link stack, patch an early return
+	if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
+		patch_instruction_site(&patch__flush_link_stack_return, PPC_INST_BLR);
+		no_count_cache_flush();
+		return;
+	}
+
 	if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
 		count_cache_flush_type = COUNT_CACHE_FLUSH_SW;
 		pr_info("count-cache-flush: full software flush sequence enabled.\n");
@@ -407,11 +447,20 @@ void setup_count_cache_flush(void)
 	if (no_spectrev2 || cpu_mitigations_off()) {
 		if (security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED) ||
 		    security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED))
-			pr_warn("Spectre v2 mitigations not under software control, can't disable\n");
+			pr_warn("Spectre v2 mitigations not fully under software control, can't disable\n");
 
 		enable = false;
 	}
 
+	/*
+	 * There's no firmware feature flag/hypervisor bit to tell us we need to
+	 * flush the link stack on context switch. So we set it here if we see
+	 * either of the Spectre v2 mitigations that aim to protect userspace.
+	 */
+	if (security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED) ||
+	    security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE))
+		security_ftr_set(SEC_FTR_FLUSH_LINK_STACK);
+
 	toggle_count_cache_flush(enable);
 }
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 694522308cd5..84827da01d45 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -338,7 +338,7 @@ static unsigned long vtime_delta(struct task_struct *tsk,
 	return stime;
 }
 
-void vtime_account_system(struct task_struct *tsk)
+void vtime_account_kernel(struct task_struct *tsk)
 {
 	unsigned long stime, stime_scaled, steal_time;
 	struct cpu_accounting_data *acct = get_accounting(tsk);
@@ -366,7 +366,7 @@ void vtime_account_system(struct task_struct *tsk)
 #endif
 	}
 }
-EXPORT_SYMBOL_GPL(vtime_account_system);
+EXPORT_SYMBOL_GPL(vtime_account_kernel);
 
 void vtime_account_idle(struct task_struct *tsk)
 {
@@ -395,7 +395,7 @@ static void vtime_flush_scaled(struct task_struct *tsk,
 /*
  * Account the whole cputime accumulated in the paca
  * Must be called with interrupts disabled.
- * Assumes that vtime_account_system/idle() has been called
+ * Assumes that vtime_account_kernel/idle() has been called
  * recently (i.e. since the last entry from usermode) so that
  * get_paca()->user_time_scaled is up to date.
  */
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 060a1acd7c6d..8834220036a5 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -6,6 +6,8 @@
 #endif
 
 #define BSS_FIRST_SECTIONS *(.bss.prominit)
+#define EMITS_PT_NOTE
+#define RO_EXCEPTION_TABLE_ALIGN	0
 
 #include <asm/page.h>
 #include <asm-generic/vmlinux.lds.h>
@@ -18,22 +20,8 @@ ENTRY(_stext)
 
 PHDRS {
-	kernel PT_LOAD FLAGS(7); /* RWX */
-	notes PT_NOTE FLAGS(0);
-	dummy PT_NOTE FLAGS(0);
-
-	/* binutils < 2.18 has a bug that makes it misbehave when taking an
-	   ELF file with all segments at load address 0 as input.  This
-	   happens when running "strip" on vmlinux, because of the AT() magic
-	   in this linker script.  People using GCC >= 4.2 won't run into
-	   this problem, because the "build-id" support will put some data
-	   into the "notes" segment (at a non-zero load address).
-
-	   To work around this, we force some data into both the "dummy"
-	   segment and the kernel segment, so the dummy segment will get a
-	   non-zero load address.  It's not enough to always create the
-	   "notes" segment, since if nothing gets assigned to it, its load
-	   address will be zero. */
+	text PT_LOAD FLAGS(7); /* RWX */
+	note PT_NOTE FLAGS(0);
 }
 
 #ifdef CONFIG_PPC64
@@ -77,7 +65,7 @@ SECTIONS
 #else /* !CONFIG_PPC64 */
 	HEAD_TEXT
 #endif
-	} :kernel
+	} :text
 
 	__head_end = .;
 
@@ -126,7 +114,7 @@ SECTIONS
 		__got2_end = .;
 #endif /* CONFIG_PPC32 */
 
-	} :kernel
+	} :text
 
 	. = ALIGN(ETEXT_ALIGN_SIZE);
 	_etext = .;
@@ -175,17 +163,6 @@ SECTIONS
 		__stop__btb_flush_fixup = .;
 	}
 #endif
-	EXCEPTION_TABLE(0)
-
-	NOTES :kernel :notes
-
-	/* The dummy segment contents for the bug workaround mentioned above
-	   near PHDRS.  */
-	.dummy : AT(ADDR(.dummy) - LOAD_OFFSET) {
-		LONG(0)
-		LONG(0)
-		LONG(0)
-	} :kernel :dummy
 
 /*
  * Init sections discarded at runtime
@@ -200,7 +177,7 @@ SECTIONS
 #ifdef CONFIG_PPC64
 		*(.tramp.ftrace.init);
 #endif
-	} :kernel
+	} :text
 
 	/* .exit.text is discarded at runtime, not link time,
 	 * to deal with references from __bug_table
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index faebcbb8c4db..0496e66aaa56 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -11,6 +11,7 @@
  */
 
 #include <asm/ppc_asm.h>
+#include <asm/code-patching-asm.h>
 #include <asm/kvm_asm.h>
 #include <asm/reg.h>
 #include <asm/mmu.h>
@@ -1487,6 +1488,13 @@ guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
 1:
 #endif /* CONFIG_KVM_XICS */
 
+	/*
+	 * Possibly flush the link stack here, before we do a blr in
+	 * guest_exit_short_path.
+	 */
+1:	nop
+	patch_site 1b patch__call_kvm_flush_link_stack
+
 	/* If we came in through the P9 short path, go back out to C now */
 	lwz	r0, STACK_SLOT_SHORT_PATH(r1)
 	cmpwi	r0, 0
@@ -1963,6 +1971,28 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 	mtlr	r0
 	blr
 
+.balign 32
+.global kvm_flush_link_stack
+kvm_flush_link_stack:
+	/* Save LR into r0 */
+	mflr	r0
+
+	/* Flush the link stack. On Power8 it's up to 32 entries in size. */
+	.rept 32
+	bl	.+4
+	.endr
+
+	/* And on Power9 it's up to 64. */
+BEGIN_FTR_SECTION
+	.rept 32
+	bl	.+4
+	.endr
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+
+	/* Restore LR */
+	mtlr	r0
+	blr
+
 kvmppc_guest_external:
 	/* External interrupt, first check for host_ipi. If this is
 	 * set, we know the host wants us out so let's do it now
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index ca92e01d0bd1..48604625ab31 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -96,7 +96,7 @@ static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
 {
 	return 0;
 }
-static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
+static inline void perf_get_data_addr(struct perf_event *event, struct pt_regs *regs, u64 *addrp) { }
 static inline u32 perf_get_misc_flags(struct pt_regs *regs)
 {
 	return 0;
@@ -127,7 +127,7 @@ static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
 static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
 static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
 static void power_pmu_sched_task(struct perf_event_context *ctx, bool sched_in) {}
-static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
+static inline void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *cpuhw) {}
 static void pmao_restore_workaround(bool ebb) { }
 #endif /* CONFIG_PPC32 */
 
@@ -179,7 +179,7 @@ static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
  * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC, the
  * [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA, or the SDAR_VALID bit in SIER.
  */
-static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
+static inline void perf_get_data_addr(struct perf_event *event, struct pt_regs *regs, u64 *addrp)
 {
 	unsigned long mmcra = regs->dsisr;
 	bool sdar_valid;
@@ -204,8 +204,7 @@ static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
 	if (!(mmcra & MMCRA_SAMPLE_ENABLE) || sdar_valid)
 		*addrp = mfspr(SPRN_SDAR);
 
-	if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN) &&
-	    is_kernel_addr(mfspr(SPRN_SDAR)))
+	if (is_kernel_addr(mfspr(SPRN_SDAR)) && perf_allow_kernel(&event->attr) != 0)
 		*addrp = 0;
 }
 
@@ -444,7 +443,7 @@ static __u64 power_pmu_bhrb_to(u64 addr)
 }
 
 /* Processing BHRB entries */
-static void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
+static void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *cpuhw)
 {
 	u64 val;
 	u64 addr;
@@ -472,8 +471,7 @@ static void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
 			 * exporting it to userspace (avoid exposure of regions
 			 * where we could have speculative execution)
 			 */
-			if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN) &&
-			    is_kernel_addr(addr))
+			if (is_kernel_addr(addr) && perf_allow_kernel(&event->attr) != 0)
 				continue;
 
 			/* Branches are read most recent first (ie. mfbhrb 0 is
@@ -2087,12 +2085,12 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 
 	if (event->attr.sample_type &
 	    (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR))
-		perf_get_data_addr(regs, &data.addr);
+		perf_get_data_addr(event, regs, &data.addr);
 
 	if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) {
 		struct cpu_hw_events *cpuhw;
 		cpuhw = this_cpu_ptr(&cpu_hw_events);
-		power_pmu_bhrb_read(cpuhw);
+		power_pmu_bhrb_read(event, cpuhw);
 		data.br_stack = &cpuhw->bhrb_stack;
 	}
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index 2b87480f2837..eab8aa293743 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -19,7 +19,6 @@
 
 struct dtl {
 	struct dtl_entry	*buf;
-	struct dentry		*file;
 	int			cpu;
 	int			buf_entries;
 	u64			last_idx;
@@ -320,46 +319,28 @@ static const struct file_operations dtl_fops = {
 
 static struct dentry *dtl_dir;
 
-static int dtl_setup_file(struct dtl *dtl)
+static void dtl_setup_file(struct dtl *dtl)
 {
 	char name[10];
 
 	sprintf(name, "cpu-%d", dtl->cpu);
 
-	dtl->file = debugfs_create_file(name, 0400, dtl_dir, dtl, &dtl_fops);
-	if (!dtl->file)
-		return -ENOMEM;
-
-	return 0;
+	debugfs_create_file(name, 0400, dtl_dir, dtl, &dtl_fops);
 }
 
 static int dtl_init(void)
 {
-	struct dentry *event_mask_file, *buf_entries_file;
-	int rc, i;
+	int i;
 
 	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
 		return -ENODEV;
 
 	/* set up common debugfs structure */
 
-	rc = -ENOMEM;
 	dtl_dir = debugfs_create_dir("dtl", powerpc_debugfs_root);
-	if (!dtl_dir) {
-		printk(KERN_WARNING "%s: can't create dtl root dir\n",
-		       __func__);
-		goto err;
-	}
 
-	event_mask_file = debugfs_create_x8("dtl_event_mask", 0600,
-				dtl_dir, &dtl_event_mask);
-	buf_entries_file = debugfs_create_u32("dtl_buf_entries", 0400,
-				dtl_dir, &dtl_buf_entries);
-
-	if (!event_mask_file || !buf_entries_file) {
-		printk(KERN_WARNING "%s: can't create dtl files\n", __func__);
-		goto err_remove_dir;
-	}
+	debugfs_create_x8("dtl_event_mask", 0600, dtl_dir, &dtl_event_mask);
+	debugfs_create_u32("dtl_buf_entries", 0400, dtl_dir, &dtl_buf_entries);
 
 	/* set up the per-cpu log structures */
 
 	for_each_possible_cpu(i) {
@@ -367,16 +348,9 @@ static int dtl_init(void)
 		spin_lock_init(&dtl->lock);
 		dtl->cpu = i;
 
-		rc = dtl_setup_file(dtl);
-		if (rc)
-			goto err_remove_dir;
+		dtl_setup_file(dtl);
 	}
 
 	return 0;
-
-err_remove_dir:
-	debugfs_remove_recursive(dtl_dir);
-err:
-	return rc;
 }
 machine_arch_initcall(pseries, dtl_init);
diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c
index bcc1b67417a8..c40c62ec432e 100644
--- a/arch/powerpc/platforms/pseries/hvCall_inst.c
+++ b/arch/powerpc/platforms/pseries/hvCall_inst.c
@@ -129,7 +129,6 @@ static void probe_hcall_exit(void *ignored, unsigned long opcode, long retval,
 static int __init hcall_inst_init(void)
 {
 	struct dentry *hcall_root;
-	struct dentry *hcall_file;
 	char cpu_name_buf[CPU_NAME_BUF_SIZE];
 	int cpu;
 
@@ -145,17 +144,12 @@ static int __init hcall_inst_init(void)
 	}
 
 	hcall_root = debugfs_create_dir(HCALL_ROOT_DIR, NULL);
-	if (!hcall_root)
-		return -ENOMEM;
 
 	for_each_possible_cpu(cpu) {
 		snprintf(cpu_name_buf, CPU_NAME_BUF_SIZE, "cpu%d", cpu);
-		hcall_file = debugfs_create_file(cpu_name_buf, 0444,
-						 hcall_root,
-						 per_cpu(hcall_stats, cpu),
-						 &hcall_inst_seq_fops);
-		if (!hcall_file)
-			return -ENOMEM;
+		debugfs_create_file(cpu_name_buf, 0444, hcall_root,
+				    per_cpu(hcall_stats, cpu),
+				    &hcall_inst_seq_fops);
 	}
 
 	return 0;
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index f87a5c64e24d..f9f57c55655e 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -1998,24 +1998,11 @@ static int __init vpa_debugfs_init(void)
 		return 0;
 
 	vpa_dir = debugfs_create_dir("vpa", powerpc_debugfs_root);
-	if (!vpa_dir) {
-		pr_warn("%s: can't create vpa root dir\n", __func__);
-		return -ENOMEM;
-	}
 
 	/* set up the per-cpu vpa file*/
 	for_each_possible_cpu(i) {
-		struct dentry *d;
-
 		sprintf(name, "cpu-%ld", i);
-
-		d = debugfs_create_file(name, 0400, vpa_dir, (void *)i,
-					&vpa_fops);
-		if (!d) {
-			pr_warn("%s: can't create per-cpu vpa file\n",
-					__func__);
-			return -ENOMEM;
-		}
+		debugfs_create_file(name, 0400, vpa_dir, (void *)i, &vpa_fops);
 	}
 
 	return 0;
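
The bulk of this diff converts the SPE AES glue code from the deprecated blkcipher interface to the skcipher API, and every mode handler reduces to the same walk loop: clamp each chunk to MAX_BYTES so preemption is never held off across spe_begin()/spe_end() for too long, round down to whole AES blocks, then report the unprocessed remainder back to the walk. Below is a minimal sketch of that loop shape; the skcipher walk calls are the real API used in the patch, while example_ctx and process_blocks() are hypothetical stand-ins for ppc_aes_ctx and the SPE assembly helpers such as ppc_encrypt_ecb().

/*
 * Sketch only: example_ctx and process_blocks() are hypothetical
 * placeholders; the walk API calls match aes-spe-glue.c above.
 */
#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>

#define MAX_BYTES 768	/* per-chunk bound, as defined in aes-spe-glue.c */

struct example_ctx {
	u32 key_enc[AES_MAX_KEYLENGTH_U32];
	u32 rounds;
};

static inline void process_blocks(u8 *dst, const u8 *src,
				  struct example_ctx *ctx, unsigned int nbytes)
{
	/* the real driver calls its SPE assembly helpers here */
}

static int example_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct example_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	/* false = not atomic: the walk may sleep if the request allows it */
	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		/* bound the time spent in the non-preemptible SPE section */
		nbytes = min_t(unsigned int, nbytes, MAX_BYTES);
		/* and only ever process whole blocks */
		nbytes = round_down(nbytes, AES_BLOCK_SIZE);

		process_blocks(walk.dst.virt.addr, walk.src.virt.addr,
			       ctx, nbytes);

		/* hand back however many bytes this pass did not consume */
		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}

	return err;
}

The CTR handler is the one variation: it skips the round_down on the final chunk (nbytes == walk.total) so a trailing partial block can still be encrypted, which is why the skcipher_alg entry advertises .chunksize = AES_BLOCK_SIZE.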
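
On the mitigation side, entry_64.S and book3s_hv_rmhandlers.S flush the branch predictor's link stack with the same idiom: a `.rept` run of `bl .+4` instructions. Each `bl .+4` is a branch-and-link to the immediately following instruction, so it executes as a near no-op while pushing one benign return address onto the hardware link stack, displacing whatever an attacker may have trained into it. An annotated copy of the sequence, following kvm_flush_link_stack from the diff:

	mflr	r0		/* the bl's clobber LR, so save the real one */

	.rept	32		/* Power8's link stack holds up to 32 entries */
	bl	.+4		/* branch-and-link to the very next instruction */
	.endr			/* each bl pushes one harmless predictor entry */

BEGIN_FTR_SECTION		/* Power9 holds up to 64, so add 32 more there */
	.rept	32
	bl	.+4
	.endr
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	mtlr	r0		/* restore the saved return address */
	blr

The entry_64.S variant uses 64 repetitions unconditionally and saves LR in r9 instead; the new `3: nop` + patch_site pair lets security.c patch in an early blr when only the link stack, not the count cache, needs flushing.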
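
The three pseries changes (dtl.c, hvCall_inst.c, lpar.c) all apply the same modern debugfs rule: registration failure is never fatal to the subsystem, and the debugfs_create_*() helpers tolerate an error-pointer parent, so callers stop checking return values and the unwind paths disappear. A minimal sketch of the resulting idiom, with all names hypothetical:

#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/types.h>

static struct dentry *example_dir;	/* hypothetical */
static u32 example_buf_entries;

static int __init example_debugfs_init(void)
{
	/*
	 * No return-value checks: on failure debugfs_create_dir()
	 * returns an error pointer, and debugfs_create_u32() does
	 * nothing useful when handed one, so there is nothing to
	 * unwind and the driver behaves the same either way.
	 */
	example_dir = debugfs_create_dir("example", NULL);
	debugfs_create_u32("buf_entries", 0400, example_dir,
			   &example_buf_entries);
	return 0;
}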