root/include/crypto/algapi.h


DEFINITIONS

This source file includes the following definitions:
  1. crypto_set_spawn
  2. crypto_attr_alg
  3. crypto_queue_len
  4. crypto_xor
  5. crypto_xor_cpy
  6. crypto_tfm_ctx_aligned
  7. crypto_tfm_alg_instance
  8. crypto_instance_ctx
  9. crypto_ablkcipher_alg
  10. crypto_ablkcipher_ctx
  11. crypto_ablkcipher_ctx_aligned
  12. crypto_spawn_blkcipher
  13. crypto_blkcipher_ctx
  14. crypto_blkcipher_ctx_aligned
  15. crypto_spawn_cipher
  16. crypto_cipher_alg
  17. blkcipher_walk_init
  18. ablkcipher_walk_init
  19. ablkcipher_walk_complete
  20. crypto_get_backlog
  21. ablkcipher_enqueue_request
  22. ablkcipher_dequeue_request
  23. ablkcipher_request_ctx
  24. crypto_get_attr_alg
  25. crypto_requires_off
  26. crypto_requires_sync
  27. crypto_memneq
  28. crypto_yield

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Cryptographic API for algorithms (i.e., low-level API).
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 */
#ifndef _CRYPTO_ALGAPI_H
#define _CRYPTO_ALGAPI_H

#include <linux/crypto.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

/*
 * Maximum values for blocksize and alignmask, used to allocate
 * static buffers that are big enough for any combination of
 * algs and architectures. Ciphers have a lower maximum size.
 */
#define MAX_ALGAPI_BLOCKSIZE            160
#define MAX_ALGAPI_ALIGNMASK            63
#define MAX_CIPHER_BLOCKSIZE            16
#define MAX_CIPHER_ALIGNMASK            15
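
/*
 * Illustrative sketch (not part of the kernel source): code that must
 * hold one block for an arbitrary algorithm can combine these bounds
 * to reserve an over-aligned scratch buffer, where alignmask is the
 * algorithm's alignmask (at most MAX_ALGAPI_ALIGNMASK):
 *
 *	u8 buf[MAX_ALGAPI_BLOCKSIZE + MAX_ALGAPI_ALIGNMASK];
 *	u8 *aligned = PTR_ALIGN(buf, alignmask + 1);
 */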

struct crypto_aead;
struct crypto_instance;
struct module;
struct rtattr;
struct seq_file;

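/*
 * Per-type frontend operations.  Roughly: ctxsize()/extsize() size
 * the transform context, init()/init_tfm() set up a freshly
 * allocated transform, show() emits the /proc/crypto entry and
 * report() the crypto_user netlink one, and free() releases an
 * instance.  type/maskclear/maskset adjust algorithm lookups, and
 * tfmsize is the size of the type-specific tfm wrapper.
 */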
struct crypto_type {
        unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
        unsigned int (*extsize)(struct crypto_alg *alg);
        int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
        int (*init_tfm)(struct crypto_tfm *tfm);
        void (*show)(struct seq_file *m, struct crypto_alg *alg);
        int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
        void (*free)(struct crypto_instance *inst);

        unsigned int type;
        unsigned int maskclear;
        unsigned int maskset;
        unsigned int tfmsize;
};

struct crypto_instance {
        struct crypto_alg alg;

        struct crypto_template *tmpl;
        struct hlist_node list;

        void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_template {
        struct list_head list;
        struct hlist_head instances;
        struct module *module;

        struct crypto_instance *(*alloc)(struct rtattr **tb);
        void (*free)(struct crypto_instance *inst);
        int (*create)(struct crypto_template *tmpl, struct rtattr **tb);

        char name[CRYPTO_MAX_ALG_NAME];
};

struct crypto_spawn {
        struct list_head list;
        struct crypto_alg *alg;
        struct crypto_instance *inst;
        const struct crypto_type *frontend;
        u32 mask;
};

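/*
 * A simple FIFO of asynchronous requests.  @backlog points at the
 * first request queued beyond @max_qlen, or at @list itself when
 * nothing is backlogged (see crypto_get_backlog() below).
 */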
struct crypto_queue {
        struct list_head list;
        struct list_head *backlog;

        unsigned int qlen;
        unsigned int max_qlen;
};

struct scatter_walk {
        struct scatterlist *sg;
        unsigned int offset;
};

struct blkcipher_walk {
        union {
                struct {
                        struct page *page;
                        unsigned long offset;
                } phys;

                struct {
                        u8 *page;
                        u8 *addr;
                } virt;
        } src, dst;

        struct scatter_walk in;
        unsigned int nbytes;

        struct scatter_walk out;
        unsigned int total;

        void *page;
        u8 *buffer;
        u8 *iv;
        unsigned int ivsize;

        int flags;
        unsigned int walk_blocksize;
        unsigned int cipher_blocksize;
        unsigned int alignmask;
};

struct ablkcipher_walk {
        struct {
                struct page *page;
                unsigned int offset;
        } src, dst;

        struct scatter_walk     in;
        unsigned int            nbytes;
        struct scatter_walk     out;
        unsigned int            total;
        struct list_head        buffers;
        u8                      *iv_buffer;
        u8                      *iv;
        int                     flags;
        unsigned int            blocksize;
};

extern const struct crypto_type crypto_ablkcipher_type;
extern const struct crypto_type crypto_blkcipher_type;

void crypto_mod_put(struct crypto_alg *alg);

int crypto_register_template(struct crypto_template *tmpl);
int crypto_register_templates(struct crypto_template *tmpls, int count);
void crypto_unregister_template(struct crypto_template *tmpl);
void crypto_unregister_templates(struct crypto_template *tmpls, int count);
struct crypto_template *crypto_lookup_template(const char *name);

int crypto_register_instance(struct crypto_template *tmpl,
                             struct crypto_instance *inst);
int crypto_unregister_instance(struct crypto_instance *inst);

int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
                      struct crypto_instance *inst, u32 mask);
int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
                       struct crypto_instance *inst,
                       const struct crypto_type *frontend);
int crypto_grab_spawn(struct crypto_spawn *spawn, const char *name,
                      u32 type, u32 mask);

void crypto_drop_spawn(struct crypto_spawn *spawn);
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
                                    u32 mask);
void *crypto_spawn_tfm2(struct crypto_spawn *spawn);

static inline void crypto_set_spawn(struct crypto_spawn *spawn,
                                    struct crypto_instance *inst)
{
        spawn->inst = inst;
}
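
/*
 * Typical spawn lifecycle in a template's create() callback
 * (illustrative sketch; everything except the algapi calls is made
 * up):
 *
 *	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
 *	int err;
 *
 *	crypto_set_spawn(spawn, inst);
 *	err = crypto_grab_spawn(spawn, name, type, mask);
 *	if (err)
 *		goto err_free_inst;
 *
 * A transform is later materialized from the spawn with
 * crypto_spawn_tfm() (or crypto_spawn_tfm2() for new-style types),
 * and the reference is released with crypto_drop_spawn() when the
 * instance is freed.
 */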

struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type);
const char *crypto_attr_alg_name(struct rtattr *rta);
struct crypto_alg *crypto_attr_alg2(struct rtattr *rta,
                                    const struct crypto_type *frontend,
                                    u32 type, u32 mask);

static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta,
                                                 u32 type, u32 mask)
{
        return crypto_attr_alg2(rta, NULL, type, mask);
}

int crypto_attr_u32(struct rtattr *rta, u32 *num);
int crypto_inst_setname(struct crypto_instance *inst, const char *name,
                        struct crypto_alg *alg);
void *crypto_alloc_instance(const char *name, struct crypto_alg *alg,
                            unsigned int head);

void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
                           struct crypto_async_request *request);
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
{
        return queue->qlen;
}
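
/*
 * Minimal usage sketch (illustrative only): a driver typically owns
 * one queue, fills it from its request entry points and drains it
 * from a worker:
 *
 *	crypto_init_queue(&q, 32);
 *	...
 *	err = crypto_enqueue_request(&q, &req->base);
 *	...
 *	async_req = crypto_dequeue_request(&q);
 *	if (async_req)
 *		process it, then call async_req->complete()
 */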

void crypto_inc(u8 *a, unsigned int size);
void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int size);

static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
{
        if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
            __builtin_constant_p(size) &&
            (size % sizeof(unsigned long)) == 0) {
                unsigned long *d = (unsigned long *)dst;
                unsigned long *s = (unsigned long *)src;

                while (size > 0) {
                        *d++ ^= *s++;
                        size -= sizeof(unsigned long);
                }
        } else {
                __crypto_xor(dst, dst, src, size);
        }
}

static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
                                  unsigned int size)
{
        if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
            __builtin_constant_p(size) &&
            (size % sizeof(unsigned long)) == 0) {
                unsigned long *d = (unsigned long *)dst;
                unsigned long *s1 = (unsigned long *)src1;
                unsigned long *s2 = (unsigned long *)src2;

                while (size > 0) {
                        *d++ = *s1++ ^ *s2++;
                        size -= sizeof(unsigned long);
                }
        } else {
                __crypto_xor(dst, src1, src2, size);
        }
}
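
/*
 * Example (illustrative): XORing the IV into a block before
 * encryption, CBC style.  With a constant, word-multiple size the
 * word-at-a-time fast path above is folded in at compile time:
 *
 *	crypto_xor(block, iv, AES_BLOCK_SIZE);
 *
 * crypto_xor_cpy() does the same without clobbering either source:
 *
 *	crypto_xor_cpy(dst, src, iv, AES_BLOCK_SIZE);
 */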

int blkcipher_walk_done(struct blkcipher_desc *desc,
                        struct blkcipher_walk *walk, int err);
int blkcipher_walk_virt(struct blkcipher_desc *desc,
                        struct blkcipher_walk *walk);
int blkcipher_walk_phys(struct blkcipher_desc *desc,
                        struct blkcipher_walk *walk);
int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
                              struct blkcipher_walk *walk,
                              unsigned int blocksize);
int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
                                   struct blkcipher_walk *walk,
                                   struct crypto_aead *tfm,
                                   unsigned int blocksize);

int ablkcipher_walk_done(struct ablkcipher_request *req,
                         struct ablkcipher_walk *walk, int err);
int ablkcipher_walk_phys(struct ablkcipher_request *req,
                         struct ablkcipher_walk *walk);
void __ablkcipher_walk_complete(struct ablkcipher_walk *walk);

static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
{
        return PTR_ALIGN(crypto_tfm_ctx(tfm),
                         crypto_tfm_alg_alignmask(tfm) + 1);
}
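
/*
 * Worked example: for an algorithm with cra_alignmask = 7 this
 * rounds the context pointer up to the next multiple of
 * alignmask + 1, i.e. returns an 8-byte aligned context.
 */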

static inline struct crypto_instance *crypto_tfm_alg_instance(
        struct crypto_tfm *tfm)
{
        return container_of(tfm->__crt_alg, struct crypto_instance, alg);
}

static inline void *crypto_instance_ctx(struct crypto_instance *inst)
{
        return inst->__ctx;
}

static inline struct ablkcipher_alg *crypto_ablkcipher_alg(
        struct crypto_ablkcipher *tfm)
{
        return &crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_ablkcipher;
}

static inline void *crypto_ablkcipher_ctx(struct crypto_ablkcipher *tfm)
{
        return crypto_tfm_ctx(&tfm->base);
}

static inline void *crypto_ablkcipher_ctx_aligned(struct crypto_ablkcipher *tfm)
{
        return crypto_tfm_ctx_aligned(&tfm->base);
}

static inline struct crypto_blkcipher *crypto_spawn_blkcipher(
        struct crypto_spawn *spawn)
{
        u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
        u32 mask = CRYPTO_ALG_TYPE_MASK;

        return __crypto_blkcipher_cast(crypto_spawn_tfm(spawn, type, mask));
}

static inline void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm)
{
        return crypto_tfm_ctx(&tfm->base);
}

static inline void *crypto_blkcipher_ctx_aligned(struct crypto_blkcipher *tfm)
{
        return crypto_tfm_ctx_aligned(&tfm->base);
}

static inline struct crypto_cipher *crypto_spawn_cipher(
        struct crypto_spawn *spawn)
{
        u32 type = CRYPTO_ALG_TYPE_CIPHER;
        u32 mask = CRYPTO_ALG_TYPE_MASK;

        return __crypto_cipher_cast(crypto_spawn_tfm(spawn, type, mask));
}

static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
{
        return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
}

static inline void blkcipher_walk_init(struct blkcipher_walk *walk,
                                       struct scatterlist *dst,
                                       struct scatterlist *src,
                                       unsigned int nbytes)
{
        walk->in.sg = src;
        walk->out.sg = dst;
        walk->total = nbytes;
}
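
/*
 * Typical walker loop (illustrative sketch; bsize and the actual
 * block processing are placeholders):
 *
 *	struct blkcipher_walk walk;
 *	int err;
 *
 *	blkcipher_walk_init(&walk, dst, src, nbytes);
 *	err = blkcipher_walk_virt(desc, &walk);
 *	while (walk.nbytes) {
 *		process full blocks from walk.src.virt.addr into
 *		walk.dst.virt.addr, then:
 *		err = blkcipher_walk_done(desc, &walk,
 *					  walk.nbytes % bsize);
 *	}
 *	return err;
 */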

static inline void ablkcipher_walk_init(struct ablkcipher_walk *walk,
                                        struct scatterlist *dst,
                                        struct scatterlist *src,
                                        unsigned int nbytes)
{
        walk->in.sg = src;
        walk->out.sg = dst;
        walk->total = nbytes;
        INIT_LIST_HEAD(&walk->buffers);
}

static inline void ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
        if (unlikely(!list_empty(&walk->buffers)))
                __ablkcipher_walk_complete(walk);
}

static inline struct crypto_async_request *crypto_get_backlog(
        struct crypto_queue *queue)
{
        return queue->backlog == &queue->list ? NULL :
               container_of(queue->backlog, struct crypto_async_request, list);
}
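
/*
 * Canonical dequeue pattern (illustrative): tell the owner of a
 * backlogged request that it is now in flight before starting on
 * the next request:
 *
 *	backlog = crypto_get_backlog(&q);
 *	async_req = crypto_dequeue_request(&q);
 *	if (backlog)
 *		backlog->complete(backlog, -EINPROGRESS);
 */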

static inline int ablkcipher_enqueue_request(struct crypto_queue *queue,
                                             struct ablkcipher_request *request)
{
        return crypto_enqueue_request(queue, &request->base);
}

static inline struct ablkcipher_request *ablkcipher_dequeue_request(
        struct crypto_queue *queue)
{
        return ablkcipher_request_cast(crypto_dequeue_request(queue));
}

static inline void *ablkcipher_request_ctx(struct ablkcipher_request *req)
{
        return req->__ctx;
}

static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb,
                                                     u32 type, u32 mask)
{
        return crypto_attr_alg(tb[1], type, mask);
}

static inline int crypto_requires_off(u32 type, u32 mask, u32 off)
{
        return (type ^ off) & mask & off;
}
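
/*
 * Worked example: with type = 0, mask = CRYPTO_ALG_ASYNC and
 * off = CRYPTO_ALG_ASYNC, (type ^ off) & mask & off evaluates to
 * CRYPTO_ALG_ASYNC: the caller masked the ASYNC bit and asked for
 * it to be clear, so algorithms must have it off.
 */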

/*
 * Returns CRYPTO_ALG_ASYNC if type/mask requires the use of sync algorithms.
 * Otherwise returns zero.
 */
static inline int crypto_requires_sync(u32 type, u32 mask)
{
        return crypto_requires_off(type, mask, CRYPTO_ALG_ASYNC);
}

noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);

/**
 * crypto_memneq - Compare two areas of memory without leaking
 *                 timing information.
 *
 * @a: One area of memory
 * @b: Another area of memory
 * @size: The size of the area.
 *
 * Returns 0 when data is equal, 1 otherwise.
 */
static inline int crypto_memneq(const void *a, const void *b, size_t size)
{
        return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
}
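
/*
 * Example (illustrative): authentication tags must be compared with
 * crypto_memneq() rather than memcmp(), so that the time taken on a
 * mismatch does not reveal where the first differing byte sits:
 *
 *	if (crypto_memneq(calculated_tag, received_tag, authsize))
 *		return -EBADMSG;
 */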

static inline void crypto_yield(u32 flags)
{
        if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
                cond_resched();
}

int crypto_register_notifier(struct notifier_block *nb);
int crypto_unregister_notifier(struct notifier_block *nb);

/* Crypto notification events. */
enum {
        CRYPTO_MSG_ALG_REQUEST,
        CRYPTO_MSG_ALG_REGISTER,
        CRYPTO_MSG_ALG_LOADED,
};

#endif  /* _CRYPTO_ALGAPI_H */
