/* Copyright (C) 2004-2006, Advanced Micro Devices, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/crypto.h>
#include <linux/spinlock.h>
#include <crypto/algapi.h>

#include <asm/io.h>
#include <asm/delay.h>

#include "geode-aes.h"

/* Register definitions */

#define AES_CTRLA_REG	0x0000

#define AES_CTRL_START		0x01
#define AES_CTRL_DECRYPT	0x00
#define AES_CTRL_ENCRYPT	0x02
#define AES_CTRL_WRKEY		0x04
#define AES_CTRL_DCA		0x08
#define AES_CTRL_SCA		0x10
#define AES_CTRL_CBC		0x20

#define AES_INTR_REG	0x0008

#define AES_INTRA_PENDING	(1 << 16)
#define AES_INTRB_PENDING	(1 << 17)

#define AES_INTR_PENDING	(AES_INTRA_PENDING | AES_INTRB_PENDING)
#define AES_INTR_MASK		0x07

#define AES_SOURCEA_REG		0x0010
#define AES_DSTA_REG		0x0014
#define AES_LENA_REG		0x0018
#define AES_WRITEKEY0_REG	0x0030
#define AES_WRITEIV0_REG	0x0040

/* A very large counter that is used to gracefully bail out of an
 * operation in case of trouble
 */

#define AES_OP_TIMEOUT	0x50000

/* Static structures */

static void __iomem *_iobase;
static spinlock_t lock;

/* Write a 128 bit field (either a writable key or IV) */
static inline void
_writefield(u32 offset, void *value)
{
	int i;
	for (i = 0; i < 4; i++)
		iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4));
}

/* Read a 128 bit field (either a writable key or IV) */
static inline void
_readfield(u32 offset, void *value)
{
	int i;
	for (i = 0; i < 4; i++)
		((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4));
}

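/*
 * Program one transfer into the engine and busy-wait for completion.
 * The poll on AES_INTRA_PENDING is bounded by AES_OP_TIMEOUT; returns
 * 0 when the operation finished and 1 if the counter ran out first.
 */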
static int
do_crypt(void *src, void *dst, int len, u32 flags)
{
	u32 status;
	u32 counter = AES_OP_TIMEOUT;

	iowrite32(virt_to_phys(src), _iobase + AES_SOURCEA_REG);
	iowrite32(virt_to_phys(dst), _iobase + AES_DSTA_REG);
	iowrite32(len, _iobase + AES_LENA_REG);

	/* Start the operation */
	iowrite32(AES_CTRL_START | flags, _iobase + AES_CTRLA_REG);

	do {
		status = ioread32(_iobase + AES_INTR_REG);
	} while (!(status & AES_INTRA_PENDING) && --counter);

	/* Clear the event */
	iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);

	return counter ? 0 : 1;
}

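/*
 * Run a single geode_aes_op through the engine.  The optional IV and
 * user-supplied key are loaded and the transfer started under the
 * spinlock, since there is only one set of engine registers.  Returns
 * the number of bytes handed to the hardware (op->len), or 0 when
 * there is nothing to do (zero length or src == dst).
 */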
static unsigned int
geode_aes_crypt(struct geode_aes_op *op)
{
	u32 flags = 0;
	unsigned long iflags;

	if (op->len == 0 || op->src == op->dst)
		return 0;

	if (op->flags & AES_FLAGS_COHERENT)
		flags |= (AES_CTRL_DCA | AES_CTRL_SCA);

	if (op->dir == AES_DIR_ENCRYPT)
		flags |= AES_CTRL_ENCRYPT;

	/* Start the critical section */

	spin_lock_irqsave(&lock, iflags);

	if (op->mode == AES_MODE_CBC) {
		flags |= AES_CTRL_CBC;
		_writefield(AES_WRITEIV0_REG, op->iv);
	}

	if (op->flags & AES_FLAGS_USRKEY) {
		flags |= AES_CTRL_WRKEY;
		_writefield(AES_WRITEKEY0_REG, op->key);
	}

	do_crypt(op->src, op->dst, op->len, flags);

	if (op->mode == AES_MODE_CBC)
		_readfield(AES_WRITEIV0_REG, op->iv);

	spin_unlock_irqrestore(&lock, iflags);

	return op->len;
}

/* CRYPTO-API Functions */

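/*
 * Only 128-bit keys are accepted; the key is stored in the per-tfm
 * context and loaded into the engine by geode_aes_crypt().
 */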
static int
geode_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int len)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	if (len != AES_KEY_LENGTH) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	memcpy(op->key, key, len);
	return 0;
}

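/*
 * Single-block cipher entry points: each call runs one ECB block
 * through the engine using the key stored in the tfm context.
 */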
static void
geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	if ((out == NULL) || (in == NULL))
		return;

	op->src = (void *) in;
	op->dst = (void *) out;
	op->mode = AES_MODE_ECB;
	op->flags = 0;
	op->len = AES_MIN_BLOCK_SIZE;
	op->dir = AES_DIR_ENCRYPT;

	geode_aes_crypt(op);
}

static void
geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct geode_aes_op *op = crypto_tfm_ctx(tfm);

	if ((out == NULL) || (in == NULL))
		return;

	op->src = (void *) in;
	op->dst = (void *) out;
	op->mode = AES_MODE_ECB;
	op->flags = 0;
	op->len = AES_MIN_BLOCK_SIZE;
	op->dir = AES_DIR_DECRYPT;

	geode_aes_crypt(op);
}

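/*
 * do_crypt() hands physical addresses straight to the engine, and all
 * of the algorithms below set cra_alignmask to 15 so the crypto layer
 * presents 16-byte aligned buffers.
 */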
static struct crypto_alg geode_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "geode-aes-128",
	.cra_priority		= 300,
	.cra_alignmask		= 15,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_MIN_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct geode_aes_op),
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(geode_alg.cra_list),
	.cra_u			= {
		.cipher = {
			.cia_min_keysize	= AES_KEY_LENGTH,
			.cia_max_keysize	= AES_KEY_LENGTH,
			.cia_setkey		= geode_setkey,
			.cia_encrypt		= geode_encrypt,
			.cia_decrypt		= geode_decrypt
		}
	}
};

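/*
 * CBC blkcipher handlers.  The scatterlists are walked in virtually
 * mapped chunks; the IV is copied into the op before each chunk and
 * written back afterwards so that chaining continues correctly across
 * chunk boundaries.
 */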
static int
geode_cbc_decrypt(struct blkcipher_desc *desc,
		  struct scatterlist *dst, struct scatterlist *src,
		  unsigned int nbytes)
{
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, ret;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		op->dst = walk.dst.virt.addr;
		op->mode = AES_MODE_CBC;
		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
		op->dir = AES_DIR_DECRYPT;

		memcpy(op->iv, walk.iv, AES_IV_LENGTH);

		ret = geode_aes_crypt(op);

		memcpy(walk.iv, op->iv, AES_IV_LENGTH);
		nbytes -= ret;

		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static int
geode_cbc_encrypt(struct blkcipher_desc *desc,
		  struct scatterlist *dst, struct scatterlist *src,
		  unsigned int nbytes)
{
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, ret;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		op->dst = walk.dst.virt.addr;
		op->mode = AES_MODE_CBC;
		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
		op->dir = AES_DIR_ENCRYPT;

		memcpy(op->iv, walk.iv, AES_IV_LENGTH);

		ret = geode_aes_crypt(op);

		/* Propagate the updated IV so the next chunk chains correctly */
		memcpy(walk.iv, op->iv, AES_IV_LENGTH);
		nbytes -= ret;

		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static struct crypto_alg geode_cbc_alg = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-geode-128",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= AES_MIN_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct geode_aes_op),
	.cra_alignmask		= 15,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(geode_cbc_alg.cra_list),
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= AES_KEY_LENGTH,
			.max_keysize	= AES_KEY_LENGTH,
			.setkey		= geode_setkey,
			.encrypt	= geode_cbc_encrypt,
			.decrypt	= geode_cbc_decrypt,
		}
	}
};

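/*
 * ECB blkcipher handlers.  Same walk pattern as the CBC versions, but
 * with no IV handling; each chunk is processed independently.
 */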
static int
geode_ecb_decrypt(struct blkcipher_desc *desc,
		  struct scatterlist *dst, struct scatterlist *src,
		  unsigned int nbytes)
{
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, ret;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		op->dst = walk.dst.virt.addr;
		op->mode = AES_MODE_ECB;
		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
		op->dir = AES_DIR_DECRYPT;

		ret = geode_aes_crypt(op);
		nbytes -= ret;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static int
geode_ecb_encrypt(struct blkcipher_desc *desc,
		  struct scatterlist *dst, struct scatterlist *src,
		  unsigned int nbytes)
{
	struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err, ret;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		op->src = walk.src.virt.addr;
		op->dst = walk.dst.virt.addr;
		op->mode = AES_MODE_ECB;
		op->len = nbytes - (nbytes % AES_MIN_BLOCK_SIZE);
		op->dir = AES_DIR_ENCRYPT;

		ret = geode_aes_crypt(op);
		nbytes -= ret;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}

static struct crypto_alg geode_ecb_alg = {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-geode-128",
	.cra_priority		= 400,
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		= AES_MIN_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct geode_aes_op),
	.cra_alignmask		= 15,
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_list		= LIST_HEAD_INIT(geode_ecb_alg.cra_list),
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= AES_KEY_LENGTH,
			.max_keysize	= AES_KEY_LENGTH,
			.setkey		= geode_setkey,
			.encrypt	= geode_ecb_encrypt,
			.decrypt	= geode_ecb_decrypt,
		}
	}
};

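/*
 * PCI glue.  probe() maps BAR 0 of the AES device, clears any pending
 * engine interrupts and registers the three algorithms; remove()
 * unregisters them and releases the PCI resources.
 */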
static void __devexit
geode_aes_remove(struct pci_dev *dev)
{
	crypto_unregister_alg(&geode_alg);
	crypto_unregister_alg(&geode_ecb_alg);
	crypto_unregister_alg(&geode_cbc_alg);

	pci_iounmap(dev, _iobase);
	_iobase = NULL;

	pci_release_regions(dev);
	pci_disable_device(dev);
}

static int
geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	int ret;

	if ((ret = pci_enable_device(dev)))
		return ret;

	if ((ret = pci_request_regions(dev, "geode-aes-128")))
		goto eenable;

	_iobase = pci_iomap(dev, 0, 0);

	if (_iobase == NULL) {
		ret = -ENOMEM;
		goto erequest;
	}

	spin_lock_init(&lock);

	/* Clear any pending activity */
	iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);

	if ((ret = crypto_register_alg(&geode_alg)))
		goto eiomap;

	if ((ret = crypto_register_alg(&geode_ecb_alg)))
		goto ealg;

	if ((ret = crypto_register_alg(&geode_cbc_alg)))
		goto eecb;

	printk(KERN_NOTICE "geode-aes: GEODE AES engine enabled.\n");
	return 0;

 eecb:
	crypto_unregister_alg(&geode_ecb_alg);

 ealg:
	crypto_unregister_alg(&geode_alg);

 eiomap:
	pci_iounmap(dev, _iobase);

 erequest:
	pci_release_regions(dev);

 eenable:
	pci_disable_device(dev);

	printk(KERN_ERR "geode-aes: GEODE AES initialization failed.\n");
	return ret;
}

static struct pci_device_id geode_aes_tbl[] = {
	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LX_AES, PCI_ANY_ID, PCI_ANY_ID },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, geode_aes_tbl);

static struct pci_driver geode_aes_driver = {
	.name = "Geode LX AES",
	.id_table = geode_aes_tbl,
	.probe = geode_aes_probe,
	.remove = __devexit_p(geode_aes_remove)
};

static int __init
geode_aes_init(void)
{
	return pci_register_driver(&geode_aes_driver);
}

static void __exit
geode_aes_exit(void)
{
	pci_unregister_driver(&geode_aes_driver);
}

MODULE_AUTHOR("Advanced Micro Devices, Inc.");
MODULE_DESCRIPTION("Geode LX Hardware AES driver");
MODULE_LICENSE("GPL");

module_init(geode_aes_init);
module_exit(geode_aes_exit);