/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
 *
 * Copyright (C) 2010 David S. Miller <davem@davemloft.net>
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/sched.h>

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>

#include <asm/hypervisor.h>
#include <asm/mdesc.h>

#include "n2_core.h"

#define DRV_MODULE_NAME		"n2_crypto"
#define DRV_MODULE_VERSION	"0.1"
#define DRV_MODULE_RELDATE	"April 29, 2010"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Niagara2 Crypto driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define N2_CRA_PRIORITY		300

static DEFINE_MUTEX(spu_lock);

struct spu_queue {
	cpumask_t		sharing;
	unsigned long		qhandle;

	spinlock_t		lock;
	u8			q_type;
	void			*q;
	unsigned long		head;
	unsigned long		tail;
	struct list_head	jobs;

	unsigned long		devino;

	char			irq_name[32];
	unsigned int		irq;

	struct list_head	list;
};

static struct spu_queue **cpu_to_cwq;
static struct spu_queue **cpu_to_mau;

static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
{
	if (q->q_type == HV_NCS_QTYPE_MAU) {
		off += MAU_ENTRY_SIZE;
		if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
			off = 0;
	} else {
		off += CWQ_ENTRY_SIZE;
		if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
			off = 0;
	}
	return off;
}

struct n2_request_common {
	struct list_head	entry;
	unsigned int		offset;
};
#define OFFSET_NOT_RUNNING	(~(unsigned int)0)

/* An async job request records the final tail value it used in
 * n2_request_common->offset; test whether that offset falls in the
 * half-open range (old_head, new_head], meaning the hardware has
 * consumed the job's last queue entry.
 */
static inline bool job_finished(struct spu_queue *q, unsigned int offset,
				unsigned long old_head, unsigned long new_head)
{
	if (old_head <= new_head) {
		if (offset > old_head && offset <= new_head)
			return true;
	} else {
		if (offset > old_head || offset <= new_head)
			return true;
	}
	return false;
}
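/* Example: if the head wrapped, advancing from 6 * CWQ_ENTRY_SIZE around
 * past the end of the queue to 2 * CWQ_ENTRY_SIZE, then old_head > new_head
 * and both an offset of 7 * CWQ_ENTRY_SIZE and one of 1 * CWQ_ENTRY_SIZE
 * test as finished, while 4 * CWQ_ENTRY_SIZE does not.
 */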

/* When the HEAD marker is unequal to the actual HEAD, we get
 * a virtual device INO interrupt.  We should process the
 * completed CWQ entries and adjust the HEAD marker to clear
 * the IRQ.
 */
static irqreturn_t cwq_intr(int irq, void *dev_id)
{
	unsigned long off, new_head, hv_ret;
	struct spu_queue *q = dev_id;

	pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
	       smp_processor_id(), q->qhandle);

	spin_lock(&q->lock);

	hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);

	pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
	       smp_processor_id(), new_head, hv_ret);

	for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
		/* XXX ... XXX */
	}

	hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
	if (hv_ret == HV_EOK)
		q->head = new_head;

	spin_unlock(&q->lock);

	return IRQ_HANDLED;
}

static irqreturn_t mau_intr(int irq, void *dev_id)
{
	struct spu_queue *q = dev_id;
	unsigned long head, hv_ret;

	spin_lock(&q->lock);

	pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
	       smp_processor_id(), q->qhandle);

	hv_ret = sun4v_ncs_gethead(q->qhandle, &head);

	pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
	       smp_processor_id(), head, hv_ret);

	sun4v_ncs_sethead_marker(q->qhandle, head);

	spin_unlock(&q->lock);

	return IRQ_HANDLED;
}

static void *spu_queue_next(struct spu_queue *q, void *cur)
{
	return q->q + spu_next_offset(q, cur - q->q);
}

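/* Count free entries in a CWQ.  One slot is always left unused so that
 * head == tail unambiguously means "empty"; without the reserved entry
 * a completely full queue would look identical to an empty one, hence
 * the trailing "- 1" below.
 */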
static int spu_queue_num_free(struct spu_queue *q)
{
	unsigned long head = q->head;
	unsigned long tail = q->tail;
	unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
	unsigned long diff;

	if (head > tail)
		diff = head - tail;
	else
		diff = (end - tail) + head;

	return (diff / CWQ_ENTRY_SIZE) - 1;
}

static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
{
	int avail = spu_queue_num_free(q);

	if (avail >= num_entries)
		return q->q + q->tail;

	return NULL;
}

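/* Advance the hardware tail pointer (via the hypervisor) past the last
 * entry of a submitted job; the SPU consumes descriptors until HEAD
 * catches up to TAIL.
 */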
static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
{
	unsigned long hv_ret, new_tail;

	new_tail = spu_next_offset(q, last - q->q);

	hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
	if (hv_ret == HV_EOK)
		q->tail = new_tail;
	return hv_ret;
}

static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
			     int enc_type, int auth_type,
			     unsigned int hash_len,
			     bool sfas, bool sob, bool eob, bool encrypt,
			     int opcode)
{
	u64 word = (len - 1) & CONTROL_LEN;

	word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
	word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
	word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
	if (sfas)
		word |= CONTROL_STORE_FINAL_AUTH_STATE;
	if (sob)
		word |= CONTROL_START_OF_BLOCK;
	if (eob)
		word |= CONTROL_END_OF_BLOCK;
	if (encrypt)
		word |= CONTROL_ENCRYPT;
	if (hmac_key_len)
		word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
	if (hash_len)
		word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;

	return word;
}

#if 0
static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
{
	if (this_len >= 64 ||
	    qp->head != qp->tail)
		return true;
	return false;
}
#endif

struct n2_base_ctx {
	struct list_head	list;
};

static void n2_base_ctx_init(struct n2_base_ctx *ctx)
{
	INIT_LIST_HEAD(&ctx->list);
}

struct n2_hash_ctx {
	struct n2_base_ctx	base;

	struct crypto_ahash	*fallback;

	/* These next three members must match the layout created by
	 * crypto_init_shash_ops_async.  This allows us to properly
	 * plumb requests we can't do in hardware down to the fallback
	 * operation, providing all of the data structures and layouts
	 * expected by those paths.
	 */
	struct ahash_request	fallback_req;
	struct shash_desc	fallback_desc;
	union {
		struct md5_state	md5;
		struct sha1_state	sha1;
		struct sha256_state	sha256;
	} u;

	unsigned char		hash_key[64];
	unsigned char		keyed_zero_hash[32];
};

static int n2_hash_async_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
	ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&ctx->fallback_req);
}

static int n2_hash_async_update(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
	ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	ctx->fallback_req.nbytes = req->nbytes;
	ctx->fallback_req.src = req->src;

	return crypto_ahash_update(&ctx->fallback_req);
}

static int n2_hash_async_final(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
	ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	ctx->fallback_req.result = req->result;

	return crypto_ahash_final(&ctx->fallback_req);
}

static int n2_hash_async_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
	ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	ctx->fallback_req.nbytes = req->nbytes;
	ctx->fallback_req.src = req->src;
	ctx->fallback_req.result = req->result;

	return crypto_ahash_finup(&ctx->fallback_req);
}
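/* Note that only whole-request digests are offloaded to the SPU; the
 * init/update/final/finup entry points above are plumbed straight into
 * the software fallback tfm.
 */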

static int n2_hash_cra_init(struct crypto_tfm *tfm)
{
	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct crypto_ahash *fallback_tfm;
	int err;

	fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		pr_warning("Fallback driver '%s' could not be loaded!\n",
			   fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}

	ctx->fallback = fallback_tfm;
	return 0;

out:
	return err;
}

static void n2_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);

	crypto_free_ahash(ctx->fallback);
}

static unsigned long wait_for_tail(struct spu_queue *qp)
{
	unsigned long head, hv_ret;

	do {
		hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
		if (hv_ret != HV_EOK) {
			pr_err("Hypervisor error on gethead\n");
			break;
		}
		if (head == qp->tail) {
			qp->head = head;
			break;
		}
	} while (1);
	return hv_ret;
}
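/* Completion is currently synchronous: wait_for_tail() busy-waits until
 * the hardware HEAD catches up to the submitted TAIL.  The interrupt
 * handlers above only acknowledge the IRQ; per the XXX placeholder in
 * cwq_intr(), asynchronous job completion is not yet wired up.
 */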

static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
					      struct cwq_initial_entry *ent)
{
	unsigned long hv_ret = spu_queue_submit(qp, ent);

	if (hv_ret == HV_EOK)
		hv_ret = wait_for_tail(qp);

	return hv_ret;
}

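/* Run a full digest on the SPU.  @hash_loc points at the initialized
 * hash state inside the tfm context; it serves both as the incoming
 * auth IV and as the destination for the final state, which is then
 * copied out to req->result on success.
 */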
static int n2_hash_async_digest(struct ahash_request *req,
				unsigned int auth_type, unsigned int digest_size,
				unsigned int result_size, void *hash_loc)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct cwq_initial_entry *ent;
	struct crypto_hash_walk walk;
	struct spu_queue *qp;
	unsigned long flags;
	int err = -ENODEV;
	int nbytes, cpu;

	/* The total effective length of the operation may not
	 * exceed 2^16.
	 */
	if (unlikely(req->nbytes > (1 << 16))) {
		ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
		ctx->fallback_req.base.flags =
			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
		ctx->fallback_req.nbytes = req->nbytes;
		ctx->fallback_req.src = req->src;
		ctx->fallback_req.result = req->result;

		return crypto_ahash_digest(&ctx->fallback_req);
	}

	n2_base_ctx_init(&ctx->base);

	nbytes = crypto_hash_walk_first(req, &walk);

	cpu = get_cpu();
	qp = cpu_to_cwq[cpu];
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	/* XXX can do better, improve this later by doing a by-hand scatterlist
	 * XXX walk, etc.
	 */
	ent = qp->q + qp->tail;

	ent->control = control_word_base(nbytes, 0, 0,
					 auth_type, digest_size,
					 false, true, false, false,
					 OPCODE_INPLACE_BIT |
					 OPCODE_AUTH_MAC);
	ent->src_addr = __pa(walk.data);
	ent->auth_key_addr = 0UL;
	ent->auth_iv_addr = __pa(hash_loc);
	ent->final_auth_state_addr = 0UL;
	ent->enc_key_addr = 0UL;
	ent->enc_iv_addr = 0UL;
	ent->dest_addr = __pa(hash_loc);

	nbytes = crypto_hash_walk_done(&walk, 0);
	while (nbytes > 0) {
		ent = spu_queue_next(qp, ent);

		ent->control = (nbytes - 1);
		ent->src_addr = __pa(walk.data);
		ent->auth_key_addr = 0UL;
		ent->auth_iv_addr = 0UL;
		ent->final_auth_state_addr = 0UL;
		ent->enc_key_addr = 0UL;
		ent->enc_iv_addr = 0UL;
		ent->dest_addr = 0UL;

		nbytes = crypto_hash_walk_done(&walk, 0);
	}
	ent->control |= CONTROL_END_OF_BLOCK;

	if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
		err = -EINVAL;
	else
		err = 0;

	spin_unlock_irqrestore(&qp->lock, flags);

	if (!err)
		memcpy(req->result, hash_loc, result_size);
out:
	put_cpu();

	return err;
}

static int n2_md5_async_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct md5_state *m = &ctx->u.md5;

	if (unlikely(req->nbytes == 0)) {
		static const char md5_zero[MD5_DIGEST_SIZE] = {
			0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
			0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
		};

		memcpy(req->result, md5_zero, MD5_DIGEST_SIZE);
		return 0;
	}
	m->hash[0] = cpu_to_le32(0x67452301);
	m->hash[1] = cpu_to_le32(0xefcdab89);
	m->hash[2] = cpu_to_le32(0x98badcfe);
	m->hash[3] = cpu_to_le32(0x10325476);

	return n2_hash_async_digest(req, AUTH_TYPE_MD5,
				    MD5_DIGEST_SIZE, MD5_DIGEST_SIZE,
				    m->hash);
}

static int n2_sha1_async_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct sha1_state *s = &ctx->u.sha1;

	if (unlikely(req->nbytes == 0)) {
		static const char sha1_zero[SHA1_DIGEST_SIZE] = {
			0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32,
			0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8,
			0x07, 0x09
		};

		memcpy(req->result, sha1_zero, SHA1_DIGEST_SIZE);
		return 0;
	}
	s->state[0] = SHA1_H0;
	s->state[1] = SHA1_H1;
	s->state[2] = SHA1_H2;
	s->state[3] = SHA1_H3;
	s->state[4] = SHA1_H4;

	return n2_hash_async_digest(req, AUTH_TYPE_SHA1,
				    SHA1_DIGEST_SIZE, SHA1_DIGEST_SIZE,
				    s->state);
}

static int n2_sha256_async_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct sha256_state *s = &ctx->u.sha256;

	if (req->nbytes == 0) {
		static const char sha256_zero[SHA256_DIGEST_SIZE] = {
			0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a,
			0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae,
			0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99,
			0x1b, 0x78, 0x52, 0xb8, 0x55
		};

		memcpy(req->result, sha256_zero, SHA256_DIGEST_SIZE);
		return 0;
	}
	s->state[0] = SHA256_H0;
	s->state[1] = SHA256_H1;
	s->state[2] = SHA256_H2;
	s->state[3] = SHA256_H3;
	s->state[4] = SHA256_H4;
	s->state[5] = SHA256_H5;
	s->state[6] = SHA256_H6;
	s->state[7] = SHA256_H7;

	return n2_hash_async_digest(req, AUTH_TYPE_SHA256,
				    SHA256_DIGEST_SIZE, SHA256_DIGEST_SIZE,
				    s->state);
}

static int n2_sha224_async_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct sha256_state *s = &ctx->u.sha256;

	if (req->nbytes == 0) {
		static const char sha224_zero[SHA224_DIGEST_SIZE] = {
			0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
			0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
			0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
			0x2f
		};

		memcpy(req->result, sha224_zero, SHA224_DIGEST_SIZE);
		return 0;
	}
	s->state[0] = SHA224_H0;
	s->state[1] = SHA224_H1;
	s->state[2] = SHA224_H2;
	s->state[3] = SHA224_H3;
	s->state[4] = SHA224_H4;
	s->state[5] = SHA224_H5;
	s->state[6] = SHA224_H6;
	s->state[7] = SHA224_H7;

	return n2_hash_async_digest(req, AUTH_TYPE_SHA256,
				    SHA256_DIGEST_SIZE, SHA224_DIGEST_SIZE,
				    s->state);
}

struct n2_cipher_context {
	int			key_len;
	int			enc_type;
	union {
		u8		aes[AES_MAX_KEY_SIZE];
		u8		des[DES_KEY_SIZE];
		u8		des3[3 * DES_KEY_SIZE];
		u8		arc4[258];	/* S-box, X, Y */
	} key;
};

#define N2_CHUNK_ARR_LEN	16

struct n2_crypto_chunk {
	struct list_head	entry;
	unsigned long		iv_paddr : 44;
	unsigned long		arr_len : 20;
	unsigned long		dest_paddr;
	unsigned long		dest_final;
	struct {
		unsigned long	src_paddr : 44;
		unsigned long	src_len : 20;
	} arr[N2_CHUNK_ARR_LEN];
};

struct n2_request_context {
	struct ablkcipher_walk	walk;
	struct list_head	chunk_list;
	struct n2_crypto_chunk	chunk;
	u8			temp_iv[16];
};

/* The SPU allows some level of flexibility for partial cipher blocks
 * being specified in a descriptor.
 *
 * It merely requires that every descriptor's length field is at least
 * as large as the cipher block size.  This means that a cipher block
 * can span at most 2 descriptors.  However, this does not allow a
 * partial block to span into the final descriptor as that would
 * violate the rule (since every descriptor's length must be at least
 * the block size).  So, for example, assuming an 8 byte block size:
 *
 *	0xe --> 0xa --> 0x8
 *
 * is a valid length sequence, whereas:
 *
 *	0xe --> 0xb --> 0x7
 *
 * is not a valid sequence.
 */

struct n2_cipher_alg {
	struct list_head	entry;
	u8			enc_type;
	struct crypto_alg	alg;
};

static inline struct n2_cipher_alg *n2_cipher_alg(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;

	return container_of(alg, struct n2_cipher_alg, alg);
}

struct n2_cipher_request_context {
	struct ablkcipher_walk	walk;
};

static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			 unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);

	ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->enc_type |= ENC_TYPE_ALG_AES128;
		break;
	case AES_KEYSIZE_192:
		ctx->enc_type |= ENC_TYPE_ALG_AES192;
		break;
	case AES_KEYSIZE_256:
		ctx->enc_type |= ENC_TYPE_ALG_AES256;
		break;
	default:
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->key_len = keylen;
	memcpy(ctx->key.aes, key, keylen);
	return 0;
}

static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			 unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
	u32 tmp[DES_EXPKEY_WORDS];
	int err;

	ctx->enc_type = n2alg->enc_type;

	if (keylen != DES_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	err = des_ekey(tmp, key);
	if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
		return -EINVAL;
	}

	ctx->key_len = keylen;
	memcpy(ctx->key.des, key, keylen);
	return 0;
}

static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			  unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);

	ctx->enc_type = n2alg->enc_type;

	if (keylen != (3 * DES_KEY_SIZE)) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	ctx->key_len = keylen;
	memcpy(ctx->key.des3, key, keylen);
	return 0;
}

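/* The SPU is handed the expanded RC4 state (the 256-byte S-box plus the
 * X and Y indices) through enc_key_addr rather than the raw key, so the
 * standard RC4 key-scheduling algorithm is run here at setkey time.
 */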
static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			  unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
	u8 *s = ctx->key.arc4;
	u8 *x = s + 256;
	u8 *y = x + 1;
	int i, j, k;

	ctx->enc_type = n2alg->enc_type;

	j = k = 0;
	*x = 0;
	*y = 0;
	for (i = 0; i < 256; i++)
		s[i] = i;
	for (i = 0; i < 256; i++) {
		u8 a = s[i];
		j = (j + key[k] + a) & 0xff;
		s[i] = s[j];
		s[j] = a;
		if (++k >= keylen)
			k = 0;
	}

	return 0;
}

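/* Round a segment length down to a whole number of cipher blocks and
 * cap it at the 2^16 byte descriptor limit.  E.g. with an 8-byte block
 * size, 100 bytes becomes 96, and anything above 65536 is clamped to
 * 65536.
 */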
static inline int cipher_descriptor_len(int nbytes, unsigned int block_size)
{
	int this_len = nbytes;

	this_len -= (nbytes & (block_size - 1));
	return this_len > (1 << 16) ? (1 << 16) : this_len;
}

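/* Emit the CWQ descriptors for one chunk: a fully-populated initial
 * entry for the first scatter element, followed by extension entries
 * that carry only a length and a source address.
 */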
static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp,
			    struct spu_queue *qp, bool encrypt)
{
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct cwq_initial_entry *ent;
	bool in_place;
	int i;

	ent = spu_queue_alloc(qp, cp->arr_len);
	if (!ent) {
		pr_info("queue_alloc() of %d fails\n",
			cp->arr_len);
		return -EBUSY;
	}

	in_place = (cp->dest_paddr == cp->arr[0].src_paddr);

	ent->control = control_word_base(cp->arr[0].src_len,
					 0, ctx->enc_type, 0, 0,
					 false, true, false, encrypt,
					 OPCODE_ENCRYPT |
					 (in_place ? OPCODE_INPLACE_BIT : 0));
	ent->src_addr = cp->arr[0].src_paddr;
	ent->auth_key_addr = 0UL;
	ent->auth_iv_addr = 0UL;
	ent->final_auth_state_addr = 0UL;
	ent->enc_key_addr = __pa(&ctx->key);
	ent->enc_iv_addr = cp->iv_paddr;
	ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);

	for (i = 1; i < cp->arr_len; i++) {
		ent = spu_queue_next(qp, ent);

		ent->control = cp->arr[i].src_len - 1;
		ent->src_addr = cp->arr[i].src_paddr;
		ent->auth_key_addr = 0UL;
		ent->auth_iv_addr = 0UL;
		ent->final_auth_state_addr = 0UL;
		ent->enc_key_addr = 0UL;
		ent->enc_iv_addr = 0UL;
		ent->dest_addr = 0UL;
	}
	ent->control |= CONTROL_END_OF_BLOCK;

	return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
}

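/* Split the request into chunks the hardware can take in one job.  A
 * new chunk is started whenever the in-place property changes, an
 * out-of-place destination stops being physically contiguous, the
 * per-chunk scatter array fills up, or the 2^16 byte total-length
 * limit would be exceeded.
 */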
static int n2_compute_chunks(struct ablkcipher_request *req)
{
	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
	struct ablkcipher_walk *walk = &rctx->walk;
	struct n2_crypto_chunk *chunk;
	unsigned long dest_prev;
	unsigned int tot_len;
	bool prev_in_place;
	int err, nbytes;

	ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes);
	err = ablkcipher_walk_phys(req, walk);
	if (err)
		return err;

	INIT_LIST_HEAD(&rctx->chunk_list);

	chunk = &rctx->chunk;
	INIT_LIST_HEAD(&chunk->entry);

	chunk->iv_paddr = 0UL;
	chunk->arr_len = 0;
	chunk->dest_paddr = 0UL;

	prev_in_place = false;
	dest_prev = ~0UL;
	tot_len = 0;

	while ((nbytes = walk->nbytes) != 0) {
		unsigned long dest_paddr, src_paddr;
		bool in_place;
		int this_len;

		src_paddr = (page_to_phys(walk->src.page) +
			     walk->src.offset);
		dest_paddr = (page_to_phys(walk->dst.page) +
			      walk->dst.offset);
		in_place = (src_paddr == dest_paddr);
		this_len = cipher_descriptor_len(nbytes, walk->blocksize);

		if (chunk->arr_len != 0) {
			if (in_place != prev_in_place ||
			    (!prev_in_place &&
			     dest_paddr != dest_prev) ||
			    chunk->arr_len == N2_CHUNK_ARR_LEN ||
			    tot_len + this_len > (1 << 16)) {
				chunk->dest_final = dest_prev;
				list_add_tail(&chunk->entry,
					      &rctx->chunk_list);
				chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
				if (!chunk) {
					err = -ENOMEM;
					break;
				}
				INIT_LIST_HEAD(&chunk->entry);
			}
		}
		if (chunk->arr_len == 0) {
			chunk->dest_paddr = dest_paddr;
			tot_len = 0;
		}
		chunk->arr[chunk->arr_len].src_paddr = src_paddr;
		chunk->arr[chunk->arr_len].src_len = this_len;
		chunk->arr_len++;

		dest_prev = dest_paddr + this_len;
		prev_in_place = in_place;
		tot_len += this_len;

		err = ablkcipher_walk_done(req, walk, nbytes - this_len);
		if (err)
			break;
	}
	if (!err && chunk->arr_len != 0) {
		chunk->dest_final = dest_prev;
		list_add_tail(&chunk->entry, &rctx->chunk_list);
	}

	return err;
}

static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv)
{
	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
	struct n2_crypto_chunk *c, *tmp;

	if (final_iv)
		memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);

	ablkcipher_walk_complete(&rctx->walk);
	list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
		list_del(&c->entry);
		if (unlikely(c != &rctx->chunk))
			kfree(c);
	}
}

static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt)
{
	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
	struct crypto_tfm *tfm = req->base.tfm;
	int err = n2_compute_chunks(req);
	struct n2_crypto_chunk *c, *tmp;
	unsigned long flags, hv_ret;
	struct spu_queue *qp;

	if (err)
		return err;

	qp = cpu_to_cwq[get_cpu()];
	err = -ENODEV;
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
		err = __n2_crypt_chunk(tfm, c, qp, encrypt);
		if (err)
			break;
		list_del(&c->entry);
		if (unlikely(c != &rctx->chunk))
			kfree(c);
	}
	if (!err) {
		hv_ret = wait_for_tail(qp);
		if (hv_ret != HV_EOK)
			err = -EINVAL;
	}

	spin_unlock_irqrestore(&qp->lock, flags);

out:
	put_cpu();

	n2_chunk_complete(req, NULL);
	return err;
}

static int n2_encrypt_ecb(struct ablkcipher_request *req)
{
	return n2_do_ecb(req, true);
}

static int n2_decrypt_ecb(struct ablkcipher_request *req)
{
	return n2_do_ecb(req, false);
}

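/* For encryption, each chunk's IV is simply the last cipher block of
 * the previous chunk, so the list is processed in order.  For CBC
 * decryption the IV of a chunk is the last *ciphertext* block of the
 * previous chunk, which an in-place operation would already have
 * overwritten; the list is therefore walked in reverse, with the
 * request's final IV snapshotted into temp_iv beforehand.
 */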
static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt)
{
	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned long flags, hv_ret, iv_paddr;
	int err = n2_compute_chunks(req);
	struct n2_crypto_chunk *c, *tmp;
	struct spu_queue *qp;
	void *final_iv_addr;

	final_iv_addr = NULL;

	if (err)
		return err;

	qp = cpu_to_cwq[get_cpu()];
	err = -ENODEV;
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	if (encrypt) {
		iv_paddr = __pa(rctx->walk.iv);
		list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
					 entry) {
			c->iv_paddr = iv_paddr;
			err = __n2_crypt_chunk(tfm, c, qp, true);
			if (err)
				break;
			iv_paddr = c->dest_final - rctx->walk.blocksize;
			list_del(&c->entry);
			if (unlikely(c != &rctx->chunk))
				kfree(c);
		}
		final_iv_addr = __va(iv_paddr);
	} else {
		list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
						 entry) {
			if (c == &rctx->chunk) {
				iv_paddr = __pa(rctx->walk.iv);
			} else {
				iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
					    tmp->arr[tmp->arr_len-1].src_len -
					    rctx->walk.blocksize);
			}
			if (!final_iv_addr) {
				unsigned long pa;

				pa = (c->arr[c->arr_len-1].src_paddr +
				      c->arr[c->arr_len-1].src_len -
				      rctx->walk.blocksize);
				final_iv_addr = rctx->temp_iv;
				memcpy(rctx->temp_iv, __va(pa),
				       rctx->walk.blocksize);
			}
			c->iv_paddr = iv_paddr;
			err = __n2_crypt_chunk(tfm, c, qp, false);
			if (err)
				break;
			list_del(&c->entry);
			if (unlikely(c != &rctx->chunk))
				kfree(c);
		}
	}
	if (!err) {
		hv_ret = wait_for_tail(qp);
		if (hv_ret != HV_EOK)
			err = -EINVAL;
	}

	spin_unlock_irqrestore(&qp->lock, flags);

out:
	put_cpu();

	n2_chunk_complete(req, err ? NULL : final_iv_addr);
	return err;
}

static int n2_encrypt_chaining(struct ablkcipher_request *req)
{
	return n2_do_chaining(req, true);
}

static int n2_decrypt_chaining(struct ablkcipher_request *req)
{
	return n2_do_chaining(req, false);
}

struct n2_cipher_tmpl {
	const char		*name;
	const char		*drv_name;
	u8			block_size;
	u8			enc_type;
	struct ablkcipher_alg	ablkcipher;
};

static const struct n2_cipher_tmpl cipher_tmpls[] = {
	/* ARC4: only ECB is supported (chaining bits ignored) */
	{	.name		= "ecb(arc4)",
		.drv_name	= "ecb-arc4",
		.block_size	= 1,
		.enc_type	= (ENC_TYPE_ALG_RC4_STREAM |
				   ENC_TYPE_CHAINING_ECB),
		.ablkcipher	= {
			.min_keysize	= 1,
			.max_keysize	= 256,
			.setkey		= n2_arc4_setkey,
			.encrypt	= n2_encrypt_ecb,
			.decrypt	= n2_decrypt_ecb,
		},
	},

	/* DES: ECB, CBC and CFB are supported */
1079 | { .name = "ecb(des)", | |
1080 | .drv_name = "ecb-des", | |
1081 | .block_size = DES_BLOCK_SIZE, | |
1082 | .enc_type = (ENC_TYPE_ALG_DES | | |
1083 | ENC_TYPE_CHAINING_ECB), | |
1084 | .ablkcipher = { | |
1085 | .min_keysize = DES_KEY_SIZE, | |
1086 | .max_keysize = DES_KEY_SIZE, | |
1087 | .setkey = n2_des_setkey, | |
1088 | .encrypt = n2_encrypt_ecb, | |
1089 | .decrypt = n2_decrypt_ecb, | |
1090 | }, | |
1091 | }, | |
1092 | { .name = "cbc(des)", | |
1093 | .drv_name = "cbc-des", | |
1094 | .block_size = DES_BLOCK_SIZE, | |
1095 | .enc_type = (ENC_TYPE_ALG_DES | | |
1096 | ENC_TYPE_CHAINING_CBC), | |
1097 | .ablkcipher = { | |
1098 | .ivsize = DES_BLOCK_SIZE, | |
1099 | .min_keysize = DES_KEY_SIZE, | |
1100 | .max_keysize = DES_KEY_SIZE, | |
1101 | .setkey = n2_des_setkey, | |
1102 | .encrypt = n2_encrypt_chaining, | |
1103 | .decrypt = n2_decrypt_chaining, | |
1104 | }, | |
1105 | }, | |
1106 | { .name = "cfb(des)", | |
1107 | .drv_name = "cfb-des", | |
1108 | .block_size = DES_BLOCK_SIZE, | |
1109 | .enc_type = (ENC_TYPE_ALG_DES | | |
1110 | ENC_TYPE_CHAINING_CFB), | |
1111 | .ablkcipher = { | |
1112 | .min_keysize = DES_KEY_SIZE, | |
1113 | .max_keysize = DES_KEY_SIZE, | |
1114 | .setkey = n2_des_setkey, | |
1115 | .encrypt = n2_encrypt_chaining, | |
1116 | .decrypt = n2_decrypt_chaining, | |
1117 | }, | |
1118 | }, | |
1119 | ||
	/* 3DES: ECB, CBC and CFB are supported */
1121 | { .name = "ecb(des3_ede)", | |
1122 | .drv_name = "ecb-3des", | |
1123 | .block_size = DES_BLOCK_SIZE, | |
1124 | .enc_type = (ENC_TYPE_ALG_3DES | | |
1125 | ENC_TYPE_CHAINING_ECB), | |
1126 | .ablkcipher = { | |
1127 | .min_keysize = 3 * DES_KEY_SIZE, | |
1128 | .max_keysize = 3 * DES_KEY_SIZE, | |
1129 | .setkey = n2_3des_setkey, | |
1130 | .encrypt = n2_encrypt_ecb, | |
1131 | .decrypt = n2_decrypt_ecb, | |
1132 | }, | |
1133 | }, | |
1134 | { .name = "cbc(des3_ede)", | |
1135 | .drv_name = "cbc-3des", | |
1136 | .block_size = DES_BLOCK_SIZE, | |
1137 | .enc_type = (ENC_TYPE_ALG_3DES | | |
1138 | ENC_TYPE_CHAINING_CBC), | |
1139 | .ablkcipher = { | |
1140 | .ivsize = DES_BLOCK_SIZE, | |
1141 | .min_keysize = 3 * DES_KEY_SIZE, | |
1142 | .max_keysize = 3 * DES_KEY_SIZE, | |
1143 | .setkey = n2_3des_setkey, | |
1144 | .encrypt = n2_encrypt_chaining, | |
1145 | .decrypt = n2_decrypt_chaining, | |
1146 | }, | |
1147 | }, | |
1148 | { .name = "cfb(des3_ede)", | |
1149 | .drv_name = "cfb-3des", | |
1150 | .block_size = DES_BLOCK_SIZE, | |
1151 | .enc_type = (ENC_TYPE_ALG_3DES | | |
1152 | ENC_TYPE_CHAINING_CFB), | |
1153 | .ablkcipher = { | |
1154 | .min_keysize = 3 * DES_KEY_SIZE, | |
1155 | .max_keysize = 3 * DES_KEY_SIZE, | |
1156 | .setkey = n2_3des_setkey, | |
1157 | .encrypt = n2_encrypt_chaining, | |
1158 | .decrypt = n2_decrypt_chaining, | |
1159 | }, | |
1160 | }, | |
	/* AES: ECB, CBC and CTR are supported.  CTR is its own inverse,
	 * so decrypt reuses the encrypt path below.
	 */
1162 | { .name = "ecb(aes)", | |
1163 | .drv_name = "ecb-aes", | |
1164 | .block_size = AES_BLOCK_SIZE, | |
1165 | .enc_type = (ENC_TYPE_ALG_AES128 | | |
1166 | ENC_TYPE_CHAINING_ECB), | |
1167 | .ablkcipher = { | |
1168 | .min_keysize = AES_MIN_KEY_SIZE, | |
1169 | .max_keysize = AES_MAX_KEY_SIZE, | |
1170 | .setkey = n2_aes_setkey, | |
1171 | .encrypt = n2_encrypt_ecb, | |
1172 | .decrypt = n2_decrypt_ecb, | |
1173 | }, | |
1174 | }, | |
1175 | { .name = "cbc(aes)", | |
1176 | .drv_name = "cbc-aes", | |
1177 | .block_size = AES_BLOCK_SIZE, | |
1178 | .enc_type = (ENC_TYPE_ALG_AES128 | | |
1179 | ENC_TYPE_CHAINING_CBC), | |
1180 | .ablkcipher = { | |
1181 | .ivsize = AES_BLOCK_SIZE, | |
1182 | .min_keysize = AES_MIN_KEY_SIZE, | |
1183 | .max_keysize = AES_MAX_KEY_SIZE, | |
1184 | .setkey = n2_aes_setkey, | |
1185 | .encrypt = n2_encrypt_chaining, | |
1186 | .decrypt = n2_decrypt_chaining, | |
1187 | }, | |
1188 | }, | |
1189 | { .name = "ctr(aes)", | |
1190 | .drv_name = "ctr-aes", | |
1191 | .block_size = AES_BLOCK_SIZE, | |
1192 | .enc_type = (ENC_TYPE_ALG_AES128 | | |
1193 | ENC_TYPE_CHAINING_COUNTER), | |
1194 | .ablkcipher = { | |
1195 | .ivsize = AES_BLOCK_SIZE, | |
1196 | .min_keysize = AES_MIN_KEY_SIZE, | |
1197 | .max_keysize = AES_MAX_KEY_SIZE, | |
1198 | .setkey = n2_aes_setkey, | |
1199 | .encrypt = n2_encrypt_chaining, | |
1200 | .decrypt = n2_encrypt_chaining, | |
1201 | }, | |
1202 | }, | |
1203 | ||
1204 | }; | |
#define NUM_CIPHER_TMPLS ARRAY_SIZE(cipher_tmpls)

static LIST_HEAD(cipher_algs);

struct n2_hash_tmpl {
	const char	*name;
	int		(*digest)(struct ahash_request *req);
	u8		digest_size;
	u8		block_size;
};
static const struct n2_hash_tmpl hash_tmpls[] = {
	{ .name		= "md5",
	  .digest	= n2_md5_async_digest,
	  .digest_size	= MD5_DIGEST_SIZE,
	  .block_size	= MD5_HMAC_BLOCK_SIZE },
	{ .name		= "sha1",
	  .digest	= n2_sha1_async_digest,
	  .digest_size	= SHA1_DIGEST_SIZE,
	  .block_size	= SHA1_BLOCK_SIZE },
	{ .name		= "sha256",
	  .digest	= n2_sha256_async_digest,
	  .digest_size	= SHA256_DIGEST_SIZE,
	  .block_size	= SHA256_BLOCK_SIZE },
	{ .name		= "sha224",
	  .digest	= n2_sha224_async_digest,
	  .digest_size	= SHA224_DIGEST_SIZE,
	  .block_size	= SHA224_BLOCK_SIZE },
};
#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)

struct n2_ahash_alg {
	struct list_head	entry;
	struct ahash_alg	alg;
};
static LIST_HEAD(ahash_algs);

static int algs_registered;

static void __n2_unregister_algs(void)
{
	struct n2_cipher_alg *cipher, *cipher_tmp;
	struct n2_ahash_alg *alg, *alg_tmp;

	list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) {
		crypto_unregister_alg(&cipher->alg);
		list_del(&cipher->entry);
		kfree(cipher);
	}
	list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
		crypto_unregister_ahash(&alg->alg);
		list_del(&alg->entry);
		kfree(alg);
	}
}

static int n2_cipher_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct n2_request_context);
	return 0;
}

static int __devinit __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
{
	struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
	struct crypto_alg *alg;
	int err;

	if (!p)
		return -ENOMEM;

	alg = &p->alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
	alg->cra_priority = N2_CRA_PRIORITY;
	alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
	alg->cra_blocksize = tmpl->block_size;
	p->enc_type = tmpl->enc_type;
	alg->cra_ctxsize = sizeof(struct n2_cipher_context);
	alg->cra_type = &crypto_ablkcipher_type;
	alg->cra_u.ablkcipher = tmpl->ablkcipher;
	alg->cra_init = n2_cipher_cra_init;
	alg->cra_module = THIS_MODULE;

	list_add(&p->entry, &cipher_algs);
	err = crypto_register_alg(alg);
	if (err) {
		list_del(&p->entry);
		kfree(p);
	}
	return err;
}

static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
{
	struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
	struct hash_alg_common *halg;
	struct crypto_alg *base;
	struct ahash_alg *ahash;
	int err;

	if (!p)
		return -ENOMEM;

	ahash = &p->alg;
	ahash->init = n2_hash_async_init;
	ahash->update = n2_hash_async_update;
	ahash->final = n2_hash_async_final;
	ahash->finup = n2_hash_async_finup;
	ahash->digest = tmpl->digest;

	halg = &ahash->halg;
	halg->digestsize = tmpl->digest_size;

	base = &halg->base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
	base->cra_priority = N2_CRA_PRIORITY;
	base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK;
	base->cra_blocksize = tmpl->block_size;
	base->cra_ctxsize = sizeof(struct n2_hash_ctx);
	base->cra_module = THIS_MODULE;
	base->cra_init = n2_hash_cra_init;
	base->cra_exit = n2_hash_cra_exit;

	list_add(&p->entry, &ahash_algs);
	err = crypto_register_ahash(ahash);
	if (err) {
		list_del(&p->entry);
		kfree(p);
	}
	return err;
}

static int __devinit n2_register_algs(void)
{
	int i, err = 0;

	mutex_lock(&spu_lock);
	if (algs_registered++)
		goto out;

	for (i = 0; i < NUM_HASH_TMPLS; i++) {
		err = __n2_register_one_ahash(&hash_tmpls[i]);
		if (err) {
			__n2_unregister_algs();
			goto out;
		}
	}
	for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
		err = __n2_register_one_cipher(&cipher_tmpls[i]);
		if (err) {
			__n2_unregister_algs();
			goto out;
		}
	}

out:
	mutex_unlock(&spu_lock);
	return err;
}

static void __exit n2_unregister_algs(void)
{
	mutex_lock(&spu_lock);
	if (!--algs_registered)
		__n2_unregister_algs();
	mutex_unlock(&spu_lock);
}

/* To map CWQ queues to interrupt sources, the hypervisor API provides
 * a devino.  This isn't very useful to us because all of the
 * interrupts listed in the of_device node have been translated to
 * Linux virtual IRQ cookie numbers.
 *
 * So we have to back-translate, going through the 'intr' and 'ino'
 * property tables of the n2cp MDESC node, matching it with the OF
 * 'interrupts' property entries, in order to figure out which
 * devino goes to which already-translated IRQ.
 */
static int find_devino_index(struct of_device *dev, struct spu_mdesc_info *ip,
			     unsigned long dev_ino)
{
	const unsigned int *dev_intrs;
	unsigned int intr;
	int i;

	for (i = 0; i < ip->num_intrs; i++) {
		if (ip->ino_table[i].ino == dev_ino)
			break;
	}
	if (i == ip->num_intrs)
		return -ENODEV;

	intr = ip->ino_table[i].intr;

	dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
	if (!dev_intrs)
		return -ENODEV;

	for (i = 0; i < dev->num_irqs; i++) {
		if (dev_intrs[i] == intr)
			return i;
	}

	return -ENODEV;
}

static int spu_map_ino(struct of_device *dev, struct spu_mdesc_info *ip,
		       const char *irq_name, struct spu_queue *p,
		       irq_handler_t handler)
{
	unsigned long herr;
	int index;

	herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
	if (herr)
		return -EINVAL;

	index = find_devino_index(dev, ip, p->devino);
	if (index < 0)
		return index;

	p->irq = dev->irqs[index];

	sprintf(p->irq_name, "%s-%d", irq_name, index);

	return request_irq(p->irq, handler, IRQF_SAMPLE_RANDOM,
			   p->irq_name, p);
}

static struct kmem_cache *queue_cache[2];

static void *new_queue(unsigned long q_type)
{
	return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
}

static void free_queue(void *p, unsigned long q_type)
{
	kmem_cache_free(queue_cache[q_type - 1], p);
}

static int queue_cache_init(void)
{
	if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
		queue_cache[HV_NCS_QTYPE_MAU - 1] =
			kmem_cache_create("mau_queue",
					  (MAU_NUM_ENTRIES *
					   MAU_ENTRY_SIZE),
					  MAU_ENTRY_SIZE, 0, NULL);
	if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
		return -ENOMEM;

	if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
		queue_cache[HV_NCS_QTYPE_CWQ - 1] =
			kmem_cache_create("cwq_queue",
					  (CWQ_NUM_ENTRIES *
					   CWQ_ENTRY_SIZE),
					  CWQ_ENTRY_SIZE, 0, NULL);
	if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
		kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
		return -ENOMEM;
	}
	return 0;
}

static void queue_cache_destroy(void)
{
	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
}

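/* Configuring the queue has to be done from a CPU that actually shares
 * the unit, so the current thread is temporarily bound to the queue's
 * sharing mask around the sun4v_ncs_qconf() hypercall.
 */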
static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
{
	cpumask_var_t old_allowed;
	unsigned long hv_ret;

	if (cpumask_empty(&p->sharing))
		return -EINVAL;

	if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(old_allowed, &current->cpus_allowed);

	set_cpus_allowed_ptr(current, &p->sharing);

	hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
				 CWQ_NUM_ENTRIES, &p->qhandle);
	if (!hv_ret)
		sun4v_ncs_sethead_marker(p->qhandle, 0);

	set_cpus_allowed_ptr(current, old_allowed);

	free_cpumask_var(old_allowed);

	return (hv_ret ? -EINVAL : 0);
}

static int spu_queue_setup(struct spu_queue *p)
{
	int err;

	p->q = new_queue(p->q_type);
	if (!p->q)
		return -ENOMEM;

	err = spu_queue_register(p, p->q_type);
	if (err) {
		free_queue(p->q, p->q_type);
		p->q = NULL;
	}

	return err;
}

static void spu_queue_destroy(struct spu_queue *p)
{
	unsigned long hv_ret;

	if (!p->q)
		return;

	hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);

	if (!hv_ret)
		free_queue(p->q, p->q_type);
}

static void spu_list_destroy(struct list_head *list)
{
	struct spu_queue *p, *n;

	list_for_each_entry_safe(p, n, list, list) {
		int i;

		for (i = 0; i < NR_CPUS; i++) {
			if (cpu_to_cwq[i] == p)
				cpu_to_cwq[i] = NULL;
			if (cpu_to_mau[i] == p)
				cpu_to_mau[i] = NULL;
		}

		if (p->irq) {
			free_irq(p->irq, p);
			p->irq = 0;
		}
		spu_queue_destroy(p);
		list_del(&p->list);
		kfree(p);
	}
}

/* Walk the backward arcs of a CWQ 'exec-unit' node,
 * gathering cpu membership information.
 */
static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
			       struct of_device *dev,
			       u64 node, struct spu_queue *p,
			       struct spu_queue **table)
{
	u64 arc;

	mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
		u64 tgt = mdesc_arc_target(mdesc, arc);
		const char *name = mdesc_node_name(mdesc, tgt);
		const u64 *id;

		if (strcmp(name, "cpu"))
			continue;
		id = mdesc_get_property(mdesc, tgt, "id", NULL);
		if (table[*id] != NULL) {
			dev_err(&dev->dev, "%s: SPU cpu slot already set.\n",
				dev->dev.of_node->full_name);
			return -EINVAL;
		}
		cpu_set(*id, p->sharing);
		table[*id] = p;
	}
	return 0;
}

/* Process an 'exec-unit' MDESC node of the requested type ('cwq' or 'mau'). */
static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
			    struct of_device *dev, struct mdesc_handle *mdesc,
			    u64 node, const char *iname, unsigned long q_type,
			    irq_handler_t handler, struct spu_queue **table)
{
	struct spu_queue *p;
	int err;

	p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
	if (!p) {
		dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n",
			dev->dev.of_node->full_name);
		return -ENOMEM;
	}

	cpus_clear(p->sharing);
	spin_lock_init(&p->lock);
	p->q_type = q_type;
	INIT_LIST_HEAD(&p->jobs);
	list_add(&p->list, list);

	err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
	if (err)
		return err;

	err = spu_queue_setup(p);
	if (err)
		return err;

	return spu_map_ino(dev, ip, iname, p, handler);
}

static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct of_device *dev,
			  struct spu_mdesc_info *ip, struct list_head *list,
			  const char *exec_name, unsigned long q_type,
			  irq_handler_t handler, struct spu_queue **table)
{
	int err = 0;
	u64 node;

	mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
		const char *type;

		type = mdesc_get_property(mdesc, node, "type", NULL);
		if (!type || strcmp(type, exec_name))
			continue;

		err = handle_exec_unit(ip, list, dev, mdesc, node,
				       exec_name, q_type, handler, table);
		if (err) {
			spu_list_destroy(list);
			break;
		}
	}

	return err;
}

static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node,
				   struct spu_mdesc_info *ip)
{
	const u64 *intr, *ino;
	int intr_len, ino_len;
	int i;

	intr = mdesc_get_property(mdesc, node, "intr", &intr_len);
	if (!intr)
		return -ENODEV;

	ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
	if (!ino)
		return -ENODEV;
1659 | ||
1660 | if (intr_len != ino_len) | |
1661 | return -EINVAL; | |
1662 | ||
1663 | ip->num_intrs = intr_len / sizeof(u64); | |
1664 | ip->ino_table = kzalloc((sizeof(struct ino_blob) * | |
1665 | ip->num_intrs), | |
1666 | GFP_KERNEL); | |
1667 | if (!ip->ino_table) | |
1668 | return -ENOMEM; | |
1669 | ||
1670 | for (i = 0; i < ip->num_intrs; i++) { | |
1671 | struct ino_blob *b = &ip->ino_table[i]; | |
1672 | b->intr = intr[i]; | |
1673 | b->ino = ino[i]; | |
1674 | } | |
1675 | ||
1676 | return 0; | |
1677 | } | |
1678 | ||
1679 | static int __devinit grab_mdesc_irq_props(struct mdesc_handle *mdesc, | |
1680 | struct of_device *dev, | |
1681 | struct spu_mdesc_info *ip, | |
1682 | const char *node_name) | |
1683 | { | |
1684 | const unsigned int *reg; | |
1685 | u64 node; | |
1686 | ||
ff6c7341 | 1687 | reg = of_get_property(dev->dev.of_node, "reg", NULL); |
0a625fd2 DM |
1688 | if (!reg) |
1689 | return -ENODEV; | |
1690 | ||
1691 | mdesc_for_each_node_by_name(mdesc, node, "virtual-device") { | |
1692 | const char *name; | |
1693 | const u64 *chdl; | |
1694 | ||
1695 | name = mdesc_get_property(mdesc, node, "name", NULL); | |
1696 | if (!name || strcmp(name, node_name)) | |
1697 | continue; | |
1698 | chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL); | |
1699 | if (!chdl || (*chdl != *reg)) | |
1700 | continue; | |
1701 | ip->cfg_handle = *chdl; | |
1702 | return get_irq_props(mdesc, node, ip); | |
1703 | } | |
1704 | ||
1705 | return -ENODEV; | |
1706 | } | |
1707 | ||
static unsigned long n2_spu_hvapi_major;
static unsigned long n2_spu_hvapi_minor;

static int __devinit n2_spu_hvapi_register(void)
{
	int err;

	n2_spu_hvapi_major = 2;
	n2_spu_hvapi_minor = 0;

	err = sun4v_hvapi_register(HV_GRP_NCS,
				   n2_spu_hvapi_major,
				   &n2_spu_hvapi_minor);

	if (!err)
		pr_info("Registered NCS HVAPI version %lu.%lu\n",
			n2_spu_hvapi_major,
			n2_spu_hvapi_minor);

	return err;
}

static void n2_spu_hvapi_unregister(void)
{
	sun4v_hvapi_unregister(HV_GRP_NCS);
}

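/* The HVAPI registration, the queue slab cache, and the per-cpu queue
 * tables are shared between the n2cp and ncp devices, so they are set
 * up once and reference counted under spu_lock.
 */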
static int global_ref;

static int __devinit grab_global_resources(void)
{
	int err = 0;

	mutex_lock(&spu_lock);

	if (global_ref++)
		goto out;

	err = n2_spu_hvapi_register();
	if (err)
		goto out;

	err = queue_cache_init();
	if (err)
		goto out_hvapi_release;

	err = -ENOMEM;
	cpu_to_cwq = kzalloc(sizeof(struct spu_queue *) * NR_CPUS,
			     GFP_KERNEL);
	if (!cpu_to_cwq)
		goto out_queue_cache_destroy;

	cpu_to_mau = kzalloc(sizeof(struct spu_queue *) * NR_CPUS,
			     GFP_KERNEL);
	if (!cpu_to_mau)
		goto out_free_cwq_table;

	err = 0;

out:
	if (err)
		global_ref--;
	mutex_unlock(&spu_lock);
	return err;

out_free_cwq_table:
	kfree(cpu_to_cwq);
	cpu_to_cwq = NULL;

out_queue_cache_destroy:
	queue_cache_destroy();

out_hvapi_release:
	n2_spu_hvapi_unregister();
	goto out;
}

static void release_global_resources(void)
{
	mutex_lock(&spu_lock);
	if (!--global_ref) {
		kfree(cpu_to_cwq);
		cpu_to_cwq = NULL;

		kfree(cpu_to_mau);
		cpu_to_mau = NULL;

		queue_cache_destroy();
		n2_spu_hvapi_unregister();
	}
	mutex_unlock(&spu_lock);
}

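/* Constructors and destructors for the driver-private structures; the
 * ino_table allocated by get_irq_props() is owned by the structure
 * and freed along with it.
 */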
static struct n2_crypto * __devinit alloc_n2cp(void)
{
	struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);

	if (np)
		INIT_LIST_HEAD(&np->cwq_list);

	return np;
}

static void free_n2cp(struct n2_crypto *np)
{
	if (np->cwq_info.ino_table) {
		kfree(np->cwq_info.ino_table);
		np->cwq_info.ino_table = NULL;
	}

	kfree(np);
}

static void __devinit n2_spu_driver_version(void)
{
	static int n2_spu_version_printed;

	if (n2_spu_version_printed++ == 0)
		pr_info("%s", version);
}

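/* Probe sequence for the n2cp (CWQ) device: take the shared global
 * resources, grab the machine description, resolve this device's
 * interrupt properties, scan for its CWQ exec-units, and only then
 * register the crypto algorithms.  Each failure path unwinds exactly
 * the steps that preceded it.
 */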
static int __devinit n2_crypto_probe(struct of_device *dev,
				     const struct of_device_id *match)
{
	struct mdesc_handle *mdesc;
	const char *full_name;
	struct n2_crypto *np;
	int err;

	n2_spu_driver_version();

	full_name = dev->dev.of_node->full_name;
	pr_info("Found N2CP at %s\n", full_name);

	np = alloc_n2cp();
	if (!np) {
		dev_err(&dev->dev, "%s: Unable to allocate n2cp.\n",
			full_name);
		return -ENOMEM;
	}

	err = grab_global_resources();
	if (err) {
		dev_err(&dev->dev, "%s: Unable to grab "
			"global resources.\n", full_name);
		goto out_free_n2cp;
	}

	mdesc = mdesc_grab();

	if (!mdesc) {
		dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
			full_name);
		err = -ENODEV;
		goto out_free_global;
	}
	err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
	if (err) {
		dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
			full_name);
		mdesc_release(mdesc);
		goto out_free_global;
	}

	err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
			     "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
			     cpu_to_cwq);
	mdesc_release(mdesc);

	if (err) {
		dev_err(&dev->dev, "%s: CWQ MDESC scan failed.\n",
			full_name);
		goto out_free_global;
	}

	err = n2_register_algs();
	if (err) {
		dev_err(&dev->dev, "%s: Unable to register algorithms.\n",
			full_name);
		goto out_free_spu_list;
	}

	dev_set_drvdata(&dev->dev, np);

	return 0;

out_free_spu_list:
	spu_list_destroy(&np->cwq_list);

out_free_global:
	release_global_resources();

out_free_n2cp:
	free_n2cp(np);

	return err;
}

static int __devexit n2_crypto_remove(struct of_device *dev)
{
	struct n2_crypto *np = dev_get_drvdata(&dev->dev);

	n2_unregister_algs();

	spu_list_destroy(&np->cwq_list);

	release_global_resources();

	free_n2cp(np);

	return 0;
}

static struct n2_mau * __devinit alloc_ncp(void)
{
	struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);

	if (mp)
		INIT_LIST_HEAD(&mp->mau_list);

	return mp;
}

static void free_ncp(struct n2_mau *mp)
{
	if (mp->mau_info.ino_table) {
		kfree(mp->mau_info.ino_table);
		mp->mau_info.ino_table = NULL;
	}

	kfree(mp);
}

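/* Probe for the ncp (MAU) device mirrors n2_crypto_probe(), but scans
 * for "mau" exec-units and registers no crypto algorithms; the MAU is
 * a modular-arithmetic unit rather than a cipher work queue.
 */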
static int __devinit n2_mau_probe(struct of_device *dev,
				  const struct of_device_id *match)
{
	struct mdesc_handle *mdesc;
	const char *full_name;
	struct n2_mau *mp;
	int err;

	n2_spu_driver_version();

	full_name = dev->dev.of_node->full_name;
	pr_info("Found NCP at %s\n", full_name);

	mp = alloc_ncp();
	if (!mp) {
		dev_err(&dev->dev, "%s: Unable to allocate ncp.\n",
			full_name);
		return -ENOMEM;
	}

	err = grab_global_resources();
	if (err) {
		dev_err(&dev->dev, "%s: Unable to grab "
			"global resources.\n", full_name);
		goto out_free_ncp;
	}

	mdesc = mdesc_grab();

	if (!mdesc) {
		dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
			full_name);
		err = -ENODEV;
		goto out_free_global;
	}

	err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
	if (err) {
		dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
			full_name);
		mdesc_release(mdesc);
		goto out_free_global;
	}

	err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
			     "mau", HV_NCS_QTYPE_MAU, mau_intr,
			     cpu_to_mau);
	mdesc_release(mdesc);

	if (err) {
		dev_err(&dev->dev, "%s: MAU MDESC scan failed.\n",
			full_name);
		goto out_free_global;
	}

	dev_set_drvdata(&dev->dev, mp);

	return 0;

out_free_global:
	release_global_resources();

out_free_ncp:
	free_ncp(mp);

	return err;
}

static int __devexit n2_mau_remove(struct of_device *dev)
{
	struct n2_mau *mp = dev_get_drvdata(&dev->dev);

	spu_list_destroy(&mp->mau_list);

	release_global_resources();

	free_ncp(mp);

	return 0;
}

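/* Match both the Niagara2 ("SUNW,n2-*") and Victoria Falls
 * ("SUNW,vf-*") flavors of the CWQ and MAU device nodes.
 */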
static struct of_device_id n2_crypto_match[] = {
	{
		.name = "n2cp",
		.compatible = "SUNW,n2-cwq",
	},
	{
		.name = "n2cp",
		.compatible = "SUNW,vf-cwq",
	},
	{},
};

MODULE_DEVICE_TABLE(of, n2_crypto_match);

static struct of_platform_driver n2_crypto_driver = {
	.driver = {
		.name = "n2cp",
		.owner = THIS_MODULE,
		.of_match_table = n2_crypto_match,
	},
	.probe		= n2_crypto_probe,
	.remove		= __devexit_p(n2_crypto_remove),
};

static struct of_device_id n2_mau_match[] = {
	{
		.name = "ncp",
		.compatible = "SUNW,n2-mau",
	},
	{
		.name = "ncp",
		.compatible = "SUNW,vf-mau",
	},
	{},
};

MODULE_DEVICE_TABLE(of, n2_mau_match);

static struct of_platform_driver n2_mau_driver = {
	.driver = {
		.name = "ncp",
		.owner = THIS_MODULE,
		.of_match_table = n2_mau_match,
	},
	.probe		= n2_mau_probe,
	.remove		= __devexit_p(n2_mau_remove),
};

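/* Register the CWQ driver first, then the MAU driver; if the second
 * registration fails the first is rolled back so the module never
 * loads half-initialized.
 */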
static int __init n2_init(void)
{
	int err = of_register_driver(&n2_crypto_driver, &of_bus_type);

	if (!err) {
		err = of_register_driver(&n2_mau_driver, &of_bus_type);
		if (err)
			of_unregister_driver(&n2_crypto_driver);
	}
	return err;
}

static void __exit n2_exit(void)
{
	of_unregister_driver(&n2_mau_driver);
	of_unregister_driver(&n2_crypto_driver);
}

module_init(n2_init);
module_exit(n2_exit);