/*
 * memrar_handler 1.0: An Intel restricted access region handler device
 *
 * Copyright (C) 2010 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General
 * Public License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
 * PURPOSE.  See the GNU General Public License for more details.
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the Free
 * Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 * The full GNU General Public License is included in this
 * distribution in the file called COPYING.
 *
 * -------------------------------------------------------------------
 *
 * Moorestown restricted access regions (RAR) provide isolated
 * areas of main memory that are only accessible by authorized
 * devices.
 *
 * The Intel Moorestown RAR handler module exposes a kernel space
 * RAR memory management mechanism.  It is essentially a
 * RAR-specific allocator.
 *
 * Besides providing RAR buffer management, the RAR handler also
 * behaves in many ways like an OS virtual memory manager.  For
 * example, the RAR "handles" created by the RAR handler are
 * analogous to user space virtual addresses.
 *
 * RAR memory itself is never accessed directly by the RAR
 * handler.
 */
39 | ||
40 | #include <linux/miscdevice.h> | |
41 | #include <linux/fs.h> | |
42 | #include <linux/slab.h> | |
43 | #include <linux/kref.h> | |
44 | #include <linux/mutex.h> | |
45 | #include <linux/kernel.h> | |
46 | #include <linux/uaccess.h> | |
47 | #include <linux/mm.h> | |
48 | #include <linux/ioport.h> | |
49 | #include <linux/io.h> | |
50 | ||
51 | #include "../rar_register/rar_register.h" | |
52 | ||
53 | #include "memrar.h" | |
54 | #include "memrar_allocator.h" | |
55 | ||
56 | ||
57 | #define MEMRAR_VER "1.0" | |
58 | ||
59 | /* | |
60 | * Moorestown supports three restricted access regions. | |
61 | * | |
62 | * We only care about the first two, video and audio. The third, | |
63 | * reserved for Chaabi and the P-unit, will be handled by their | |
64 | * respective drivers. | |
65 | */ | |
66 | #define MRST_NUM_RAR 2 | |
67 | ||
68 | /* ---------------- -------------------- ------------------- */ | |
69 | ||
/**
 * struct memrar_buffer_info - struct that keeps track of all RAR buffers
 * @list:	Linked list of memrar_buffer_info objects.
 * @buffer:	Core RAR buffer information.
 * @refcount:	Reference count.
 * @owner:	File handle corresponding to process that reserved the
 *		block of memory in RAR.  This will be zero for buffers
 *		allocated by other drivers instead of by a user space
 *		process.
 *
 * This structure encapsulates a linked list of RAR buffers, as well
 * as other characteristics specific to a given list node, such as the
 * reference count on the corresponding RAR buffer.
 */
struct memrar_buffer_info {
	struct list_head list;
	struct RAR_buffer buffer;
	struct kref refcount;
	struct file *owner;
};
90 | ||
91 | /** | |
92 | * struct memrar_rar_info - characteristics of a given RAR | |
93 | * @base: Base bus address of the RAR. | |
94 | * @length: Length of the RAR. | |
95 | * @iobase: Virtual address of RAR mapped into kernel. | |
96 | * @allocator: Allocator associated with the RAR. Note the allocator | |
97 | * "capacity" may be smaller than the RAR length if the | |
98 | * length is not a multiple of the configured allocator | |
99 | * block size. | |
100 | * @buffers: Table that keeps track of all reserved RAR buffers. | |
101 | * @lock: Lock used to synchronize access to RAR-specific data | |
102 | * structures. | |
103 | * | |
104 | * Each RAR has an associated memrar_rar_info structure that describes | |
105 | * where in memory the RAR is located, how large it is, and a list of | |
106 | * reserved RAR buffers inside that RAR. Each RAR also has a mutex | |
107 | * associated with it to reduce lock contention when operations on | |
108 | * multiple RARs are performed in parallel. | |
109 | */ | |
110 | struct memrar_rar_info { | |
111 | dma_addr_t base; | |
112 | unsigned long length; | |
113 | void __iomem *iobase; | |
114 | struct memrar_allocator *allocator; | |
115 | struct memrar_buffer_info buffers; | |
116 | struct mutex lock; | |
117 | }; | |
118 | ||
119 | /* | |
120 | * Array of RAR characteristics. | |
121 | */ | |
122 | static struct memrar_rar_info memrars[MRST_NUM_RAR]; | |
123 | ||
124 | /* ---------------- -------------------- ------------------- */ | |
125 | ||
/* Validate RAR type. */
static inline int memrar_is_valid_rar_type(u32 type)
{
	return type == RAR_TYPE_VIDEO || type == RAR_TYPE_AUDIO;
}

/* Check if an address/handle falls within the given RAR memory range. */
static inline int memrar_handle_in_range(struct memrar_rar_info *rar,
					 u32 vaddr)
{
	unsigned long const iobase = (unsigned long) (rar->iobase);
	return (vaddr >= iobase && vaddr < iobase + rar->length);
}

/* Retrieve RAR information associated with the given handle. */
static struct memrar_rar_info *memrar_get_rar_info(u32 vaddr)
{
	int i;
	for (i = 0; i < MRST_NUM_RAR; ++i) {
		struct memrar_rar_info * const rar = &memrars[i];
		if (memrar_handle_in_range(rar, vaddr))
			return rar;
	}

	return NULL;
}

/*
 * Retrieve bus address from given handle.
 *
 * Returns address corresponding to given handle.  Zero if handle is
 * invalid.
 */
static dma_addr_t memrar_get_bus_address(
	struct memrar_rar_info *rar,
	u32 vaddr)
{
	unsigned long const iobase = (unsigned long) (rar->iobase);

	if (!memrar_handle_in_range(rar, vaddr))
		return 0;

	/*
	 * An assumption is made that the virtual address offset is
	 * the same as the bus address offset, at least based on the
	 * way this driver is implemented.  For example, vaddr + 2 ==
	 * baddr + 2.
	 *
	 * @todo Is that a valid assumption?
	 */
	return rar->base + (vaddr - iobase);
}

/*
 * Retrieve physical address from given handle.
 *
 * Returns address corresponding to given handle.  Zero if handle is
 * invalid.
 */
static dma_addr_t memrar_get_physical_address(
	struct memrar_rar_info *rar,
	u32 vaddr)
{
	/*
	 * @todo This assumes that the bus address and physical
	 *       address are the same.  That is true for Moorestown
	 *       but not necessarily on other platforms.  This
	 *       deficiency should be addressed at some point.
	 */
	return memrar_get_bus_address(rar, vaddr);
}

/*
 * Core block release code.
 *
 * Note: This code removes the node from a list.  Make sure any list
 *       iteration is performed using list_for_each_safe().
 */
static void memrar_release_block_i(struct kref *ref)
{
	/*
	 * Last reference is being released.  Remove from the table,
	 * and reclaim resources.
	 */

	struct memrar_buffer_info * const node =
		container_of(ref, struct memrar_buffer_info, refcount);

	struct RAR_block_info * const user_info =
		&node->buffer.info;

	struct memrar_allocator * const allocator =
		memrars[user_info->type].allocator;

	list_del(&node->list);

	memrar_allocator_free(allocator, user_info->handle);

	kfree(node);
}

/*
 * Initialize RAR parameters, such as bus addresses, etc.
 */
static int memrar_init_rar_resources(char const *devname)
{
	/* ---- Sanity Checks ----
	 * 1. RAR bus addresses in both Lincroft and Langwell RAR
	 *    registers should be the same.
	 *    a. There's no way we can do this through IA.
	 *
	 * 2. Secure device ID in Langwell RAR registers should be set
	 *    appropriately, e.g. only LPE DMA for the audio RAR, and
	 *    security for the other Langwell based RAR registers.
	 *    a. There's no way we can do this through IA.
	 *
	 * 3. Audio and video RAR registers and RAR access should be
	 *    locked down.  If not, enable RAR access control.  Except
	 *    for debugging purposes, there is no reason for them to
	 *    be unlocked.
	 *    a. We can only do this for the Lincroft (IA) side.
	 *
	 * @todo Should the RAR handler driver even be aware of audio
	 *       and video RAR settings?
	 */

	/*
	 * RAR buffer block size.
	 *
	 * We choose it to be the size of a page to simplify the
	 * /dev/memrar mmap() implementation and usage.  Otherwise
	 * paging is not involved once an RAR is locked down.
	 */
	static size_t const RAR_BLOCK_SIZE = PAGE_SIZE;

	int z;
	int found_rar = 0;

	BUG_ON(MRST_NUM_RAR != ARRAY_SIZE(memrars));

	for (z = 0; z != MRST_NUM_RAR; ++z) {
		dma_addr_t low, high;
		struct memrar_rar_info * const rar = &memrars[z];

		BUG_ON(!memrar_is_valid_rar_type(z));

		mutex_init(&rar->lock);

		/*
		 * Initialize the process table before we reach any
		 * code that exits on failure since the finalization
		 * code requires an initialized list.
		 */
		INIT_LIST_HEAD(&rar->buffers.list);

		if (rar_get_address(z, &low, &high) != 0) {
			/* No RAR is available. */
			break;
		} else if (low == 0 || high == 0) {
			/*
			 * We don't immediately break out of the loop
			 * since the next type of RAR may be enabled.
			 */
			rar->base = 0;
			rar->length = 0;
			rar->iobase = NULL;
			rar->allocator = NULL;
			continue;
		}

		/*
		 * @todo Verify that LNC and LNW RAR register contents
		 *       (addresses, security, etc.) are compatible and
		 *       consistent.
		 */

		rar->length = high - low + 1;

		/* Claim RAR memory as our own. */
		if (request_mem_region(low, rar->length, devname) == NULL) {
			rar->length = 0;

			pr_err("%s: Unable to claim RAR[%d] memory.\n",
			       devname,
			       z);
			pr_err("%s: RAR[%d] disabled.\n", devname, z);

			/*
			 * Rather than break out of the loop by
			 * returning -EBUSY, for example, we may be
			 * able to claim memory of the next RAR region
			 * as our own.
			 */
			continue;
		}

		rar->base = low;

		/*
		 * Now map it into the kernel address space.
		 *
		 * Note that the RAR memory may only be accessed by IA
		 * when debugging.  Otherwise attempts to access the
		 * RAR memory when it is locked down will result in
		 * behavior similar to writing to /dev/null and
		 * reading from /dev/zero.  This behavior is enforced
		 * by the hardware.  Even if we don't access the
		 * memory, mapping it into the kernel provides us with
		 * a convenient RAR handle to bus address mapping.
		 */
		rar->iobase = ioremap_nocache(rar->base, rar->length);
		if (rar->iobase == NULL) {
			pr_err("%s: Unable to map RAR memory.\n",
			       devname);
			return -ENOMEM;
		}

		/* Initialize corresponding memory allocator. */
		rar->allocator = memrar_create_allocator(
			(unsigned long) rar->iobase,
			rar->length,
			RAR_BLOCK_SIZE);
		if (rar->allocator == NULL)
			return -ENOMEM;

		/*
		 * -------------------------------------------------
		 * Make sure all RARs handled by us are locked down.
		 * -------------------------------------------------
		 */

		/* Enable RAR protection on the Lincroft side. */
		if (0) {
			/*
			 * This is mostly a sanity check since the
			 * vendor should have locked down RAR in the
			 * SMIP header RAR configuration.
			 */
			rar_lock(z);
		} else {
			pr_warning("%s: LNC RAR[%d] no lock sanity check.\n",
				   devname,
				   z);
		}

		/* ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ */
		/* |||||||||||||||||||||||||||||||||||||||||||||||||| */

		/*
		 * It would be nice if we could verify that RAR
		 * protection on the Langwell side is enabled, but
		 * there is no way to do that from here.  The
		 * necessary Langwell RAR registers are not accessible
		 * from the Lincroft (IA) side.
		 *
		 * Hopefully the ODM did the right thing and enabled
		 * Langwell side RAR protection in the integrated
		 * firmware SMIP header.
		 */

		pr_info("%s: BRAR[%d] bus address range = "
			"[0x%lx, 0x%lx]\n",
			devname,
			z,
			(unsigned long) low,
			(unsigned long) high);

		pr_info("%s: BRAR[%d] size = %zu KiB\n",
			devname,
			z,
			rar->allocator->capacity / 1024);

		found_rar = 1;
	}

	if (!found_rar) {
		/*
		 * No RAR support.  Don't bother continuing.
		 *
		 * Note that this is not a failure.
		 */
		pr_info("%s: No Moorestown RAR support available.\n",
			devname);
		return -ENODEV;
	}

	return 0;
}

/*
 * Finalize RAR resources.
 */
static void memrar_fini_rar_resources(void)
{
	int z;
	struct memrar_buffer_info *pos;
	struct memrar_buffer_info *tmp;

	/*
	 * @todo Do we need to hold a lock at this point in time?
	 *       (module initialization failure or exit?)
	 */

	for (z = MRST_NUM_RAR; z-- != 0; ) {
		struct memrar_rar_info * const rar = &memrars[z];

		/* Clean up remaining resources. */

		list_for_each_entry_safe(pos,
					 tmp,
					 &rar->buffers.list,
					 list) {
			kref_put(&pos->refcount, memrar_release_block_i);
		}

		memrar_destroy_allocator(rar->allocator);
		rar->allocator = NULL;

		iounmap(rar->iobase);
		rar->iobase = NULL;

		release_mem_region(rar->base, rar->length);
		rar->base = 0;

		rar->length = 0;
	}
}

static long memrar_reserve_block(struct RAR_buffer *request,
				 struct file *filp)
{
	struct RAR_block_info * const rinfo = &request->info;
	struct RAR_buffer *buffer;
	struct memrar_buffer_info *buffer_info;
	u32 handle;
	struct memrar_rar_info *rar = NULL;

	/* Prevent array overflow. */
	if (!memrar_is_valid_rar_type(rinfo->type))
		return -EINVAL;

	rar = &memrars[rinfo->type];

	/* Reserve memory in RAR. */
	handle = memrar_allocator_alloc(rar->allocator, rinfo->size);
	if (handle == 0)
		return -ENOMEM;

	buffer_info = kmalloc(sizeof(*buffer_info), GFP_KERNEL);

	if (buffer_info == NULL) {
		memrar_allocator_free(rar->allocator, handle);
		return -ENOMEM;
	}

	buffer = &buffer_info->buffer;
	buffer->info.type = rinfo->type;
	buffer->info.size = rinfo->size;

	/* Memory handle corresponding to the bus address. */
	buffer->info.handle = handle;
	buffer->bus_address = memrar_get_bus_address(rar, handle);

	/*
	 * Keep track of owner so that we can later clean up if
	 * necessary.
	 */
	buffer_info->owner = filp;

	kref_init(&buffer_info->refcount);

	mutex_lock(&rar->lock);
	list_add(&buffer_info->list, &rar->buffers.list);
	mutex_unlock(&rar->lock);

	rinfo->handle = buffer->info.handle;
	request->bus_address = buffer->bus_address;

	return 0;
}

static long memrar_release_block(u32 addr)
{
	struct memrar_buffer_info *pos;
	struct memrar_buffer_info *tmp;
	struct memrar_rar_info * const rar = memrar_get_rar_info(addr);
	long result = -EINVAL;

	if (rar == NULL)
		return -EFAULT;

	mutex_lock(&rar->lock);

	/*
	 * Iterate through the buffer list to find the corresponding
	 * buffer to be released.
	 */
	list_for_each_entry_safe(pos,
				 tmp,
				 &rar->buffers.list,
				 list) {
		struct RAR_block_info * const info =
			&pos->buffer.info;

		/*
		 * Take into account handle offsets that may have been
		 * added to the base handle, such as in the following
		 * scenario:
		 *
		 *     u32 handle = base + offset;
		 *     rar_handle_to_bus(handle);
		 *     rar_release(handle);
		 */
		if (addr >= info->handle
		    && addr < (info->handle + info->size)
		    && memrar_is_valid_rar_type(info->type)) {
			kref_put(&pos->refcount, memrar_release_block_i);
			result = 0;
			break;
		}
	}

	mutex_unlock(&rar->lock);

	return result;
}

static long memrar_get_stat(struct RAR_stat *r)
{
	long result = -EINVAL;

	if (likely(r != NULL) && memrar_is_valid_rar_type(r->type)) {
		struct memrar_allocator * const allocator =
			memrars[r->type].allocator;

		BUG_ON(allocator == NULL);

		/*
		 * Allocator capacity doesn't change over time.  No
		 * need to synchronize.
		 */
		r->capacity = allocator->capacity;

		mutex_lock(&allocator->lock);

		r->largest_block_size = allocator->largest_free_area;

		mutex_unlock(&allocator->lock);

		result = 0;
	}

	return result;
}

static long memrar_ioctl(struct file *filp,
			 unsigned int cmd,
			 unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long result = 0;

	struct RAR_buffer buffer;
	struct RAR_block_info * const request = &buffer.info;
	struct RAR_stat rar_info;
	u32 rar_handle;

	switch (cmd) {
	case RAR_HANDLER_RESERVE:
		if (copy_from_user(request,
				   argp,
				   sizeof(*request)))
			return -EFAULT;

		result = memrar_reserve_block(&buffer, filp);
		if (result != 0)
			return result;

		return copy_to_user(argp,
				    request,
				    sizeof(*request)) ? -EFAULT : 0;

	case RAR_HANDLER_RELEASE:
		if (copy_from_user(&rar_handle,
				   argp,
				   sizeof(rar_handle)))
			return -EFAULT;

		return memrar_release_block(rar_handle);

	case RAR_HANDLER_STAT:
		if (copy_from_user(&rar_info,
				   argp,
				   sizeof(rar_info)))
			return -EFAULT;

		/*
		 * Populate the RAR_stat structure based on the RAR
		 * type given by the user.
		 */
		if (memrar_get_stat(&rar_info) != 0)
			return -EINVAL;

		/*
		 * @todo Do we need to verify destination pointer
		 *       "argp" is non-zero?  Is that already done by
		 *       copy_to_user()?
		 */
		return copy_to_user(argp,
				    &rar_info,
				    sizeof(rar_info)) ? -EFAULT : 0;

	default:
		return -ENOTTY;
	}

	return 0;
}
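
/*
 * Illustrative sketch (not part of the driver): how a user space
 * client would exercise the ioctl interface above.  The structure
 * layouts and ioctl request codes are assumed to come from memrar.h;
 * the buffer size below is an arbitrary example value.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include "memrar.h"
 *
 *	struct RAR_block_info request;
 *	int fd = open("/dev/memrar", O_RDWR);
 *
 *	request.type = RAR_TYPE_VIDEO;
 *	request.size = 4096;
 *	if (fd >= 0 && ioctl(fd, RAR_HANDLER_RESERVE, &request) == 0) {
 *		... hand request.handle to the video driver ...
 *		ioctl(fd, RAR_HANDLER_RELEASE, &request.handle);
 *	}
 *
 * Closing the file descriptor releases any blocks still owned by it,
 * as implemented in memrar_release() below.
 */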
642 | ||
643 | static int memrar_mmap(struct file *filp, struct vm_area_struct *vma) | |
644 | { | |
645 | /* | |
646 | * This mmap() implementation is predominantly useful for | |
647 | * debugging since the CPU will be prevented from accessing | |
648 | * RAR memory by the hardware when RAR is properly locked | |
649 | * down. | |
650 | * | |
651 | * In order for this implementation to be useful RAR memory | |
652 | * must be not be locked down. However, we only want to do | |
653 | * that when debugging. DO NOT leave RAR memory unlocked in a | |
654 | * deployed device that utilizes RAR. | |
655 | */ | |
656 | ||
657 | size_t const size = vma->vm_end - vma->vm_start; | |
658 | ||
659 | /* Users pass the RAR handle as the mmap() offset parameter. */ | |
660 | unsigned long const handle = vma->vm_pgoff << PAGE_SHIFT; | |
661 | ||
662 | struct memrar_rar_info * const rar = memrar_get_rar_info(handle); | |
663 | ||
664 | unsigned long pfn; | |
665 | ||
666 | /* Invalid RAR handle or size passed to mmap(). */ | |
667 | if (rar == NULL | |
668 | || handle == 0 | |
669 | || size > (handle - (unsigned long) rar->iobase)) | |
670 | return -EINVAL; | |
671 | ||
672 | /* | |
673 | * Retrieve physical address corresponding to the RAR handle, | |
674 | * and convert it to a page frame. | |
675 | */ | |
676 | pfn = memrar_get_physical_address(rar, handle) >> PAGE_SHIFT; | |
677 | ||
678 | ||
679 | pr_debug("memrar: mapping RAR range [0x%lx, 0x%lx) into user space.\n", | |
680 | handle, | |
681 | handle + size); | |
682 | ||
683 | /* | |
684 | * Map RAR memory into user space. This is really only useful | |
685 | * for debugging purposes since the memory won't be | |
686 | * accessible, i.e. reads return zero and writes are ignored, | |
687 | * when RAR access control is enabled. | |
688 | */ | |
689 | if (remap_pfn_range(vma, | |
690 | vma->vm_start, | |
691 | pfn, | |
692 | size, | |
693 | vma->vm_page_prot)) | |
694 | return -EAGAIN; | |
695 | ||
696 | /* vma->vm_ops = &memrar_mem_ops; */ | |
697 | ||
698 | return 0; | |
699 | } | |
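
/*
 * Illustrative sketch (debugging only, and assuming RAR access
 * control has been left unlocked): user space maps a reserved block
 * by passing the RAR handle returned by RAR_HANDLER_RESERVE as the
 * mmap() offset.  Handles are page aligned because the allocator
 * block size is PAGE_SIZE.
 *
 *	void *p = mmap(NULL, request.size, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, (off_t) request.handle);
 */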
700 | ||
701 | static int memrar_open(struct inode *inode, struct file *filp) | |
702 | { | |
703 | /* Nothing to do yet. */ | |
704 | ||
705 | return 0; | |
706 | } | |
707 | ||
708 | static int memrar_release(struct inode *inode, struct file *filp) | |
709 | { | |
710 | /* Free all regions associated with the given file handle. */ | |
711 | ||
712 | struct memrar_buffer_info *pos; | |
713 | struct memrar_buffer_info *tmp; | |
714 | int z; | |
715 | ||
716 | for (z = 0; z != MRST_NUM_RAR; ++z) { | |
717 | struct memrar_rar_info * const rar = &memrars[z]; | |
718 | ||
719 | mutex_lock(&rar->lock); | |
720 | ||
721 | list_for_each_entry_safe(pos, | |
722 | tmp, | |
723 | &rar->buffers.list, | |
724 | list) { | |
725 | if (filp == pos->owner) | |
726 | kref_put(&pos->refcount, | |
727 | memrar_release_block_i); | |
728 | } | |
729 | ||
730 | mutex_unlock(&rar->lock); | |
731 | } | |
732 | ||
733 | return 0; | |
734 | } | |
735 | ||
/*
 * This function is part of the kernel space memrar driver API.
 */
size_t rar_reserve(struct RAR_buffer *buffers, size_t count)
{
	struct RAR_buffer * const end =
		(buffers == NULL ? buffers : buffers + count);
	struct RAR_buffer *i;

	size_t reserve_count = 0;

	for (i = buffers; i != end; ++i) {
		if (memrar_reserve_block(i, NULL) == 0)
			++reserve_count;
		else
			i->bus_address = 0;
	}

	return reserve_count;
}
EXPORT_SYMBOL(rar_reserve);
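
/*
 * Minimal in-kernel usage sketch (illustrative only): another driver
 * fills in the RAR type and size, calls rar_reserve(), and checks how
 * many of its buffers were actually reserved.  Failed entries have a
 * zero bus_address.  The size used here is just an example value.
 *
 *	struct RAR_buffer buf = {
 *		.info = { .type = RAR_TYPE_AUDIO, .size = 64 * 1024 },
 *	};
 *
 *	if (rar_reserve(&buf, 1) == 1) {
 *		... program buf.bus_address into the DMA engine ...
 *		rar_release(&buf, 1);
 *	}
 */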
757 | ||
758 | /* | |
759 | * This function is part of the kernel space memrar driver API. | |
760 | */ | |
761 | size_t rar_release(struct RAR_buffer *buffers, size_t count) | |
762 | { | |
763 | struct RAR_buffer * const end = | |
764 | (buffers == NULL ? buffers : buffers + count); | |
765 | struct RAR_buffer *i; | |
766 | ||
767 | size_t release_count = 0; | |
768 | ||
769 | for (i = buffers; i != end; ++i) { | |
770 | u32 * const handle = &i->info.handle; | |
771 | if (memrar_release_block(*handle) == 0) { | |
772 | /* | |
773 | * @todo We assume we should do this each time | |
774 | * the ref count is decremented. Should | |
775 | * we instead only do this when the ref | |
776 | * count has dropped to zero, and the | |
777 | * buffer has been completely | |
778 | * released/unmapped? | |
779 | */ | |
780 | *handle = 0; | |
781 | ++release_count; | |
782 | } | |
783 | } | |
784 | ||
785 | return release_count; | |
786 | } | |
787 | EXPORT_SYMBOL(rar_release); | |
788 | ||
789 | /* | |
790 | * This function is part of the kernel space driver API. | |
791 | */ | |
792 | size_t rar_handle_to_bus(struct RAR_buffer *buffers, size_t count) | |
793 | { | |
794 | struct RAR_buffer * const end = | |
795 | (buffers == NULL ? buffers : buffers + count); | |
796 | struct RAR_buffer *i; | |
797 | struct memrar_buffer_info *pos; | |
798 | ||
799 | size_t conversion_count = 0; | |
800 | ||
801 | /* | |
802 | * Find all bus addresses corresponding to the given handles. | |
803 | * | |
804 | * @todo Not liking this nested loop. Optimize. | |
805 | */ | |
806 | for (i = buffers; i != end; ++i) { | |
807 | struct memrar_rar_info * const rar = | |
808 | memrar_get_rar_info(i->info.handle); | |
809 | ||
810 | /* | |
811 | * Check if we have a bogus handle, and then continue | |
812 | * with remaining buffers. | |
813 | */ | |
814 | if (rar == NULL) { | |
815 | i->bus_address = 0; | |
816 | continue; | |
817 | } | |
818 | ||
819 | mutex_lock(&rar->lock); | |
820 | ||
821 | list_for_each_entry(pos, &rar->buffers.list, list) { | |
822 | struct RAR_block_info * const user_info = | |
823 | &pos->buffer.info; | |
824 | ||
825 | /* | |
826 | * Take into account handle offsets that may | |
827 | * have been added to the base handle, such as | |
828 | * in the following scenario: | |
829 | * | |
830 | * u32 handle = base + offset; | |
831 | * rar_handle_to_bus(handle); | |
832 | */ | |
833 | ||
834 | if (i->info.handle >= user_info->handle | |
835 | && i->info.handle < (user_info->handle | |
836 | + user_info->size)) { | |
837 | u32 const offset = | |
838 | i->info.handle - user_info->handle; | |
839 | ||
840 | i->info.type = user_info->type; | |
841 | i->info.size = user_info->size - offset; | |
842 | i->bus_address = | |
843 | pos->buffer.bus_address | |
844 | + offset; | |
845 | ||
846 | /* Increment the reference count. */ | |
847 | kref_get(&pos->refcount); | |
848 | ||
849 | ++conversion_count; | |
850 | break; | |
851 | } else { | |
852 | i->bus_address = 0; | |
853 | } | |
854 | } | |
855 | ||
856 | mutex_unlock(&rar->lock); | |
857 | } | |
858 | ||
859 | return conversion_count; | |
860 | } | |
861 | EXPORT_SYMBOL(rar_handle_to_bus); | |
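
/*
 * Minimal usage sketch (illustrative only): a driver that receives a
 * RAR handle from user space (handle_from_user below is hypothetical,
 * e.g. obtained via the RAR_HANDLER_RESERVE ioctl) converts it to a
 * bus address for DMA.  Each successful conversion takes a reference
 * on the underlying block, so it must eventually be balanced with a
 * rar_release() on the same handle.
 *
 *	struct RAR_buffer buf = {
 *		.info = { .handle = handle_from_user },
 *	};
 *
 *	if (rar_handle_to_bus(&buf, 1) == 1) {
 *		... DMA to/from buf.bus_address for buf.info.size bytes ...
 *		rar_release(&buf, 1);
 *	}
 */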
862 | ||
863 | static const struct file_operations memrar_fops = { | |
864 | .owner = THIS_MODULE, | |
865 | .unlocked_ioctl = memrar_ioctl, | |
866 | .mmap = memrar_mmap, | |
867 | .open = memrar_open, | |
868 | .release = memrar_release, | |
869 | }; | |
870 | ||
871 | static struct miscdevice memrar_miscdev = { | |
872 | .minor = MISC_DYNAMIC_MINOR, /* dynamic allocation */ | |
873 | .name = "memrar", /* /dev/memrar */ | |
874 | .fops = &memrar_fops | |
875 | }; | |
876 | ||
877 | static char const banner[] __initdata = | |
878 | KERN_INFO | |
879 | "Intel RAR Handler: " MEMRAR_VER " initialized.\n"; | |
880 | ||
881 | static int memrar_registration_callback(void *ctx) | |
882 | { | |
883 | /* | |
884 | * We initialize the RAR parameters early on so that we can | |
885 | * discontinue memrar device initialization and registration | |
886 | * if suitably configured RARs are not available. | |
887 | */ | |
888 | int result = memrar_init_rar_resources(memrar_miscdev.name); | |
889 | ||
890 | if (result != 0) | |
891 | return result; | |
892 | ||
893 | result = misc_register(&memrar_miscdev); | |
894 | ||
895 | if (result != 0) { | |
896 | pr_err("%s: misc_register() failed.\n", | |
897 | memrar_miscdev.name); | |
898 | ||
899 | /* Clean up resources previously reserved. */ | |
900 | memrar_fini_rar_resources(); | |
901 | } | |
902 | ||
903 | return result; | |
904 | } | |
905 | ||
906 | static int __init memrar_init(void) | |
907 | { | |
908 | printk(banner); | |
909 | ||
910 | return register_rar(&memrar_registration_callback, 0); | |
911 | } | |
912 | ||
913 | static void __exit memrar_exit(void) | |
914 | { | |
915 | memrar_fini_rar_resources(); | |
916 | ||
917 | misc_deregister(&memrar_miscdev); | |
918 | } | |
919 | ||
920 | ||
921 | module_init(memrar_init); | |
922 | module_exit(memrar_exit); | |
923 | ||
924 | ||
925 | MODULE_AUTHOR("Ossama Othman <ossama.othman@intel.com>"); | |
926 | MODULE_DESCRIPTION("Intel Restricted Access Region Handler"); | |
927 | MODULE_LICENSE("GPL"); | |
928 | MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR); | |
929 | MODULE_VERSION(MEMRAR_VER); | |
930 | ||
931 | ||
932 | ||
933 | /* | |
934 | Local Variables: | |
935 | c-file-style: "linux" | |
936 | End: | |
937 | */ |