/*
 * dsp-mmu.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * DSP iommu.
 *
 * Copyright (C) 2010 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <dspbridge/host_os.h>
#include <plat/dmtimer.h>
#include <dspbridge/dbdefs.h>
#include <dspbridge/dev.h>
#include <dspbridge/io_sm.h>
#include <dspbridge/dspdeh.h>
#include "_tiomap.h"

#include <dspbridge/dsp-mmu.h>

#define MMU_CNTL_TWL_EN		(1 << 2)

static struct tasklet_struct mmu_tasklet;

#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
static void mmu_fault_print_stack(struct bridge_dev_context *dev_context)
{
	void *dummy_addr;
	u32 fa, tmp;
	struct iotlb_entry e;
	struct iommu *mmu = dev_context->dsp_mmu;

	dummy_addr = (void *)__get_free_page(GFP_ATOMIC);

	/*
	 * Before acking the MMU fault, let's make sure MMU can only
	 * access entry #0. Then add a new entry so that the DSP OS
	 * can continue in order to dump the stack.
	 */
	tmp = iommu_read_reg(mmu, MMU_CNTL);
	tmp &= ~MMU_CNTL_TWL_EN;
	iommu_write_reg(mmu, tmp, MMU_CNTL);
	fa = iommu_read_reg(mmu, MMU_FAULT_AD);
	e.da = fa & PAGE_MASK;
	e.pa = virt_to_phys(dummy_addr);
	e.valid = 1;
	e.prsvd = 1;
	e.pgsz = IOVMF_PGSZ_4K & MMU_CAM_PGSZ_MASK;
	e.endian = MMU_RAM_ENDIAN_LITTLE;
	e.elsz = MMU_RAM_ELSZ_32;
	e.mixed = 0;

	load_iotlb_entry(mmu, &e);

	dsp_clk_enable(DSP_CLK_GPT8);

	dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe);

	/* Clear MMU interrupt */
	tmp = iommu_read_reg(mmu, MMU_IRQSTATUS);
	iommu_write_reg(mmu, tmp, MMU_IRQSTATUS);

	dump_dsp_stack(dev_context);
	dsp_clk_disable(DSP_CLK_GPT8);

	iopgtable_clear_entry(mmu, fa);
	free_page((unsigned long)dummy_addr);
}
#endif


static void fault_tasklet(unsigned long data)
{
	struct iommu *mmu = (struct iommu *)data;
	struct bridge_dev_context *dev_ctx;
	struct deh_mgr *dm;
	u32 fa;

	dev_get_deh_mgr(dev_get_first(), &dm);
	dev_get_bridge_context(dev_get_first(), &dev_ctx);

	if (!dm || !dev_ctx)
		return;

	fa = iommu_read_reg(mmu, MMU_FAULT_AD);

#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
	print_dsp_trace_buffer(dev_ctx);
	dump_dl_modules(dev_ctx);
	mmu_fault_print_stack(dev_ctx);
#endif

	bridge_deh_notify(dm, DSP_MMUFAULT, fa);
}

/*
 * ======== mmu_fault_callback ========
 * ISR to be triggered by a DSP MMU fault interrupt.
 */
static int mmu_fault_callback(struct iommu *mmu)
{
	if (!mmu)
		return -EPERM;

	iommu_write_reg(mmu, 0, MMU_IRQENABLE);
	tasklet_schedule(&mmu_tasklet);
	return 0;
}

/**
 * dsp_mmu_init() - initialize dsp_mmu module and return a handle
 *
 * This function initializes the dsp mmu module and returns a struct iommu
 * handle to use for dsp maps.
 *
 */
struct iommu *dsp_mmu_init(void)
{
	struct iommu *mmu;

	mmu = iommu_get("iva2");

	if (!IS_ERR(mmu)) {
		tasklet_init(&mmu_tasklet, fault_tasklet, (unsigned long)mmu);
		mmu->isr = mmu_fault_callback;
	}

	return mmu;
}

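/*
 * Illustrative usage (a sketch, not part of this file): a caller such as
 * the bridge bring-up code would pair dsp_mmu_init() with dsp_mmu_exit(),
 * checking the handle with IS_ERR() since iommu_get() returns an ERR_PTR
 * on failure. The dsp_mmu field is the one used by this file; the
 * surrounding context is assumed.
 *
 *	struct iommu *mmu = dsp_mmu_init();
 *
 *	if (IS_ERR(mmu))
 *		return PTR_ERR(mmu);
 *	dev_context->dsp_mmu = mmu;
 *	...
 *	dsp_mmu_exit(dev_context->dsp_mmu);
 */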

/**
 * dsp_mmu_exit() - destroy dsp mmu module
 * @mmu: Pointer to iommu handle.
 *
 * This function destroys the dsp mmu module.
 *
 */
void dsp_mmu_exit(struct iommu *mmu)
{
	if (mmu)
		iommu_put(mmu);
	tasklet_kill(&mmu_tasklet);
}

/**
 * user_va2_pa() - get physical address from userspace address.
 * @mm: mm_struct pointer of the process.
 * @address: Virtual user space address.
 *
 * Returns the physical page address, or 0 if the page is not present.
 */
static u32 user_va2_pa(struct mm_struct *mm, u32 address)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *ptep, pte;

	/* Walk the page tables; the pud level is folded on ARM. */
	pgd = pgd_offset(mm, address);
	if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
		pmd = pmd_offset(pgd, address);
		if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
			ptep = pte_offset_map(pmd, address);
			if (ptep) {
				pte = *ptep;
				pte_unmap(ptep);
				if (pte_present(pte))
					return pte_val(pte) & PAGE_MASK;
			}
		}
	}

	return 0;
}

/**
 * get_io_pages() - pin and get pages of io user's buffer.
 * @mm: mm_struct pointer of the process.
 * @uva: Virtual user space address.
 * @pages: Number of pages to pin.
 * @usr_pgs: struct page array pointer where the user pages will be stored.
 *
 * Returns the number of pages actually pinned, which may be fewer than
 * requested if a page is not backed by valid memory.
 */
static int get_io_pages(struct mm_struct *mm, u32 uva, unsigned pages,
						struct page **usr_pgs)
{
	u32 pa;
	int i;
	struct page *pg;

	for (i = 0; i < pages; i++) {
		pa = user_va2_pa(mm, uva);

		if (!pfn_valid(__phys_to_pfn(pa)))
			break;

		pg = phys_to_page(pa);
		usr_pgs[i] = pg;
		get_page(pg);
		/* advance to the next page of the buffer */
		uva += PAGE_SIZE;
	}
	return i;
}

/**
 * user_to_dsp_map() - map a user buffer to a dsp virtual address
 * @mmu: Pointer to iommu handle.
 * @uva: Virtual user space address.
 * @da: DSP address.
 * @size: Buffer size to map.
 * @usr_pgs: struct page array pointer where the user pages will be stored.
 *
 * This function maps a user space buffer into a DSP virtual address.
 *
 */
u32 user_to_dsp_map(struct iommu *mmu, u32 uva, u32 da, u32 size,
						struct page **usr_pgs)
{
	int res, w = 0;
	unsigned pages;
	int i;
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct sg_table *sgt;
	struct scatterlist *sg;

	if (!size || !usr_pgs)
		return -EINVAL;

	pages = size / PG_SIZE4K;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, uva);
	while (vma && (uva + size > vma->vm_end))
		vma = find_vma(mm, vma->vm_end + 1);

	if (!vma) {
		pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
						__func__, uva, size);
		up_read(&mm->mmap_sem);
		return -EINVAL;
	}
	if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
		w = 1;

	if (vma->vm_flags & VM_IO)
		i = get_io_pages(mm, uva, pages, usr_pgs);
	else
		i = get_user_pages(current, mm, uva, pages, w, 1,
						usr_pgs, NULL);
	up_read(&mm->mmap_sem);

	if (i < 0)
		return i;

	if (i < pages) {
		res = -EFAULT;
		goto err_pages;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		res = -ENOMEM;
		goto err_pages;
	}

	res = sg_alloc_table(sgt, pages, GFP_KERNEL);
	if (res < 0)
		goto err_sg;

	for_each_sg(sgt->sgl, sg, sgt->nents, i)
		sg_set_page(sg, usr_pgs[i], PAGE_SIZE, 0);

	da = iommu_vmap(mmu, da, sgt, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
	if (!IS_ERR_VALUE(da))
		return da;
	res = (int)da;

	sg_free_table(sgt);
err_sg:
	kfree(sgt);
	i = pages;
err_pages:
	while (i--)
		put_page(usr_pgs[i]);
	return res;
}

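/*
 * Illustrative usage (a sketch, not from the driver itself): mapping a
 * page-aligned user buffer for the DSP and tearing it down again with
 * user_to_dsp_unmap() below. The page array must hold size / PG_SIZE4K
 * entries; all names other than the two exported functions are
 * hypothetical.
 *
 *	u32 da;
 *
 *	da = user_to_dsp_map(mmu, uva, requested_da, size, pg_array);
 *	if (IS_ERR_VALUE(da))
 *		return (int)da;
 *	...
 *	user_to_dsp_unmap(mmu, da);
 */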

/**
 * user_to_dsp_unmap() - unmap a dsp virtual buffer.
 * @mmu: Pointer to iommu handle.
 * @da: DSP address.
 *
 * This function unmaps a user space buffer from the DSP virtual address
 * space and releases the pages pinned by user_to_dsp_map().
 *
 */
int user_to_dsp_unmap(struct iommu *mmu, u32 da)
{
	unsigned i;
	struct sg_table *sgt;
	struct scatterlist *sg;

	sgt = iommu_vunmap(mmu, da);
	if (!sgt)
		return -EFAULT;

	for_each_sg(sgt->sgl, sg, sgt->nents, i)
		put_page(sg_page(sg));
	sg_free_table(sgt);
	kfree(sgt);

	return 0;
}