/*
 * arch/sh/kernel/cpu/sh4/sq.c
 *
 * General management API for SH-4 integrated Store Queues
 *
 * Copyright (C) 2001 - 2006  Paul Mundt
 * Copyright (C) 2001, 2002  M. R. Brown
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/bitmap.h>
#include <linux/sysdev.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/prefetch.h>	/* prefetchw() */
#include <asm/io.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cpu/sq.h>

struct sq_mapping;

struct sq_mapping {
	const char *name;

	unsigned long sq_addr;
	unsigned long addr;
	unsigned int size;

	struct sq_mapping *next;
};

static struct sq_mapping *sq_mapping_list;
static DEFINE_SPINLOCK(sq_mapping_lock);
static kmem_cache_t *sq_cache;
static unsigned long *sq_bitmap;

/*
 * Dummy read and writes used to serialize against any outstanding
 * store queue activity (see sq_flush_range() below).
 */
#define store_queue_barrier()			\
do {						\
	(void)ctrl_inl(P4SEG_STORE_QUE);	\
	ctrl_outl(0, P4SEG_STORE_QUE + 0);	\
	ctrl_outl(0, P4SEG_STORE_QUE + 8);	\
} while (0)

/**
 * sq_flush_range - Flush (prefetch) a specific SQ range
 * @start: the store queue address to start flushing from
 * @len: the length to flush
 *
 * Flushes the store queue cache from @start to @start + @len in a
 * linear fashion.
 */
void sq_flush_range(unsigned long start, unsigned int len)
{
	volatile unsigned long *sq = (unsigned long *)start;

	/* Flush the queues */
	for (len >>= 5; len--; sq += 8)
		prefetchw((void *)sq);

	/* Wait for completion */
	store_queue_barrier();
}
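
/*
 * Illustrative usage sketch, not part of the original file: a caller
 * that has finished writing 32-byte lines through a store queue
 * mapping flushes them out before expecting the data to be visible in
 * memory. sq_vaddr and nbytes are hypothetical names for a mapping
 * obtained from sq_remap().
 */
#if 0
static void example_flush(unsigned long sq_vaddr, unsigned int nbytes)
{
	/* ... 32-byte aligned stores to the SQ window at sq_vaddr ... */
	sq_flush_range(sq_vaddr, nbytes);	/* prefetch + barrier */
}
#endif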

static inline void sq_mapping_list_add(struct sq_mapping *map)
{
	struct sq_mapping **p, *tmp;

	spin_lock_irq(&sq_mapping_lock);

	p = &sq_mapping_list;
	while ((tmp = *p) != NULL)
		p = &tmp->next;

	map->next = tmp;
	*p = map;

	spin_unlock_irq(&sq_mapping_lock);
}

static inline void sq_mapping_list_del(struct sq_mapping *map)
{
	struct sq_mapping **p, *tmp;

	spin_lock_irq(&sq_mapping_lock);

	for (p = &sq_mapping_list; (tmp = *p); p = &tmp->next)
		if (tmp == map) {
			*p = tmp->next;
			break;
		}

	spin_unlock_irq(&sq_mapping_lock);
}

static int __sq_remap(struct sq_mapping *map, unsigned long flags)
{
#if defined(CONFIG_MMU)
	struct vm_struct *vma;

	vma = __get_vm_area(map->size, VM_ALLOC, map->sq_addr, SQ_ADDRMAX);
	if (!vma)
		return -ENOMEM;

	vma->phys_addr = map->addr;

	if (remap_area_pages((unsigned long)vma->addr, vma->phys_addr,
			     map->size, flags)) {
		vunmap(vma->addr);
		return -EAGAIN;
	}
#else
	/*
	 * Without an MMU (or with it turned off), this is much more
	 * straightforward, as we can just load up each queue's QACR with
	 * the physical address appropriately masked.
	 */
	ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0);
	ctrl_outl(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1);
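	/*
	 * Worked example of the masking above (illustrative): for a
	 * physical address of 0x10000000, (0x10000000 >> 26) << 2 is
	 * 0x10, and 0x10 & 0x1c leaves 0x10, i.e. the 64MB area number
	 * of the address placed into the QACR area field (bits [4:2]).
	 */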
#endif

	return 0;
}

/**
 * sq_remap - Map a physical address through the Store Queues
 * @phys: Physical address of mapping.
 * @size: Length of mapping.
 * @name: User invoking mapping.
 * @flags: Protection flags.
 *
 * Remaps the physical address @phys through the next available store queue
 * address of @size length. @name is logged at boot time as well as through
 * the sysfs interface.
 */
unsigned long sq_remap(unsigned long phys, unsigned int size,
		       const char *name, unsigned long flags)
{
	struct sq_mapping *map;
	unsigned long end;
	unsigned int psz;
	int ret, page;

	/* Don't allow wraparound or zero size */
	end = phys + size - 1;
	if (unlikely(!size || end < phys))
		return -EINVAL;
	/* Don't allow anyone to remap normal memory.. */
	if (unlikely(phys < virt_to_phys(high_memory)))
		return -EINVAL;

	phys &= PAGE_MASK;
	size = PAGE_ALIGN(end + 1) - phys;

	map = kmem_cache_alloc(sq_cache, GFP_KERNEL);
	if (unlikely(!map))
		return -ENOMEM;

	map->addr = phys;
	map->size = size;
	map->name = name;

	/* The bitmap tracks the 64MB SQ window in page-sized chunks. */
	page = bitmap_find_free_region(sq_bitmap, 0x04000000 >> PAGE_SHIFT,
				       get_order(map->size));
	if (unlikely(page < 0)) {
		ret = -ENOSPC;
		goto out;
	}

	map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT);

	ret = __sq_remap(map, flags);
	if (unlikely(ret != 0))
		goto out;

	psz = (size + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	pr_info("sqremap: %15s [%4d page%s] va 0x%08lx pa 0x%08lx\n",
		likely(map->name) ? map->name : "???",
		psz, psz == 1 ? " " : "s",
		map->sq_addr, map->addr);

	sq_mapping_list_add(map);

	return map->sq_addr;

out:
	kmem_cache_free(sq_cache, map);
	return ret;
}

/**
 * sq_unmap - Unmap a Store Queue allocation
 * @vaddr: Pre-allocated Store Queue mapping.
 *
 * Unmaps the store queue allocation @vaddr that was previously created by
 * sq_remap(). Also frees up the pte that was previously inserted into
 * the kernel page table and discards the UTLB translation.
 */
void sq_unmap(unsigned long vaddr)
{
	struct sq_mapping **p, *map;
#ifdef CONFIG_MMU
	struct vm_struct *vma;
#endif
	int page;

	for (p = &sq_mapping_list; (map = *p); p = &map->next)
		if (map->sq_addr == vaddr)
			break;

	if (unlikely(!map)) {
		printk(KERN_ERR "%s: bad store queue address 0x%08lx\n",
		       __FUNCTION__, vaddr);
		return;
	}

	page = (map->sq_addr - P4SEG_STORE_QUE) >> PAGE_SHIFT;
	bitmap_release_region(sq_bitmap, page, get_order(map->size));

#ifdef CONFIG_MMU
	vma = remove_vm_area((void *)(map->sq_addr & PAGE_MASK));
	if (!vma) {
		printk(KERN_ERR "%s: bad address 0x%08lx\n",
		       __FUNCTION__, map->sq_addr);
		return;
	}
#endif

	sq_mapping_list_del(map);

	kmem_cache_free(sq_cache, map);
}
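
/*
 * Illustrative usage sketch, not part of the original file: a driver
 * maps a device's framebuffer through the store queues, bursts data
 * through the returned SQ address, and tears the mapping down again.
 * fb_phys and fb_len are hypothetical values known to the caller, and
 * IS_ERR_VALUE() is used here on the assumption that sq_remap() hands
 * back a -errno value on failure, as above.
 */
#if 0
static int example_sq_user(unsigned long fb_phys, unsigned int fb_len)
{
	unsigned long sq_vaddr;

	sq_vaddr = sq_remap(fb_phys, fb_len, "example",
			    pgprot_val(PAGE_SHARED));
	if (IS_ERR_VALUE(sq_vaddr))
		return (int)sq_vaddr;

	/* ... 32-byte writes through sq_vaddr ... */
	sq_flush_range(sq_vaddr, fb_len);

	sq_unmap(sq_vaddr);
	return 0;
}
#endif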

/*
 * Needlessly complex sysfs interface. Unfortunately it doesn't seem like
 * there is any other easy way to add things on a per-cpu basis without
 * putting the directory entries somewhere stupid and having to create
 * links in sysfs by hand back in to the per-cpu directories.
 *
 * Some day we may want to have an additional abstraction per store
 * queue, but considering the kobject hell we already have to deal with,
 * it's simply not worth the trouble.
 */
static struct kobject *sq_kobject[NR_CPUS];

struct sq_sysfs_attr {
	struct attribute attr;
	ssize_t (*show)(char *buf);
	ssize_t (*store)(const char *buf, size_t count);
};

#define to_sq_sysfs_attr(attr)	container_of(attr, struct sq_sysfs_attr, attr)

static ssize_t sq_sysfs_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr);

	if (likely(sattr->show))
		return sattr->show(buf);

	return -EIO;
}

static ssize_t sq_sysfs_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t count)
{
	struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr);

	if (likely(sattr->store))
		return sattr->store(buf, count);

	return -EIO;
}

static ssize_t mapping_show(char *buf)
{
	struct sq_mapping **list, *entry;
	char *p = buf;

	for (list = &sq_mapping_list; (entry = *list); list = &entry->next)
		p += sprintf(p, "%08lx-%08lx [%08lx]: %s\n",
			     entry->sq_addr, entry->sq_addr + entry->size,
			     entry->addr, entry->name);

	return p - buf;
}

static ssize_t mapping_store(const char *buf, size_t count)
{
	unsigned long base = 0, len = 0;

	sscanf(buf, "%lx %lx", &base, &len);
	if (!base)
		return -EIO;

	if (likely(len)) {
		int ret = sq_remap(base, len, "Userspace",
				   pgprot_val(PAGE_SHARED));
		if (ret < 0)
			return ret;
	} else
		sq_unmap(base);

	return count;
}
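
/*
 * Illustrative userspace interaction, not part of the original file.
 * The exact sysfs path depends on how the cpu sysdev class lays out
 * its per-cpu directories; something like
 * /sys/devices/system/cpu/cpuN/sq/mapping is assumed here:
 *
 *	# establish a mapping: <physical base> <length>
 *	echo "0x10000000 0x1000" > .../sq/mapping
 *
 *	# tear one down: pass the store queue address with a zero length
 *	echo "0xe0000000 0" > .../sq/mapping
 *
 *	# list the active mappings
 *	cat .../sq/mapping
 */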

static struct sq_sysfs_attr mapping_attr =
	__ATTR(mapping, 0644, mapping_show, mapping_store);

static struct attribute *sq_sysfs_attrs[] = {
	&mapping_attr.attr,
	NULL,
};

static struct sysfs_ops sq_sysfs_ops = {
	.show	= sq_sysfs_show,
	.store	= sq_sysfs_store,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sq_sysfs_ops,
	.default_attrs	= sq_sysfs_attrs,
};

static int __devinit sq_sysdev_add(struct sys_device *sysdev)
{
	unsigned int cpu = sysdev->id;
	struct kobject *kobj;

	sq_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(!sq_kobject[cpu]))
		return -ENOMEM;

	kobj = sq_kobject[cpu];
	kobj->parent = &sysdev->kobj;
	kobject_set_name(kobj, "%s", "sq");
	kobj->ktype = &ktype_percpu_entry;

	return kobject_register(kobj);
}

static int __devexit sq_sysdev_remove(struct sys_device *sysdev)
{
	unsigned int cpu = sysdev->id;
	struct kobject *kobj = sq_kobject[cpu];

	kobject_unregister(kobj);
	return 0;
}

static struct sysdev_driver sq_sysdev_driver = {
	.add		= sq_sysdev_add,
	.remove		= __devexit_p(sq_sysdev_remove),
};

static int __init sq_api_init(void)
{
	unsigned int nr_pages = 0x04000000 >> PAGE_SHIFT;
	unsigned int size = BITS_TO_LONGS(nr_pages) * sizeof(unsigned long);
	int ret = -ENOMEM;

	printk(KERN_NOTICE "sq: Registering store queue API.\n");

	sq_cache = kmem_cache_create("store_queue_cache",
				sizeof(struct sq_mapping), 0, 0,
				NULL, NULL);
	if (unlikely(!sq_cache))
		return ret;

	/* One bit per page of the 64MB store queue address space. */
	sq_bitmap = kzalloc(size, GFP_KERNEL);
	if (unlikely(!sq_bitmap))
		goto out;

	ret = sysdev_driver_register(&cpu_sysdev_class, &sq_sysdev_driver);
	if (unlikely(ret != 0))
		goto out;

	return 0;

out:
	kfree(sq_bitmap);
	kmem_cache_destroy(sq_cache);

	return ret;
}

static void __exit sq_api_exit(void)
{
	sysdev_driver_unregister(&cpu_sysdev_class, &sq_sysdev_driver);
	kfree(sq_bitmap);
	kmem_cache_destroy(sq_cache);
}

module_init(sq_api_init);
module_exit(sq_api_exit);

MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, M. R. Brown <mrbrown@0xd6.org>");
MODULE_DESCRIPTION("Simple API for SH-4 integrated Store Queues");
MODULE_LICENSE("GPL");

EXPORT_SYMBOL(sq_remap);
EXPORT_SYMBOL(sq_unmap);
EXPORT_SYMBOL(sq_flush_range);