/*
 * linux/arch/arm26/mm/memc.c
 *
 * Copyright (C) 1998-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Page table sludge for older ARM processor architectures.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/bootmem.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/memory.h>
#include <asm/hardware.h>

#include <asm/map.h>

#define MEMC_TABLE_SIZE (256*sizeof(unsigned long))

struct kmem_cache *pte_cache, *pgd_cache;
int page_nr;

/*
 * Allocate space for a page table and a MEMC table.  Note that we
 * place the MEMC table before the page directory: this means we can
 * easily get at both tightly-associated data structures with a
 * single pointer.
 */
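/*
 * Layout of a single pgd_cache object, a sketch derived from
 * alloc_pgd_table() and free_pgd_slow() below:
 *
 *    object base            object base + MEMC_TABLE_SIZE
 *         |                      |
 *         v                      v
 *         [ MEMC table ......... ][ pgd: PTRS_PER_PGD entries ... ]
 *
 * alloc_pgd_table() hands out a pointer to the pgd half;
 * free_pgd_slow() steps back over the MEMC table to recover the
 * start of the cache object.
 */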
static inline pgd_t *alloc_pgd_table(void)
{
	void *pg2k = kmem_cache_alloc(pgd_cache, GFP_KERNEL);

	if (pg2k)
		pg2k += MEMC_TABLE_SIZE;

	return (pgd_t *)pg2k;
}

/*
 * Free a page table.  This function is the counterpart to
 * get_pgd_slow below, not alloc_pgd_table above.
 */
void free_pgd_slow(pgd_t *pgd)
{
	unsigned long tbl = (unsigned long)pgd;

	tbl -= MEMC_TABLE_SIZE;

	kmem_cache_free(pgd_cache, (void *)tbl);
}

/*
 * Allocate a new pgd and fill it in ready for use.
 *
 * A new task's pgd is completely empty (all pages !present) except for:
 *
 * o The machine vectors at virtual address 0x0
 * o The vmalloc region at the top of the address space
 */

/* index of the first kernel (vmalloc) entry in a pgd */
#define FIRST_KERNEL_PGD_NR	(FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)

pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = alloc_pgd_table();
	if (!new_pgd)
		goto no_pgd;

	/*
	 * On ARM, the first page must always be allocated since it
	 * contains the machine vectors.
	 */
	new_pmd = pmd_alloc(mm, new_pgd, 0);
	if (!new_pmd)
		goto no_pmd;

	new_pte = pte_alloc_map(mm, new_pmd, 0);
	if (!new_pte)
		goto no_pte;

	init_pgd = pgd_offset(&init_mm, 0);
	init_pmd = pmd_offset(init_pgd, 0);
	init_pte = pte_offset(init_pmd, 0);

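	/*
	 * Copy init_mm's mapping of virtual address 0 (the machine
	 * vectors page) into the new task's first pte.
	 */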
	set_pte(new_pte, *init_pte);
	pte_unmap(new_pte);

	/*
	 * The page table entries are zeroed when the table is created
	 * (see the cache_ctor functions below).  Now we need to plonk
	 * the kernel (vmalloc) area at the end of the address space:
	 * we copy this from the init thread, just like the init_pte we
	 * copied above...
	 */
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

	/* update MEMC tables */
	cpu_memc_update_all(new_pgd);
	return new_pgd;

no_pte:
	pmd_free(new_pmd);
no_pmd:
	free_pgd_slow(new_pgd);
no_pgd:
	return NULL;
}
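
/*
 * Callers are assumed to reach this pair via pgd_alloc()/pgd_free()
 * in asm/pgalloc.h (an assumption; this file does not spell it out).
 * Either way, free_pgd_slow() must only be given a pointer that
 * originally came from alloc_pgd_table(), since it steps back over
 * the MEMC table to recover the pgd_cache object.
 */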

/*
 * No special code is required here.
 */
void setup_mm_for_reboot(char mode)
{
}

/*
 * This contains the code to set up the memory map on an ARM2/ARM250/ARM3:
 * o swapper_pg_dir = 0x0207d000
 * o kernel proper starts at 0x02080000
 * o create (allocate) a pte to contain the machine vectors
 * o populate the pte (points to 0x02078000) (FIXME - is it zeroed?)
 * o populate the init task's page directory (pgd) with the new pte
 * o zero the rest of the init task's pgdir (FIXME - what about vmalloc?!)
 */
void __init memtable_init(struct meminfo *mi)
{
	pte_t *pte;
	int i;

	page_nr = max_low_pfn;

	pte = alloc_bootmem_low_pages(PTRS_PER_PTE * sizeof(pte_t));
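	/*
	 * pte[0] maps the machine vectors page read-only; per the list
	 * above, PAGE_OFFSET + SCREEN_SIZE works out to 0x02078000.
	 */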
	pte[0] = mk_pte_phys(PAGE_OFFSET + SCREEN_SIZE, PAGE_READONLY);
	pmd_populate(&init_mm, pmd_offset(swapper_pg_dir, 0), pte);

	for (i = 1; i < PTRS_PER_PGD; i++)
		pgd_val(swapper_pg_dir[i]) = 0;
}

void __init iotable_init(struct map_desc *io_desc)
{
	/* nothing to do */
}

/*
 * We never have holes in the memmap.
 */
void __init create_memmap_holes(struct meminfo *mi)
{
}
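
/*
 * Cache constructors: the pte cache zeroes the whole page table,
 * while the pgd cache zeroes only the user entries of the page
 * directory.  The MEMC table in front of the pgd is regenerated by
 * cpu_memc_update_all(), and the kernel entries are copied in
 * get_pgd_slow() above, so neither needs clearing here.
 */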

static void pte_cache_ctor(void *pte, struct kmem_cache *cache, unsigned long flags)
{
	memzero(pte, sizeof(pte_t) * PTRS_PER_PTE);
}

static void pgd_cache_ctor(void *pgd, struct kmem_cache *cache, unsigned long flags)
{
	memzero(pgd + MEMC_TABLE_SIZE, USER_PTRS_PER_PGD * sizeof(pgd_t));
}
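
/*
 * Both caches are created with SLAB_PANIC, so there is no failure
 * path here: the boot panics outright if either cache cannot be
 * set up.
 */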

void __init pgtable_cache_init(void)
{
	pte_cache = kmem_cache_create("pte-cache",
				      sizeof(pte_t) * PTRS_PER_PTE,
				      0, SLAB_PANIC, pte_cache_ctor);

	pgd_cache = kmem_cache_create("pgd-cache",
				      MEMC_TABLE_SIZE + sizeof(pgd_t) * PTRS_PER_PGD,
				      0, SLAB_PANIC, pgd_cache_ctor);
}