/*
 * IA-64 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002-2004 Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 2003-2004 Ken Chen <kenneth.w.chen@intel.com>
 *
 * Sep, 2003: add numa support
 * Feb, 2004: dynamic hugetlb page size via boot parameter
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT;

pte_t *
huge_pte_alloc (struct mm_struct *mm, unsigned long addr)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	pud = pud_alloc(mm, pgd, taddr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, taddr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, taddr);
	}
	return pte;
}

pte_t *
huge_pte_offset (struct mm_struct *mm, unsigned long addr)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, taddr);
		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, taddr);
			if (pmd_present(*pmd))
				pte = pte_offset_map(pmd, taddr);
		}
	}

	return pte;
}
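/*
 * Illustrative note: both helpers above walk an ordinary pgd/pud/pmd/pte
 * chain, but on the scaled address returned by htlbpage_to_page(), which
 * divides the offset within the hugetlb region by HPAGE_SIZE/PAGE_SIZE.
 * Assuming the default 256 MB huge page (HPAGE_SHIFT 28) and a 16 KB base
 * page (PAGE_SHIFT 14), offset 0x10000000 in the region maps to offset
 * 0x4000 in this shadow table, so each huge page needs only a single PTE.
 */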

#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }

/*
 * Don't actually need to do any preparation, but need to make sure
 * the address is in the right region.
 */
int prepare_hugepage_range(unsigned long addr, unsigned long len, pgoff_t pgoff)
{
	if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
		return -EINVAL;
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return -EINVAL;

	return 0;
}
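/*
 * Illustrative example for the checks above: a request for 512 MB at
 * HPAGE_REGION_BASE with a huge-page-aligned pgoff passes (assuming the
 * default 256 MB huge page size); any length or address that is not a
 * multiple of HPAGE_SIZE, a misaligned pgoff, or an address outside the
 * RGN_HPAGE region is rejected with -EINVAL.
 */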

struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write)
{
	struct page *page;
	pte_t *ptep;

	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, addr);
	if (!ptep || pte_none(*ptep))
		return NULL;
	page = pte_page(*ptep);
	page += ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}
int pmd_huge(pmd_t pmd)
{
	return 0;
}
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
{
	return NULL;
}
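/*
 * Illustrative notes on the functions above: follow_huge_addr() returns the
 * constituent base page inside the huge page, e.g. with the assumed
 * 256 MB/16 KB defaults an offset of 0x4000 into the huge page yields
 * pte_page(*ptep) + 1.  pmd_huge() and follow_huge_pmd() are stubs here:
 * ia64 keeps hugetlb mappings in a separately scaled page table (see
 * above) rather than in huge PMD entries.
 */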

void hugetlb_free_pgd_range(struct mmu_gather **tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	/*
	 * This is called to free hugetlb page tables.
	 *
	 * The offset of these addresses from the base of the hugetlb
	 * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
	 * the standard free_pgd_range will free the right page tables.
	 *
	 * If floor and ceiling are also in the hugetlb region, they
	 * must likewise be scaled down; but if outside, left unchanged.
	 */

	addr = htlbpage_to_page(addr);
	end = htlbpage_to_page(end);
	if (REGION_NUMBER(floor) == RGN_HPAGE)
		floor = htlbpage_to_page(floor);
	if (REGION_NUMBER(ceiling) == RGN_HPAGE)
		ceiling = htlbpage_to_page(ceiling);

	free_pgd_range(tlb, addr, end, floor, ceiling);
}
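/*
 * Worked example for the scaling above (assuming the default 256 MB huge
 * page and 16 KB base page): HPAGE_SIZE/PAGE_SIZE is 16384, so a huge
 * region offset of 256 MB becomes an offset of 16 KB after
 * htlbpage_to_page(), and free_pgd_range() then frees the page tables
 * backing that single huge page.  floor and ceiling are only scaled when
 * they fall inside RGN_HPAGE, exactly as the two checks above implement.
 */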

unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct vm_area_struct *vmm;

	if (len > RGN_MAP_LIMIT)
		return -ENOMEM;
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	/* This code assumes that RGN_HPAGE != 0. */
	if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1)))
		addr = HPAGE_REGION_BASE;
	else
		addr = ALIGN(addr, HPAGE_SIZE);
	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
		/* At this point:  (!vmm || addr < vmm->vm_end). */
		if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
			return -ENOMEM;
		if (!vmm || (addr + len) <= vmm->vm_start)
			return addr;
		addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
	}
}
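/*
 * Illustrative note: the loop above is a simple first-fit search of the
 * hugetlb region.  Each candidate address is huge-page aligned; when it
 * collides with an existing vma, the search resumes at that vma's end
 * rounded up to the next HPAGE_SIZE boundary, and it gives up with
 * -ENOMEM once the candidate would run past RGN_MAP_LIMIT.
 */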

static int __init hugetlb_setup_sz(char *str)
{
	u64 tr_pages;
	unsigned long long size;

	if (ia64_pal_vm_page_size(&tr_pages, NULL) != 0)
		/*
		 * shouldn't happen, but just in case.
		 */
		tr_pages = 0x15557000UL;

	size = memparse(str, &str);
	if (*str || (size & (size-1)) || !(tr_pages & size) ||
		size <= PAGE_SIZE ||
		size >= (1UL << PAGE_SHIFT << MAX_ORDER)) {
		printk(KERN_WARNING "Invalid huge page size specified\n");
		return 1;
	}

	hpage_shift = __ffs(size);
	/*
	 * The boot cpu already executed ia64_mmu_init() using
	 * HPAGE_SHIFT_DEFAULT; override the hugetlb region's page size
	 * here with the new shift.
	 */
	ia64_set_rr(HPAGE_REGION_BASE, hpage_shift << 2);
	return 1;
}
__setup("hugepagesz=", hugetlb_setup_sz);
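/*
 * Usage sketch (illustrative): booting with "hugepagesz=64M" would select
 * 64 MB huge pages, assuming PAL reports 64 MB as an insertable TLB page
 * size and MAX_ORDER is large enough for the allocator to hand out 64 MB
 * blocks.  The handler accepts only power-of-two sizes strictly between
 * PAGE_SIZE and PAGE_SIZE << MAX_ORDER that appear in the PAL page-size
 * mask; anything else logs a warning and keeps the existing page size.
 */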