#ifndef _S390_TLB_H
#define _S390_TLB_H

/*
 * TLB flushing on s390 is complicated. The following requirement
 * from the principles of operation is the most arduous:
 *
 * "A valid table entry must not be changed while it is attached
 * to any CPU and may be used for translation by that CPU except to
 * (1) invalidate the entry by using INVALIDATE PAGE TABLE ENTRY,
 * or INVALIDATE DAT TABLE ENTRY, (2) alter bits 56-63 of a page
 * table entry, or (3) make a change by means of a COMPARE AND SWAP
 * AND PURGE instruction that purges the TLB."
 *
 * The modification of a pte of an active mm struct is therefore
 * a two-step process: i) invalidate the pte, ii) store the new pte.
 * This is true for the page protection bit as well.
 * The only possible optimization is to flush at the beginning of
 * a tlb_gather_mmu cycle if the mm_struct is currently not in use.
 *
 * Pages used for the page tables are a different story. FIXME: more
 */
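
/*
 * Illustrative only, not part of this header's interface: a minimal
 * sketch of the two-step pte update described above. It assumes the
 * __ptep_ipte() helper provided by this tree's <asm/pgtable.h>;
 * example_set_pte() is a hypothetical name used for the example.
 */
#if 0
static inline void example_set_pte(unsigned long address, pte_t *ptep,
				   pte_t entry)
{
	/* step i: invalidate the pte while it may still be attached */
	__ptep_ipte(address, ptep);
	/* step ii: only now store the new pte value */
	*ptep = entry;
}
#endif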

#include <linux/mm.h>
#include <linux/swap.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>

#ifndef CONFIG_SMP
#define TLB_NR_PTRS 1
#else
/* 508 pointers keep the per-cpu mmu_gather within a single 4KB page */
#define TLB_NR_PTRS 508
#endif

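/*
 * The gather array is filled from both ends: freed pte tables are
 * stored at array[0], array[1], ... (counted upward by nr_ptes), freed
 * pmd/pud tables at array[TLB_NR_PTRS-1], array[TLB_NR_PTRS-2], ...
 * (indexed downward by nr_pxds). The *_free_tlb functions below force
 * a flush via tlb_flush_mmu as soon as nr_ptes >= nr_pxds, i.e. before
 * the two regions can collide.
 */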
struct mmu_gather {
	struct mm_struct *mm;
	unsigned int fullmm;
	unsigned int nr_ptes;
	unsigned int nr_pxds;
	void *array[TLB_NR_PTRS];
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm,
						unsigned int full_mm_flush)
{
	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

	tlb->mm = mm;
	tlb->fullmm = full_mm_flush;
	tlb->nr_ptes = 0;
	tlb->nr_pxds = TLB_NR_PTRS;
	if (tlb->fullmm)
		__tlb_flush_mm(mm);
	return tlb;
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb,
				 unsigned long start, unsigned long end)
{
	if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pxds < TLB_NR_PTRS))
		__tlb_flush_mm(tlb->mm);
	while (tlb->nr_ptes > 0)
		pte_free(tlb->mm, tlb->array[--tlb->nr_ptes]);
	while (tlb->nr_pxds < TLB_NR_PTRS)
		/* pgd_free frees the pointer as region or segment table */
		pgd_free(tlb->mm, tlb->array[tlb->nr_pxds++]);
}

static inline void tlb_finish_mmu(struct mmu_gather *tlb,
				  unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb, start, end);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	put_cpu_var(mmu_gathers);
}
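
/*
 * Illustrative only, not part of this header's interface: the shape of
 * a gather/flush/finish cycle as the generic mm code drives it.
 * example_unmap_range() is a hypothetical caller; the actual unmap work
 * in the middle is elided.
 */
#if 0
static void example_unmap_range(struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	/* 0: not a full-mm flush, so gather instead of flushing upfront */
	struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);

	/*
	 * ... clear ptes, handing freed pages to tlb_remove_page() and
	 * freed page tables to pte_free_tlb()/pmd_free_tlb() ...
	 */

	/* flush the tlb and release the gathered page tables */
	tlb_finish_mmu(tlb, start, end);
}
#endif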

/*
 * Release the page cache reference for a pte removed by
 * tlb_ptep_clear_flush. In both flush modes the tlb entry for a page
 * cache page has already been flushed, so just do
 * free_page_and_swap_cache.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	free_page_and_swap_cache(page);
}

/*
 * pte_free_tlb frees a pte table and clears the CRSTE for the
 * page table from the tlb.
 */
static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				unsigned long address)
{
	if (!tlb->fullmm) {
		tlb->array[tlb->nr_ptes++] = pte;
		if (tlb->nr_ptes >= tlb->nr_pxds)
			tlb_flush_mmu(tlb, 0, 0);
	} else
		pte_free(tlb->mm, pte);
}

/*
 * pmd_free_tlb frees a pmd table and clears the CRSTE for the
 * segment table entry from the tlb.
 * If the mm uses a two level page table the single pmd is freed
 * as the pgd. pmd_free_tlb checks the asce_limit against 2GB
 * to avoid the double free of the pmd in this case.
 */
static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				unsigned long address)
{
#ifdef __s390x__
	if (tlb->mm->context.asce_limit <= (1UL << 31))
		return;
	if (!tlb->fullmm) {
		tlb->array[--tlb->nr_pxds] = pmd;
		if (tlb->nr_ptes >= tlb->nr_pxds)
			tlb_flush_mmu(tlb, 0, 0);
	} else
		pmd_free(tlb->mm, pmd);
#endif
}

/*
 * pud_free_tlb frees a pud table and clears the CRSTE for the
 * region third table entry from the tlb.
 * If the mm uses a three level page table the single pud is freed
 * as the pgd. pud_free_tlb checks the asce_limit against 4TB
 * to avoid the double free of the pud in this case.
 */
static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				unsigned long address)
{
#ifdef __s390x__
	if (tlb->mm->context.asce_limit <= (1UL << 42))
		return;
	if (!tlb->fullmm) {
		tlb->array[--tlb->nr_pxds] = pud;
		if (tlb->nr_ptes >= tlb->nr_pxds)
			tlb_flush_mmu(tlb, 0, 0);
	} else
		pud_free(tlb->mm, pud);
#endif
}

#define tlb_start_vma(tlb, vma)			do { } while (0)
#define tlb_end_vma(tlb, vma)			do { } while (0)
#define tlb_remove_tlb_entry(tlb, ptep, addr)	do { } while (0)
#define tlb_migrate_finish(mm)			do { } while (0)

#endif /* _S390_TLB_H */