/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Interval tree (augmented rbtree) used to store the PAT memory type
 * reservations.
 */

#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/gfp.h>

#include <asm/pgtable.h>
#include <asm/pat.h>

#include "pat_internal.h"

/*
 * The memtype tree keeps track of the memory type for specific
 * physical memory areas. Without proper tracking, conflicting memory
 * types in different mappings can cause CPU cache corruption.
 *
 * The tree is an interval tree (augmented rbtree), ordered on the
 * starting address. The tree can contain multiple entries for
 * different regions which overlap. All the aliases have the same
 * cache attributes, of course.
 *
 * memtype_lock protects the rbtree.
 */

static void memtype_rb_augment_cb(struct rb_node *node);
static struct rb_root memtype_rbroot = RB_AUGMENT_ROOT(&memtype_rb_augment_cb);

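/* Return 1 if [start, end) overlaps the node's [start, end) range, 0 otherwise */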
static int is_node_overlap(struct memtype *node, u64 start, u64 end)
{
	if (node->start >= end || node->end <= start)
		return 0;

	return 1;
}

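/* Maximum 'end' value over the subtree rooted at 'node' (0 for an empty subtree) */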
static u64 get_subtree_max_end(struct rb_node *node)
{
	u64 ret = 0;
	if (node) {
		struct memtype *data = container_of(node, struct memtype, rb);
		ret = data->subtree_max_end;
	}
	return ret;
}

/* Update 'subtree_max_end' for a node, based on node and its children */
static void update_node_max_end(struct rb_node *node)
{
	struct memtype *data;
	u64 max_end, child_max_end;

	if (!node)
		return;

	data = container_of(node, struct memtype, rb);
	max_end = data->end;

	child_max_end = get_subtree_max_end(node->rb_right);
	if (child_max_end > max_end)
		max_end = child_max_end;

	child_max_end = get_subtree_max_end(node->rb_left);
	if (child_max_end > max_end)
		max_end = child_max_end;

	data->subtree_max_end = max_end;
}

/* Update 'subtree_max_end' for a node and all its ancestors */
static void update_path_max_end(struct rb_node *node)
{
	u64 old_max_end, new_max_end;

	while (node) {
		struct memtype *data = container_of(node, struct memtype, rb);

		old_max_end = data->subtree_max_end;
		update_node_max_end(node);
		new_max_end = data->subtree_max_end;

		if (new_max_end == old_max_end)
			break;

		node = rb_parent(node);
	}
}

/* Find the first (lowest start addr) overlapping range in the rb tree */
static struct memtype *memtype_rb_lowest_match(struct rb_root *root,
				u64 start, u64 end)
{
	struct rb_node *node = root->rb_node;
	struct memtype *last_lower = NULL;

	while (node) {
		struct memtype *data = container_of(node, struct memtype, rb);

		if (get_subtree_max_end(node->rb_left) > start) {
			/* Lowest overlap if any must be on left side */
			node = node->rb_left;
		} else if (is_node_overlap(data, start, end)) {
			last_lower = data;
			break;
		} else if (start >= data->start) {
			/* Lowest overlap if any must be on right side */
			node = node->rb_right;
		} else {
			break;
		}
	}
	return last_lower; /* Returns NULL if there is no overlap */
}

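/* Find the entry whose range is exactly [start, end), if one exists */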
static struct memtype *memtype_rb_exact_match(struct rb_root *root,
				u64 start, u64 end)
{
	struct memtype *match;

	match = memtype_rb_lowest_match(root, start, end);
	while (match != NULL && match->start < end) {
		struct rb_node *node;

		if (match->start == start && match->end == end)
			return match;

		node = rb_next(&match->rb);
		if (node)
			match = container_of(node, struct memtype, rb);
		else
			match = NULL;
	}

	return NULL; /* Returns NULL if there is no exact match */
}

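/*
 * Check whether a new reservation [start, end) of type 'reqtype' conflicts
 * with existing entries: all overlapping entries must carry one single
 * memory type. If 'newtype' is non-NULL, the request may instead adopt the
 * type of the existing overlap, which is returned in *newtype on success.
 */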
static int memtype_rb_check_conflict(struct rb_root *root,
				u64 start, u64 end,
				unsigned long reqtype, unsigned long *newtype)
{
	struct rb_node *node;
	struct memtype *match;
	int found_type = reqtype;

	match = memtype_rb_lowest_match(&memtype_rbroot, start, end);
	if (match == NULL)
		goto success;

	if (match->type != found_type && newtype == NULL)
		goto failure;

	dprintk("Overlap at 0x%Lx-0x%Lx\n", match->start, match->end);
	found_type = match->type;

	node = rb_next(&match->rb);
	while (node) {
		match = container_of(node, struct memtype, rb);

		if (match->start >= end) /* Checked all possible matches */
			goto success;

		if (is_node_overlap(match, start, end) &&
		    match->type != found_type) {
			goto failure;
		}

		node = rb_next(&match->rb);
	}
success:
	if (newtype)
		*newtype = found_type;

	return 0;

failure:
	printk(KERN_INFO "%s:%d conflicting memory types "
		"%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
		end, cattr_name(found_type), cattr_name(match->type));
	return -EBUSY;
}

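/* Augment callback: keep 'subtree_max_end' valid on the path from 'node' up to the root */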
static void memtype_rb_augment_cb(struct rb_node *node)
{
	if (node)
		update_path_max_end(node);
}

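/* Plain rbtree insert, ordered by start address (equal starts go left) */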
static void memtype_rb_insert(struct rb_root *root, struct memtype *newdata)
{
	struct rb_node **node = &(root->rb_node);
	struct rb_node *parent = NULL;

	while (*node) {
		struct memtype *data = container_of(*node, struct memtype, rb);

		parent = *node;
		if (newdata->start <= data->start)
			node = &((*node)->rb_left);
		else if (newdata->start > data->start)
			node = &((*node)->rb_right);
	}

	rb_link_node(&newdata->rb, parent, node);
	rb_insert_color(&newdata->rb, root);
}

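/*
 * Insert 'new' unless it conflicts with an existing reservation. On success,
 * new->type is set from *ret_type (when the caller supplied one) to match
 * overlapping entries. memtype_lock (see above) must be held by the caller.
 */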
int rbt_memtype_check_insert(struct memtype *new, unsigned long *ret_type)
{
	int err = 0;

	err = memtype_rb_check_conflict(&memtype_rbroot, new->start, new->end,
					new->type, ret_type);

	if (!err) {
		if (ret_type)
			new->type = *ret_type;

		new->subtree_max_end = new->end;
		memtype_rb_insert(&memtype_rbroot, new);
	}
	return err;
}

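/* Remove the entry that exactly matches [start, end); returns it, or NULL if none */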
struct memtype *rbt_memtype_erase(u64 start, u64 end)
{
	struct memtype *data;

	data = memtype_rb_exact_match(&memtype_rbroot, start, end);
	if (!data)
		goto out;

	rb_erase(&data->rb, &memtype_rbroot);
out:
	return data;
}

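/* Return the lowest-starting entry overlapping the page at 'addr', or NULL */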
struct memtype *rbt_memtype_lookup(u64 addr)
{
	struct memtype *data;
	data = memtype_rb_lowest_match(&memtype_rbroot, addr, addr + PAGE_SIZE);
	return data;
}

#if defined(CONFIG_DEBUG_FS)
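/*
 * Copy the pos'th entry (1-based, in ascending start order) into *out.
 * Returns 0 on success, 1 when fewer than 'pos' entries exist. Only needed
 * by the debugfs interface, hence the CONFIG_DEBUG_FS guard.
 */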
int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos)
{
	struct rb_node *node;
	int i = 1;

	node = rb_first(&memtype_rbroot);
	while (node && pos != i) {
		node = rb_next(node);
		i++;
	}

	if (node) { /* pos == i */
		struct memtype *this = container_of(node, struct memtype, rb);
		*out = *this;
		return 0;
	} else {
		return 1;
	}
}
#endif