diff --git a/mm/ksm.c b/mm/ksm.c
index 56a0da1f9979d7eaa9d85cbc0b20ef76215e4abf..a93f1b7f508cde17cb0b41f7183641dcfdb7a26f 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1563,10 +1563,12 @@ int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg,
 again:
        hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
                struct anon_vma *anon_vma = rmap_item->anon_vma;
+               struct anon_vma_chain *vmac;
                struct vm_area_struct *vma;
 
                spin_lock(&anon_vma->lock);
-               list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+               list_for_each_entry(vmac, &anon_vma->head, same_anon_vma) {
+                       vma = vmac->vma;
                        if (rmap_item->address < vma->vm_start ||
                            rmap_item->address >= vma->vm_end)
                                continue;
@@ -1614,10 +1616,12 @@ int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
 again:
        hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
                struct anon_vma *anon_vma = rmap_item->anon_vma;
+               struct anon_vma_chain *vmac;
                struct vm_area_struct *vma;
 
                spin_lock(&anon_vma->lock);
-               list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+               list_for_each_entry(vmac, &anon_vma->head, same_anon_vma) {
+                       vma = vmac->vma;
                        if (rmap_item->address < vma->vm_start ||
                            rmap_item->address >= vma->vm_end)
                                continue;
@@ -1664,10 +1668,12 @@ int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
 again:
        hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) {
                struct anon_vma *anon_vma = rmap_item->anon_vma;
+               struct anon_vma_chain *vmac;
                struct vm_area_struct *vma;
 
                spin_lock(&anon_vma->lock);
-               list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+               list_for_each_entry(vmac, &anon_vma->head, same_anon_vma) {
+                       vma = vmac->vma;
                        if (rmap_item->address < vma->vm_start ||
                            rmap_item->address >= vma->vm_end)
                                continue;
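
Note on the pattern repeated in all three hunks: after this change an anon_vma no longer links vm_area_structs directly, so each entry on anon_vma->head is an anon_vma_chain, and the KSM rmap walkers must dereference vmac->vma before doing the address-range check. The userspace sketch below only mirrors that indirection; the struct layouts (a bare next pointer instead of the kernel's struct list_head), the list construction, and the helper find_vma_for() are simplified illustrations, not the kernel's actual definitions.

#include <stdio.h>

struct vm_area_struct {
	unsigned long vm_start;
	unsigned long vm_end;
};

struct anon_vma_chain {
	struct vm_area_struct *vma;           /* the mapping this entry represents */
	struct anon_vma_chain *same_anon_vma; /* next entry on the anon_vma's list */
};

struct anon_vma {
	struct anon_vma_chain *head;          /* first chain entry */
};

/* Walk the chain entries and return the vma covering `address`, if any,
 * mirroring the per-rmap_item range check in the hunks above. */
static struct vm_area_struct *find_vma_for(struct anon_vma *av, unsigned long address)
{
	struct anon_vma_chain *vmac;

	for (vmac = av->head; vmac; vmac = vmac->same_anon_vma) {
		struct vm_area_struct *vma = vmac->vma;

		if (address < vma->vm_start || address >= vma->vm_end)
			continue;	/* address not mapped by this vma, try next entry */
		return vma;
	}
	return NULL;
}

int main(void)
{
	struct vm_area_struct a = { 0x1000, 0x2000 }, b = { 0x4000, 0x8000 };
	struct anon_vma_chain ca = { &a, NULL };
	struct anon_vma_chain cb = { &b, &ca };
	struct anon_vma av = { &cb };
	struct vm_area_struct *hit = find_vma_for(&av, 0x5000);

	if (hit)
		printf("0x5000 falls in [%#lx, %#lx)\n", hit->vm_start, hit->vm_end);
	return 0;
}

Following the same indirection, the loops in page_referenced_ksm(), try_to_unmap_ksm(), and rmap_walk_ksm() above iterate anon_vma_chain entries via the same_anon_vma link and then operate on vmac->vma, rather than iterating vm_area_structs directly as the removed lines did.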