diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 7262aacea8a201c073bd7ee5522f2e4b59fec937..b840afa89761ce0d83690ff963385399246fa6d8 100644 (file)
@@ -415,14 +415,8 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
 
        if (vm_dirty_bytes)
                dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
-       else {
-               int dirty_ratio;
-
-               dirty_ratio = vm_dirty_ratio;
-               if (dirty_ratio < 5)
-                       dirty_ratio = 5;
-               dirty = (dirty_ratio * available_memory) / 100;
-       }
+       else
+               dirty = (vm_dirty_ratio * available_memory) / 100;
 
        if (dirty_background_bytes)
                background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
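The hunk above drops the silent 5% floor on vm_dirty_ratio, so a sysctl setting below 5 now takes effect as written. A minimal userspace sketch of the new arithmetic, using hypothetical numbers (1,000,000 available pages, vm_dirty_ratio = 3):

#include <stdio.h>

int main(void)
{
	unsigned long available_memory = 1000000;	/* pages, hypothetical */
	int vm_dirty_ratio = 3;				/* sysctl vm.dirty_ratio */

	/* New behaviour: the limit is exactly ratio% of available memory. */
	unsigned long dirty = (vm_dirty_ratio * available_memory) / 100;

	/* Old behaviour: ratios below 5 were silently raised to 5. */
	unsigned long old_dirty = (5UL * available_memory) / 100;

	printf("dirty limit: %lu pages (previously %lu)\n", dirty, old_dirty);
	return 0;
}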
@@ -510,7 +504,7 @@ static void balance_dirty_pages(struct address_space *mapping,
                 * catch-up. This avoids (excessively) small writeouts
                 * when the bdi limits are ramping up.
                 */
-               if (nr_reclaimable + nr_writeback <
+               if (nr_reclaimable + nr_writeback <=
                                (background_thresh + dirty_thresh) / 2)
                        break;
 
@@ -542,8 +536,8 @@ static void balance_dirty_pages(struct address_space *mapping,
                 * the last resort safeguard.
                 */
                dirty_exceeded =
-                       (bdi_nr_reclaimable + bdi_nr_writeback >= bdi_thresh)
-                       || (nr_reclaimable + nr_writeback >= dirty_thresh);
+                       (bdi_nr_reclaimable + bdi_nr_writeback > bdi_thresh)
+                       || (nr_reclaimable + nr_writeback > dirty_thresh);
 
                if (!dirty_exceeded)
                        break;
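Both comparison flips in the two hunks above move the "exactly at the limit" case to the benign side: with, say, background_thresh = 100 and dirty_thresh = 300, a task sitting at exactly (100 + 300) / 2 = 200 reclaimable plus writeback pages now breaks out early, and a bdi sitting exactly at bdi_thresh is no longer counted as having exceeded it.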
@@ -836,7 +830,8 @@ void tag_pages_for_writeback(struct address_space *mapping,
                spin_unlock_irq(&mapping->tree_lock);
                WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH);
                cond_resched();
-       } while (tagged >= WRITEBACK_TAG_BATCH);
+               /* We check 'start' to handle wrapping when end == ~0UL */
+       } while (tagged >= WRITEBACK_TAG_BATCH && start);
 }
 EXPORT_SYMBOL(tag_pages_for_writeback);
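The new '&& start' test guards against index wrap-around: when a caller passes end == ~0UL and a full batch happens to finish exactly at that index, start overflows to 0 and, without the extra test, the scan would restart from the beginning of the mapping. A self-contained sketch of the failure mode, with the radix-tree tagging replaced by a hypothetical stub that pretends every index is tagged:

#include <stdio.h>

#define BATCH 2UL	/* tiny batch so the wrap case triggers quickly */

/* Hypothetical stand-in for radix_tree_range_tag_if_tagged(): tag up
 * to 'max' indices from *start through end, advancing *start. */
static unsigned long tag_range(unsigned long *start, unsigned long end,
			       unsigned long max)
{
	unsigned long n = 0;

	while (n < max) {
		n++;
		if ((*start)++ == end)	/* wraps to 0 when end == ~0UL */
			break;
	}
	return n;
}

int main(void)
{
	unsigned long start = ~0UL - 3, end = ~0UL, tagged;

	do {
		tagged = tag_range(&start, end, BATCH);
		/* A full batch ending exactly at ~0UL leaves start == 0;
		 * the '&& start' test is what stops the loop instead of
		 * rescanning the mapping from index 0 forever. */
	} while (tagged >= BATCH && start);

	printf("stopped with start = %lu\n", start);
	return 0;
}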
 
@@ -984,22 +979,16 @@ continue_unlock:
                                }
                        }
 
-                       if (wbc->nr_to_write > 0) {
-                               if (--wbc->nr_to_write == 0 &&
-                                   wbc->sync_mode == WB_SYNC_NONE) {
-                                       /*
-                                        * We stop writing back only if we are
-                                        * not doing integrity sync. In case of
-                                        * integrity sync we have to keep going
-                                        * because someone may be concurrently
-                                        * dirtying pages, and we might have
-                                        * synced a lot of newly appeared dirty
-                                        * pages, but have not synced all of the
-                                        * old dirty pages.
-                                        */
-                                       done = 1;
-                                       break;
-                               }
+                       /*
+                        * We stop writing back only if we are not doing
+                        * integrity sync. In case of integrity sync we have to
+                        * keep going until we have written all the pages
+                        * we tagged for writeback prior to entering this loop.
+                        */
+                       if (--wbc->nr_to_write <= 0 &&
+                           wbc->sync_mode == WB_SYNC_NONE) {
+                               done = 1;
+                               break;
                        }
                }
                pagevec_release(&pvec);
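The rewritten exit test decrements nr_to_write unconditionally but honours it only for WB_SYNC_NONE; an integrity sync (WB_SYNC_ALL) keeps going until the pages tagged before the loop are exhausted, so the counter may legitimately go negative. A compact sketch of just that control flow (the page writing itself is elided):

#include <stdio.h>

enum sync_mode { WB_SYNC_NONE, WB_SYNC_ALL };

/* Hypothetical driver: 'tagged' pages await writeback; returns the
 * leftover nr_to_write budget. */
static long writeback(enum sync_mode mode, long nr_to_write, int tagged)
{
	for (int page = 0; page < tagged; page++) {
		/* ... write one page ... */
		if (--nr_to_write <= 0 && mode == WB_SYNC_NONE)
			break;	/* only non-integrity sync stops early */
	}
	return nr_to_write;
}

int main(void)
{
	printf("WB_SYNC_NONE leftover: %ld\n", writeback(WB_SYNC_NONE, 4, 10));
	printf("WB_SYNC_ALL  leftover: %ld\n", writeback(WB_SYNC_ALL, 4, 10));
	return 0;
}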
@@ -1126,11 +1115,25 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
 {
        if (mapping_cap_account_dirty(mapping)) {
                __inc_zone_page_state(page, NR_FILE_DIRTY);
+               __inc_zone_page_state(page, NR_DIRTIED);
                __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
                task_dirty_inc(current);
                task_io_account_write(PAGE_CACHE_SIZE);
        }
 }
+EXPORT_SYMBOL(account_page_dirtied);
+
+/*
+ * Helper function for set_page_writeback family.
+ * NOTE: Unlike account_page_dirtied this does not rely on being atomic
+ * wrt interrupts.
+ */
+void account_page_writeback(struct page *page)
+{
+       inc_zone_page_state(page, NR_WRITEBACK);
+       inc_zone_page_state(page, NR_WRITTEN);
+}
+EXPORT_SYMBOL(account_page_writeback);
 
 /*
  * For address_spaces which do not use buffers.  Just tag the page as dirty in
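The two new zone counters feed the vmstat machinery; assuming a kernel that carries this patch, nr_dirtied and nr_written appear in /proc/vmstat alongside the existing nr_dirty and nr_writeback, and can be pulled out with something like:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "nr_dirtied", 10) ||
		    !strncmp(line, "nr_written", 10))
			fputs(line, stdout);
	fclose(f);
	return 0;
}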
@@ -1370,7 +1373,7 @@ int test_set_page_writeback(struct page *page)
                ret = TestSetPageWriteback(page);
        }
        if (!ret)
-               inc_zone_page_state(page, NR_WRITEBACK);
+               account_page_writeback(page);
        return ret;
 
 }