
From: Samuel Thibault
Subject: [gnumach] 01/01: patches/99_revert-page-cache-policy.patch
Date: Wed, 20 Apr 2016 09:48:29 +0000

This is an automated email from the git hooks/post-receive script.

sthibault pushed a commit to branch master
in repository gnumach.

commit 4a734a815a31cf4962689ab19574cfc74f16c6ef
Author: Samuel Thibault <address@hidden>
Date:   Wed Apr 20 11:48:05 2016 +0200

    patches/99_revert-page-cache-policy.patch
    
    Revert the new cache policy for now: when ext2fs has a lot of objects,
    vm_map_enter becomes very slow.
---
 debian/changelog                                 |   2 +
 debian/patches/99_revert-page-cache-policy.patch | 441 +++++++++++++++++++++++
 debian/patches/series                            |   1 +
 3 files changed, 444 insertions(+)
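
For reference, the policy this patch restores caps the number of unreferenced
("cached") objects at a fixed maximum, vm_object_cached_max = 4000, trimming
the queue in FIFO order whenever a deallocation pushes it over the limit. The
sketch below is a minimal, self-contained illustration of that bound only; the
struct, queue, and function names are hypothetical simplifications, and the
real code in vm/vm_object.c below additionally takes Mach locks, deactivates
the object's pages, and waits for pager initialization before terminating a
victim.

#include <stdio.h>
#include <stdlib.h>

#define CACHED_MAX 4000                 /* mirrors vm_object_cached_max */

struct object {
        struct object *next;            /* FIFO queue linkage */
};

static struct object *cached_head, *cached_tail;
static int cached_count;

/* Last reference dropped on a persistent object: cache it, then trim. */
static void object_deallocate(struct object *obj)
{
        /* Enter the object onto the queue of "cached" objects. */
        obj->next = NULL;
        if (cached_tail)
                cached_tail->next = obj;
        else
                cached_head = obj;
        cached_tail = obj;

        /* No overflow: the object simply stays cached. */
        if (++cached_count <= CACHED_MAX)
                return;

        /* Overflow: terminate the oldest cached objects until in bounds. */
        while (cached_count > CACHED_MAX) {
                struct object *victim = cached_head;
                cached_head = victim->next;
                if (cached_head == NULL)
                        cached_tail = NULL;
                cached_count--;
                free(victim);           /* stands in for vm_object_terminate() */
        }
}

int main(void)
{
        /* Deallocate 5000 objects; the cache stays bounded at 4000. */
        for (int i = 0; i < 5000; i++)
                object_deallocate(calloc(1, sizeof(struct object)));
        printf("cached: %d\n", cached_count);   /* prints "cached: 4000" */
        return 0;
}

The fixed bound keeps the set of cached objects small regardless of workload,
which works around the vm_map_enter slowdown observed under the dynamic,
pressure-driven policy when ext2fs creates a large number of objects.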

diff --git a/debian/changelog b/debian/changelog
index c032ecf..0842120 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -5,6 +5,8 @@ gnumach (2:1.6+git20160311-2) UNRELEASED; urgency=medium
   * patches/70_dde.patch: Fix crash when delivering the interrupt makes the
     thread sleep. It is fine to do another check loop when running the
     interrupt handlers anyway.
+  * patches/99_revert-page-cache-policy.patch: Revert the new cache policy for
+    now: when ext2fs has a lot of objects, vm_map_enter becomes very slow.
 
  -- Samuel Thibault <address@hidden>  Fri, 11 Mar 2016 23:21:56 +0000
 
diff --git a/debian/patches/99_revert-page-cache-policy.patch b/debian/patches/99_revert-page-cache-policy.patch
new file mode 100644
index 0000000..ad12f00
--- /dev/null
+++ b/debian/patches/99_revert-page-cache-policy.patch
@@ -0,0 +1,441 @@
+Revert c774e89387a43d737abbdd99781a294c1cceebb2 and
+98d64d1a78172b1efc26cac36a367eec8496926f to work around performance regression
+
+diff --git a/vm/vm_object.c b/vm/vm_object.c
+index bc30128..e7cfff1 100644
+--- a/vm/vm_object.c
++++ b/vm/vm_object.c
+@@ -64,6 +64,8 @@ void memory_object_release(
+       pager_request_t pager_request,
+       ipc_port_t      pager_name); /* forward */
+ 
++void vm_object_deactivate_pages(vm_object_t);
++
+ /*
+  *    Virtual memory objects maintain the actual data
+  *    associated with allocated virtual memory.  A given
+@@ -164,9 +166,8 @@ vm_object_t                kernel_object = &kernel_object_store;
+  *
+  *    The kernel may choose to terminate objects from this
+  *    queue in order to reclaim storage.  The current policy
+- *    is to let memory pressure dynamically adjust the number
+- *    of unreferenced objects. The pageout daemon attempts to
+- *    collect objects after removing pages from them.
++ *    is to permit a fixed maximum number of unreferenced
++ *    objects (vm_object_cached_max).
+  *
+  *    A simple lock (accessed by routines
+  *    vm_object_cache_{lock,lock_try,unlock}) governs the
+@@ -182,6 +183,7 @@ vm_object_t                kernel_object = &kernel_object_store;
+  */
+ queue_head_t  vm_object_cached_list;
+ int           vm_object_cached_count;
++int           vm_object_cached_max = 4000;    /* may be patched*/
+ 
+ decl_simple_lock_data(,vm_object_cached_lock_data)
+ 
+@@ -303,7 +305,6 @@ void vm_object_bootstrap(void)
+ 
+       vm_object_template.paging_in_progress = 0;
+       vm_object_template.can_persist = FALSE;
+-      vm_object_template.cached = FALSE;
+       vm_object_template.internal = TRUE;
+       vm_object_template.temporary = TRUE;
+       vm_object_template.alive = TRUE;
+@@ -350,60 +351,6 @@ void vm_object_init(void)
+ }
+ 
+ /*
+- *    Object cache management functions.
+- *
+- *    Both the cache and the object must be locked
+- *    before calling these functions.
+- */
+-
+-static void vm_object_cache_add(
+-      vm_object_t     object)
+-{
+-      assert(!object->cached);
+-      queue_enter(&vm_object_cached_list, object, vm_object_t, cached_list);
+-      vm_object_cached_count++;
+-      vm_object_cached_pages_update(object->resident_page_count);
+-      object->cached = TRUE;
+-}
+-
+-static void vm_object_cache_remove(
+-      vm_object_t     object)
+-{
+-      assert(object->cached);
+-      queue_remove(&vm_object_cached_list, object, vm_object_t, cached_list);
+-      vm_object_cached_count--;
+-      vm_object_cached_pages_update(-object->resident_page_count);
+-      object->cached = FALSE;
+-}
+-
+-void vm_object_collect(
+-      register vm_object_t    object)
+-{
+-      vm_object_unlock(object);
+-
+-      /*
+-       *      The cache lock must be acquired in the proper order.
+-       */
+-
+-      vm_object_cache_lock();
+-      vm_object_lock(object);
+-
+-      /*
+-       *      If the object was referenced while the lock was
+-       *      dropped, cancel the termination.
+-       */
+-
+-      if (!vm_object_collectable(object)) {
+-              vm_object_unlock(object);
+-              vm_object_cache_unlock();
+-              return;
+-      }
+-
+-      vm_object_cache_remove(object);
+-      vm_object_terminate(object);
+-}
+-
+-/*
+  *    vm_object_reference:
+  *
+  *    Gets another reference to the given object.
+@@ -463,31 +410,103 @@ void vm_object_deallocate(
+ 
+               /*
+                *      See whether this object can persist.  If so, enter
+-               *      it in the cache.
++               *      it in the cache, then deactivate all of its
++               *      pages.
+                */
+-              if (object->can_persist && (object->resident_page_count > 0)) {
+-                      vm_object_cache_add(object);
++              if (object->can_persist) {
++                      boolean_t       overflow;
++
++                      /*
++                       *      Enter the object onto the queue
++                       *      of "cached" objects.  Remember whether
++                       *      we've caused the queue to overflow,
++                       *      as a hint.
++                       */
++
++                      queue_enter(&vm_object_cached_list, object,
++                              vm_object_t, cached_list);
++                      overflow = (++vm_object_cached_count > vm_object_cached_max);
++                      vm_object_cached_pages_update(object->resident_page_count);
+                       vm_object_cache_unlock();
++
++                      vm_object_deactivate_pages(object);
+                       vm_object_unlock(object);
+-                      return;
+-              }
+ 
+-              if (object->pager_created &&
+-                  !object->pager_initialized) {
++                      /*
++                       *      If we didn't overflow, or if the queue has
++                       *      been reduced back to below the specified
++                       *      minimum, then quit.
++                       */
++                      if (!overflow)
++                              return;
++
++                      while (TRUE) {
++                              vm_object_cache_lock();
++                              if (vm_object_cached_count <=
++                                  vm_object_cached_max) {
++                                      vm_object_cache_unlock();
++                                      return;
++                              }
++
++                              /*
++                               *      If we must trim down the queue, take
++                               *      the first object, and proceed to
++                               *      terminate it instead of the original
++                               *      object.  Have to wait for pager init.
++                               *  if it's in progress.
++                               */
++                              object= (vm_object_t)
++                                  queue_first(&vm_object_cached_list);
++                              vm_object_lock(object);
++
++                              if (!(object->pager_created &&
++                                  !object->pager_initialized)) {
++
++                                      /*
++                                       *  Ok to terminate, hang on to lock.
++                                       */
++                                      break;
++                              }
++
++                              vm_object_assert_wait(object,
++                                      VM_OBJECT_EVENT_INITIALIZED, FALSE);
++                              vm_object_unlock(object);
++                              vm_object_cache_unlock();
++                              thread_block((void (*)()) 0);
++
++                              /*
++                               *  Continue loop to check if cache still
++                               *  needs to be trimmed.
++                               */
++                      }
+ 
+                       /*
+-                       *      Have to wait for initialization.
+-                       *      Put reference back and retry
+-                       *      when it's initialized.
++                       *      Actually remove object from cache.
+                        */
+ 
+-                      object->ref_count++;
+-                      vm_object_assert_wait(object,
+-                              VM_OBJECT_EVENT_INITIALIZED, FALSE);
+-                      vm_object_unlock(object);
+-                      vm_object_cache_unlock();
+-                      thread_block((void (*)()) 0);
+-                      continue;
++                      queue_remove(&vm_object_cached_list, object,
++                                      vm_object_t, cached_list);
++                      vm_object_cached_count--;
++
++                      assert(object->ref_count == 0);
++              }
++              else {
++                      if (object->pager_created &&
++                          !object->pager_initialized) {
++
++                              /*
++                               *      Have to wait for initialization.
++                               *      Put reference back and retry
++                               *      when it's initialized.
++                               */
++                              object->ref_count++;
++                              vm_object_assert_wait(object,
++                                      VM_OBJECT_EVENT_INITIALIZED, FALSE);
++                              vm_object_unlock(object);
++                              vm_object_cache_unlock();
++                              thread_block((void (*)()) 0);
++                              continue;
++                        }
+               }
+ 
+               /*
+@@ -514,6 +533,8 @@ void vm_object_deallocate(
+       }
+ }
+ 
++boolean_t     vm_object_terminate_remove_all = FALSE;
++
+ /*
+  *    Routine:        vm_object_terminate
+  *    Purpose:
+@@ -618,7 +639,6 @@ void vm_object_terminate(
+ 
+       assert(object->ref_count == 0);
+       assert(object->paging_in_progress == 0);
+-      assert(!object->cached);
+ 
+       /*
+        *      Throw away port rights... note that they may
+@@ -851,6 +871,28 @@ kern_return_t memory_object_destroy(
+ }
+ 
+ /*
++ *    vm_object_deactivate_pages
++ *
++ *    Deactivate all pages in the specified object.  (Keep its pages
++ *    in memory even though it is no longer referenced.)
++ *
++ *    The object must be locked.
++ */
++void vm_object_deactivate_pages(
++      register vm_object_t    object)
++{
++      register vm_page_t      p;
++
++      queue_iterate(&object->memq, p, vm_page_t, listq) {
++              vm_page_lock_queues();
++              if (!p->busy)
++                      vm_page_deactivate(p);
++              vm_page_unlock_queues();
++      }
++}
++
++
++/*
+  *    Routine:        vm_object_pmap_protect
+  *
+  *    Purpose:
+@@ -1804,8 +1846,12 @@ vm_object_t vm_object_lookup(
+ 
+                       assert(object->alive);
+ 
+-                      if (object->ref_count == 0)
+-                              vm_object_cache_remove(object);
++                      if (object->ref_count == 0) {
++                              queue_remove(&vm_object_cached_list, object,
++                                           vm_object_t, cached_list);
++                              vm_object_cached_count--;
++                              vm_object_cached_pages_update(-object->resident_page_count);
++                      }
+ 
+                       object->ref_count++;
+                       vm_object_unlock(object);
+@@ -1832,8 +1878,12 @@ vm_object_t vm_object_lookup_name(
+ 
+                       assert(object->alive);
+ 
+-                      if (object->ref_count == 0)
+-                              vm_object_cache_remove(object);
++                      if (object->ref_count == 0) {
++                              queue_remove(&vm_object_cached_list, object,
++                                           vm_object_t, cached_list);
++                              vm_object_cached_count--;
++                              vm_object_cached_pages_update(-object->resident_page_count);
++                      }
+ 
+                       object->ref_count++;
+                       vm_object_unlock(object);
+@@ -1865,8 +1915,12 @@ void vm_object_destroy(
+ 
+       object = (vm_object_t) pager->ip_kobject;
+       vm_object_lock(object);
+-      if (object->ref_count == 0)
+-              vm_object_cache_remove(object);
++      if (object->ref_count == 0) {
++              queue_remove(&vm_object_cached_list, object,
++                              vm_object_t, cached_list);
++              vm_object_cached_count--;
++              vm_object_cached_pages_update(-object->resident_page_count);
++      }
+       object->ref_count++;
+ 
+       object->can_persist = FALSE;
+@@ -2014,8 +2068,12 @@ restart:
+ 
+       if ((object != VM_OBJECT_NULL) && !must_init) {
+               vm_object_lock(object);
+-              if (object->ref_count == 0)
+-                      vm_object_cache_remove(object);
++              if (object->ref_count == 0) {
++                      queue_remove(&vm_object_cached_list, object,
++                                      vm_object_t, cached_list);
++                      vm_object_cached_count--;
++                      vm_object_cached_pages_update(-object->resident_page_count);
++              }
+               object->ref_count++;
+               vm_object_unlock(object);
+ 
+@@ -2524,7 +2582,6 @@ void vm_object_collapse(
+                       );
+ 
+                       assert(backing_object->alive);
+-                      assert(!backing_object->cached);
+                       backing_object->alive = FALSE;
+                       vm_object_unlock(backing_object);
+ 
+@@ -2653,7 +2710,7 @@ void vm_object_page_remove(
+        *      It balances vm_object_lookup vs iteration.
+        */
+ 
+-      if (atop(end - start) < object->resident_page_count/16) {
++      if (atop(end - start) < (unsigned)object->resident_page_count/16) {
+               vm_object_page_remove_lookup++;
+ 
+               for (; start < end; start += PAGE_SIZE) {
+@@ -2880,7 +2937,7 @@ void vm_object_print(
+               (vm_offset_t) object, (vm_offset_t) object->size,
+               object->ref_count);
+       printf("\n");
+-      iprintf("%lu resident pages,", object->resident_page_count);
++      iprintf(" %d resident pages,", object->resident_page_count);
+        printf(" %d absent pages,", object->absent_count);
+        printf(" %d paging ops\n", object->paging_in_progress);
+       indent += 1;
+diff --git a/vm/vm_object.h b/vm/vm_object.h
+index eb8a0c2..71c8545 100644
+--- a/vm/vm_object.h
++++ b/vm/vm_object.h
+@@ -72,7 +72,7 @@ struct vm_object {
+                                                */
+ 
+       int                     ref_count;      /* Number of references */
+-      unsigned long           resident_page_count;
++      int                     resident_page_count;
+                                               /* number of resident pages */
+ 
+       struct vm_object        *copy;          /* Object that should receive
+@@ -148,9 +148,8 @@ struct vm_object {
+                                                */
+       /* boolean_t */         use_shared_copy : 1,/* Use shared (i.e.,
+                                                * delayed) copy on write */
+-      /* boolean_t */         shadowed: 1,    /* Shadow may exist */
++      /* boolean_t */         shadowed: 1;    /* Shadow may exist */
+ 
+-      /* boolean_t */         cached: 1;      /* Object is cached */
+       queue_chain_t           cached_list;    /* Attachment point for the list
+                                                * of objects cached as a result
+                                                * of their can_persist value
+@@ -170,7 +169,6 @@ vm_object_t        kernel_object;          /* the single kernel object */
+ 
+ extern void           vm_object_bootstrap(void);
+ extern void           vm_object_init(void);
+-extern void           vm_object_collect(vm_object_t);
+ extern void           vm_object_terminate(vm_object_t);
+ extern vm_object_t    vm_object_allocate(vm_size_t);
+ extern void           vm_object_reference(vm_object_t);
+@@ -292,10 +290,6 @@ vm_object_t vm_object_copy_delayed(
+  *    Routines implemented as macros
+  */
+ 
+-#define vm_object_collectable(object)                                 \
+-      (((object)->ref_count == 0)                                     \
+-      && ((object)->resident_page_count == 0))
+-
+#define       vm_object_paging_begin(object)                                  \
+       ((object)->paging_in_progress++)
+ 
+diff --git a/vm/vm_pageout.c b/vm/vm_pageout.c
+index 72f96cb..f06e8f8 100644
+--- a/vm/vm_pageout.c
++++ b/vm/vm_pageout.c
+@@ -748,12 +748,7 @@ void vm_pageout_scan(void)
+                   reclaim_page:
+                       vm_page_free(m);
+                       vm_page_unlock_queues();
+-
+-                      if (vm_object_collectable(object))
+-                              vm_object_collect(object);
+-                      else
+-                              vm_object_unlock(object);
+-
++                      vm_object_unlock(object);
+                       continue;
+               }
+ 
+diff --git a/vm/vm_resident.c b/vm/vm_resident.c
+index 79481a7..21ab570 100644
+--- a/vm/vm_resident.c
++++ b/vm/vm_resident.c
+@@ -372,7 +372,7 @@ void vm_page_insert(
+        */
+ 
+       object->resident_page_count++;
+-      assert(object->resident_page_count != 0);
++      assert(object->resident_page_count >= 0);
+ 
+       if (object->can_persist && (object->ref_count == 0))
+               vm_object_cached_pages_update(1);
+@@ -479,7 +479,7 @@ void vm_page_replace(
+        */
+ 
+       object->resident_page_count++;
+-      assert(object->resident_page_count != 0);
++      assert(object->resident_page_count >= 0);
+ 
+       if (object->can_persist && (object->ref_count == 0))
+               vm_object_cached_pages_update(1);
diff --git a/debian/patches/series b/debian/patches/series
index b804345..1d64b0f 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -4,3 +4,4 @@
 50_initrd.patch
 60_bigmem.patch
 70_dde.patch
+99_revert-page-cache-policy.patch

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/pkg-hurd/gnumach.git