[gnugo-devel] undo speed problem solved


From: Paul Pogonyshev
Subject: [gnugo-devel] undo speed problem solved
Date: Wed, 16 Apr 2003 23:09:23 -0400
User-agent: KMail/1.5.9

- hashtable_clear() doesn't clear already clean parts of the table
- `stackp' pseudo-field in Read_result renamed to `remaining_depth'

This patch makes undoing nearly instant. Better yet, it should speed up
the regression tests somewhat; for instance, GNU Go now takes less than
3 seconds to get through reading.tst :)

On the whole, the patch should cut the time spent in hashtable_clear()
roughly in half. That is because the function is called when `loadsgf'
is encountered and then again when `gg_genmove' follows it; with the
patch the second call returns immediately.
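
Concretely, the new guard at the top of hashtable_clear() (taken from the
first cache.c hunk below) is just:

    /* The table is already clean: no nodes have been allocated since
     * the last clear, so return immediately. */
    if (table->first_pass && table->free_node == table->all_nodes)
      return;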

In light-reading test cases (like reading.tst and probably all other
tests that don't do global position valuation) time is also saved because
hashtable_clear() now clears only the "dirty" part of the table. However,
once hashtable_partially_clear() has been called at least once, we can do
nothing but clear the whole table, since it is unknown where the
still-allocated nodes and results are scattered.
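
Put together (condensed from the cache.c hunks below), the clearing loops
now walk only the used prefix of each array while `first_pass' is still set:

    /* On the first pass the allocated nodes and results are contiguous,
     * so cleaning the used prefix of each array is enough.  Otherwise
     * fall back to cleaning everything. */
    node_limit = table->first_pass ? table->free_node : table->node_limit;
    table->free_node = table->all_nodes;
    for (node = table->all_nodes; node < node_limit; node++)
      node->results = NULL;

    result_limit = table->first_pass ? table->free_result : table->result_limit;
    table->free_result = table->all_results;
    for (result = table->all_results; result < result_limit; result++)
      result->data2 = 0;

    table->first_pass = 1;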

I fixed a FIXME in cache.h and renamed `stackp' to `remaining_depth',
which is more logical. I also added a couple of macros to cache.h
(rr_is_*) and used them in cache.c.
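
For reference, the new macros (from the cache.h hunk below) test the status
bits in data2 directly, so the old `rr_get_status(*result) == 2' check in
cache.c becomes `rr_is_closed(*result)':

    #define rr_is_free(rr)    (((rr).data2 \
                                & (RR_STATUS_OPEN | RR_STATUS_CLOSED)) == 0)
    #define rr_is_open(rr)    (((rr).data2 & RR_STATUS_OPEN) != 0)
    #define rr_is_closed(rr)  (((rr).data2 & RR_STATUS_CLOSED) != 0)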

No changes to the regression results, of course.

Paul

Index: engine/cache.c
===================================================================
RCS file: /cvsroot/gnugo/gnugo/engine/cache.c,v
retrieving revision 1.21
diff -u -p -r1.21 cache.c
--- engine/cache.c      5 Mar 2003 18:52:56 -0000       1.21
+++ engine/cache.c      16 Apr 2003 20:04:39 -0000
@@ -52,7 +52,7 @@ static Read_result *hashnode_new_result(
 
 static void hashnode_unlink_closed_results(Hashnode *node, 
                                           int exclusions, 
-                                          unsigned int stackplimit,
+                                          unsigned int remaining_depth_limit,
                                           int statistics[][20]);
 static void hashtable_partially_clear(Hashtable *table);
 static int do_get_read_result(int routine, int komaster, int kom_pos,
@@ -73,7 +73,7 @@ read_result_dump(Read_result *result, FI
          rr_get_routine(*result),
          I(rr_get_str(*result)),
          J(rr_get_str(*result)),
-         rr_get_stackp(*result));
+         rr_get_remaining_depth(*result));
   fprintf(outfile, "Result: %u %u, (%d, %d)\n",
          rr_get_status(*result),
          rr_get_result(*result),
@@ -232,7 +232,8 @@ hashtable_init(Hashtable *table,
   }
   table->result_limit = table->all_results + num_results;
 
-  /* Initialize the table and all nodes to the empty state . */
+  /* Force complete table clearing. */
+  table->first_pass = 0;
   hashtable_clear(table);
 
   return 1;
@@ -277,34 +278,45 @@ hashtable_clear(Hashtable *table)
 {
   int bucket;
   Hashnode *node;
+  Hashnode *node_limit;
   Read_result *result;
+  Read_result *result_limit;
 
   if (!table)
     return;
 
+  /* If the table is already clean, return immediately. */
+  if (table->first_pass && table->free_node == table->all_nodes)
+    return;
+
   /* Initialize all hash buckets to the empty list. */
   for (bucket = 0; bucket < table->hashtablesize; ++bucket)
     table->hashtable[bucket] = NULL;
 
-  /* Mark all nodes as free. */
+  /* Mark all nodes as free. Don't clean non-allocated nodes. */
+  node_limit = table->first_pass ? table->free_node : table->node_limit;
   table->free_node = table->all_nodes;
-  for (node = table->all_nodes; node < table->node_limit; node++)
+  for (node = table->all_nodes; node < node_limit; node++)
     node->results = NULL;
 
-  /* Mark all read_results as free. */
+  /* Mark all read_results as free. Don't clean non-allocated results. */
+  result_limit = table->first_pass ? table->free_result : table->result_limit;
   table->free_result = table->all_results;
-  for (result = table->all_results; result < table->result_limit; result++)
+  for (result = table->all_results; result < result_limit; result++)
     result->data2 = 0;
+
+  table->first_pass = 1;
 }
 
 
 /* Unlink all closed results except for those which has `routine' value marked
- * in `exceptions' or large enough `stackp' from the linked list of results at
- * a node. It is assumed that the node contains at least one result.
+ * in `exceptions' or large enough `remaining_depth' from the linked list of
+ * results at a node. It is assumed that the node contains at least one result.
  */
 static void
 hashnode_unlink_closed_results(Hashnode *node, 
-                              int exclusions, unsigned int stackplimit,
+                              int exclusions,
+                              unsigned int remaining_depth_limit,
                               int statistics[][20])
 {
   Read_result *result = node->results;
@@ -312,11 +324,11 @@ hashnode_unlink_closed_results(Hashnode 
 
   /* Traverse all node results. */
   do {
-    unsigned int result_stackp = depth - rr_get_stackp(*result);
+    unsigned int result_remaining_depth = rr_get_remaining_depth(*result);
     int result_routine = rr_get_routine(*result);
 
     if (debug & DEBUG_READING_PERFORMANCE) {
-      int stat_stackp = result_stackp;
+      int stat_stackp = depth - result_remaining_depth;
 
       if (stat_stackp > 19)
        stat_stackp = 19;
@@ -324,12 +336,12 @@ hashnode_unlink_closed_results(Hashnode 
        stat_stackp = 0;
 
       gg_assert(result_routine >= 0 && result_routine < NUM_ROUTINES);
-      statistics[result_routine][result_stackp]++;
+      statistics[result_routine][stat_stackp]++;
     }
 
-    if (rr_get_status(*result) == 2    /* Closed. */
-       && ((1 << result_routine) & exclusions) == 0
-       && result_stackp >= stackplimit) {
+    if (rr_is_closed(*result)
+       && result_remaining_depth <= remaining_depth_limit
+       && ((1 << result_routine) & exclusions) == 0) {
       /* Unlink the result and mark it as free. */
       *link = result->next;
       result->data2 = 0;
@@ -360,6 +372,7 @@ hashtable_partially_clear(Hashtable *tab
 {
   int k, l;
   Hashnode *node;
+  const int remaining_depth_limit = depth - 3;
 
   int statistics[NUM_ROUTINES][20];
 
@@ -387,7 +400,7 @@ hashtable_partially_clear(Hashtable *tab
      */
     hashnode_unlink_closed_results(node, 
                                   (1 << OWL_ATTACK | 1 << OWL_DEFEND
-                                   | 1 << SEMEAI), 3,
+                                   | 1 << SEMEAI), remaining_depth_limit,
                                   statistics);
 
     if (node->results == NULL) {
@@ -444,6 +457,8 @@ hashtable_partially_clear(Hashtable *tab
    */
   table->free_node = table->all_nodes;
   table->free_result = table->all_results;
+
+  table->first_pass = 0;
 }
 
 
@@ -565,7 +580,7 @@ hashnode_new_result(Hashtable *table, Ha
 
   /* If the first result is not free, skip until we find one which is free. */
   while (table->free_result < table->result_limit
-        && rr_get_status(*(table->free_result)) != 0)
+        && !rr_is_free(*(table->free_result)))
     table->free_result++;
   
   if (table->free_result == table->result_limit) {
@@ -582,7 +597,7 @@ hashnode_new_result(Hashtable *table, Ha
     }
 
     while (table->free_result < table->result_limit
-          && rr_get_status(*(table->free_result)) != 0)
+          && !rr_is_free(*(table->free_result)))
       table->free_result++;
 
     if (table->free_result == table->result_limit) {
Index: engine/cache.h
===================================================================
RCS file: /cvsroot/gnugo/gnugo/engine/cache.h,v
retrieving revision 1.26
diff -u -p -r1.26 cache.h
--- engine/cache.h      5 Mar 2003 18:52:56 -0000       1.26
+++ engine/cache.h      16 Apr 2003 20:04:40 -0000
@@ -50,14 +50,11 @@
  * The data1 field packs into 32 bits the following
  * fields:
  *
- * komaster:  3 bits (EMPTY, BLACK, WHITE, or GRAY)
- * kom_pos : 10 bits (allows MAX_BOARD up to 31)
- * routine :  4 bits (currently 10 different choices)
- * str1    : 10 bits
- * stackp  :  5 bits (actually remaining depth, depth - stackp)
- *
- * FIXME: Rename stackp to something like remaining_depth at some
- *        appropriate time.
+ * komaster       :  3 bits (EMPTY, BLACK, WHITE, or GRAY)
+ * kom_pos        : 10 bits (allows MAX_BOARD up to 31)
+ * routine        :  4 bits (currently 10 different choices)
+ * str1           : 10 bits
+ * remaining_depth :  5 bits (depth - stackp)
  *
  * The data2 field packs into 32 bits the following
  * fields:
@@ -84,18 +81,18 @@ typedef struct read_result_t {
 #define RR_STATUS_CLOSED       (2 << 28)
 
 /* Get parts of a Read_result identifying the input data. */
-#define rr_get_komaster(rr)   (((rr).data1  >> 29) & 0x07)
-#define rr_get_kom_pos(rr)    (((rr).data1  >> 19) & 0x3ff)
-#define rr_get_routine(rr)    (((rr).data1  >> 15) & 0x0f)
-#define rr_get_str1(rr)       (((rr).data1  >>  5) & 0x3ff)
-#define rr_get_stackp(rr)     (((rr).data1  >>  0) & 0x1f)
-#define rr_get_str2(rr)       (((rr).data2  >>  0) & 0x3ff)
-#define rr_get_str(rr)        rr_get_str1(rr)
+#define rr_get_komaster(rr)        (((rr).data1  >> 29) & 0x07)
+#define rr_get_kom_pos(rr)         (((rr).data1  >> 19) & 0x3ff)
+#define rr_get_routine(rr)         (((rr).data1  >> 15) & 0x0f)
+#define rr_get_str1(rr)            (((rr).data1  >>  5) & 0x3ff)
+#define rr_get_remaining_depth(rr)  (((rr).data1  >>  0) & 0x1f)
+#define rr_get_str2(rr)            (((rr).data2  >>  0) & 0x3ff)
+#define rr_get_str(rr)             rr_get_str1(rr)
 
 /* Set corresponding parts. */
-#define rr_input_data1(routine, komaster, kom_pos, str1, stackp) \
+#define rr_input_data1(routine, komaster, kom_pos, str1, remaining_depth) \
        (((((((((komaster) << 10) | (kom_pos)) << 4) \
-         | (routine)) << 10) | (str1)) << 5) | (stackp))
+         | (routine)) << 10) | (str1)) << 5) | (remaining_depth))
 #define rr_input_data2(str2) (str2) \
 
 /* Get parts of a Read_result constituting the result of a search. */
@@ -105,6 +102,11 @@ typedef struct read_result_t {
 #define rr_get_move(rr)        (((rr).data2 >> 10) & 0x3ff)
 #define rr_get_result(rr)      rr_get_result1(rr)
 
+#define rr_is_free(rr)        (((rr).data2 \
+                               & (RR_STATUS_OPEN | RR_STATUS_CLOSED)) == 0)
+#define rr_is_open(rr)        (((rr).data2 & RR_STATUS_OPEN) != 0)
+#define rr_is_closed(rr)       (((rr).data2 & RR_STATUS_CLOSED) != 0)
+
 /* Set corresponding parts. Closes the result entry. */
 #define rr_set_result_move(rr, result, move) \
        (rr).data2 = (((rr).data2 & 0x3ff) | RR_STATUS_CLOSED \
@@ -156,6 +158,12 @@ typedef struct hashtable {
   Read_result  *all_results;   /* Pointer to all allocated results. */
   Read_result  *result_limit;  /* Pointer to the enf of all_results[] array. */
   Read_result  *free_result;   /* Pointer to the first free result. */
+
+  /* True if hashtable_partially_clear() hasn't been called yet. In this case
+   * allocated nodes and results (if any) lie contiguously in memory, which
+   * simplifies clearing.
+   */
+  int           first_pass;
 } Hashtable;
 
 




