gnunet-svn
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[GNUnet-SVN] r5207 - GNUnet/src/server


From: gnunet
Subject: [GNUnet-SVN] r5207 - GNUnet/src/server
Date: Sun, 1 Jul 2007 18:40:01 -0600 (MDT)

Author: grothoff
Date: 2007-07-01 18:40:01 -0600 (Sun, 01 Jul 2007)
New Revision: 5207

Modified:
   GNUnet/src/server/connection.c
   GNUnet/src/server/handler.c
Log:
increase message size, reduce send frequency

Modified: GNUnet/src/server/connection.c
===================================================================
--- GNUnet/src/server/connection.c      2007-07-02 00:36:33 UTC (rev 5206)
+++ GNUnet/src/server/connection.c      2007-07-02 00:40:01 UTC (rev 5207)
@@ -62,7 +62,7 @@
 
 /* tuning parameters */
 
-#define DEBUG_CONNECTION YES
+#define DEBUG_CONNECTION NO
 
 /**
  * output knapsack priorities into a file?
@@ -871,36 +871,39 @@
  * <p>
  *
  * @param priority the highest priority of contents in the packet
+ * @param overhead how much is the header-overhead? 1 for just
+ *                 the header, 2 for header is 50%, 3 for header is 33%, etc.
+ *                 (the higher the better)
 * @return OK if the packet should be handled, SYSERR if the packet should be dropped.
  */
-static int outgoingCheck(unsigned int priority) {
+static int outgoingCheck(unsigned int priority,
+                        unsigned int overhead) {
   int load;
   unsigned int delta;
 
   load = os_network_monitor_get_load(load_monitor, Upload);  /* how much free bandwidth do we have? */
-  if(load >= 150) {
-    return SYSERR;              /* => always drop */
-  }
-  if(load > 100) {
-    if(priority >= EXTREME_PRIORITY) {
+  if (load >= 150) 
+    return SYSERR;              /* => always drop */  
+  if (load > 100) {
+    if (priority >= EXTREME_PRIORITY) 
       return OK;                /* allow administrative msgs */
-    }
-    else {
-      return SYSERR;            /* but nothing else */
-    }
+    else 
+      return SYSERR;            /* but nothing else */    
   }
-  if(load <= 50) {              /* everything goes */
-    return OK;                  /* allow */
-  }
-  /* Now load in [51, 100].  Between 51% and 100% load:
+  if (overhead > 50)
+    overhead = 50; /* bound */
+  if (load <= overhead) 
+    return OK;     
+  /* Suppose overhead = 50, then:
+     Now load in [51, 100].  Between 51% and 100% load:
      at 51% require priority >= 1 = (load-50)^3
      at 52% require priority >= 8 = (load-50)^3
      at 75% require priority >= 15626 = (load-50)^3
      at 100% require priority >= 125000 = (load-50)^3
      (cubic function)
    */
-  delta = load - 50;            /* now delta is in [1,50] with 50 == 100% load */
-  if(delta * delta * delta > priority) {
+  delta = load - overhead;            /* now delta is in [1,50] with 50 == 100% load */
+  if (delta * delta * delta > priority) {
 #if DEBUG_POLICY
     GE_LOG(ectx,
           GE_DEBUG | GE_REQUEST | GE_USER,
@@ -911,8 +914,7 @@
           delta * delta * delta);
 #endif
     return SYSERR;              /* drop */
-  }
-  else {
+  } else {
 #if DEBUG_POLICY
     GE_LOG(ectx,
           GE_DEBUG | GE_REQUEST | GE_USER,
@@ -934,25 +936,35 @@
  */
 static int checkSendFrequency(BufferEntry * be) {
   cron_t msf;
+  int load;
+  unsigned int i;
 
+  for (i=0;i<be->sendBufferSize;i++)
+    if (be->sendBuffer[i]->pri >= EXTREME_PRIORITY)
+      return OK;
+
   if (be->max_bpm == 0)
     be->max_bpm = 1;
 
   if (be->session.mtu == 0) {
     msf =    /* ms per message */
-      EXPECTED_MTU / (be->max_bpm * cronMINUTES / cronMILLIS) /* bytes per ms */
-      /2;
+      EXPECTED_MTU / (be->max_bpm * cronMINUTES / cronMILLIS); /* bytes per ms */
   } else {
     msf =    /* ms per message */
       be->session.mtu           /* byte per message */
-      / (be->max_bpm * cronMINUTES / cronMILLIS)  /* bytes per ms */
-      / 2;                       /* some head-room */
+      / (be->max_bpm * cronMINUTES / cronMILLIS);  /* bytes per ms */
   }
   /* Also: allow at least 2 * MINIMUM_SAMPLE_COUNT knapsack
      solutions for any MIN_SAMPLE_TIME! */
   if (msf > 2 * MIN_SAMPLE_TIME / MINIMUM_SAMPLE_COUNT)
     msf = 2 * MIN_SAMPLE_TIME / MINIMUM_SAMPLE_COUNT;
-
+  load = os_cpu_get_load(ectx, cfg);
+  if (load == -1)
+    load = 50;
+  /* adjust send frequency; if load is smaller
+     than 25%, decrease frequency, otherwise
+     increase it (quadratically)! */
+  msf = msf * load * load / 25 / 25;
   if (be->lastSendAttempt + msf > get_time()) {
 #if DEBUG_CONNECTION
     GE_LOG(ectx,
@@ -1469,7 +1481,6 @@
     return NO;                     /* must not run */
   }
   be->inSendBuffer = YES;
-
   if ( (OK != ensureTransportConnected(be)) ||
        (OK != checkSendFrequency(be)) ){
     be->inSendBuffer = NO;
@@ -1518,7 +1529,8 @@
   /* check if we (sender) have enough bandwidth available
      if so, trigger callbacks on selected entries; if either
      fails, return (but clean up garbage) */
-  if ( (SYSERR == outgoingCheck(priority)) ||
+  if ( (SYSERR == outgoingCheck(priority,
+                               totalMessageSize / sizeof(P2P_PACKET_HEADER))) ||
        (0 == prepareSelectedMessages(be)) ) {
     expireSendBufferEntries(be);
     be->inSendBuffer = NO;

Modified: GNUnet/src/server/handler.c
===================================================================
--- GNUnet/src/server/handler.c 2007-07-02 00:36:33 UTC (rev 5206)
+++ GNUnet/src/server/handler.c 2007-07-02 00:40:01 UTC (rev 5207)
@@ -37,6 +37,18 @@
 #define DEBUG_HANDLER NO
 
 /**
+ * Track how many messages we are discarding?
+ */
+#define TRACK_DISCARD YES
+
+/**
+ * Track how much time was spent on each
+ * type of message?
+ */
+#define MEASURE_TIME YES
+
+
+/**
  * How many incoming packages do we have in the buffer
  * (max.). Must be >= THREAD_COUNT to make sense.
  */
@@ -107,8 +119,6 @@
 
 static struct GE_Context * ectx;
 
-#define MEASURE_TIME YES
-
 #if MEASURE_TIME
 static cron_t time_by_type[P2P_PROTO_MAX_USED];
 static unsigned int count_by_type[P2P_PROTO_MAX_USED];
@@ -586,6 +596,12 @@
  * (receive implementation).
  */
 void core_receive(P2P_PACKET * mp) {
+#if TRACK_DISCARD
+  static unsigned int discarded;
+  static unsigned int blacklisted;
+  static unsigned int accepted;
+#endif
+
   if ( (threads_running == NO) ||
        (mainShutdownSignal != NULL) ||
        (SYSERR == SEMAPHORE_DOWN(bufferQueueWrite_, NO)) ) {
@@ -597,6 +613,13 @@
           mp->size);
     FREE(mp->msg);
     FREE(mp);
+#if TRACK_DISCARD
+    if (globalLock_ != NULL)
+      MUTEX_LOCK(globalLock_);
+    discarded++;
+    if (globalLock_ != NULL)
+      MUTEX_UNLOCK(globalLock_);
+#endif
     return;
   }
   /* check for blacklisting */
@@ -610,9 +633,11 @@
           GE_DEBUG | GE_DEVELOPER | GE_REQUEST,
           "Strictly blacklisted peer `%s' sent message, dropping for now.\n",
           (char*)&enc);
-    if (OK == getBandwidthAssignedTo(&mp->sender, NULL, NULL)) {
-      abort();
-    }
+#if TRACK_DISCARD
+    MUTEX_LOCK(globalLock_);
+    blacklisted++;
+    MUTEX_UNLOCK(globalLock_);
+#endif
     FREE(mp->msg);
     FREE(mp);
     return;
@@ -635,6 +660,17 @@
   if (bq_firstFree_ == QUEUE_LENGTH)
     bq_firstFree_ = 0;
   bufferQueue_[bq_firstFree_++] = mp;
+#if TRACK_DISCARD
+  accepted++;
+  if (0 == accepted % 64)
+    GE_LOG(ectx,
+          GE_DEBUG | GE_DEVELOPER | GE_REQUEST,
+          "Accepted: %u discarded: %u blacklisted: %u, ratio: %f\n",
+          accepted,
+          discarded,
+          blacklisted,
+          1.0 * accepted / (blacklisted + discarded + 1)); 
+#endif
   MUTEX_UNLOCK(globalLock_);
   SEMAPHORE_UP(bufferQueueRead_);
 }
@@ -755,12 +791,13 @@
   for (i=0;i<P2P_PROTO_MAX_USED;i++) {
     if (count_by_type[i] == 0)
       continue;
-    fprintf(stderr,
-           "%10u msgs of type %2u took %16llu ms (%llu on average)\n",
-           count_by_type[i],
-           i,
-           time_by_type[i],
-           time_by_type[i] / count_by_type[i]);
+    GE_LOG(ectx,
+          GE_DEBUG | GE_DEVELOPER | GE_REQUEST,
+          "%10u msgs of type %2u took %16llu ms (%llu on average)\n",
+          count_by_type[i],
+          i,
+          time_by_type[i],
+          time_by_type[i] / count_by_type[i]);
   }         
 #endif
 }





reply via email to

[Prev in Thread] Current Thread [Next in Thread]