summary | shortlog | log | commit | commitdiff | tree
raw | patch | inline | side by side (parent: 6667ac0)
raw | patch | inline | side by side (parent: 6667ac0)
author | Florian Forster <octo@huhu.verplant.org> | |
Sat, 17 Jan 2009 10:19:08 +0000 (11:19 +0100) | ||
committer | Florian Forster <octo@huhu.verplant.org> | |
Sat, 17 Jan 2009 10:19:08 +0000 (11:19 +0100) |
On very busy systems, the thread apparently may not be scheduled often enough.
So the receive buffer fills up quickly and data may be lost.
This patch changes the `mutex_lock' to a `mutex_trylock' and data is only
appended to the global receive-queue if the lock can be obtained without
blocking.
If the lock cannot be obtained without blocking, the data is instead appended
to a private queue and that queue is appended to the global queue when the lock
can next be taken.
So the receive buffer fills up quickly and data may be lost.
This patch changes the `mutex_lock' to a `mutex_trylock' and data is only
appended to the global receive-queue if the lock can be obtained without
blocking.
If the lock cannot be obtained without blocking, the data is instead appended
to a private queue and that queue is appended to the global queue when the lock
can next be taken.
src/network.c | patch | blob | history |
diff --git a/src/network.c b/src/network.c
index 9e391bb2dfd7c91b1cfd641fa5db40ea4da18b01..34f89d96a39898cc4e27eb98024245fd686d1a65 100644 (file)
--- a/src/network.c
+++ b/src/network.c
int i;
int status;
+ receive_list_entry_t *private_list_head;
+ receive_list_entry_t *private_list_tail;
+
if (listen_sockets_num == 0)
network_add_listen_socket (NULL, NULL);
return (-1);
}
+ private_list_head = NULL;
+ private_list_tail = NULL;
+
while (listen_loop == 0)
{
status = poll (listen_sockets, listen_sockets_num, -1);
ERROR ("network plugin: malloc failed.");
return (-1);
}
- memset (ent, '\0', sizeof (receive_list_entry_t));
+ memset (ent, 0, sizeof (receive_list_entry_t));
+ ent->next = NULL;
/* Hopefully this will be optimized out by the compiler. It
* might help prevent stupid bugs in the future though.
memcpy (ent->data, buffer, buffer_len);
ent->data_len = buffer_len;
- pthread_mutex_lock (&receive_list_lock);
- if (receive_list_head == NULL)
- {
- receive_list_head = ent;
- receive_list_tail = ent;
- }
+ if (private_list_head == NULL)
+ private_list_head = ent;
else
+ private_list_tail->next = ent;
+ private_list_tail = ent;
+
+ /* Do not block here. Blocking here has led to
+ * insufficient performance in the past. */
+ if (pthread_mutex_trylock (&receive_list_lock) == 0)
{
- receive_list_tail->next = ent;
- receive_list_tail = ent;
+ if (receive_list_head == NULL)
+ receive_list_head = private_list_head;
+ else
+ receive_list_tail->next = private_list_head;
+ receive_list_tail = private_list_tail;
+
+ private_list_head = NULL;
+ private_list_tail = NULL;
+
+ pthread_cond_signal (&receive_list_cond);
+ pthread_mutex_unlock (&receive_list_lock);
}
- pthread_cond_signal (&receive_list_cond);
- pthread_mutex_unlock (&receive_list_lock);
} /* for (listen_sockets) */
} /* while (listen_loop == 0) */
+ /* Make sure everything is dispatched before exiting. */
+ if (private_list_head != NULL)
+ {
+ pthread_mutex_lock (&receive_list_lock);
+
+ if (receive_list_head == NULL)
+ receive_list_head = private_list_head;
+ else
+ receive_list_tail->next = private_list_head;
+ receive_list_tail = private_list_tail;
+
+ private_list_head = NULL;
+ private_list_tail = NULL;
+
+ pthread_cond_signal (&receive_list_cond);
+ pthread_mutex_unlock (&receive_list_lock);
+ }
+
return (0);
}