author     David S. Miller <davem@davemloft.net>  2013-07-23 02:31:37 -0700
committer  David S. Miller <davem@davemloft.net>  2013-07-23 02:31:37 -0700
commit     e7074e4c5edb0acaa979ea08e533736f906a9d68 (patch)
tree       05d6ef7fb250464b8a46d556376f334c03c770a6 /nptl
parent     1fe2988f523ddbad93ca7abc98fea982f2ae0505 (diff)
Increase nptl test case buffer size so we really block on current Linux kernels.
* tst-cancel4.c (WRITE_BUFFER_SIZE): Increase to 16384.
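
The mechanism this commit cares about can be illustrated in isolation.  The
sketch below is a hypothetical standalone program, not code from
tst-cancel4.c (the real test sets up its own sockets and pipes); it requests
a 1-byte SO_SNDBUF, which the kernel silently raises to its internal
minimum, and then checks whether a write of WRITE_BUFFER_SIZE bytes can be
queued without blocking.  An AF_UNIX socketpair is used here purely to keep
the example self-contained.

/* Hypothetical demo: does a WRITE_BUFFER_SIZE-byte write block once the
   send buffer has been shrunk as far as the kernel allows?  */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#define WRITE_BUFFER_SIZE 16384   /* New value from this commit.  */

int
main (void)
{
  int fds[2];
  if (socketpair (AF_UNIX, SOCK_STREAM, 0, fds) != 0)
    {
      perror ("socketpair");
      return 1;
    }

  /* Request a 1-byte send buffer; the kernel clamps this to its
     internal minimum rather than honoring it literally.  */
  int one = 1;
  setsockopt (fds[0], SOL_SOCKET, SO_SNDBUF, &one, sizeof one);

  int sndbuf = 0;
  socklen_t len = sizeof sndbuf;
  getsockopt (fds[0], SOL_SOCKET, SO_SNDBUF, &sndbuf, &len);
  printf ("effective SO_SNDBUF after asking for 1 byte: %d\n", sndbuf);

  /* Non-blocking so the demo always terminates; the real test relies on
     the corresponding blocking write not returning until cancelled.  */
  fcntl (fds[0], F_SETFL, O_NONBLOCK);

  char buf[WRITE_BUFFER_SIZE];
  memset (buf, 0, sizeof buf);
  ssize_t n = write (fds[0], buf, sizeof buf);

  if (n == (ssize_t) sizeof buf)
    puts ("entire buffer queued: a blocking write would NOT block");
  else if (n >= 0)
    printf ("%zd of %zu bytes queued: a blocking write would block here\n",
            n, sizeof buf);
  else
    printf ("write: %s: a blocking write would block immediately\n",
            strerror (errno));

  close (fds[0]);
  close (fds[1]);
  return 0;
}

If the entire buffer fits, the blocking write the cancellation tests depend
on returns immediately instead of parking the thread at a cancellation
point, which is exactly what started happening with the old 4096-byte value
on newer kernels.
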
Diffstat (limited to 'nptl')
-rw-r--r--  nptl/ChangeLog     |  4
-rw-r--r--  nptl/tst-cancel4.c | 25
2 files changed, 28 insertions(+), 1 deletion(-)
diff --git a/nptl/ChangeLog b/nptl/ChangeLog
index fc2ef41a94..0e4747e789 100644
--- a/nptl/ChangeLog
+++ b/nptl/ChangeLog
@@ -1,3 +1,7 @@
+2013-07-22  David S. Miller  <davem@davemloft.net>
+
+	* tst-cancel4.c (WRITE_BUFFER_SIZE): Increase to 16384.
+
 2013-07-19  Dominik Vogt  <vogt@de.ibm.com>
 
 	* pthread_mutex_lock.c: Fix whitespace.
diff --git a/nptl/tst-cancel4.c b/nptl/tst-cancel4.c
index 9ffd5d1419..10b7c6e1b0 100644
--- a/nptl/tst-cancel4.c
+++ b/nptl/tst-cancel4.c
@@ -83,7 +83,30 @@ static pthread_barrier_t b2;
 # define IPC_ADDVAL 0
 #endif
 
-#define WRITE_BUFFER_SIZE 4096
+/* The WRITE_BUFFER_SIZE value needs to be choosen such that if we set
+   the socket send buffer size to '1', a write of this size on that
+   socket will block.
+
+   The Linux kernel imposes a minimum send socket buffer size
+   which has changed over the years.  Currently the value is:
+
+     2 * (2048 + SKB_DATA_ALIGN(sizeof(struct sk_buff)))
+
+   which is attempting to make sure that with standard MTUs,
+   TCP can always queue up at least 2 full sized packets.
+
+   Furthermore, there is logic in the socket send paths that
+   will allow one more packet (of any size) to be queued up as
+   long as some socket buffer space remains.  Blocking only
+   occurs when we try to queue up a new packet and the send
+   buffer space has already been fully consumed.
+
+   Therefore we must set this value to the largest possible value of
+   the formula above (and since it depends upon the size of "struct
+   sk_buff", it is dependent upon machine word size etc.)  plus some
+   slack space.  */
+
+#define WRITE_BUFFER_SIZE 16384
 
 /* Cleanup handling test.  */
 static int cl_called;
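
For a rough sense of the numbers behind the comment added above, the
hypothetical sketch below evaluates
2 * (2048 + SKB_DATA_ALIGN(sizeof(struct sk_buff))) with assumed inputs.
struct sk_buff and SKB_DATA_ALIGN are kernel-internal, so the structure
size (~232 bytes) and cache-line alignment (64 bytes) used here are guesses
for a typical 64-bit build, not values exported to user space.

/* Back-of-the-envelope estimate of the kernel's minimum send buffer,
   using assumed values for kernel-internal quantities.  */
#include <stdio.h>

#define ASSUMED_CACHE_LINE   64    /* SMP_CACHE_BYTES on common configs.  */
#define ASSUMED_SKBUFF_SIZE  232   /* Rough sizeof (struct sk_buff).  */

static unsigned long
skb_data_align (unsigned long x)
{
  /* Round up to the assumed cache-line size, as SKB_DATA_ALIGN does.  */
  return (x + ASSUMED_CACHE_LINE - 1)
	 & ~(unsigned long) (ASSUMED_CACHE_LINE - 1);
}

int
main (void)
{
  unsigned long min_sndbuf
    = 2 * (2048 + skb_data_align (ASSUMED_SKBUFF_SIZE));

  printf ("estimated kernel minimum send buffer: %lu bytes\n", min_sndbuf);
  printf ("old WRITE_BUFFER_SIZE  4096: %s the estimated minimum\n",
	  4096 > min_sndbuf ? "exceeds" : "fits within");
  printf ("new WRITE_BUFFER_SIZE 16384: %s the estimated minimum\n",
	  16384 > min_sndbuf ? "exceeds" : "fits within");
  return 0;
}

With those assumptions the minimum comes out near 4.5 KiB, so the old
4096-byte write could be absorbed outright, and the extra queued-packet
allowance described in the comment adds further headroom.  16384 exceeds
any plausible value of the formula with slack to spare, which is the point
of the new value.
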