author     Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>  2016-03-10 18:33:07 -0300
committer  David S. Miller <davem@davemloft.net>                2016-03-13 22:29:07 -0400
commit     cea8768f333e3f0bc231d8b815aa4a9e63fa990c (patch)
tree       60bf6ef493bdc7f9816f4c62f65b7aa106a9a0ce /net/sctp/outqueue.c
parent     6f15cdbf8a8ac2e22767cc8b1eae225702733c95 (diff)
sctp: allow sctp_transmit_packet and others to use gfp
Currently sctp_sendmsg() triggers some calls that will allocate memory with GFP_ATOMIC even when that is not necessary. In the case of sctp_packet_transmit it will allocate a linear skb that is used to construct the packet, and this may cause sends to fail due to ENOMEM more often than anticipated, especially with big MTUs.

This patch thus allows it to inherit gfp flags from upper calls so that it can use GFP_KERNEL if it was triggered by an sctp_sendmsg call or similar. All others, like retransmits or flushes started from BH, still allocate using GFP_ATOMIC.

In netperf tests this didn't cause any performance drawbacks when memory is not too fragmented, and it made ENOMEM trigger far less often.

Signed-off-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
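For context, here is a minimal sketch of the pattern this patch applies. It is illustrative only: the example_* names are hypothetical and not the actual sctp_packet_transmit body; it just shows the allocation site taking the caller's gfp flags instead of hard-coding GFP_ATOMIC.

/*
 * Illustrative sketch of gfp propagation; example_* names are
 * hypothetical. Only the alloc_skb()/MAX_HEADER usage mirrors the
 * kind of linear allocation the patch description refers to.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *example_alloc_linear(unsigned int size, gfp_t gfp)
{
	/* With big MTUs this is a large linear allocation; letting a
	 * process-context caller pass GFP_KERNEL permits reclaim and
	 * makes ENOMEM much less likely than with GFP_ATOMIC.
	 */
	return alloc_skb(size + MAX_HEADER, gfp);
}

static void example_callers(unsigned int len)
{
	/* Reached via sctp_sendmsg(): process context, may sleep. */
	struct sk_buff *a = example_alloc_linear(len, GFP_KERNEL);

	/* Reached from a timer/BH (e.g. a retransmit): must not sleep. */
	struct sk_buff *b = example_alloc_linear(len, GFP_ATOMIC);

	kfree_skb(a);	/* kfree_skb() tolerates NULL */
	kfree_skb(b);
}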
Diffstat (limited to 'net/sctp/outqueue.c')
-rw-r--r--  net/sctp/outqueue.c  30
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index c0380cfb16ae..f03541d0f12d 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -68,7 +68,7 @@ static void sctp_mark_missing(struct sctp_outq *q,
static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
-static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout);
+static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp);
/* Add data to the front of the queue. */
static inline void sctp_outq_head_data(struct sctp_outq *q,
@@ -285,7 +285,7 @@ void sctp_outq_free(struct sctp_outq *q)
}
/* Put a new chunk in an sctp_outq. */
-int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
+int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp)
{
struct net *net = sock_net(q->asoc->base.sk);
int error = 0;
@@ -341,7 +341,7 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
return error;
if (!q->cork)
- error = sctp_outq_flush(q, 0);
+ error = sctp_outq_flush(q, 0, gfp);
return error;
}
@@ -510,7 +510,7 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
* will be flushed at the end.
*/
if (reason != SCTP_RTXR_FAST_RTX)
- error = sctp_outq_flush(q, /* rtx_timeout */ 1);
+ error = sctp_outq_flush(q, /* rtx_timeout */ 1, GFP_ATOMIC);
if (error)
q->asoc->base.sk->sk_err = -error;
@@ -601,12 +601,12 @@ redo:
* control chunks are already freed so there
* is nothing we can do.
*/
- sctp_packet_transmit(pkt);
+ sctp_packet_transmit(pkt, GFP_ATOMIC);
goto redo;
}
/* Send this packet. */
- error = sctp_packet_transmit(pkt);
+ error = sctp_packet_transmit(pkt, GFP_ATOMIC);
/* If we are retransmitting, we should only
* send a single packet.
@@ -622,7 +622,7 @@ redo:
case SCTP_XMIT_RWND_FULL:
/* Send this packet. */
- error = sctp_packet_transmit(pkt);
+ error = sctp_packet_transmit(pkt, GFP_ATOMIC);
/* Stop sending DATA as there is no more room
* at the receiver.
@@ -632,7 +632,7 @@ redo:
case SCTP_XMIT_DELAY:
/* Send this packet. */
- error = sctp_packet_transmit(pkt);
+ error = sctp_packet_transmit(pkt, GFP_ATOMIC);
/* Stop sending DATA because of nagle delay. */
done = 1;
@@ -685,12 +685,12 @@ redo:
}
/* Cork the outqueue so queued chunks are really queued. */
-int sctp_outq_uncork(struct sctp_outq *q)
+int sctp_outq_uncork(struct sctp_outq *q, gfp_t gfp)
{
if (q->cork)
q->cork = 0;
- return sctp_outq_flush(q, 0);
+ return sctp_outq_flush(q, 0, gfp);
}
@@ -703,7 +703,7 @@ int sctp_outq_uncork(struct sctp_outq *q)
* locking concerns must be made. Today we use the sock lock to protect
* this function.
*/
-static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
+static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
{
struct sctp_packet *packet;
struct sctp_packet singleton;
@@ -825,7 +825,7 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
sctp_packet_init(&singleton, transport, sport, dport);
sctp_packet_config(&singleton, vtag, 0);
sctp_packet_append_chunk(&singleton, chunk);
- error = sctp_packet_transmit(&singleton);
+ error = sctp_packet_transmit(&singleton, gfp);
if (error < 0)
return error;
break;
@@ -856,7 +856,7 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
case SCTP_CID_ASCONF:
case SCTP_CID_FWD_TSN:
status = sctp_packet_transmit_chunk(packet, chunk,
- one_packet);
+ one_packet, gfp);
if (status != SCTP_XMIT_OK) {
/* put the chunk back */
list_add(&chunk->list, &q->control_chunk_list);
@@ -1011,7 +1011,7 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
atomic_read(&chunk->skb->users) : -1);
/* Add the chunk to the packet. */
- status = sctp_packet_transmit_chunk(packet, chunk, 0);
+ status = sctp_packet_transmit_chunk(packet, chunk, 0, gfp);
switch (status) {
case SCTP_XMIT_PMTU_FULL:
@@ -1088,7 +1088,7 @@ sctp_flush_out:
send_ready);
packet = &t->packet;
if (!sctp_packet_empty(packet))
- error = sctp_packet_transmit(packet);
+ error = sctp_packet_transmit(packet, gfp);
/* Clear the burst limited state, if any */
sctp_transport_burst_reset(t);
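
Usage note: with the new signatures, each call site picks gfp flags appropriate to its context. A hedged sketch of callers follows (the real call sites live in the other files touched by this patch; the example_* wrappers below are hypothetical):

/* Hypothetical wrappers showing how the changed outqueue API is driven. */
static int example_process_ctx_send(struct sctp_outq *q,
				    struct sctp_chunk *chunk)
{
	/* Reached from sctp_sendmsg(): a sleeping allocation is fine. */
	return sctp_outq_tail(q, chunk, GFP_KERNEL);
}

static int example_bh_ctx_flush(struct sctp_outq *q)
{
	/* Reached from a timer/BH: must remain non-sleeping. */
	return sctp_outq_uncork(q, GFP_ATOMIC);
}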