#include <net/sock.h>
+#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"
/* XXX For now we're using only 48-bit sequence numbers */
const int dccp_header_size = sizeof(*dh) +
sizeof(struct dccp_hdr_ext) +
- dccp_packet_hdr_len(dcb->dccpd_type);
+ dccp_packet_hdr_len(dcb->dccpd_type);
int err, set_ack = 1;
u64 ackno = dp->dccps_gsr;
- /*
- * FIXME: study DCCP_PKT_SYNC[ACK] to see what is the right thing
- * to do here...
- */
dccp_inc_seqno(&dp->dccps_gss);
- dcb->dccpd_seq = dp->dccps_gss;
- dccp_insert_options(sk, skb);
-
switch (dcb->dccpd_type) {
case DCCP_PKT_DATA:
set_ack = 0;
ackno = dcb->dccpd_seq;
break;
}
+
+ dcb->dccpd_seq = dp->dccps_gss;
+ dccp_insert_options(sk, skb);
skb->h.raw = skb_push(skb, dccp_header_size);
dh = dccp_hdr(skb);
- /* Data packets are not cloned as they are never retransmitted */
- if (skb_cloned(skb))
+
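+	/* Only charge the skb to this socket if nobody owns it yet */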
+ if (!skb->sk)
skb_set_owner_w(skb, sk);
/* Build DCCP header and checksum it. */
switch (dcb->dccpd_type) {
case DCCP_PKT_REQUEST:
- dccp_hdr_request(skb)->dccph_req_service = dcb->dccpd_service;
+ dccp_hdr_request(skb)->dccph_req_service =
+ dp->dccps_service;
break;
case DCCP_PKT_RESET:
- dccp_hdr_reset(skb)->dccph_reset_code = dcb->dccpd_reset_code;
+ dccp_hdr_reset(skb)->dccph_reset_code =
+ dcb->dccpd_reset_code;
break;
}
dh->dccph_checksum = dccp_v4_checksum(skb, inet->saddr,
inet->daddr);
- if (dcb->dccpd_type == DCCP_PKT_ACK ||
- dcb->dccpd_type == DCCP_PKT_DATAACK)
+ if (set_ack)
dccp_event_ack_sent(sk);
DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
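+	/* Wipe stale cb data so ip_queue_xmit() does not mistake it for IP options */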
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
err = ip_queue_xmit(skb, 0);
if (err <= 0)
return err;
int mss_now;
/*
- * FIXME: we really should be using the af_specific thing to support IPv6.
- * mss_now = pmtu - tp->af_specific->net_header_len - sizeof(struct dccp_hdr) - sizeof(struct dccp_hdr_ext);
+ * FIXME: we really should be using the af_specific thing to support
+ * IPv6.
+ * mss_now = pmtu - tp->af_specific->net_header_len -
+ * sizeof(struct dccp_hdr) - sizeof(struct dccp_hdr_ext);
*/
- mss_now = pmtu - sizeof(struct iphdr) - sizeof(struct dccp_hdr) - sizeof(struct dccp_hdr_ext);
+ mss_now = pmtu - sizeof(struct iphdr) - sizeof(struct dccp_hdr) -
+ sizeof(struct dccp_hdr_ext);
/* Now subtract optional transport overhead */
mss_now -= dp->dccps_ext_header_len;
return mss_now;
}
-int dccp_write_xmit(struct sock *sk, struct sk_buff *skb, const int len)
+void dccp_write_space(struct sock *sk)
+{
+ read_lock(&sk->sk_callback_lock);
+
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible(sk->sk_sleep);
+ /* Should agree with poll, otherwise some programs break */
+ if (sock_writeable(sk))
+ sk_wake_async(sk, 2, POLL_OUT);
+
+ read_unlock(&sk->sk_callback_lock);
+}
+
+/**
+ * dccp_wait_for_ccid - Wait for ccid to tell us we can send a packet
+ * @sk: socket to wait for
+ * @skb: skb to send once the CCID permits it
+ * @timeo: maximum time to wait, in jiffies
+ */
+static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb,
+ long *timeo)
+{
+ struct dccp_sock *dp = dccp_sk(sk);
+ DEFINE_WAIT(wait);
+ long delay;
+ int rc;
+
+ while (1) {
+ prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+
+ if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
+ goto do_error;
+ if (!*timeo)
+ goto do_nonblock;
+ if (signal_pending(current))
+ goto do_interrupted;
+
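+		/*
+		 * The CCID returns 0 when the packet may be sent now, a
+		 * negative value if it must not be sent, or a delay in
+		 * milliseconds.
+		 */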
+ rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb,
+ skb->len);
+ if (rc <= 0)
+ break;
+ delay = msecs_to_jiffies(rc);
+ if (delay > *timeo || delay < 0)
+ goto do_nonblock;
+
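+		/* Release the socket lock while sleeping so the receive path can run */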
+ sk->sk_write_pending++;
+ release_sock(sk);
+ *timeo -= schedule_timeout(delay);
+ lock_sock(sk);
+ sk->sk_write_pending--;
+ }
+out:
+ finish_wait(sk->sk_sleep, &wait);
+ return rc;
+
+do_error:
+ rc = -EPIPE;
+ goto out;
+do_nonblock:
+ rc = -EAGAIN;
+ goto out;
+do_interrupted:
+ rc = sock_intr_errno(*timeo);
+ goto out;
+}
+
+int dccp_write_xmit(struct sock *sk, struct sk_buff *skb, long *timeo)
{
const struct dccp_sock *dp = dccp_sk(sk);
- int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb, len);
+ int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb,
+ skb->len);
+
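+	/* A positive return value is a delay request from the CCID; wait it out */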
+ if (err > 0)
+ err = dccp_wait_for_ccid(sk, skb, timeo);
if (err == 0) {
- const struct dccp_ackpkts *ap = dp->dccps_hc_rx_ackpkts;
struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
+ const int len = skb->len;
if (sk->sk_state == DCCP_PARTOPEN) {
/* See 8.1.5. Handshake Completion */
inet_csk(sk)->icsk_rto,
DCCP_RTO_MAX);
dcb->dccpd_type = DCCP_PKT_DATAACK;
- /*
- * FIXME: we really should have a
- * dccps_ack_pending or use icsk.
- */
- } else if (inet_csk_ack_scheduled(sk) ||
- (dp->dccps_options.dccpo_send_ack_vector &&
- ap->dccpap_buf_ackno != DCCP_MAX_SEQNO + 1 &&
- ap->dccpap_ack_seqno == DCCP_MAX_SEQNO + 1))
+ } else if (dccp_ack_pending(sk))
dcb->dccpd_type = DCCP_PKT_DATAACK;
else
dcb->dccpd_type = DCCP_PKT_DATA;
err = dccp_transmit_skb(sk, skb);
ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
- }
+ } else
+ kfree_skb(skb);
return err;
}
struct request_sock *req)
{
struct dccp_hdr *dh;
+ struct dccp_request_sock *dreq;
const int dccp_header_size = sizeof(struct dccp_hdr) +
sizeof(struct dccp_hdr_ext) +
sizeof(struct dccp_hdr_response);
skb->dst = dst_clone(dst);
skb->csum = 0;
+ dreq = dccp_rsk(req);
DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
- DCCP_SKB_CB(skb)->dccpd_seq = dccp_rsk(req)->dreq_iss;
+ DCCP_SKB_CB(skb)->dccpd_seq = dreq->dreq_iss;
dccp_insert_options(sk, skb);
skb->h.raw = skb_push(skb, dccp_header_size);
dh->dccph_sport = inet_sk(sk)->sport;
dh->dccph_dport = inet_rsk(req)->rmt_port;
- dh->dccph_doff = (dccp_header_size + DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
+ dh->dccph_doff = (dccp_header_size +
+ DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
dh->dccph_type = DCCP_PKT_RESPONSE;
dh->dccph_x = 1;
- dccp_hdr_set_seq(dh, dccp_rsk(req)->dreq_iss);
- dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dccp_rsk(req)->dreq_isr);
+ dccp_hdr_set_seq(dh, dreq->dreq_iss);
+ dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_isr);
+ dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;
dh->dccph_checksum = dccp_v4_checksum(skb, inet_rsk(req)->loc_addr,
inet_rsk(req)->rmt_addr);
dh->dccph_sport = inet_sk(sk)->sport;
dh->dccph_dport = inet_sk(sk)->dport;
- dh->dccph_doff = (dccp_header_size + DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
+ dh->dccph_doff = (dccp_header_size +
+ DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
dh->dccph_type = DCCP_PKT_RESET;
dh->dccph_x = 1;
dccp_hdr_set_seq(dh, dp->dccps_gss);
skb_reserve(skb, MAX_DCCP_HEADER);
DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;
- /* FIXME: set service to something meaningful, coming
- * from userspace*/
- DCCP_SKB_CB(skb)->dccpd_service = 0;
skb->csum = 0;
skb_set_owner_w(skb, sk);
if (skb == NULL) {
inet_csk_schedule_ack(sk);
inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
- inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, TCP_DELACK_MAX, TCP_RTO_MAX);
+ inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
+ TCP_DELACK_MAX,
+ DCCP_RTO_MAX);
return;
}
sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}
-void dccp_send_sync(struct sock *sk, u64 seq)
+void dccp_send_sync(struct sock *sk, const u64 seq,
+ const enum dccp_pkt_type pkt_type)
{
/*
* We are not putting this on the write queue, so
/* Reserve space for headers and prepare control bits. */
skb_reserve(skb, MAX_DCCP_HEADER);
skb->csum = 0;
- DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_SYNC;
+ DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
DCCP_SKB_CB(skb)->dccpd_seq = seq;
skb_set_owner_w(skb, sk);
dccp_transmit_skb(sk, skb);
}
-/* Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This cannot be
- * allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under any circumstances.
+/*
+ * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
+ * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
+ * any circumstances.
*/
-void dccp_send_close(struct sock *sk)
+void dccp_send_close(struct sock *sk, const int active)
{
struct dccp_sock *dp = dccp_sk(sk);
struct sk_buff *skb;
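+	/* An active close may sleep waiting for memory; a passive close must not */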
+ const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC;
- /* Socket is locked, keep trying until memory is available. */
- for (;;) {
- skb = alloc_skb(sk->sk_prot->max_header, GFP_KERNEL);
- if (skb != NULL)
- break;
- yield();
- }
+ skb = alloc_skb(sk->sk_prot->max_header, prio);
+ if (skb == NULL)
+ return;
/* Reserve space for headers and prepare control bits. */
skb_reserve(skb, sk->sk_prot->max_header);
skb->csum = 0;
- DCCP_SKB_CB(skb)->dccpd_type = dp->dccps_role == DCCP_ROLE_CLIENT ? DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ;
+ DCCP_SKB_CB(skb)->dccpd_type = dp->dccps_role == DCCP_ROLE_CLIENT ?
+ DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ;
skb_set_owner_w(skb, sk);
- dccp_transmit_skb(sk, skb);
-
- ccid_hc_rx_exit(dp->dccps_hc_rx_ccid, sk);
- ccid_hc_tx_exit(dp->dccps_hc_tx_ccid, sk);
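+	/*
+	 * On an active close keep the skb queued so it can be retransmitted;
+	 * what actually goes out now is a clone.
+	 */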
+ if (active) {
+ BUG_TRAP(sk->sk_send_head == NULL);
+ sk->sk_send_head = skb;
+ dccp_transmit_skb(sk, skb_clone(skb, prio));
+ } else
+ dccp_transmit_skb(sk, skb);
}