functions
else if(rq->bRequest == USBRQ_CLEAR_FEATURE || rq->bRequest == USBRQ_SET_FEATURE){ /* 1|3 */ if(rq->wValue.bytes[0] == 0 && rq->wIndex.bytes[0] == 0x81){ /* feature 0 == HALT for endpoint == 1 */ usbTxLen1 = rq->bRequest == USBRQ_CLEAR_FEATURE ? USBPID_NAK : USBPID_STALL; USB_SET_DATATOKEN1(USB_INITIAL_DATATOKEN); /* reset data toggling for interrupt endpoint */ # if USB_CFG_HAVE_INTRIN_ENDPOINT3 USB_SET_DATATOKEN3(USB_INITIAL_DATATOKEN); /* reset data toggling for interrupt endpoint */ # endif }
functions
#if USB_CFG_IMPLEMENT_FN_READ || USB_CFG_IMPLEMENT_FN_WRITE if(replyLen == 0xff){ /* use user-supplied read/write function */ if((rq->bmRequestType & USBRQ_DIR_MASK) == USBRQ_DIR_DEVICE_TO_HOST){ replyLen = rq->wLength.bytes[0]; /* IN transfers only */ }
functions
else if(rval != 0){ /* This was the final packet */ replyLen = 0; /* answer with a zero-sized data packet */ }
functions
void usbBuildTxBlock(void) { uchar wantLen, len, txLen, token; wantLen = usbMsgLen; if(wantLen > 8) wantLen = 8; usbMsgLen -= wantLen; token = USBPID_DATA1; if(usbMsgFlags & USB_FLG_TX_PACKET) token = USBPID_DATA0; usbMsgFlags++; len = usbRead(usbTxBuf + 1, wantLen); if(len <= 8){ /* valid data packet */ usbCrc16Append(&usbTxBuf[1], len); txLen = len + 4; /* length including sync byte */ if(len < 8) /* a partial packet identifies the end of the message */ usbMsgLen = 0xff; }
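A worked view of the byte accounting behind txLen = len + 4 above: each transmitted data packet is one sync byte, one PID byte, up to 8 payload bytes, and the 16-bit CRC appended by usbCrc16Append(). The standalone check below is illustrative only, not driver code.

#include <stdio.h>

/* Illustration of usbBuildTxBlock()'s length math: for every payload
 * size the wire packet adds sync (1) + PID (1) + CRC16 (2) = 4 bytes. */
int main(void)
{
    for (int len = 0; len <= 8; len++)
        printf("payload %d -> txLen %d\n", len, len + 4);
    return 0;
}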
functions
uchar isNotSE0(void) { uchar rval; /* We want to do * return (USBIN & USBMASK); * here, but the compiler does int-expansion acrobatics. * We can avoid this by assigning to a char-sized variable. */ rval = USBIN & USBMASK; return rval; }
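The same trick in isolation: assigning the masked port read to a char-sized variable keeps the compiler's integer promotion from widening the expression to 16 bits on AVR. The names below are generic stand-ins, not the driver's macros.

/* Generic sketch of the int-expansion workaround in isNotSE0(); in the
 * driver, USBIN and USBMASK are port/bitmask macros. Forcing the result
 * through an unsigned char keeps avr-gcc in 8-bit arithmetic. */
unsigned char masked_read(volatile unsigned char *port, unsigned char mask)
{
    unsigned char rval = *port & mask;  /* char-sized, no int expansion */
    return rval;
}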
functions
void usbPoll(void) { schar len; uchar i; if((len = usbRxLen) > 0){ /* We could check CRC16 here -- but ACK has already been sent anyway. If you * need data integrity checks with this driver, check the CRC in your app * code and report errors back to the host. Since the ACK was already sent, * retries must be handled on application level. * unsigned crc = usbCrc16(buffer + 1, usbRxLen - 3); */ usbProcessRx(usbRxBuf + USB_BUFSIZE + 1 - usbInputBufOffset, len - 3); #if USB_CFG_HAVE_FLOWCONTROL if(usbRxLen > 0) /* only mark as available if not inactivated */ usbRxLen = 0; #else usbRxLen = 0; /* mark rx buffer as available */ #endif }
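A minimal sketch of the application-level CRC check suggested in the comment above, assuming usbdrv's usbCrc16() and a received buffer laid out as PID byte, payload, then the two CRC bytes (low byte first); the function and parameter names here are hypothetical, and retry handling stays with the application since the ACK has already been sent.

#include "usbdrv.h"  /* assumption: provides usbCrc16() and the uchar type */

/* Sketch only: recompute the CRC16 over the payload (skipping the PID
 * byte and the trailing two CRC bytes) and compare with the stored value. */
static uchar appRxCrcOk(uchar *buffer, uchar len)
{
    unsigned crc = usbCrc16(buffer + 1, len - 3);  /* as in the comment above */
    return crc == (unsigned)(buffer[len - 2] | (buffer[len - 1] << 8));
}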
functions
void usbInit(void) { #if USB_INTR_CFG_SET != 0 USB_INTR_CFG |= USB_INTR_CFG_SET; #endif #if USB_INTR_CFG_CLR != 0 USB_INTR_CFG &= ~(USB_INTR_CFG_CLR); #endif USB_INTR_ENABLE |= (1 << USB_INTR_ENABLE_BIT); #if USB_CFG_HAVE_INTRIN_ENDPOINT USB_SET_DATATOKEN1(USB_INITIAL_DATATOKEN); /* reset data toggling for interrupt endpoint */ # if USB_CFG_HAVE_INTRIN_ENDPOINT3 USB_SET_DATATOKEN3(USB_INITIAL_DATATOKEN); /* reset data toggling for interrupt endpoint */ # endif #endif }
includes
#include <assert.h>
includes
#include <vlc_common.h>
includes
#include <vlc_plugin.h>
includes
#include <vlc_demux.h>
defines
#define FRAME_LENGTH 28 /* samples per frame */
structs
struct demux_sys_t { es_out_id_t *p_es; int64_t i_data_offset; unsigned int i_data_size; unsigned int i_block_frames; unsigned int i_frame_size; unsigned int i_bitrate; date_t pts; };
functions
int Open( vlc_object_t * p_this ) { demux_t *p_demux = (demux_t*)p_this; const uint8_t *peek; /* XA file heuristic */ if( vlc_stream_Peek( p_demux->s, &peek, 10 ) < 10 ) return VLC_EGENERIC; if( memcmp( peek, "XAI", 4 ) && memcmp( peek, "XAJ", 4 ) ) return VLC_EGENERIC; if( GetWLE( peek + 8 ) != 1 ) /* format tag */ return VLC_EGENERIC; demux_sys_t *p_sys = malloc( sizeof( demux_sys_t ) ); if( unlikely( p_sys == NULL ) ) return VLC_ENOMEM; /* read XA header */ xa_header_t xa; if( vlc_stream_Read( p_demux->s, &xa, 24 ) < 24 ) { free( p_sys ); return VLC_EGENERIC; }
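Open() reads 24 bytes into an xa_header_t that this excerpt does not show. A reconstruction consistent with the checks above (4-byte "XAI"/"XAJ" magic at offset 0, a little-endian format tag at offset 8, 24 bytes total) could look like this; the field names are assumptions in WAVEFORMAT style, not a verbatim copy:

#include <stdint.h>

/* Hypothetical layout: 4 + 4 + 2 + 2 + 4 + 4 + 2 + 2 = 24 bytes, matching
 * the vlc_stream_Read() size and the GetWLE( peek + 8 ) format-tag check. */
typedef struct
{
    char     xa_id[4];        /* "XAI\0" or "XAJ\0" magic  */
    uint32_t iSize;           /* data size in bytes        */
    uint16_t wFormatTag;      /* must be 1, per Open()     */
    uint16_t nChannels;
    uint32_t nSamplesPerSec;
    uint32_t nAvgBytesPerSec;
    uint16_t nBlockAlign;
    uint16_t wBitsPerSample;
} xa_header_t;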
functions
int Demux( demux_t *p_demux ) { demux_sys_t *p_sys = p_demux->p_sys; block_t *p_block; int64_t i_offset; unsigned i_frames = p_sys->i_block_frames; i_offset = vlc_stream_Tell( p_demux->s ); if( p_sys->i_data_size > 0 && i_offset >= p_sys->i_data_offset + p_sys->i_data_size ) { /* EOF */ return 0; }
functions
void Close ( vlc_object_t * p_this ) { demux_sys_t *p_sys = ((demux_t *)p_this)->p_sys; free( p_sys ); }
functions
int Control( demux_t *p_demux, int i_query, va_list args ) { demux_sys_t *p_sys = p_demux->p_sys; return demux_vaControlHelper( p_demux->s, p_sys->i_data_offset, p_sys->i_data_size ? p_sys->i_data_offset + p_sys->i_data_size : -1, p_sys->i_bitrate, p_sys->i_frame_size, i_query, args ); }
includes
#include <string.h>
includes
#include <stdlib.h>
functions
int cursor_visibility(int tid, int sid, char visibility) { if(SCR(tid, sid).curs_invisible != !visibility) { SCR(tid, sid).curs_invisible = !visibility; if(!record_update(tid, sid, visibility ? UPD_CURS : UPD_CURS_INVIS)) { if(ltm_curerr.err_no == ESRCH) return 0; else return -1; }
functions
int cursor_abs_move(int tid, int sid, enum axis axis, ushort num) { int ret = 0; uint old; SCR(tid, sid).curs_prev_not_set = 0; switch(axis) { case X: old = SCR(tid, sid).cursor.x; if(num < SCR(tid, sid).cols) SCR(tid, sid).cursor.x = num; else SCR(tid, sid).cursor.x = SCR(tid, sid).cols-1; if(old == SCR(tid, sid).cursor.x) return 0; break; case Y: old = SCR(tid, sid).cursor.y; if(num < SCR(tid, sid).lines) SCR(tid, sid).cursor.y = num; else SCR(tid, sid).cursor.y = SCR(tid, sid).lines-1; if(old == SCR(tid, sid).cursor.y) return 0; break; default: LTM_ERR(EINVAL, "Invalid axis", error); }
functions
int cursor_rel_move(int tid, int sid, enum direction direction, ushort num) { int ret = 0; if(!num) return 0; switch(direction) { case UP: return cursor_abs_move(tid, sid, Y, num <= SCR(tid, sid).cursor.y ? SCR(tid, sid).cursor.y - num : 0); case DOWN: return cursor_abs_move(tid, sid, Y, SCR(tid, sid).cursor.y + num); case LEFT: return cursor_abs_move(tid, sid, X, num <= SCR(tid, sid).cursor.x ? SCR(tid, sid).cursor.x - num : 0); case RIGHT: return cursor_abs_move(tid, sid, X, SCR(tid, sid).cursor.x + num); default: LTM_ERR(EINVAL, "Invalid direction", error); }
functions
int cursor_horiz_tab(int tid, int sid) { /* don't hardcode 8 here in the future? */ char dist = 8 - (SCR(tid, sid).cursor.x % 8); return cursor_rel_move(tid, sid, RIGHT, dist); }
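The distance 8 - (x % 8) always moves the cursor to the next multiple of 8 and is never zero, so tabbing from a tab stop advances a full 8 columns. A standalone check:

#include <stdio.h>

/* Verifies the tab-stop arithmetic used by cursor_horiz_tab(). */
int main(void)
{
    for (int x = 0; x < 17; x++) {
        int dist = 8 - x % 8;
        printf("col %2d -> +%d -> col %2d\n", x, dist, x + dist);
    }
    return 0;
}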
functions
int cursor_down(int tid, int sid) { if(SCR(tid, sid).cursor.y == SCR(tid, sid).lines-1 && SCR(tid, sid).autoscroll) return screen_scroll(tid, sid); else return cursor_rel_move(tid, sid, DOWN, 1); }
functions
int cursor_vertical_tab(int tid, int sid) { if(cursor_down(tid, sid) == -1) return -1; bitarr_unset_index(SCR(tid, sid).wrapped, SCR(tid, sid).cursor.y); return 0; }
functions
int cursor_line_break(int tid, int sid) { if(cursor_vertical_tab(tid, sid) == -1) return -1; if(cursor_abs_move(tid, sid, X, 0) == -1) return -1; return 0; }
functions
int cursor_wrap(int tid, int sid) { if(cursor_down(tid, sid) == -1) return -1; bitarr_set_index(SCR(tid, sid).wrapped, SCR(tid, sid).cursor.y); if(cursor_abs_move(tid, sid, X, 0) == -1) return -1; return 0; }
functions
int cursor_advance(int tid, int sid) { if(SCR(tid, sid).cursor.x == SCR(tid, sid).cols-1) { if(!SCR(tid, sid).curs_prev_not_set) { SCR(tid, sid).curs_prev_not_set = 1; return 0; }
includes
#include <net/mptcp.h>
includes
#include <net/ipv6.h>
includes
#include <net/tcp.h>
includes
#include <linux/compiler.h>
includes
#include <linux/gfp.h>
includes
#include <linux/module.h>
defines
#define pr_fmt(fmt) "TCP: " fmt
defines
#define OPTION_SACK_ADVERTISE (1 << 0)
defines
#define OPTION_TS (1 << 1)
defines
#define OPTION_MD5 (1 << 2)
defines
#define OPTION_WSCALE (1 << 3)
defines
#define OPTION_FAST_OPEN_COOKIE (1 << 8)
defines
#define TCP_DEFERRED_ALL ((1UL << TCP_TSQ_DEFERRED) | (1UL << TCP_WRITE_TIMER_DEFERRED) | (1UL << TCP_DELACK_TIMER_DEFERRED) | (1UL << TCP_MTU_REDUCED_DEFERRED))
structs
struct tsq_tasklet { struct tasklet_struct tasklet; struct list_head head; /* queue of tcp sockets */ };
functions
void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); unsigned int prior_packets = tp->packets_out; tcp_advance_send_head(sk, skb); tp->snd_nxt = TCP_SKB_CB(skb)->end_seq; tp->packets_out += tcp_skb_pcount(skb); if (!prior_packets || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { tcp_rearm_rto(sk); }
functions
__u32 tcp_acceptable_seq(const struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); if (!before(tcp_wnd_end(tp), tp->snd_nxt)) return tp->snd_nxt; else return tcp_wnd_end(tp); }
functions
__u16 tcp_advertise_mss(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); const struct dst_entry *dst = __sk_dst_get(sk); int mss = tp->advmss; if (dst) { unsigned int metric = dst_metric_advmss(dst); if (metric < mss) { mss = metric; tp->advmss = mss; }
functions
void tcp_cwnd_restart(struct sock *sk, const struct dst_entry *dst) { struct tcp_sock *tp = tcp_sk(sk); s32 delta = tcp_time_stamp - tp->lsndtime; u32 restart_cwnd = tcp_init_cwnd(tp, dst); u32 cwnd = tp->snd_cwnd; tcp_ca_event(sk, CA_EVENT_CWND_RESTART); tp->snd_ssthresh = tcp_current_ssthresh(sk); restart_cwnd = min(restart_cwnd, cwnd); while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd) cwnd >>= 1; tp->snd_cwnd = max(cwnd, restart_cwnd); tp->snd_cwnd_stamp = tcp_time_stamp; tp->snd_cwnd_used = 0; }
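The loop above halves the congestion window once for each full RTO the connection sat idle, but never below the restart window. A standalone illustration with made-up numbers:

#include <stdio.h>

/* Illustration of the decay loop in tcp_cwnd_restart(): the window is
 * halved once per elapsed RTO of idle time, floored at restart_cwnd.
 * The values below are invented for the example. */
int main(void)
{
    unsigned cwnd = 40, restart_cwnd = 10;
    int delta = 700, rto = 200;          /* ~3 RTOs of idle time */
    while ((delta -= rto) > 0 && cwnd > restart_cwnd)
        cwnd >>= 1;                      /* 40 -> 20 -> 10 */
    if (cwnd < restart_cwnd)
        cwnd = restart_cwnd;
    printf("cwnd after idle: %u\n", cwnd);   /* prints 10 */
    return 0;
}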
functions
void tcp_event_data_sent(struct tcp_sock *tp, struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); const u32 now = tcp_time_stamp; const struct dst_entry *dst = __sk_dst_get(sk); if (sysctl_tcp_slow_start_after_idle && (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto)) tcp_cwnd_restart(sk, __sk_dst_get(sk)); tp->lsndtime = now; /* If it is a reply for ato after last received * packet, enter pingpong mode. */ if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato && (!dst || !dst_metric(dst, RTAX_QUICKACK))) icsk->icsk_ack.pingpong = 1; }
functions
void tcp_event_ack_sent(struct sock *sk, unsigned int pkts) { tcp_dec_quickack_mode(sk, pkts); inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); }
functions
u32 tcp_default_init_rwnd(u32 mss) { /* Initial receive window should be twice TCP_INIT_CWND to * enable proper sending of new unsent data during fast recovery * (RFC 3517, Section 4, NextSeg() rule (2)). Further, place a * limit when mss is larger than 1460. */ u32 init_rwnd = TCP_INIT_CWND * 2; if (mss > 1460) init_rwnd = max((1460 * init_rwnd) / mss, 2U); return init_rwnd; }
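With the kernel's TCP_INIT_CWND of 10 (RFC 6928), the initial offer is 20 segments up to an MSS of 1460; for larger MSS values the segment count shrinks so the byte total stays near 20 * 1460, floored at 2 segments. A standalone re-computation:

#include <stdio.h>

#define TCP_INIT_CWND 10  /* kernel value, per RFC 6928 */

/* Re-computation of tcp_default_init_rwnd() outside the kernel. */
static unsigned int init_rwnd(unsigned int mss)
{
    unsigned int w = TCP_INIT_CWND * 2;   /* 20 segments */
    if (mss > 1460) {
        w = (1460 * w) / mss;             /* keep the byte total ~constant */
        if (w < 2)
            w = 2;
    }
    return w;
}

int main(void)
{
    printf("%u %u %u\n", init_rwnd(536), init_rwnd(1460), init_rwnd(9000));
    /* prints: 20 20 3 */
    return 0;
}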
functions
void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd, __u32 *window_clamp, int wscale_ok, __u8 *rcv_wscale, __u32 init_rcv_wnd, const struct sock *sk) { unsigned int space; if (tcp_sk(sk)->mpc) mptcp_select_initial_window(&__space, window_clamp, sk); space = (__space < 0 ? 0 : __space); /* If no clamp set the clamp to the max possible scaled window */ if (*window_clamp == 0) (*window_clamp) = (65535 << 14); space = min(*window_clamp, space); /* Quantize space offering to a multiple of mss if possible. */ if (space > mss) space = (space / mss) * mss; /* NOTE: offering an initial window larger than 32767 * will break some buggy TCP stacks. If the admin tells us * it is likely we could be speaking with such a buggy stack * we will truncate our initial window offering to 32K-1 * unless the remote has sent us a window scaling option, * which we interpret as a sign the remote TCP is not * misinterpreting the window field as a signed quantity. */ if (sysctl_tcp_workaround_signed_windows) (*rcv_wnd) = min(space, MAX_TCP_WINDOW); else (*rcv_wnd) = space; (*rcv_wscale) = 0; if (wscale_ok) { /* Set window scaling on max possible window * See RFC1323 for an explanation of the limit to 14 */ space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max); space = min_t(u32, space, *window_clamp); while (space > 65535 && (*rcv_wscale) < 14) { space >>= 1; (*rcv_wscale)++; }
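The wscale loop above picks the smallest shift that makes the receive buffer representable in the 16-bit window field, capped at 14 as RFC 1323 requires. For instance, a 4 MiB buffer needs wscale 7:

#include <stdio.h>

/* Worked example of the rcv_wscale loop in tcp_select_initial_window():
 * halve the space until it fits in 16 bits, counting the shifts. */
int main(void)
{
    unsigned space = 4 * 1024 * 1024;  /* e.g. a 4 MiB receive buffer */
    unsigned wscale = 0;
    while (space > 65535 && wscale < 14) {
        space >>= 1;
        wscale++;
    }
    printf("wscale %u, window unit %u bytes\n", wscale, 1u << wscale);
    /* 4 MiB gives wscale 7: 4194304 >> 7 == 32768 <= 65535 */
    return 0;
}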
functions
u16 tcp_select_window(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); /* The window must never shrink at the meta-level. At the subflow we * have to allow this. Otherwise we may announce a window too large * for the current meta-level sk_rcvbuf. */ u32 cur_win = tcp_receive_window(tp->mpc ? tcp_sk(mptcp_meta_sk(sk)) : tp); u32 new_win = __tcp_select_window(sk); /* Never shrink the offered window */ if (new_win < cur_win) { /* Danger Will Robinson! * Don't update rcv_wup/rcv_wnd here or else * we will not be able to advertise a zero * window in time. --DaveM * * Relax Will Robinson. */ new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale); }
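ALIGN rounds cur_win up to a multiple of the advertised window unit (1 << rcv_wscale), so the value that survives the right shift into the header field never drops below what was already offered. Equivalent arithmetic, standalone:

#include <stdio.h>

/* Same arithmetic as the kernel's ALIGN() for a power-of-two unit. */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

/* Worked example of the no-shrink clamp in tcp_select_window(): with
 * wscale 7 the window field counts 128-byte units, so cur_win is rounded
 * up to the next multiple of 128 before being advertised. */
int main(void)
{
    unsigned cur_win = 10000, wscale = 7;
    unsigned new_win = ALIGN(cur_win, 1u << wscale);  /* 10112 */
    printf("%u -> %u (unit %u)\n", cur_win, new_win, 1u << wscale);
    return 0;
}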
functions
void TCP_ECN_send_synack(const struct tcp_sock *tp, struct sk_buff *skb) { TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR; if (!(tp->ecn_flags & TCP_ECN_OK)) TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE; }
functions
void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); tp->ecn_flags = 0; if (sock_net(sk)->ipv4.sysctl_tcp_ecn == 1) { TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR; tp->ecn_flags = TCP_ECN_OK; }
functions
void TCP_ECN_make_synack(const struct request_sock *req, struct tcphdr *th) { if (inet_rsk(req)->ecn_ok) th->ece = 1; }
functions
void TCP_ECN_send(struct sock *sk, struct sk_buff *skb, int tcp_header_len) { struct tcp_sock *tp = tcp_sk(sk); if (tp->ecn_flags & TCP_ECN_OK) { /* Not-retransmitted data segment: set ECT and inject CWR. */ if (skb->len != tcp_header_len && !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) { INET_ECN_xmit(sk); if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) { tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR; tcp_hdr(skb)->cwr = 1; skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; }
functions
void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags) { skb->ip_summed = CHECKSUM_PARTIAL; skb->csum = 0; TCP_SKB_CB(skb)->tcp_flags = flags; TCP_SKB_CB(skb)->sacked = 0; skb_shinfo(skb)->gso_segs = 1; skb_shinfo(skb)->gso_size = 0; skb_shinfo(skb)->gso_type = 0; TCP_SKB_CB(skb)->seq = seq; if (flags & (TCPHDR_SYN | TCPHDR_FIN)) seq++; TCP_SKB_CB(skb)->end_seq = seq; }
functions
bool tcp_urg_mode(const struct tcp_sock *tp) { return tp->snd_una != tp->snd_up; }
functions
void tcp_options_write(__be32 *ptr, struct tcp_sock *tp, struct tcp_out_options *opts, struct sk_buff *skb) { u16 options = opts->options; /* mungable copy */ if (unlikely(OPTION_MD5 & options)) { *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG); /* overload cookie hash location */ opts->hash_location = (__u8 *)ptr; ptr += 4; }
functions
int tcp_syn_options(struct sock *sk, struct sk_buff *skb, struct tcp_out_options *opts, struct tcp_md5sig_key **md5) { struct tcp_sock *tp = tcp_sk(sk); unsigned int remaining = MAX_TCP_OPTION_SPACE; struct tcp_fastopen_request *fastopen = tp->fastopen_req; #ifdef CONFIG_TCP_MD5SIG *md5 = tp->af_specific->md5_lookup(sk, sk); if (*md5) { opts->options |= OPTION_MD5; remaining -= TCPOLEN_MD5SIG_ALIGNED; }
functions
int tcp_synack_options(struct sock *sk, struct request_sock *req, unsigned int mss, struct sk_buff *skb, struct tcp_out_options *opts, struct tcp_md5sig_key **md5, struct tcp_fastopen_cookie *foc) { struct inet_request_sock *ireq = inet_rsk(req); unsigned int remaining = MAX_TCP_OPTION_SPACE; #ifdef CONFIG_TCP_MD5SIG *md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req); if (*md5) { opts->options |= OPTION_MD5; remaining -= TCPOLEN_MD5SIG_ALIGNED; /* We can't fit any SACK blocks in a packet with MD5 + TS * options. There was discussion about disabling SACK * rather than TS in order to fit in better with old, * buggy kernels, but that was deemed to be unnecessary. */ ireq->tstamp_ok &= !ireq->sack_ok; }
functions
int tcp_established_options(struct sock *sk, struct sk_buff *skb, struct tcp_out_options *opts, struct tcp_md5sig_key **md5) { struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL; struct tcp_sock *tp = tcp_sk(sk); unsigned int size = 0; unsigned int eff_sacks; #ifdef CONFIG_TCP_MD5SIG *md5 = tp->af_specific->md5_lookup(sk, sk); if (unlikely(*md5)) { opts->options |= OPTION_MD5; size += TCPOLEN_MD5SIG_ALIGNED; }
functions
void tcp_tsq_handler(struct sock *sk) { if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_CLOSE_WAIT | TCPF_LAST_ACK)) tcp_write_xmit(sk, tcp_current_mss(sk), 0, 0, GFP_ATOMIC); }
functions
void tcp_tasklet_func(unsigned long data) { struct tsq_tasklet *tsq = (struct tsq_tasklet *)data; LIST_HEAD(list); unsigned long flags; struct list_head *q, *n; struct tcp_sock *tp; struct sock *sk, *meta_sk; local_irq_save(flags); list_splice_init(&tsq->head, &list); local_irq_restore(flags); list_for_each_safe(q, n, &list) { tp = list_entry(q, struct tcp_sock, tsq_node); list_del(&tp->tsq_node); sk = (struct sock *)tp; meta_sk = tp->mpc ? mptcp_meta_sk(sk) : sk; bh_lock_sock(meta_sk); if (!sock_owned_by_user(meta_sk)) { tcp_tsq_handler(sk); if (tp->mpc) tcp_tsq_handler(meta_sk); }
functions
void tcp_release_cb(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); unsigned long flags, nflags; /* perform an atomic operation only if at least one flag is set */ do { flags = tp->tsq_flags; if (!(flags & TCP_DEFERRED_ALL)) return; nflags = flags & ~TCP_DEFERRED_ALL; }
functions
void __init tcp_tasklet_init(void) { int i; for_each_possible_cpu(i) { struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i); INIT_LIST_HEAD(&tsq->head); tasklet_init(&tsq->tasklet, tcp_tasklet_func, (unsigned long)tsq); }
functions
void tcp_wfree(struct sk_buff *skb) { struct sock *sk = skb->sk; struct tcp_sock *tp = tcp_sk(sk); if (test_and_clear_bit(TSQ_THROTTLED, &tp->tsq_flags) && !test_and_set_bit(TSQ_QUEUED, &tp->tsq_flags)) { unsigned long flags; struct tsq_tasklet *tsq; /* Keep a ref on socket. * This last ref will be released in tcp_tasklet_func() */ atomic_sub(skb->truesize - 1, &sk->sk_wmem_alloc); /* queue this socket to tasklet queue */ local_irq_save(flags); tsq = &__get_cpu_var(tsq_tasklet); list_add(&tp->tsq_node, &tsq->head); tasklet_schedule(&tsq->tasklet); local_irq_restore(flags); }
functions
int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, gfp_t gfp_mask) { const struct inet_connection_sock *icsk = inet_csk(sk); struct inet_sock *inet; struct tcp_sock *tp; struct tcp_skb_cb *tcb; struct tcp_out_options opts; unsigned int tcp_options_size, tcp_header_size; struct tcp_md5sig_key *md5; struct tcphdr *th; int err; BUG_ON(!skb || !tcp_skb_pcount(skb)); /* If congestion control is doing timestamping, we must * take such a timestamp before we potentially clone/copy. */ if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP) __net_timestamp(skb); if (likely(clone_it)) { const struct sk_buff *fclone = skb + 1; if (unlikely(skb->fclone == SKB_FCLONE_ORIG && fclone->fclone == SKB_FCLONE_CLONE)) NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES); if (unlikely(skb_cloned(skb))) { struct sk_buff *newskb; if (mptcp_is_data_seq(skb)) skb_push(skb, MPTCP_SUB_LEN_DSS_ALIGN + MPTCP_SUB_LEN_ACK_ALIGN + MPTCP_SUB_LEN_SEQ_ALIGN); newskb = pskb_copy(skb, gfp_mask); if (mptcp_is_data_seq(skb)) { skb_pull(skb, MPTCP_SUB_LEN_DSS_ALIGN + MPTCP_SUB_LEN_ACK_ALIGN + MPTCP_SUB_LEN_SEQ_ALIGN); if (newskb) skb_pull(newskb, MPTCP_SUB_LEN_DSS_ALIGN + MPTCP_SUB_LEN_ACK_ALIGN + MPTCP_SUB_LEN_SEQ_ALIGN); }
functions
void tcp_queue_skb(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); /* Advance write_seq and place onto the write_queue. */ tp->write_seq = TCP_SKB_CB(skb)->end_seq; skb_header_release(skb); tcp_add_write_queue_tail(sk, skb); sk->sk_wmem_queued += skb->truesize; sk_mem_charge(sk, skb->truesize); }
functions
void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb, unsigned int mss_now) { if (skb->len <= mss_now || (is_meta_sk(sk) && !mptcp_sk_can_gso(sk)) || (!is_meta_sk(sk) && !sk_can_gso(sk)) || skb->ip_summed == CHECKSUM_NONE) { /* Avoid the costly divide in the normal * non-TSO case. */ skb_shinfo(skb)->gso_segs = 1; skb_shinfo(skb)->gso_size = 0; skb_shinfo(skb)->gso_type = 0; }
functions
void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb, int decr) { struct tcp_sock *tp = tcp_sk(sk); if (!tp->sacked_out || tcp_is_reno(tp)) return; if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq)) tp->fackets_out -= decr; }
functions
void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr) { struct tcp_sock *tp = tcp_sk(sk); tp->packets_out -= decr; if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) tp->sacked_out -= decr; if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) tp->retrans_out -= decr; if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) tp->lost_out -= decr; /* Reno case is special. Sigh... */ if (tcp_is_reno(tp) && decr > 0) tp->sacked_out -= min_t(u32, tp->sacked_out, decr); tcp_adjust_fackets_out(sk, skb, decr); if (tp->lost_skb_hint && before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) && (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))) tp->lost_cnt_hint -= decr; tcp_verify_left_out(tp); }
functions
int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *buff; int nsize, old_factor; int nlen; u8 flags; if (tcp_sk(sk)->mpc && mptcp_is_data_seq(skb)) return mptcp_fragment(sk, skb, len, mss_now, 0); if (WARN_ON(len > skb->len)) return -EINVAL; nsize = skb_headlen(skb) - len; if (nsize < 0) nsize = 0; if (skb_cloned(skb) && skb_is_nonlinear(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) return -ENOMEM; /* Get a new skb... force flag on. */ buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC); if (buff == NULL) return -ENOMEM; /* We'll just try again later. */ sk->sk_wmem_queued += buff->truesize; sk_mem_charge(sk, buff->truesize); nlen = skb->len - len - nsize; buff->truesize += nlen; skb->truesize -= nlen; /* Correct the sequence numbers. */ TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; /* PSH and FIN should only be set in the second packet. */ flags = TCP_SKB_CB(skb)->tcp_flags; TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); TCP_SKB_CB(buff)->tcp_flags = flags; TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked; if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) { /* Copy and checksum data tail into the new buffer. */ buff->csum = csum_partial_copy_nocheck(skb->data + len, skb_put(buff, nsize), nsize, 0); skb_trim(skb, len); skb->csum = csum_block_sub(skb->csum, buff->csum, len); }
functions
void __pskb_trim_head(struct sk_buff *skb, int len) { int i, k, eat; eat = min_t(int, len, skb_headlen(skb)); if (eat) { __skb_pull(skb, eat); len -= eat; if (!len) return; }
functions
int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) { if (tcp_sk(sk)->mpc && !is_meta_sk(sk) && mptcp_is_data_seq(skb)) return mptcp_trim_head(sk, skb, len); if (skb_unclone(skb, GFP_ATOMIC)) return -ENOMEM; __pskb_trim_head(skb, len); TCP_SKB_CB(skb)->seq += len; skb->ip_summed = CHECKSUM_PARTIAL; skb->truesize -= len; sk->sk_wmem_queued -= len; sk_mem_uncharge(sk, len); sock_set_flag(sk, SOCK_QUEUE_SHRUNK); /* Any change of skb->len requires recalculation of tso factor. */ if (tcp_skb_pcount(skb) > 1) tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb)); #ifdef CONFIG_MPTCP /* Some data got acked - we assume that the seq-number reached the dest. * Anyway, our MPTCP-option has been trimmed above - we lost it here. * Only remove the SEQ if the call does not come from a meta retransmit. */ if (tcp_sk(sk)->mpc && !is_meta_sk(sk)) TCP_SKB_CB(skb)->mptcp_flags &= ~MPTCPHDR_SEQ; #endif return 0; }
functions
int __tcp_mtu_to_mss(struct sock *sk, int pmtu) { const struct tcp_sock *tp = tcp_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); int mss_now; /* Calculate base mss without TCP options: It is MMS_S - sizeof(tcphdr) of rfc1122 */ mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr); /* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */ if (icsk->icsk_af_ops->net_frag_header_len) { const struct dst_entry *dst = __sk_dst_get(sk); if (dst && dst_allfrag(dst)) mss_now -= icsk->icsk_af_ops->net_frag_header_len; }
functions
int tcp_mtu_to_mss(struct sock *sk, int pmtu) { /* Subtract TCP options size, not including SACKs */ return __tcp_mtu_to_mss(sk, pmtu) - (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr)); }
functions
int tcp_mss_to_mtu(struct sock *sk, int mss) { const struct tcp_sock *tp = tcp_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); int mtu; mtu = mss + tp->tcp_header_len + icsk->icsk_ext_hdr_len + icsk->icsk_af_ops->net_header_len; /* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */ if (icsk->icsk_af_ops->net_frag_header_len) { const struct dst_entry *dst = __sk_dst_get(sk); if (dst && dst_allfrag(dst)) mtu += icsk->icsk_af_ops->net_frag_header_len; }
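The helpers above invert each other once option space is accounted for. A worked IPv4 example, ignoring icsk_ext_hdr_len and the ALLFRAG fragment header, with timestamps enabled (tcp_header_len = 20 + 12 = 32):

#include <stdio.h>

/* Round trip of __tcp_mtu_to_mss() / tcp_mtu_to_mss() / tcp_mss_to_mtu()
 * with plain IPv4 numbers (20-byte IP and TCP headers). */
int main(void)
{
    int pmtu = 1500, ip_hdr = 20, tcp_hdr = 20, tcp_header_len = 32;
    int base_mss = pmtu - ip_hdr - tcp_hdr;               /* 1460 */
    int mss_now  = base_mss - (tcp_header_len - tcp_hdr); /* 1448 with TS */
    int mtu_back = mss_now + tcp_header_len + ip_hdr;     /* 1500 again */
    printf("base %d, mss %d, mtu %d\n", base_mss, mss_now, mtu_back);
    return 0;
}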
functions
void tcp_mtup_init(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1; icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) + icsk->icsk_af_ops->net_header_len; icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss); icsk->icsk_mtup.probe_size = 0; }
functions
int tcp_sync_mss(struct sock *sk, u32 pmtu) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); int mss_now; if (icsk->icsk_mtup.search_high > pmtu) icsk->icsk_mtup.search_high = pmtu; mss_now = tcp_mtu_to_mss(sk, pmtu); mss_now = tcp_bound_to_half_wnd(tp, mss_now); /* And store cached results */ icsk->icsk_pmtu_cookie = pmtu; if (icsk->icsk_mtup.enabled) mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low)); tp->mss_cache = mss_now; return mss_now; }
functions
int tcp_current_mss(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); const struct dst_entry *dst = __sk_dst_get(sk); u32 mss_now; unsigned int header_len; struct tcp_out_options opts; struct tcp_md5sig_key *md5; mss_now = tp->mss_cache; if (dst) { u32 mtu = dst_mtu(dst); if (mtu != inet_csk(sk)->icsk_pmtu_cookie) mss_now = tcp_sync_mss(sk, mtu); }
functions
void tcp_cwnd_validate(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); if (tp->packets_out >= tp->snd_cwnd) { /* Network is fed fully. */ tp->snd_cwnd_used = 0; tp->snd_cwnd_stamp = tcp_time_stamp; }
functions
int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb, unsigned int mss_now, unsigned int max_segs) { const struct tcp_sock *tp = tcp_sk(sk); const struct sock *meta_sk = tp->mpc ? mptcp_meta_sk(sk) : sk; u32 needed, window, max_len; if (!tp->mpc) window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; else /* We need to evaluate the available space in the sending window * at the subflow level. However, the subflow seq has not yet * been set. Nevertheless we know that the caller will set it to * write_seq. */ window = tcp_wnd_end(tp) - tp->write_seq; max_len = mss_now * max_segs; if (likely(max_len <= window && skb != tcp_write_queue_tail(meta_sk))) return max_len; needed = min(skb->len, window); if (max_len <= needed) return max_len; return needed - needed % mss_now; }
functions
int tcp_cwnd_test(const struct tcp_sock *tp, const struct sk_buff *skb) { u32 in_flight, cwnd; /* Don't be strict about the congestion window for the final FIN. */ if (skb && ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) || mptcp_is_data_fin(skb)) && tcp_skb_pcount(skb) == 1) return 1; in_flight = tcp_packets_in_flight(tp); cwnd = tp->snd_cwnd; if (in_flight < cwnd) return (cwnd - in_flight); return 0; }
functions
int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb, unsigned int mss_now) { int tso_segs = tcp_skb_pcount(skb); if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) { tcp_set_skb_tso_segs(sk, skb, mss_now); tso_segs = tcp_skb_pcount(skb); }
functions
bool tcp_minshall_check(const struct tcp_sock *tp) { return after(tp->snd_sml, tp->snd_una) && !after(tp->snd_sml, tp->snd_nxt); }
functions
bool tcp_nagle_check(const struct tcp_sock *tp, const struct sk_buff *skb, unsigned int mss_now, int nonagle) { return skb->len < mss_now && ((nonagle & TCP_NAGLE_CORK) || (!nonagle && tp->packets_out && tcp_minshall_check(tp))); }
functions
bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb, unsigned int cur_mss, int nonagle) { /* Nagle rule does not apply to frames, which sit in the middle of the * write_queue (they have no chances to get new data). * * This is implemented in the callers, where they modify the 'nonagle' * argument based upon the location of SKB in the send queue. */ if (nonagle & TCP_NAGLE_PUSH) return true; /* Don't use the nagle rule for urgent data (or for the final FIN). */ if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) || mptcp_is_data_fin(skb)) return true; if (!tcp_nagle_check(tp, skb, cur_mss, nonagle)) return true; return false; }
functions
bool tcp_snd_wnd_test(const struct tcp_sock *tp, const struct sk_buff *skb, unsigned int cur_mss) { u32 end_seq = TCP_SKB_CB(skb)->end_seq; if (skb->len > cur_mss) end_seq = TCP_SKB_CB(skb)->seq + cur_mss; return !after(end_seq, tcp_wnd_end(tp)); }
functions
int tcp_snd_test(const struct sock *sk, struct sk_buff *skb, unsigned int cur_mss, int nonagle) { const struct tcp_sock *tp = tcp_sk(sk); unsigned int cwnd_quota; tcp_init_tso_segs(sk, skb, cur_mss); if (!tcp_nagle_test(tp, skb, cur_mss, nonagle)) return 0; cwnd_quota = tcp_cwnd_test(tp, skb); if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss)) cwnd_quota = 0; return cwnd_quota; }
functions
bool tcp_may_send_now(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb = tcp_send_head(sk); return skb && tcp_snd_test(sk, skb, tcp_current_mss(sk), (tcp_skb_is_last(sk, skb) ? tp->nonagle : TCP_NAGLE_PUSH)); }
functions
int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, unsigned int mss_now, gfp_t gfp) { struct sk_buff *buff; int nlen = skb->len - len; u8 flags; if (tcp_sk(sk)->mpc && mptcp_is_data_seq(skb)) return mptso_fragment(sk, skb, len, mss_now, gfp, 0); /* All of a TSO frame must be composed of paged data. */ if (skb->len != skb->data_len) return tcp_fragment(sk, skb, len, mss_now); buff = sk_stream_alloc_skb(sk, 0, gfp); if (unlikely(buff == NULL)) return -ENOMEM; sk->sk_wmem_queued += buff->truesize; sk_mem_charge(sk, buff->truesize); buff->truesize += nlen; skb->truesize -= nlen; /* Correct the sequence numbers. */ TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; /* PSH and FIN should only be set in the second packet. */ flags = TCP_SKB_CB(skb)->tcp_flags; TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); TCP_SKB_CB(buff)->tcp_flags = flags; /* This packet was never sent out yet, so no SACK bits. */ TCP_SKB_CB(buff)->sacked = 0; buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL; skb_split(skb, buff, len); /* Fix up tso_factor for both original and new SKB. */ tcp_set_skb_tso_segs(sk, skb, mss_now); tcp_set_skb_tso_segs(sk, buff, mss_now); /* Link BUFF into the send queue. */ skb_header_release(buff); tcp_insert_write_queue_after(skb, buff, sk); return 0; }
functions
bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); struct sock *meta_sk = tp->mpc ? mptcp_meta_sk(sk) : sk; struct tcp_sock *meta_tp = tcp_sk(meta_sk); const struct inet_connection_sock *icsk = inet_csk(sk); u32 send_win, cong_win, limit, in_flight; int win_divisor; if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) || mptcp_is_data_fin(skb)) goto send_now; if (icsk->icsk_ca_state != TCP_CA_Open) goto send_now; /* Defer for less than two clock ticks. */ if (meta_tp->tso_deferred && (((u32)jiffies << 1) >> 1) - (meta_tp->tso_deferred >> 1) > 1) goto send_now; in_flight = tcp_packets_in_flight(tp); BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight)); if (!tp->mpc) send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; else /* We need to evaluate the available space in the sending window * at the subflow level. However, the subflow seq has not yet * been set. Nevertheless we know that the caller will set it to * write_seq. */ send_win = tcp_wnd_end(tp) - tp->write_seq; /* From in_flight test above, we know that cwnd > in_flight. */ cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache; limit = min(send_win, cong_win); /* If a full-sized TSO skb can be sent, do it. */ if (limit >= min_t(unsigned int, sk->sk_gso_max_size, sk->sk_gso_max_segs * tp->mss_cache)) goto send_now; /* Middle in queue won't get any more data, full sendable already? */ if ((skb != tcp_write_queue_tail(meta_sk)) && (limit >= skb->len)) goto send_now; win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor); if (win_divisor) { u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache); /* If at least some fraction of a window is available, * just use it. */ chunk /= win_divisor; if (limit >= chunk) goto send_now; }
functions
int tcp_mtu_probe(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); struct sk_buff *skb, *nskb, *next; int len; int probe_size; int size_needed; int copy; int mss_now; /* Not currently probing/verifying, * not in recovery, * have enough cwnd, and * not SACKing (the variable headers throw things off) */ if (!icsk->icsk_mtup.enabled || icsk->icsk_mtup.probe_size || inet_csk(sk)->icsk_ca_state != TCP_CA_Open || tp->snd_cwnd < 11 || tp->rx_opt.num_sacks || tp->rx_opt.dsack) return -1; /* Very simple search strategy: just double the MSS. */ mss_now = tcp_current_mss(sk); probe_size = 2 * tp->mss_cache; size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache; if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) { /* TODO: set timer for probe_converge_event */ return -1; }
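Probe sizing with typical numbers: the probe is two current segments, and size_needed adds a (reordering + 1)-segment cushion so the stack can still detect a lost probe. Standalone:

#include <stdio.h>

/* Worked sizing example for tcp_mtu_probe(): probe_size doubles the
 * current segment, size_needed adds (reordering + 1) extra segments. */
int main(void)
{
    int mss_cache = 1400, reordering = 3;
    int probe_size  = 2 * mss_cache;                             /* 2800 */
    int size_needed = probe_size + (reordering + 1) * mss_cache; /* 8400 */
    printf("probe %d, needed %d\n", probe_size, size_needed);
    return 0;
}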
functions
bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, int push_one, gfp_t gfp) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; unsigned int tso_segs, sent_pkts; int cwnd_quota; int result; if (is_meta_sk(sk)) return mptcp_write_xmit(sk, mss_now, nonagle, push_one, gfp); sent_pkts = 0; if (!push_one) { /* Do MTU probing. */ result = tcp_mtu_probe(sk); if (!result) { return false; }
functions
else if (result > 0) { sent_pkts = 1; }
functions
bool tcp_schedule_loss_probe(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); u32 timeout, tlp_time_stamp, rto_time_stamp; u32 rtt = tp->srtt >> 3; if (WARN_ON(icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS)) return false; /* No consecutive loss probes. */ if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) { tcp_rearm_rto(sk); return false; }
functions
void tcp_send_loss_probe(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; int pcount; int mss = tcp_current_mss(sk); int err = -1; if (tcp_send_head(sk) != NULL) { err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC); goto rearm_timer; }
functions
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, int nonagle) { /* If we are closed, the bytes will have to remain here. * In time closedown will finish, we empty the write queue and * all will be happy. */ if (unlikely(sk->sk_state == TCP_CLOSE)) return; if (tcp_write_xmit(sk, cur_mss, nonagle, 0, sk_gfp_atomic(sk, GFP_ATOMIC))) tcp_check_probe_timer(sk); }
functions
void tcp_push_one(struct sock *sk, unsigned int mss_now) { struct sk_buff *skb = tcp_send_head(sk); BUG_ON(!skb || skb->len < mss_now); tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation); }
functions
u32 __tcp_select_window(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); /* MSS for the peer's data. Previous versions used mss_clamp * here. I don't know if the value based on our guesses * of peer's MSS is better for the performance. It's more correct * but may be worse for the performance because of rcv_mss * fluctuations. --SAW 1998/11/1 */ int mss = icsk->icsk_ack.rcv_mss; int free_space = tcp_space(sk); int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk)); int window; if (tp->mpc) return __mptcp_select_window(sk); if (mss > full_space) mss = full_space; if (free_space < (full_space >> 1)) { icsk->icsk_ack.quick = 0; if (sk_under_memory_pressure(sk)) tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); if (free_space < mss) return 0; }