| type | content |
|---|---|
functions | else if(rq->bRequest == USBRQ_CLEAR_FEATURE || rq->bRequest == USBRQ_SET_FEATURE){ /* 1|3 */
if(rq->wValue.bytes[0] == 0 && rq->wIndex.bytes[0] == 0x81){ /* feature 0 == HALT for endpoint == 1 */
usbTxLen1 = rq->bRequest == USBRQ_CLEAR_FEATURE ? USBPID_NAK : USBPID_STALL;... |
functions | USB_CFG_IMPLEMENT_FN_WRITE
if(replyLen == 0xff){ /* use user-supplied read/write function */
if((rq->bmRequestType & USBRQ_DIR_MASK) == USBRQ_DIR_DEVICE_TO_HOST){
replyLen = rq->wLength.bytes[0]; /* IN transfers only */
} |
functions | else if(rval != 0){ /* This was the final packet */
replyLen = 0; /* answer with a zero-sized data packet */
} |
functions | void usbBuildTxBlock(void)
{
uchar wantLen, len, txLen, token;
wantLen = usbMsgLen;
if(wantLen > 8)
wantLen = 8;
usbMsgLen -= wantLen;
token = USBPID_DATA1;
if(usbMsgFlags & USB_FLG_TX_PACKET)
token = USBPID_DATA0;
usbMsgFlags++;
len = usbRead(usbTxBuf + 1, wantLen);
i... |
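The toggle logic visible above alternates DATA0/DATA1 PIDs per packet: the low bit of `usbMsgFlags` selects the token, and the increment flips it for the next packet. A minimal standalone sketch of that mechanism, assuming the standard USB PID byte encodings and that `USB_FLG_TX_PACKET` is the low bit (both assumptions, not quoted from this driver):

```c
#include <stdint.h>

/* Standard USB PID byte values (PID nibble plus its complement),
 * assumed here rather than taken from the snippet above. */
#define USBPID_DATA0 0xC3
#define USBPID_DATA1 0x4B
#define USB_FLG_TX_PACKET 0x01  /* assumed: low bit tracks the data toggle */

static uint8_t usb_msg_flags;

/* Mirror of the toggle in usbBuildTxBlock(): pick the PID from the low
 * flag bit, then increment so the next packet uses the other PID. */
static uint8_t next_data_pid(void)
{
    uint8_t token = (usb_msg_flags & USB_FLG_TX_PACKET) ? USBPID_DATA0
                                                        : USBPID_DATA1;
    usb_msg_flags++;  /* flip the toggle for the following packet */
    return token;
}
```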
functions | uchar isNotSE0(void)
{
uchar rval;
/* We want to do
* return (USBIN & USBMASK);
* here, but the compiler does int-expansion acrobatics.
* We can avoid this by assigning to a char-sized variable.
*/
rval = USBIN & USBMASK;
return rval;
} |
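The comment in `isNotSE0()` refers to C's integer promotions: returning `USBIN & USBMASK` directly is widened to `int`, which on an 8-bit AVR costs extra instructions to clear the high byte. A minimal sketch of the same trick with a hypothetical register macro (the address and mask below are placeholders, not the driver's):

```c
#include <stdint.h>

/* Hypothetical 8-bit I/O register and line mask, standing in for
 * USBIN/USBMASK from the driver above. */
#define IO_REG  (*(volatile uint8_t *)0x36)
#define IO_MASK 0x06

/* Routing the masked read through a uint8_t keeps the value in one
 * 8-bit register; returning the expression directly would promote it
 * to int and typically emit an extra high-byte clear on AVR. */
uint8_t io_lines_active(void)
{
    uint8_t rval = IO_REG & IO_MASK;
    return rval;
}
```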
functions | void usbPoll(void)
{
schar len;
uchar i;
if((len = usbRxLen) > 0){
/* We could check CRC16 here -- but ACK has already been sent anyway. If you
* need data integrity checks with this driver, check the CRC in your app
* code and report errors back to the host. Since the ACK was already sent,
* retries must b... |
functions | void usbInit(void)
{
#if USB_INTR_CFG_SET != 0
USB_INTR_CFG |= USB_INTR_CFG_SET;
#endif
#if USB_INTR_CFG_CLR != 0
USB_INTR_CFG &= ~(USB_INTR_CFG_CLR);
#endif
USB_INTR_ENABLE |= (1 << USB_INTR_ENABLE_BIT);
#if USB_CFG_HAVE_INTRIN_ENDPOINT
USB_SET_DATATOKEN1(USB_INITIAL_DATATOKEN); /* reset data toggling... |
includes |
#include <assert.h> |
includes | #include <vlc_common.h> |
includes | #include <vlc_plugin.h> |
includes | #include <vlc_demux.h> |
defines |
#define FRAME_LENGTH 28 /* samples per frame */ |
structs | struct demux_sys_t
{
es_out_id_t *p_es;
int64_t i_data_offset;
unsigned int i_data_size;
unsigned int i_block_frames;
unsigned int i_frame_size;
unsigned int i_bitrate;
date_t pts;
}; |
functions | int Open( vlc_object_t * p_this )
{
demux_t *p_demux = (demux_t*)p_this;
const uint8_t *peek;
/* XA file heuristic */
if( vlc_stream_Peek( p_demux->s, &peek, 10 ) < 10 )
return VLC_EGENERIC;
if( memcmp( peek, "XAI", 4 ) && memcmp( peek, "XAJ", 4 ) )
return VLC_EGENERIC;
if( ... |
functions | int Demux( demux_t *p_demux )
{
demux_sys_t *p_sys = p_demux->p_sys;
block_t *p_block;
int64_t i_offset;
unsigned i_frames = p_sys->i_block_frames;
i_offset = vlc_stream_Tell( p_demux->s );
if( p_sys->i_data_size > 0 &&
i_offset >= p_sys->i_data_offset + p_sys->i_data_size )... |
functions | void Close ( vlc_object_t * p_this )
{
demux_sys_t *p_sys = ((demux_t *)p_this)->p_sys;
free( p_sys );
} |
functions | int Control( demux_t *p_demux, int i_query, va_list args )
{
demux_sys_t *p_sys = p_demux->p_sys;
return demux_vaControlHelper( p_demux->s, p_sys->i_data_offset,
p_sys->i_data_size ? p_sys->i_data_offset
+ p_sys->i_data_size : -1,
... |
includes | #include <string.h> |
includes | #include <stdlib.h> |
functions | int cursor_visibility(int tid, int sid, char visibility) {
if(SCR(tid, sid).curs_invisible != !visibility) {
SCR(tid, sid).curs_invisible = !visibility;
if(!record_update(tid, sid, visibility ? UPD_CURS : UPD_CURS_INVIS)) {
if(ltm_curerr.err_no == ESRCH) return 0;
else return -1;
} |
functions | int cursor_abs_move(int tid, int sid, enum axis axis, ushort num) {
int ret = 0;
uint old;
SCR(tid, sid).curs_prev_not_set = 0;
switch(axis) {
case X:
old = SCR(tid, sid).cursor.x;
if(num < SCR(tid, sid).cols)
SCR(tid, sid).cursor.x = num;
else
SCR(tid, sid).cursor.x = SCR(tid, sid).cols-1;
... |
functions | int cursor_rel_move(int tid, int sid, enum direction direction, ushort num) {
int ret = 0;
if(!num) return 0;
switch(direction) {
case UP:
return cursor_abs_move(tid, sid, Y, num <= SCR(tid, sid).cursor.y ? SCR(tid, sid).cursor.y - num : 0);
case DOWN:
return cursor_abs_move(tid, sid, Y, SCR(tid, sid).cu... |
functions | int cursor_horiz_tab(int tid, int sid) {
/* don't hardcode 8 here in the future? */
char dist = 8 - (SCR(tid, sid).cursor.x % 8);
return cursor_rel_move(tid, sid, RIGHT, dist);
} |
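The expression `8 - (x % 8)` is the distance to the next multiple-of-8 tab stop; note that a cursor already sitting on a stop advances a full 8 columns rather than 0. A self-contained check of that arithmetic, independent of the surrounding codebase:

```c
#include <assert.h>

/* Distance to the next tab stop with a fixed width of 8, mirroring
 * the arithmetic in cursor_horiz_tab() above. */
static int tab_distance(int x)
{
    return 8 - (x % 8);
}

int main(void)
{
    assert(tab_distance(0) == 8);  /* column 0 jumps to column 8      */
    assert(tab_distance(3) == 5);  /* column 3 jumps to column 8      */
    assert(tab_distance(8) == 8);  /* a stop jumps to the next stop   */
    return 0;
}
```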
functions | int cursor_down(int tid, int sid) {
if(SCR(tid, sid).cursor.y == SCR(tid, sid).lines-1 && SCR(tid, sid).autoscroll)
return screen_scroll(tid, sid);
else
return cursor_rel_move(tid, sid, DOWN, 1);
} |
functions | int cursor_vertical_tab(int tid, int sid) {
if(cursor_down(tid, sid) == -1) return -1;
bitarr_unset_index(SCR(tid, sid).wrapped, SCR(tid, sid).cursor.y);
return 0;
} |
functions | int cursor_line_break(int tid, int sid) {
if(cursor_vertical_tab(tid, sid) == -1) return -1;
if(cursor_abs_move(tid, sid, X, 0) == -1) return -1;
return 0;
} |
functions | int cursor_wrap(int tid, int sid) {
if(cursor_down(tid, sid) == -1) return -1;
bitarr_set_index(SCR(tid, sid).wrapped, SCR(tid, sid).cursor.y);
if(cursor_abs_move(tid, sid, X, 0) == -1) return -1;
return 0;
} |
functions | int cursor_advance(int tid, int sid) {
if(SCR(tid, sid).cursor.x == SCR(tid, sid).cols-1) {
if(!SCR(tid, sid).curs_prev_not_set) {
SCR(tid, sid).curs_prev_not_set = 1;
return 0;
} |
includes |
#include <net/mptcp.h> |
includes | #include <net/ipv6.h> |
includes | #include <net/tcp.h> |
includes |
#include <linux/compiler.h> |
includes | #include <linux/gfp.h> |
includes | #include <linux/module.h> |
defines |
#define pr_fmt(fmt) "TCP: " fmt |
defines |
#define OPTION_SACK_ADVERTISE (1 << 0) |
defines | #define OPTION_TS (1 << 1) |
defines | #define OPTION_MD5 (1 << 2) |
defines | #define OPTION_WSCALE (1 << 3) |
defines | #define OPTION_FAST_OPEN_COOKIE (1 << 8) |
defines |
#define TCP_DEFERRED_ALL ((1UL << TCP_TSQ_DEFERRED) | \ |
structs | struct tsq_tasklet {
struct tasklet_struct tasklet;
struct list_head head; /* queue of tcp sockets */
}; |
functions | void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
unsigned int prior_packets = tp->packets_out;
tcp_advance_send_head(sk, skb);
tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
tp->packets_out += tcp_skb_pcount(s... |
functions | __u32 tcp_acceptable_seq(const struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
if (!before(tcp_wnd_end(tp), tp->snd_nxt))
return tp->snd_nxt;
else
return tcp_wnd_end(tp);
} |
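`tcp_acceptable_seq()` is a minimum in 32-bit sequence space: it reports `snd_nxt` unless that already lies beyond the right edge of the send window. A small sketch under the usual wraparound-safe comparison, with `before()` expanded to its signed-difference form:

```c
#include <stdbool.h>
#include <stdint.h>

/* before(a, b) in the kernel is a wraparound-safe "a precedes b" on
 * 32-bit sequence numbers; this is the standard formulation. */
static bool seq_before(uint32_t a, uint32_t b)
{
    return (int32_t)(a - b) < 0;
}

/* Sequence-space minimum, as in tcp_acceptable_seq(): use snd_nxt for
 * an outgoing segment unless it passes the send-window edge. */
static uint32_t acceptable_seq(uint32_t snd_nxt, uint32_t wnd_end)
{
    return seq_before(wnd_end, snd_nxt) ? wnd_end : snd_nxt;
}
```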
functions | __u16 tcp_advertise_mss(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
const struct dst_entry *dst = __sk_dst_get(sk);
int mss = tp->advmss;
if (dst) {
unsigned int metric = dst_metric_advmss(dst);
if (metric < mss) {
mss = metric;
tp->advmss = mss;
} |
functions | void tcp_cwnd_restart(struct sock *sk, const struct dst_entry *dst)
{
struct tcp_sock *tp = tcp_sk(sk);
s32 delta = tcp_time_stamp - tp->lsndtime;
u32 restart_cwnd = tcp_init_cwnd(tp, dst);
u32 cwnd = tp->snd_cwnd;
tcp_ca_event(sk, CA_EVENT_CWND_RESTART);
tp->snd_ssthresh = tcp_current_ssthresh(sk);
restart_cw... |
functions | void tcp_event_data_sent(struct tcp_sock *tp,
struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
const u32 now = tcp_time_stamp;
const struct dst_entry *dst = __sk_dst_get(sk);
if (sysctl_tcp_slow_start_after_idle &&
(!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
tcp... |
functions | void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
tcp_dec_quickack_mode(sk, pkts);
inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
} |
functions | u32 tcp_default_init_rwnd(u32 mss)
{
/* Initial receive window should be twice of TCP_INIT_CWND to
* enable proper sending of new unsent data during fast recovery
* (RFC 3517, Section 4, NextSeg() rule (2)). Further place a
* limit when mss is larger than 1460.
*/
u32 init_rwnd = TCP_INIT_CWND * 2;
if (mss ... |
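The comment above fixes the starting point at twice `TCP_INIT_CWND` and mentions a limit for MSS values above 1460; the row is truncated before the clamp itself. A hedged sketch of the rule the comment describes, where the scaling step is an assumption rather than a quote from this tree:

```c
#define TCP_INIT_CWND 10

/* Sketch of the initial-receive-window rule described in the comment
 * above. The snippet is truncated before the clamp, so the body of the
 * if-branch here is assumed: scale by 1460/mss and keep at least two
 * segments, which is the conventional form of this limit. */
static unsigned int default_init_rwnd(unsigned int mss)
{
    unsigned int init_rwnd = TCP_INIT_CWND * 2;

    if (mss > 1460) {
        init_rwnd = (init_rwnd * 1460) / mss;
        if (init_rwnd < 2)
            init_rwnd = 2;
    }
    return init_rwnd;
}
```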
functions | void tcp_select_initial_window(int __space, __u32 mss,
__u32 *rcv_wnd, __u32 *window_clamp,
int wscale_ok, __u8 *rcv_wscale,
__u32 init_rcv_wnd, const struct sock *sk)
{
unsigned int space;
if (tcp_sk(sk)->mpc)
mptcp_select_initial_window(&__space, window_clamp, sk);
space = (__spac... |
functions | u16 tcp_select_window(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
/* The window must never shrink at the meta-level. At the subflow we
* have to allow this. Otherwise we may announce a window too large
* for the current meta-level sk_rcvbuf.
*/
u32 cur_win = tcp_receive_window(tp->mpc ? tcp_sk(mptcp_m... |
functions | void TCP_ECN_send_synack(const struct tcp_sock *tp, struct sk_buff *skb)
{
TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
if (!(tp->ecn_flags & TCP_ECN_OK))
TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
} |
functions | void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
tp->ecn_flags = 0;
if (sock_net(sk)->ipv4.sysctl_tcp_ecn == 1) {
TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
tp->ecn_flags = TCP_ECN_OK;
} |
functions | void
TCP_ECN_make_synack(const struct request_sock *req, struct tcphdr *th)
{
if (inet_rsk(req)->ecn_ok)
th->ece = 1;
} |
functions | void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
int tcp_header_len)
{
struct tcp_sock *tp = tcp_sk(sk);
if (tp->ecn_flags & TCP_ECN_OK) {
/* Not-retransmitted data segment: set ECT and inject CWR. */
if (skb->len != tcp_header_len &&
!before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
INET_ECN_xmi... |
functions | void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
{
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum = 0;
TCP_SKB_CB(skb)->tcp_flags = flags;
TCP_SKB_CB(skb)->sacked = 0;
skb_shinfo(skb)->gso_segs = 1;
skb_shinfo(skb)->gso_size = 0;
skb_shinfo(skb)->gso_type = 0;
TCP_SKB_CB(skb)->seq = seq;
if (... |
functions | bool tcp_urg_mode(const struct tcp_sock *tp)
{
return tp->snd_una != tp->snd_up;
} |
functions | void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
struct tcp_out_options *opts, struct sk_buff *skb)
{
u16 options = opts->options; /* mungable copy */
if (unlikely(OPTION_MD5 & options)) {
*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
... |
functions | int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
struct tcp_out_options *opts,
struct tcp_md5sig_key **md5)
{
struct tcp_sock *tp = tcp_sk(sk);
unsigned int remaining = MAX_TCP_OPTION_SPACE;
struct tcp_fastopen_request *fastopen = tp->fastopen_req;
#ifdef CONFIG_TCP_MD5SIG
*md5 = tp->af_specific->... |
functions | int tcp_synack_options(struct sock *sk,
struct request_sock *req,
unsigned int mss, struct sk_buff *skb,
struct tcp_out_options *opts,
struct tcp_md5sig_key **md5,
struct tcp_fastopen_cookie *foc)
{
struct inet_request_sock *ireq = inet_rsk(req);
unsigned int remaining = MAX_TCP_OPT... |
functions | int tcp_established_options(struct sock *sk, struct sk_buff *skb,
struct tcp_out_options *opts,
struct tcp_md5sig_key **md5)
{
struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
struct tcp_sock *tp = tcp_sk(sk);
unsigned int size = 0;
unsigned int eff_sacks;
#ifdef CONFIG_TCP_MD5SIG
*md5 = tp->af_sp... |
functions | void tcp_tsq_handler(struct sock *sk)
{
if ((1 << sk->sk_state) &
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
TCPF_CLOSE_WAIT | TCPF_LAST_ACK))
tcp_write_xmit(sk, tcp_current_mss(sk), 0, 0, GFP_ATOMIC);
} |
functions | void tcp_tasklet_func(unsigned long data)
{
struct tsq_tasklet *tsq = (struct tsq_tasklet *)data;
LIST_HEAD(list);
unsigned long flags;
struct list_head *q, *n;
struct tcp_sock *tp;
struct sock *sk, *meta_sk;
local_irq_save(flags);
list_splice_init(&tsq->head, &list);
local_irq_restore(flags);
list_for_each... |
functions | void tcp_release_cb(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
unsigned long flags, nflags;
/* perform an atomic operation only if at least one flag is set */
do {
flags = tp->tsq_flags;
if (!(flags & TCP_DEFERRED_ALL))
return;
nflags = flags & ~TCP_DEFERRED_ALL;
} |
functions | void __init tcp_tasklet_init(void)
{
int i;
for_each_possible_cpu(i) {
struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);
INIT_LIST_HEAD(&tsq->head);
tasklet_init(&tsq->tasklet,
tcp_tasklet_func,
(unsigned long)tsq);
} |
functions | void tcp_wfree(struct sk_buff *skb)
{
struct sock *sk = skb->sk;
struct tcp_sock *tp = tcp_sk(sk);
if (test_and_clear_bit(TSQ_THROTTLED, &tp->tsq_flags) &&
!test_and_set_bit(TSQ_QUEUED, &tp->tsq_flags)) {
unsigned long flags;
struct tsq_tasklet *tsq;
/* Keep a ref on socket.
* This last ref will be r... |
functions | int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
gfp_t gfp_mask)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
struct inet_sock *inet;
struct tcp_sock *tp;
struct tcp_skb_cb *tcb;
struct tcp_out_options opts;
unsigned int tcp_options_size, tcp_header_size;
struct tcp... |
functions | void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
/* Advance write_seq and place onto the write_queue. */
tp->write_seq = TCP_SKB_CB(skb)->end_seq;
skb_header_release(skb);
tcp_add_write_queue_tail(sk, skb);
sk->sk_wmem_queued += skb->truesize;
sk_mem_charge(sk, skb->t... |
functions | void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
unsigned int mss_now)
{
if (skb->len <= mss_now || (is_meta_sk(sk) && !mptcp_sk_can_gso(sk)) ||
(!is_meta_sk(sk) && !sk_can_gso(sk)) || skb->ip_summed == CHECKSUM_NONE) {
/* Avoid the costly divide in the normal
* non-TSO case.
*/
... |
functions | void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb,
int decr)
{
struct tcp_sock *tp = tcp_sk(sk);
if (!tp->sacked_out || tcp_is_reno(tp))
return;
if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
tp->fackets_out -= decr;
} |
functions | void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
{
struct tcp_sock *tp = tcp_sk(sk);
tp->packets_out -= decr;
if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
tp->sacked_out -= decr;
if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
tp->retrans_out -= decr;
if (TCP_SKB_CB(skb)-... |
functions | int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
unsigned int mss_now)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *buff;
int nsize, old_factor;
int nlen;
u8 flags;
if (tcp_sk(sk)->mpc && mptcp_is_data_seq(skb))
return mptcp_fragment(sk, skb, len, mss_now, 0);
if (WARN_ON(len > skb->len))
... |
functions | void __pskb_trim_head(struct sk_buff *skb, int len)
{
int i, k, eat;
eat = min_t(int, len, skb_headlen(skb));
if (eat) {
__skb_pull(skb, eat);
len -= eat;
if (!len)
return;
} |
functions | int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
{
if (tcp_sk(sk)->mpc && !is_meta_sk(sk) && mptcp_is_data_seq(skb))
return mptcp_trim_head(sk, skb, len);
if (skb_unclone(skb, GFP_ATOMIC))
return -ENOMEM;
__pskb_trim_head(skb, len);
TCP_SKB_CB(skb)->seq += len;
skb->ip_summed = CHECKSUM_PART... |
functions | int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
{
const struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
int mss_now;
/* Calculate base mss without TCP options:
It is MMS_S - sizeof(tcphdr) of rfc1122
*/
mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(str... |
functions | int tcp_mtu_to_mss(struct sock *sk, int pmtu)
{
/* Subtract TCP options size, not including SACKs */
return __tcp_mtu_to_mss(sk, pmtu) -
(tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
} |
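The two helpers above split the MTU-to-MSS conversion in half: `__tcp_mtu_to_mss()` strips the network and base TCP headers from the path MTU, and `tcp_mtu_to_mss()` then removes the per-segment option bytes carried in `tcp_header_len`. A worked example with the usual IPv4 values (the concrete numbers below are assumptions for illustration):

```c
#include <stdio.h>

/* Worked example of the MTU -> MSS arithmetic: for a 1500-byte path
 * MTU over plain IPv4, the base MSS is 1460, and carrying timestamps
 * (12 option bytes on every segment) leaves 1448 bytes of payload. */
int main(void)
{
    int pmtu = 1500;          /* typical Ethernet path MTU            */
    int net_header_len = 20;  /* IPv4 header without IP options       */
    int tcphdr_len = 20;      /* sizeof(struct tcphdr)                */
    int ts_option_len = 12;   /* timestamp option, aligned            */

    int base_mss = pmtu - net_header_len - tcphdr_len;   /* 1460 */
    int mss = base_mss - ts_option_len;                  /* 1448 */

    printf("base mss %d, mss with timestamps %d\n", base_mss, mss);
    return 0;
}
```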
functions | int tcp_mss_to_mtu(struct sock *sk, int mss)
{
const struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
int mtu;
mtu = mss +
tp->tcp_header_len +
icsk->icsk_ext_hdr_len +
icsk->icsk_af_ops->net_header_len;
/* IPv6 adds a frag_hdr in case RTAX_FEATURE_AL... |
functions | void tcp_mtup_init(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
icsk->icsk_af_ops->net_header_len;
icsk->icsk_mtup.... |
functions | int tcp_sync_mss(struct sock *sk, u32 pmtu)
{
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
int mss_now;
if (icsk->icsk_mtup.search_high > pmtu)
icsk->icsk_mtup.search_high = pmtu;
mss_now = tcp_mtu_to_mss(sk, pmtu);
mss_now = tcp_bound_to_half_wnd(tp, mss_now);
/* And ... |
functions | int tcp_current_mss(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
const struct dst_entry *dst = __sk_dst_get(sk);
u32 mss_now;
unsigned int header_len;
struct tcp_out_options opts;
struct tcp_md5sig_key *md5;
mss_now = tp->mss_cache;
if (dst) {
u32 mtu = dst_mtu(dst);
if (mtu != inet_csk(sk)-... |
functions | void tcp_cwnd_validate(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
if (tp->packets_out >= tp->snd_cwnd) {
/* Network is fed fully. */
tp->snd_cwnd_used = 0;
tp->snd_cwnd_stamp = tcp_time_stamp;
} |
functions | int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
unsigned int mss_now, unsigned int max_segs)
{
const struct tcp_sock *tp = tcp_sk(sk);
const struct sock *meta_sk = tp->mpc ? mptcp_meta_sk(sk) : sk;
u32 needed, window, max_len;
if (!tp->mpc)
window = tcp_wnd_end(tp) - TCP_SKB_CB(skb... |
functions | int tcp_cwnd_test(const struct tcp_sock *tp,
const struct sk_buff *skb)
{
u32 in_flight, cwnd;
/* Don't be strict about the congestion window for the final FIN. */
if (skb &&
((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) || mptcp_is_data_fin(skb)) &&
tcp_skb_pcount(skb) == 1)
return 1;
in_flight =... |
functions | int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
unsigned int mss_now)
{
int tso_segs = tcp_skb_pcount(skb);
if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
tcp_set_skb_tso_segs(sk, skb, mss_now);
tso_segs = tcp_skb_pcount(skb);
} |
functions | bool tcp_minshall_check(const struct tcp_sock *tp)
{
return after(tp->snd_sml, tp->snd_una) &&
!after(tp->snd_sml, tp->snd_nxt);
} |
functions | bool tcp_nagle_check(const struct tcp_sock *tp,
const struct sk_buff *skb,
unsigned int mss_now, int nonagle)
{
return skb->len < mss_now &&
((nonagle & TCP_NAGLE_CORK) ||
(!nonagle && tp->packets_out && tcp_minshall_check(tp)));
} |
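Read as a predicate, `tcp_nagle_check()` holds back a sub-MSS segment when the socket is corked, or when plain Nagle is active, data is already in flight, and the last small segment is still unacknowledged (the Minshall condition from the helper above it). A standalone restatement with the inputs flattened to booleans, which the kernel instead derives from the `tcp_sock`:

```c
#include <stdbool.h>

/* Standalone restatement of tcp_nagle_check(): true means the sub-MSS
 * segment should be held back. All inputs are plain flags here; in the
 * kernel, minshall_small_unacked is tcp_minshall_check(tp). */
static bool nagle_blocks(unsigned int skb_len, unsigned int mss,
                         bool corked, bool nonagle,
                         bool packets_out, bool minshall_small_unacked)
{
    return skb_len < mss &&
           (corked ||
            (!nonagle && packets_out && minshall_small_unacked));
}
```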
functions | bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
unsigned int cur_mss, int nonagle)
{
/* Nagle rule does not apply to frames, which sit in the middle of the
* write_queue (they have no chances to get new data).
*
* This is implemented in the callers, where they modify the 'nonagle'... |
functions | bool tcp_snd_wnd_test(const struct tcp_sock *tp, const struct sk_buff *skb,
unsigned int cur_mss)
{
u32 end_seq = TCP_SKB_CB(skb)->end_seq;
if (skb->len > cur_mss)
end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
return !after(end_seq, tcp_wnd_end(tp));
} |
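`tcp_snd_wnd_test()` asks whether the end of the segment, clamped to one MSS for oversized skbs, fits inside the offered send window; `after()` is the wraparound-safe sequence comparison. A sketch with the comparison expanded:

```c
#include <stdbool.h>
#include <stdint.h>

/* after(a, b) is the wraparound-safe "a comes later than b" on 32-bit
 * sequence numbers, in its standard signed-difference form. */
static bool seq_after(uint32_t a, uint32_t b)
{
    return (int32_t)(a - b) > 0;
}

/* Sketch of tcp_snd_wnd_test(): the segment may be sent if its end
 * sequence, capped at one MSS when the skb is oversized, does not
 * pass the right edge of the send window (wnd_end). */
static bool snd_wnd_test(uint32_t seq, uint32_t end_seq,
                         uint32_t skb_len, uint32_t cur_mss,
                         uint32_t wnd_end)
{
    if (skb_len > cur_mss)
        end_seq = seq + cur_mss;
    return !seq_after(end_seq, wnd_end);
}
```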
functions | int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
unsigned int cur_mss, int nonagle)
{
const struct tcp_sock *tp = tcp_sk(sk);
unsigned int cwnd_quota;
tcp_init_tso_segs(sk, skb, cur_mss);
if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
return 0;
cwnd_quota = tcp_cwnd_test(tp, skb);
if (cwnd_... |
functions | bool tcp_may_send_now(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb = tcp_send_head(sk);
return skb &&
tcp_snd_test(sk, skb, tcp_current_mss(sk),
(tcp_skb_is_last(sk, skb) ?
tp->nonagle : TCP_NAGLE_PUSH));
} |
functions | int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
unsigned int mss_now, gfp_t gfp)
{
struct sk_buff *buff;
int nlen = skb->len - len;
u8 flags;
if (tcp_sk(sk)->mpc && mptcp_is_data_seq(skb))
return mptso_fragment(sk, skb, len, mss_now, gfp, 0);
/* All of a TSO frame must be composed of paged... |
functions | bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sock *meta_sk = tp->mpc ? mptcp_meta_sk(sk) : sk;
struct tcp_sock *meta_tp = tcp_sk(meta_sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
u32 send_win, cong_win, limit, in_flight;
int win_divisor... |
functions | int tcp_mtu_probe(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
struct sk_buff *skb, *nskb, *next;
int len;
int probe_size;
int size_needed;
int copy;
int mss_now;
/* Not currently probing/verifying,
* not in recovery,
* have enough cwnd, and
* no... |
functions | bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
int push_one, gfp_t gfp)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
unsigned int tso_segs, sent_pkts;
int cwnd_quota;
int result;
//printf("mptcp?::%d, %d\n", sk->__sk_common.skc_daddr, sk->__sk_common.skc_rcv_saddr);
if... |
functions | else if (result > 0) {
sent_pkts = 1;
} |
functions | bool tcp_schedule_loss_probe(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
u32 timeout, tlp_time_stamp, rto_time_stamp;
u32 rtt = tp->srtt >> 3;
if (WARN_ON(icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS))
return false;
/* No consecutive loss probes. */
... |
functions | void tcp_send_loss_probe(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
int pcount;
int mss = tcp_current_mss(sk);
int err = -1;
if (tcp_send_head(sk) != NULL) {
err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
goto rearm_timer;
} |
functions | void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
int nonagle)
{
/* If we are closed, the bytes will have to remain here.
* In time closedown will finish, we empty the write queue and
* all will be happy.
*/
if (unlikely(sk->sk_state == TCP_CLOSE))
return;
if (tcp_write_xmit(sk... |
functions | void tcp_push_one(struct sock *sk, unsigned int mss_now)
{
struct sk_buff *skb = tcp_send_head(sk);
BUG_ON(!skb || skb->len < mss_now);
tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
} |
functions | u32 __tcp_select_window(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
/* MSS for the peer's data. Previous versions used mss_clamp
* here. I don't know if the value based on our guesses
* of peer's MSS is better for the performance. It's more correct
... |