text
stringlengths
9
39.2M
dir
stringlengths
26
295
lang
stringclasses
185 values
created_date
timestamp[us]
updated_date
timestamp[us]
repo_name
stringlengths
1
97
repo_full_name
stringlengths
7
106
star
int64
1k
183k
len_tokens
int64
1
13.8M
```lua -- Gatekeeper - DDoS protection system. -- -- This program is free software: you can redistribute it and/or modify -- (at your option) any later version. -- -- This program is distributed in the hope that it will be useful, -- but WITHOUT ANY WARRANTY; without even the implied warranty of -- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -- -- along with this program. If not, see <path_to_url module(..., package.seeall) require "gatekeeper/stdcdefs" require "gatekeeper/lpmlib" -- -- C functions exported through FFI -- local ffi = require("ffi") -- Structs ffi.cdef[[ enum gk_flow_state { GK_REQUEST, GK_GRANTED, GK_DECLINED, GK_BPF }; enum protocols { ICMP = 1, TCP = 6, UDP = 17, ICMPV6 = 58, IPV4 = 0x0800, IPV6 = 0x86DD, }; enum icmp_types { ICMP_ECHO_REQUEST_TYPE = 8, }; enum icmp_codes { ICMP_ECHO_REQUEST_CODE = 0, }; enum icmpv6_types { ICMPV6_ECHO_REQUEST_TYPE = 128, }; enum icmpv6_codes { ICMPV6_ECHO_REQUEST_CODE = 0, }; struct rte_ipv4_hdr { uint8_t version_ihl; uint8_t type_of_service; uint16_t total_length; uint16_t packet_id; uint16_t fragment_offset; uint8_t time_to_live; uint8_t next_proto_id; uint16_t hdr_checksum; uint32_t src_addr; uint32_t dst_addr; } __attribute__((__packed__)); struct rte_ipv6_hdr { uint32_t vtc_flow; uint16_t payload_len; uint8_t proto; uint8_t hop_limits; uint8_t src_addr[16]; uint8_t dst_addr[16]; } __attribute__((__packed__)); struct rte_tcp_hdr { uint16_t src_port; uint16_t dst_port; uint32_t sent_seq; uint32_t recv_ack; uint8_t data_off; uint8_t tcp_flags; uint16_t rx_win; uint16_t cksum; uint16_t tcp_urp; } __attribute__((__packed__)); struct rte_udp_hdr { uint16_t src_port; uint16_t dst_port; uint16_t dgram_len; uint16_t dgram_cksum; } __attribute__((__packed__)); struct rte_icmp_hdr { uint8_t icmp_type; uint8_t icmp_code; uint16_t icmp_cksum; uint16_t icmp_ident; uint16_t icmp_seq_nb; } __attribute__((__packed__)); struct icmpv6_hdr { uint8_t icmpv6_type; uint8_t icmpv6_code; uint16_t icmpv6_cksum; } 
__attribute__((__packed__)); struct gt_packet_headers { uint16_t outer_ethertype; uint16_t inner_ip_ver; uint8_t l4_proto; uint8_t priority; uint8_t outer_ecn; uint16_t upper_len; void *l2_hdr; void *outer_l3_hdr; void *inner_l3_hdr; void *l4_hdr; bool frag; /* This struct has hidden fields. */ }; struct ip_flow { uint16_t proto; union { struct { uint32_t src; uint32_t dst; } v4; struct { uint8_t src[16]; uint8_t dst[16]; } v6; } f; }; struct ggu_granted { uint32_t tx_rate_kib_sec; uint32_t cap_expire_sec; uint32_t next_renewal_ms; uint32_t renewal_step_ms; } __attribute__ ((packed)); struct ggu_declined { uint32_t expire_sec; } __attribute__ ((packed)); struct gk_bpf_cookie { uint64_t mem[8]; }; struct ggu_bpf { uint32_t expire_sec; uint8_t program_index; uint8_t reserved; uint16_t cookie_len; struct gk_bpf_cookie cookie; } __attribute__ ((packed)); struct ggu_policy { uint8_t state; struct ip_flow flow; union { struct ggu_granted granted; struct ggu_declined declined; struct ggu_bpf bpf; } params; }; struct granted_params { uint32_t tx_rate_kib_sec; uint32_t next_renewal_ms; uint32_t renewal_step_ms; } __attribute__ ((packed)); struct grantedv2_params { uint32_t tx1_rate_kib_sec; uint32_t tx2_rate_kib_sec; uint32_t next_renewal_ms; uint32_t renewal_step_ms; bool direct_if_possible; } __attribute__ ((packed)); static const unsigned char TCPSRV_MAX_NUM_PORTS = 12; struct tcpsrv_ports { uint16_t p[TCPSRV_MAX_NUM_PORTS]; }; struct tcpsrv_params { uint32_t tx1_rate_kib_sec; uint32_t next_renewal_ms; uint32_t renewal_step_ms:24; uint8_t listening_port_count:4; uint8_t remote_port_count:4; struct tcpsrv_ports ports; } __attribute__ ((packed)); uint16_t gt_cpu_to_be_16(uint16_t x); uint32_t gt_cpu_to_be_32(uint32_t x); uint16_t gt_be_to_cpu_16(uint16_t x); uint32_t gt_be_to_cpu_32(uint32_t x); unsigned int gt_lcore_id(void); ]] c = ffi.C BPF_INDEX_GRANTED = 0 BPF_INDEX_DECLINED = 1 BPF_INDEX_GRANTEDV2 = 2 BPF_INDEX_WEB = 3 BPF_INDEX_TCPSRV = 4 function 
decision_granted_nobpf(policy, tx_rate_kib_sec, cap_expire_sec,
		next_renewal_ms, renewal_step_ms)
	-- Grant the flow using the built-in (non-BPF) granted state.
	policy.state = c.GK_GRANTED
	local granted = policy.params.granted
	granted.tx_rate_kib_sec = tx_rate_kib_sec
	granted.cap_expire_sec = cap_expire_sec
	granted.next_renewal_ms = next_renewal_ms
	granted.renewal_step_ms = renewal_step_ms
	return true
end

-- Decline the flow using the built-in (non-BPF) declined state.
function decision_declined_nobpf(policy, expire_sec)
	policy.state = c.GK_DECLINED
	policy.params.declined.expire_sec = expire_sec
	return false
end

-- Grant the flow through the "granted" BPF program.
-- The grant parameters travel to the BPF program inside the cookie.
function decision_granted(policy, tx_rate_kib_sec, cap_expire_sec,
		next_renewal_ms, renewal_step_ms)
	policy.state = c.GK_BPF
	local bpf = policy.params.bpf
	bpf.expire_sec = cap_expire_sec
	bpf.program_index = BPF_INDEX_GRANTED
	bpf.reserved = 0
	bpf.cookie_len = ffi.sizeof("struct granted_params")
	local params = ffi.cast("struct granted_params *", bpf.cookie)
	params.tx_rate_kib_sec = tx_rate_kib_sec
	params.next_renewal_ms = next_renewal_ms
	params.renewal_step_ms = renewal_step_ms
	return true
end

-- Decline the flow through the "declined" BPF program (empty cookie).
function decision_declined(policy, expire_sec)
	policy.state = c.GK_BPF
	local bpf = policy.params.bpf
	bpf.expire_sec = expire_sec
	bpf.program_index = BPF_INDEX_DECLINED
	bpf.reserved = 0
	bpf.cookie_len = 0
	return false
end

-- Grant the flow through a grantedv2-compatible BPF program identified by
-- @program_index, passing the full set of grantedv2 parameters.
-- NOTE(review): the name keeps the historical "will_full_params" spelling
-- because policies elsewhere call it by this exact name.
function decision_grantedv2_will_full_params(program_index, policy,
		tx1_rate_kib_sec, tx2_rate_kib_sec, cap_expire_sec,
		next_renewal_ms, renewal_step_ms, direct_if_possible)
	policy.state = c.GK_BPF
	local bpf = policy.params.bpf
	bpf.expire_sec = cap_expire_sec
	bpf.program_index = program_index
	bpf.reserved = 0
	bpf.cookie_len = ffi.sizeof("struct grantedv2_params")
	local params = ffi.cast("struct grantedv2_params *", bpf.cookie)
	params.tx1_rate_kib_sec = tx1_rate_kib_sec
	params.tx2_rate_kib_sec = tx2_rate_kib_sec
	params.next_renewal_ms = next_renewal_ms
	params.renewal_step_ms = renewal_step_ms
	params.direct_if_possible = direct_if_possible
	return true
end
-- Like ipairs(), but iteration begins at index 2 instead of 1.
local function ipairs_skip_first(a)
	local f, t, i = ipairs(a)
	return f, t, i + 1
end

-- Sort @array in place and remove duplicate entries,
-- leaving nil holes past the last unique element.
local function sort_unique(array)
	if #array < 2 then
		return
	end

	table.sort(array)

	local last_pos = 1
	local last_val = array[last_pos]
	for i, v in ipairs_skip_first(array) do
		if last_val ~= v then
			-- New unique value; compact it forward.
			last_pos = last_pos + 1
			last_val = v
			array[last_pos] = last_val
		end
		if last_pos < i then
			array[i] = nil
		end
	end
end

-- CAUTION: Do not refer to the arrays @listening_ports and @remote_ports
-- once this function returns.
function tcpsrv_ports(listening_ports, remote_ports)
	sort_unique(listening_ports)
	sort_unique(remote_ports)

	local total_ports = #listening_ports + #remote_ports
	if total_ports > c.TCPSRV_MAX_NUM_PORTS then
		error("There are " .. total_ports .. " ports; maximum of " ..
			c.TCPSRV_MAX_NUM_PORTS .. " ports")
	end

	-- @ret.ports aliases @listening_ports; see the CAUTION above.
	local ret = {
		listening_port_count = #listening_ports,
		remote_port_count = #remote_ports,
		ports = listening_ports,
	}

	-- Padding.
	for _ = 1, c.TCPSRV_MAX_NUM_PORTS - total_ports do
		table.insert(ret.ports, 0)
	end

	-- Add remote ports in reverse order.
	for i = #remote_ports, 1, -1 do
		table.insert(ret.ports, remote_ports[i])
	end

	assert(#ret.ports == c.TCPSRV_MAX_NUM_PORTS)
	return ret
end

-- The BPF tcp-services.c only supports 12 (listening + remote) ports.
-- The BPF tcp-services.c does not support idiosyncratic services like FTP.
-- If you need more than 12 ports, or supporting idiosyncratic services,
-- write a custom BPF following the BPF web.c example.
function decision_tcpsrv(policy, tx1_rate_kib_sec, cap_expire_sec,
		next_renewal_ms, renewal_step_ms, ports)
	-- Grant the flow through the tcp-services BPF program;
	-- @ports must come from tcpsrv_ports().
	policy.state = c.GK_BPF
	local bpf = policy.params.bpf
	bpf.expire_sec = cap_expire_sec
	bpf.program_index = BPF_INDEX_TCPSRV
	bpf.reserved = 0
	bpf.cookie_len = ffi.sizeof("struct tcpsrv_params")
	local params = ffi.cast("struct tcpsrv_params *", bpf.cookie)
	params.tx1_rate_kib_sec = tx1_rate_kib_sec
	params.next_renewal_ms = next_renewal_ms
	params.renewal_step_ms = renewal_step_ms
	params.listening_port_count = ports.listening_port_count
	params.remote_port_count = ports.remote_port_count
	-- Lua arrays are 1-based; the C array is 0-based.
	for i, v in ipairs(ports.ports) do
		params.ports.p[i - 1] = v
	end
	return true
end

-- The prototype of this function is compatible with decision_granted() to
-- help testing it. Policies may prefer to call
-- decision_grantedv2_will_full_params() instead.
function decision_grantedv2(policy, tx_rate_kib_sec, cap_expire_sec,
		next_renewal_ms, renewal_step_ms)
	return decision_grantedv2_will_full_params(BPF_INDEX_GRANTEDV2, policy,
		tx_rate_kib_sec,
		tx_rate_kib_sec * 0.05, -- 5%
		cap_expire_sec, next_renewal_ms, renewal_step_ms, false)
end

-- The prototype of this function is compatible with decision_granted() to
-- help testing it. Policies may prefer to call
-- decision_grantedv2_will_full_params() instead.
function decision_web(policy, tx_rate_kib_sec, cap_expire_sec,
		next_renewal_ms, renewal_step_ms)
	return decision_grantedv2_will_full_params(BPF_INDEX_WEB, policy,
		tx_rate_kib_sec,
		tx_rate_kib_sec * 0.05, -- 5%
		cap_expire_sec, next_renewal_ms, renewal_step_ms, false)
end

-- There is no -> operator in Lua. The . operator works
-- equivalently for accessing members of a struct AND
-- accessing members of a struct through a reference.
-- Therefore, the arguments to this function can be of type
-- struct in6_addr or struct in6_addr &.
-- Compare two IPv6 addresses byte by byte; true when all 16 octets match.
function ipv6_addrs_equal(addr1, addr2)
	local octet = 0
	while octet <= 15 do
		if addr1.s6_addr[octet] ~= addr2.s6_addr[octet] then
			return false
		end
		octet = octet + 1
	end
	return true
end
```
/content/code_sandbox/lua/gatekeeper/policylib.lua
lua
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
2,850
```lua -- Gatekeeper - DDoS protection system. -- -- This program is free software: you can redistribute it and/or modify -- (at your option) any later version. -- -- This program is distributed in the hope that it will be useful, -- but WITHOUT ANY WARRANTY; without even the implied warranty of -- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -- -- along with this program. If not, see <path_to_url module("dylib", package.seeall) -- -- C functions exported through FFI -- local stdcdefs = require("gatekeeper/stdcdefs") local ffi = require("ffi") -- Structs ffi.cdef[[ static const uint16_t MSG_MAX_LEN = (uint16_t)~0U; static const int ETHER_ADDR_LEN = 6; enum gk_fib_action { GK_FWD_GRANTOR, GK_FWD_GATEWAY_FRONT_NET, GK_FWD_GATEWAY_BACK_NET, GK_FWD_NEIGHBOR_FRONT_NET, GK_FWD_NEIGHBOR_BACK_NET, GK_DROP, GK_FIB_MAX, }; struct rte_ether_addr { uint8_t addr_bytes[ETHER_ADDR_LEN]; } __attribute__((__packed__)); struct ipaddr { uint16_t proto; union { struct in_addr v4; struct in6_addr v6; } ip; }; struct fib_dump_addr_set { struct ipaddr grantor_ip; struct ipaddr nexthop_ip; struct rte_ether_addr d_addr; bool stale; }; struct gk_fib_dump_entry { struct ipaddr addr; int prefix_len; enum gk_fib_action action; unsigned int fib_id; unsigned int num_addr_sets; struct fib_dump_addr_set addr_sets[0]; }; struct gk_neighbor_dump_entry { bool stale; enum gk_fib_action action; struct ipaddr neigh_ip; struct rte_ether_addr d_addr; }; struct lls_dump_entry { bool stale; uint16_t port_id; struct ipaddr addr; struct rte_ether_addr ha; }; ]] -- Functions and wrappers ffi.cdef[[ int add_fib_entry(const char *prefix, const char *gt_ip, const char *gw_ip, enum gk_fib_action action, struct gk_config *gk_conf); int del_fib_entry(const char *ip_prefix, struct gk_config *gk_conf); int gk_flush_flow_table(const char *src_prefix, const char *dst_prefix, struct gk_config *gk_conf); int gk_log_flow_state(const char *src_addr, const char *dst_addr, struct gk_config *gk_conf); int 
gk_load_bpf_flow_handler(struct gk_config *gk_conf, unsigned int index,
	const char *filename, int jit);
int gk_unload_bpf_flow_handler(struct gk_config *gk_conf, unsigned int index);
]]

c = ffi.C

-- Map an enum gk_fib_action value to a human-readable label.
function fib_action_to_str(fib_action)
	if fib_action == c.GK_FWD_GRANTOR then
		return "FWD_GRANTOR (0)"
	elseif fib_action == c.GK_FWD_GATEWAY_FRONT_NET then
		return "FWD_GATEWAY_FRONT_NET (1)"
	elseif fib_action == c.GK_FWD_GATEWAY_BACK_NET then
		return "FWD_GATEWAY_BACK_NET (2)"
	elseif fib_action == c.GK_FWD_NEIGHBOR_FRONT_NET then
		return "FWD_NEIGHBOR_FRONT_NET (3)"
	elseif fib_action == c.GK_FWD_NEIGHBOR_BACK_NET then
		return "FWD_NEIGHBOR_BACK_NET (4)"
	elseif fib_action == c.GK_DROP then
		return "DROP (5)"
	end
	return "INVALID (" .. tostring(fib_action) .. ")"
end

-- Bound the output of the FIB table to the maximum size of
-- a message of the dynamic configuration block.
-- Returns (done, acc): done is true once the accumulated output has reached
-- MSG_MAX_LEN; acc carries the (possibly compacted) accumulator forward.
function bound_fib_dump_output(acc)
	local next_stop = acc.next_stop
	local total_rows = #acc

	if next_stop == nil then
		-- First call: initialize the bookkeeping fields.
		next_stop = 1000
		acc.next_stop = next_stop
		acc.total_rows = 0
	end

	-- Not enough rows accumulated yet to bother compacting.
	if total_rows < next_stop then
		return false, acc
	end

	if acc.total_rows ~= 0 then
		-- Subtract one because acc[1] is
		-- the concatenated output of acc.total_rows rows.
		total_rows = acc.total_rows + total_rows - 1
	end

	local output = table.concat(acc)
	local output_len = string.len(output)
	acc = { [1] = output } -- Free previous acc.

	if output_len >= c.MSG_MAX_LEN then
		return true, acc
	end

	-- Find the new acc.next_stop from the average row length so far.
	local avg_len_per_row = output_len / total_rows
	next_stop = math.ceil(
		((c.MSG_MAX_LEN - output_len) / avg_len_per_row)
		-- Add 1% to next_stop to increase the chance that
		-- the next stop is the last stop.
		* 1.01)
	if next_stop <= 0 then
		next_stop = 1
	end

	-- Add one because acc already includes acc[1].
	acc.next_stop = next_stop + 1
	acc.total_rows = total_rows
	return false, acc
end

-- The following is an example function that can be used as
-- the callback function of list_gk_fib4() and list_gk_fib6().
-- Parameter fib_dump_entry is going to be released after
-- print_fib_dump_entry() returns, so don't keep references to fib_dump_entry
-- or any of the data reachable through its fields.
function print_fib_dump_entry(fib_dump_entry, acc)
	table.insert(acc, "FIB entry for IP prefix: ")
	table.insert(acc, dylib.ip_format_addr(fib_dump_entry.addr))
	table.insert(acc, "/")
	table.insert(acc, tostring(fib_dump_entry.prefix_len))
	table.insert(acc, " with action ")
	table.insert(acc, fib_action_to_str(fib_dump_entry.action))
	for i = 0, fib_dump_entry.num_addr_sets - 1 do
		local addr_set = fib_dump_entry.addr_sets[i]
		if fib_dump_entry.action == c.GK_FWD_GRANTOR then
			table.insert(acc, "\n\tGrantor IP address: ")
			table.insert(acc, dylib.ip_format_addr(addr_set.grantor_ip))
		end
		table.insert(acc, "\n\tEthernet cache entry: [state: ")
		table.insert(acc, addr_set.stale and "stale" or "fresh")
		table.insert(acc, ", nexthop ip: ")
		table.insert(acc, dylib.ip_format_addr(addr_set.nexthop_ip))
		table.insert(acc, ", d_addr: ")
		table.insert(acc, dylib.ether_format_addr(addr_set.d_addr))
		table.insert(acc, "]")
	end
	table.insert(acc, "\n")
	return bound_fib_dump_output(acc)
end

-- The following is an example function that can be used as
-- the callback function of list_gk_neighbors4() and list_gk_neighbors6().
-- Parameter neighbor_dump_entry is going to be released after
-- print_neighbor_dump_entry() returns, so don't keep references to
-- neighbor_dump_entry or any of the data reachable through its fields.
function print_neighbor_dump_entry(neighbor_dump_entry, acc)
	table.insert(acc, "Neighbor Ethernet cache entry: [state: ")
	table.insert(acc, neighbor_dump_entry.stale and "stale" or "fresh")
	table.insert(acc, ", neighbor ip: ")
	table.insert(acc, dylib.ip_format_addr(neighbor_dump_entry.neigh_ip))
	table.insert(acc, ", d_addr: ")
	table.insert(acc, dylib.ether_format_addr(neighbor_dump_entry.d_addr))
	table.insert(acc, ", action: ")
	table.insert(acc, fib_action_to_str(neighbor_dump_entry.action))
	table.insert(acc, "]\n")
	return acc
end

-- The following is an example function that can be used as
-- the callback function of list_lls_arp() and list_lls_nd().
-- Parameter lls_dump_entry is going to be released after
-- print_lls_dump_entry() returns, so don't keep references to
-- lls_dump_entry or any of the data reachable through its fields.
function print_lls_dump_entry(lls_dump_entry, acc)
	table.insert(acc, "LLS cache entry: [state: ")
	table.insert(acc, lls_dump_entry.stale and "stale" or "fresh")
	table.insert(acc, ", ip: ")
	table.insert(acc, dylib.ip_format_addr(lls_dump_entry.addr))
	table.insert(acc, ", mac: ")
	table.insert(acc, dylib.ether_format_addr(lls_dump_entry.ha))
	table.insert(acc, ", port: ")
	table.insert(acc, tostring(lls_dump_entry.port_id))
	table.insert(acc, "]\n")
	return acc
end

-- Serialize @lua_code (a function) and hand it to the GT blocks so each
-- lcore can update its Lua state incrementally.
function update_gt_lua_states_incrementally(gt_conf, lua_code, is_returned)
	return dylib.internal_update_gt_lua_states_incrementally(gt_conf,
		string.dump(lua_code), is_returned)
end
```
/content/code_sandbox/lua/gatekeeper/dylib.lua
lua
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
2,124
```lua
require "gatekeeper/staticlib"

local reply_msg = ""

local dyc = staticlib.c.get_dy_conf()

-- When the dynamic config belongs to a Grantor (gt) deployment,
-- only refresh the GT Lua states and stop here.
if dyc.gt ~= nil then
	local function example()
		print("Hello Gatekeeper!")
	end
	dylib.update_gt_lua_states(dyc.gt)
	dylib.update_gt_lua_states_incrementally(dyc.gt, example, false)
	return "gt: successfully updated the lua states\n"
end

local ret = dylib.c.add_fib_entry("198.51.100.0/25", "203.0.113.1",
	"10.0.2.253", dylib.c.GK_FWD_GRANTOR, dyc.gk)
if ret < 0 then
	return "gk: failed to add an FIB entry\n"
end

ret = dylib.c.del_fib_entry("198.51.100.0/25", dyc.gk)
if ret < 0 then
	return "gk: failed to delete an FIB entry\n"
end

-- Load balancing to multiple Grantor servers,
-- where one Grantor is weighted twice as much.
addrs = {
	{ gt_ip = '203.0.113.2', gw_ip = '10.0.2.252' },
	{ gt_ip = '203.0.113.3', gw_ip = '10.0.2.251' },
	{ gt_ip = '203.0.113.4', gw_ip = '10.0.2.250' },
	{ gt_ip = '203.0.113.4', gw_ip = '10.0.2.250' }
}
dylib.add_grantor_entry_lb("198.51.100.0/25", addrs, dyc.gk)

-- Update to make one Grantor weighted 3x as much as the other.
addrs[1] = { gt_ip = '203.0.113.4', gw_ip = '10.0.2.250' }
dylib.update_grantor_entry_lb("198.51.100.0/25", addrs, dyc.gk)

-- Examples of temporarily changing global and block log levels.
local old_log_level = staticlib.c.rte_log_get_global_level()
staticlib.c.rte_log_set_global_level(staticlib.c.RTE_LOG_ERR)
ret = staticlib.c.set_log_level_per_block("CPS", staticlib.c.RTE_LOG_ERR)
if ret < 1 then
	return "cps: failed to set new log level"
end

ret = dylib.c.add_fib_entry("198.51.100.128/25", nil, nil,
	dylib.c.GK_DROP, dyc.gk)
if ret < 0 then
	return "gk: failed to add an FIB entry\n"
end

ret = dylib.c.add_fib_entry("192.0.2.0/24", nil, "10.0.2.254",
	dylib.c.GK_FWD_GATEWAY_BACK_NET, dyc.gk)
if ret < 0 then
	return "gk: failed to add an FIB entry\n"
end

-- Revert global log level.
staticlib.c.rte_log_set_global_level(old_log_level)

-- Revert CPS log level.
local cpsc = staticlib.c.get_cps_conf()
if cpsc == nil then
	return "cps: failed to fetch config to revert log level"
end
ret = staticlib.c.set_log_level_per_block("CPS", cpsc.log_level)
if ret < 1 then
	return "cps: failed to revert to original log level"
end

ret = dylib.c.add_fib_entry("198.18.0.0/15", nil, "10.0.1.254",
	dylib.c.GK_FWD_GATEWAY_FRONT_NET, dyc.gk)
if ret < 0 then
	return "gk: failed to add an FIB entry\n"
end

local ret = dylib.c.add_fib_entry("2001:db8:3::/48", "2001:db8:0::1",
	"2001:db8:2::253", dylib.c.GK_FWD_GRANTOR, dyc.gk)
if ret < 0 then
	return "gk: failed to add an FIB entry\n"
end

ret = dylib.c.add_fib_entry("2001:db8:4::/48", nil, "2001:db8:2::253",
	dylib.c.GK_FWD_GATEWAY_BACK_NET, dyc.gk)
if ret < 0 then
	return "gk: failed to add an FIB entry\n"
end

ret = dylib.c.add_fib_entry("2001:db8:5::/48", nil, "2001:db8:1::253",
	dylib.c.GK_FWD_GATEWAY_FRONT_NET, dyc.gk)
if ret < 0 then
	return "gk: failed to add an FIB entry\n"
end

-- Append the current FIB and neighbor dumps to the reply.
local function list_fib_neighbors()
	reply_msg = reply_msg ..
		table.concat(dylib.list_gk_fib4(dyc.gk,
			dylib.print_fib_dump_entry, {})) ..
		table.concat(dylib.list_gk_fib6(dyc.gk,
			dylib.print_fib_dump_entry, {})) ..
		table.concat(dylib.list_gk_neighbors4(dyc.gk,
			dylib.print_neighbor_dump_entry, {})) ..
		table.concat(dylib.list_gk_neighbors6(dyc.gk,
			dylib.print_neighbor_dump_entry, {}))
end

list_fib_neighbors()

-- Remove every prefix added above; stop at the first failure.
for _, prefix in ipairs({
	"198.51.100.0/25", "198.51.100.128/25", "192.0.2.0/24",
	"198.18.0.0/15", "2001:db8:3::/48", "2001:db8:4::/48",
	"2001:db8:5::/48",
}) do
	ret = dylib.c.del_fib_entry(prefix, dyc.gk)
	if ret < 0 then
		return "gk: failed to delete an FIB entry\n"
	end
end

list_fib_neighbors()

local llsc = staticlib.c.get_lls_conf()
if llsc == nil then
	return "lls: failed to fetch config to dump caches"
end
reply_msg = reply_msg ..
	table.concat(dylib.list_lls_arp(llsc, dylib.print_lls_dump_entry, {})) ..
	table.concat(dylib.list_lls_nd(llsc, dylib.print_lls_dump_entry, {}))

ret = dylib.c.gk_log_flow_state("198.51.100.0", "192.0.2.0", dyc.gk)
if ret < 0 then
	return "gk: failed to log the flow state\n"
end

ret = dylib.c.gk_log_flow_state("2001:db8:3::", "2001:db8:5::", dyc.gk)
if ret < 0 then
	return "gk: failed to log the flow state\n"
end

ret = dylib.c.gk_flush_flow_table("198.51.100.0/25", "192.0.2.0/24", dyc.gk)
if ret < 0 then
	return "gk: failed to flush the flow table\n"
end

ret = dylib.c.gk_flush_flow_table("2001:db8:3::/48", "2001:db8:5::/48",
	dyc.gk)
if ret < 0 then
	return "gk: failed to flush the flow table\n"
end

ret = dylib.c.gk_load_bpf_flow_handler(dyc.gk, 255, "bpf/granted.bpf", true)
if ret < 0 then
	-- The error below may be triggered for a number of reasons;
	-- the reasons below should be the most common ones:
	--
	-- 1. Running Gatekeeper in a folder different from
	-- the root of the repository requires to adjust the path passed
	-- to dylib.c.gk_load_bpf_flow_handler();
	--
	-- 2. The BPF programs in folder ROOT_OF_REPOSITORY/bpf are
	-- not compiled.
	return "gk: failed to load a BPF program in runtime"
end

ret = dylib.c.gk_unload_bpf_flow_handler(dyc.gk, 255)
if ret < 0 then
	return "gk: failed to unload a BPF program in runtime"
end

return "gk: successfully processed all the FIB entries\n" .. reply_msg
```
/content/code_sandbox/lua/examples/example_of_dynamic_config_request.lua
lua
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
2,064
```lua
local policylib = require("gatekeeper/policylib")
local ffi = require("ffi")

-- Decision functions (dcs_*): each takes a policy and fills in a decision.

local function dcs_default(policy)
	return policylib.decision_granted(policy,
		1024,   -- tx_rate_kib_sec
		300,    -- cap_expire_sec
		240000, -- next_renewal_ms
		3000)   -- renewal_step_ms
end

local function dcs_malformed(policy)
	return policylib.decision_declined(policy, 10)
end

local function dcs_declined(policy)
	return policylib.decision_declined(policy, 60)
end

local function dcs_friendly(policy)
	return policylib.decision_granted(policy,
		2048,   -- tx_rate_kib_sec
		600,    -- cap_expire_sec
		540000, -- next_renewal_ms
		3000)   -- renewal_step_ms
end

-- Policy groups keyed by the LPM policy id.
local groups = {
	[1] = dcs_friendly,
	[253] = dcs_declined,
	[254] = dcs_malformed,
	[255] = dcs_default,
}

-- The following defines simple policies without a LPM table.
local simple_policy = {
	[policylib.c.IPV4] = {
		-- Loosely assume that TCP and UDP ports are equivalents
		-- to simplify this example.
		[policylib.c.gt_cpu_to_be_16(80)] = dcs_friendly,
	},
	[policylib.c.IPV6] = {
		-- Loosely assume that TCP and UDP ports are equivalents
		-- to simplify this example.
		[policylib.c.gt_cpu_to_be_16(80)] = dcs_friendly,
	},
}

-- Function that looks up the simple policy for the packet.
local function lookup_simple_policy(pkt_info)
	if pkt_info.frag then
		return dcs_malformed
	end

	if pkt_info.inner_ip_ver == policylib.c.IPV4 and
			pkt_info.l4_proto == policylib.c.ICMP then
		if pkt_info.upper_len < ffi.sizeof("struct rte_icmp_hdr") then
			return dcs_malformed
		end
		local ipv4_hdr = ffi.cast("struct rte_ipv4_hdr *",
			pkt_info.inner_l3_hdr)
		local icmp_hdr = ffi.cast("struct rte_icmp_hdr *",
			pkt_info.l4_hdr)
		-- Disable traceroute through ICMP into network.
		if ipv4_hdr.time_to_live < 16 and
				icmp_hdr.icmp_type ==
					policylib.c.ICMP_ECHO_REQUEST_TYPE and
				icmp_hdr.icmp_code ==
					policylib.c.ICMP_ECHO_REQUEST_CODE then
			return dcs_declined
		end
		return dcs_default
	end

	if pkt_info.inner_ip_ver == policylib.c.IPV6 and
			pkt_info.l4_proto == policylib.c.ICMPV6 then
		if pkt_info.upper_len < ffi.sizeof("struct icmpv6_hdr") then
			return dcs_malformed
		end
		local ipv6_hdr = ffi.cast("struct rte_ipv6_hdr *",
			pkt_info.inner_l3_hdr)
		local icmpv6_hdr = ffi.cast("struct icmpv6_hdr *",
			pkt_info.l4_hdr)
		-- Disable traceroute through ICMPV6 into network.
		if ipv6_hdr.hop_limits < 16 and
				icmpv6_hdr.icmpv6_type ==
					policylib.c.ICMPV6_ECHO_REQUEST_TYPE and
				icmpv6_hdr.icmpv6_code ==
					policylib.c.ICMPV6_ECHO_REQUEST_CODE then
			return dcs_declined
		end
		return dcs_default
	end

	local l3_policy = simple_policy[pkt_info.inner_ip_ver]
	if l3_policy == nil then
		return nil
	end

	if pkt_info.l4_proto == policylib.c.TCP then
		if pkt_info.upper_len < ffi.sizeof("struct rte_tcp_hdr") then
			return dcs_malformed
		end
		local tcphdr = ffi.cast("struct rte_tcp_hdr *", pkt_info.l4_hdr)
		return l3_policy[tcphdr.dst_port]
	end

	if pkt_info.l4_proto == policylib.c.UDP then
		if pkt_info.upper_len < ffi.sizeof("struct rte_udp_hdr") then
			return dcs_malformed
		end
		local udphdr = ffi.cast("struct rte_udp_hdr *", pkt_info.l4_hdr)
		return l3_policy[udphdr.dst_port]
	end

	return nil
end

-- The following defines the LPM policies.
local scaling_factor_rules = 2
local scaling_factor_tbl8s = 2

-- Estimate the number of rules and number of tbl8s in DPDK LPM library.
-- Shared sizing pass: count rules and tbl8s for one prefix file,
-- with @parse and @count_tbl8s selecting the IPv4 or IPv6 helpers.
local function lpm_estimate(file, parse, count_tbl8s)
	local num_rules = 0
	local num_tbl8s = 0
	local prefixes = {}
	for line in io.lines(file) do
		local addr, prefix_len = parse(line)
		num_rules = num_rules + 1
		num_tbl8s = num_tbl8s + count_tbl8s(addr, prefix_len, prefixes)
	end
	return num_rules, num_tbl8s
end

local function lpm_para_estimate(ipv4_file)
	return lpm_estimate(ipv4_file,
		lpmlib.str_to_prefix, lpmlib.lpm_add_tbl8s)
end

-- Estimate the number of rules and number of tbl8s in DPDK LPM6 library.
local function lpm6_para_estimate(ipv6_file)
	return lpm_estimate(ipv6_file,
		lpmlib.str_to_prefix6, lpmlib.lpm6_add_tbl8s)
end

-- This file only contains an example set of Bogons IPv4 lists
-- downloaded from path_to_url
local bogons_ipv4_file = "lua/examples/bogons-ipv4.txt"
local num_ipv4_rules, num_ipv4_tbl8s = lpm_para_estimate(bogons_ipv4_file)
num_ipv4_rules = math.max(1, scaling_factor_rules * num_ipv4_rules)
num_ipv4_tbl8s = math.max(1, scaling_factor_tbl8s * num_ipv4_tbl8s)

-- This variable is made global, so that the example() function in
-- lua/examples/example_gt_lpm_params_request.lua can access it.
lpm = lpmlib.new_lpm(num_ipv4_rules, num_ipv4_tbl8s)

-- This file only contains an example set of Bogons IPv6 lists
-- downloaded from path_to_url
local bogons_ipv6_file = "lua/examples/bogons-ipv6.txt"
local num_ipv6_rules, num_ipv6_tbl8s = lpm6_para_estimate(bogons_ipv6_file)
num_ipv6_rules = math.max(1, scaling_factor_rules * num_ipv6_rules)
num_ipv6_tbl8s = math.max(1, scaling_factor_tbl8s * num_ipv6_tbl8s)
local lpm6 = lpmlib.new_lpm6(num_ipv6_rules, num_ipv6_tbl8s)

-- Load both bogon lists under policy id 253 (dcs_declined).
for line in io.lines(bogons_ipv4_file) do
	local ip_addr, prefix_len = lpmlib.str_to_prefix(line)
	lpmlib.lpm_add(lpm, ip_addr, prefix_len, 253)
end

for line in io.lines(bogons_ipv6_file) do
	local ip_addr, prefix_len = lpmlib.str_to_prefix6(line)
	lpmlib.lpm6_add(lpm6, ip_addr, prefix_len, 253)
end

-- Example global IP addresses for special cases in policy.
ipv6_addr_ex, _ = lpmlib.str_to_prefix6("2001:219::1/128")
ipv4_addr_ex, _ = lpmlib.str_to_prefix("41.78.176.1/32")

-- Look up the decider for @pkt_info in the LPM tables;
-- nil when no LPM rule applies or the destination is a special-case address.
local function lookup_lpm_policy(pkt_info)
	if pkt_info.inner_ip_ver == policylib.c.IPV4 then
		local ipv4_hdr = ffi.cast("struct rte_ipv4_hdr *",
			pkt_info.inner_l3_hdr)
		if ipv4_hdr.dst_addr == ipv4_addr_ex then
			return nil
		end
		local policy_id = lpmlib.lpm_lookup(lpm, ipv4_hdr.src_addr)
		if policy_id < 0 then
			return nil
		end
		return groups[policy_id]
	end

	if pkt_info.inner_ip_ver == policylib.c.IPV6 then
		local ipv6_hdr = ffi.cast("struct rte_ipv6_hdr *",
			pkt_info.inner_l3_hdr)
		local dst_addr = ffi.cast("struct in6_addr &",
			ipv6_hdr.dst_addr)
		if policylib.ipv6_addrs_equal(dst_addr, ipv6_addr_ex) then
			return nil
		end
		local src_addr = ffi.cast("struct in6_addr &",
			ipv6_hdr.src_addr)
		local policy_id = lpmlib.lpm6_lookup(lpm6, src_addr)
		if policy_id < 0 then
			return nil
		end
		return groups[policy_id]
	end

	return nil
end

-- Entry point: LPM policy first, then the simple policy, then the default.
-- (Deciders are functions, so the or-chain only skips nil results.)
function lookup_policy(pkt_info, policy)
	local group = lookup_lpm_policy(pkt_info) or
		lookup_simple_policy(pkt_info) or
		dcs_default
	return group(policy)
end

--[[ Flows
associated with fragments that have to be discarded before being fully
assembled must be punished. Otherwise, an attacker could overflow the request
channel with fragments that never complete, and policies wouldn't be able to
do anything about it because they would not be aware of these fragments.
The punishment is essentially a policy decision stated in the configuration
files to be applied to these cases. For example, decline the flow for
10 minutes.
--]]
function lookup_frag_punish_policy(policy)
	-- Decline for 600 seconds (10 minutes).
	return policylib.decision_declined(policy, 600)
end
```
/content/code_sandbox/lua/examples/policy.lua
lua
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
2,229
```lua
-- Gatekeeper - DDoS protection system.
--
-- This program is free software: you can redistribute it and/or modify
-- (at your option) any later version.
--
-- This program is distributed in the hope that it will be useful,
-- but WITHOUT ANY WARRANTY; without even the implied warranty of
-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
--
-- along with this program. If not, see <path_to_url

module("staticlib", package.seeall)

--
-- Functions to allocate lcores
--

-- Group the available lcores by NUMA node; the result also carries
-- @net_conf under the "__net_conf" key.
function get_numa_table (net_conf)
	local numa_table = {}
	for _, lcore in ipairs(list_lcores()) do
		local socket_id = rte_lcore_to_socket_id(lcore)
		local bucket = numa_table[socket_id]
		if bucket == nil then
			numa_table[socket_id] = {lcore}
		else
			table.insert(bucket, lcore)
		end
	end
	numa_table["__net_conf"] = net_conf
	return numa_table
end

-- Split @array after @split_pos; the second return value is nil
-- when there is nothing past @split_pos.
function split_array (array, split_pos)
	local head = {}
	local tail = {}
	for i, v in ipairs(array) do
		table.insert(i <= split_pos and head or tail, v)
	end
	if next(tail) == nil then
		-- tail is empty.
		tail = nil
	end
	return head, tail
end

-- This iterator works like ipairs(), but
-- (1) it skips nil entries instead of stopping, and
-- (2) it starts at index zero instead of one.
function all_ipairs (a) return function (last_index, cur_index) while true do cur_index = cur_index + 1 if cur_index > last_index then return nil end local ret = a[cur_index] if ret ~= nil then return cur_index, ret end end end, table.maxn(a), -1 end local function alloc_lcores_at_numa (numa_table, numa, n) local a1, a2 = split_array(numa_table[numa], n) numa_table[numa] = a2 numa_table["__net_conf"].numa_used[numa] = true return a1 end function alloc_lcores_from_same_numa (numa_table, n) for numa, lcores in all_ipairs(numa_table) do if #lcores >= n then return alloc_lcores_at_numa(numa_table, numa, n) end end return nil end function alloc_an_lcore (numa_table) local lcore_t = alloc_lcores_from_same_numa(numa_table, 1) if lcore_t == nil then error("There is not enough lcores"); end return lcore_t[1] end function count_numa_nodes (numa_table) local count = 0 for numa, lcores in all_ipairs(numa_table) do count = count + 1 end return count end function alloc_lcores_evenly_from_all_numa_nodes (numa_table, n, fixed_lcores_per_numa) local num_numa_nodes = count_numa_nodes(numa_table) local q = n / num_numa_nodes local r = n % num_numa_nodes local i = 0 local res = {["__net_conf"] = numa_table["__net_conf"], } for numa, lcores in all_ipairs(numa_table) do local lcores_needed = q + ((i < r) and 1 or 0) if lcores_needed > 0 then lcores_needed = lcores_needed + fixed_lcores_per_numa else break end if #lcores >= lcores_needed then res[numa] = alloc_lcores_at_numa(numa_table, numa, lcores_needed) else error("There is not enough lcores"); end i = i + 1 end return res end local function append_array (a, b) for i, v in ipairs(b) do table.insert(a, v) end end function convert_numa_table_to_array (numa_table) local res = {} for numa, lcores in all_ipairs(numa_table) do append_array(res, lcores) end return res end function gk_sol_map (gk_lcores, sol_lcores) local m = {} local sol_allocated = {} if #gk_lcores % #sol_lcores ~= 0 then print("Warning: uneven GK-to-SOL blocks 
assignment"); end for i, v in ipairs(sol_lcores) do sol_allocated[i] = 0 end for i1, v1 in ipairs(gk_lcores) do local idx local socket_id = rte_lcore_to_socket_id(v1) for i2, v2 in ipairs(sol_lcores) do if rte_lcore_to_socket_id(v2) == socket_id and (idx == nil or sol_allocated[i2] < sol_allocated[idx]) then idx = i2 end end if idx == nil then error("No SOL block allocated at NUMA node " .. socket_id) end m[i1] = idx sol_allocated[idx] = sol_allocated[idx] + 1 end for i, v in ipairs(sol_allocated) do if v == 0 then print("Warning: SOL block at lcore " .. sol_lcores[i] .. " has zero GK block allocated to it"); end end return m end function print_lcore_array (array) io.write("Array: ") for i, v in ipairs(array) do io.write("[", i, "]=", v, "\t") end io.write("\n") end function print_numa_table (numa_table) for numa, lcores in all_ipairs(numa_table) do io.write("NUMA ", numa, ":\t") for _, lcore in ipairs(lcores) do io.write(lcore, "\t") end io.write("\n") end end -- -- C functions exported through FFI -- local ffi = require("ffi") -- Structs ffi.cdef[[ enum log_levels { /* Corresponding to the values in rte_log.h. */ RTE_LOG_EMERG = 1U, /* System is unusable. */ RTE_LOG_ALERT = 2U, /* Action must be taken immediately. */ RTE_LOG_CRIT = 3U, /* Critical conditions. */ RTE_LOG_ERR = 4U, /* Error conditions. */ RTE_LOG_WARNING = 5U, /* Warning conditions. */ RTE_LOG_NOTICE = 6U, /* Normal but significant condition. */ RTE_LOG_INFO = 7U, /* Informational. */ RTE_LOG_DEBUG = 8U, /* Debug-level messages. */ }; enum bonding_modes { /* Corresponding to the values in rte_eth_bond.h. */ BONDING_MODE_ROUND_ROBIN = 0, BONDING_MODE_ACTIVE_BACKUP = 1, BONDING_MODE_BALANCE = 2, BONDING_MODE_BROADCAST = 3, BONDING_MODE_8023AD = 4, BONDING_MODE_TLB = 5, BONDING_MODE_ALB = 6, }; enum file_modes { /* RWX mask for owner. */ S_IRWXU = 0000700, /* R for owner. */ S_IRUSR = 0000400, /* W for owner. */ S_IWUSR = 0000200, /* X for owner. */ S_IXUSR = 0000100, /* RWX mask for group. 
*/ S_IRWXG = 0000070, /* R for group. */ S_IRGRP = 0000040, /* W for group. */ S_IWGRP = 0000020, /* X for group. */ S_IXGRP = 0000010, /* RWX mask for other. */ S_IRWXO = 0000007, /* R for other. */ S_IROTH = 0000004, /* W for other. */ S_IWOTH = 0000002, /* X for other. */ S_IXOTH = 0000001, /* Set user id on execution. */ S_ISUID = 0004000, /* Set group id on execution. */ S_ISGID = 0002000, /* Save swapped text even after use. */ S_ISVTX = 0001000, }; struct gatekeeper_if { char **pci_addrs; uint8_t num_ports; char *name; uint16_t num_rx_queues; uint16_t num_tx_queues; uint16_t total_pkt_burst; uint32_t arp_cache_timeout_sec; uint32_t nd_cache_timeout_sec; uint32_t bonding_mode; int vlan_insert; uint16_t mtu; uint8_t ipv6_default_hop_limits; uint16_t num_rx_desc; uint16_t num_tx_desc; bool ipv4_hw_udp_cksum; bool ipv6_hw_udp_cksum; bool ipv4_hw_cksum; bool guarantee_random_entropy; bool alternative_rss_hash; /* This struct has hidden fields. */ }; struct net_config { int back_iface_enabled; bool *numa_used; uint32_t log_level; uint32_t rotate_log_interval_sec; /* This struct has hidden fields. */ }; struct gk_config { unsigned int flow_ht_size; unsigned int flow_ht_max_probes; double flow_ht_scale_num_bucket; unsigned int max_num_ipv4_rules; unsigned int num_ipv4_tbl8s; unsigned int max_num_ipv6_rules; unsigned int num_ipv6_tbl8s; unsigned int max_num_ipv6_neighbors; unsigned int flow_table_scan_iter; unsigned int scan_del_thresh; uint16_t front_max_pkt_burst; uint16_t back_max_pkt_burst; uint32_t front_icmp_msgs_per_sec; uint32_t front_icmp_msgs_burst; uint32_t back_icmp_msgs_per_sec; uint32_t back_icmp_msgs_burst; unsigned int mailbox_max_entries_exp; unsigned int mailbox_mem_cache_size; unsigned int mailbox_burst_size; uint32_t log_level; uint32_t log_ratelimit_interval_ms; uint32_t log_ratelimit_burst; unsigned int basic_measurement_logging_ms; uint8_t fib_dump_batch_size; /* This struct has hidden fields. 
*/ }; struct ggu_config { unsigned int lcore_id; uint16_t ggu_src_port; uint16_t ggu_dst_port; uint16_t max_pkt_burst; unsigned int mailbox_max_entries_exp; unsigned int mailbox_mem_cache_size; unsigned int mailbox_burst_size; uint32_t log_level; uint32_t log_ratelimit_interval_ms; uint32_t log_ratelimit_burst; /* This struct has hidden fields. */ }; struct lls_config { unsigned int lcore_id; uint16_t front_max_pkt_burst; uint16_t back_max_pkt_burst; unsigned int mailbox_max_pkt_sub; unsigned int mailbox_max_entries_exp; unsigned int mailbox_mem_cache_size; unsigned int mailbox_burst_size; unsigned int max_num_cache_records; unsigned int cache_scan_interval_sec; uint32_t log_level; uint32_t log_ratelimit_interval_ms; uint32_t log_ratelimit_burst; uint32_t front_icmp_msgs_per_sec; uint32_t front_icmp_msgs_burst; uint32_t back_icmp_msgs_per_sec; uint32_t back_icmp_msgs_burst; /* This struct has hidden fields. */ }; struct gt_config { uint16_t ggu_src_port; uint16_t ggu_dst_port; int max_num_ipv6_neighbors; uint32_t frag_scan_timeout_ms; uint32_t frag_bucket_num; uint32_t frag_bucket_entries; uint32_t frag_max_entries; uint32_t frag_max_flow_ttl_ms; uint16_t max_pkt_burst; unsigned int batch_interval; unsigned int max_ggu_notify_pkts; unsigned int mailbox_max_entries_exp; unsigned int mailbox_mem_cache_size; unsigned int mailbox_burst_size; uint32_t log_level; uint32_t log_ratelimit_interval_ms; uint32_t log_ratelimit_burst; bool reassembling_enabled; /* This struct has hidden fields. 
*/ }; struct cps_config { unsigned int lcore_id; uint32_t log_level; uint32_t log_ratelimit_interval_ms; uint32_t log_ratelimit_burst; uint16_t front_max_pkt_burst; uint16_t back_max_pkt_burst; uint16_t kni_queue_size; unsigned int max_rt_update_pkts; unsigned int scan_interval_sec; unsigned int mailbox_max_entries_exp; unsigned int mailbox_mem_cache_size; unsigned int mailbox_burst_size; uint32_t nl_pid; unsigned int arp_max_entries_exp; unsigned int nd_max_entries_exp; /* This struct has hidden fields. */ }; struct dynamic_config { unsigned int lcore_id; struct gk_config *gk; struct gt_config *gt; uint32_t log_level; uint32_t log_ratelimit_interval_ms; uint32_t log_ratelimit_burst; unsigned int mailbox_max_entries_exp; unsigned int mailbox_mem_cache_size; unsigned int mailbox_burst_size; /* This struct has hidden fields. */ }; struct sol_config { unsigned int pri_req_max_len; unsigned int enq_burst_size; unsigned int deq_burst_size; double tb_rate_approx_err; double req_channel_bw_mbps; uint32_t log_level; uint32_t log_ratelimit_interval_ms; uint32_t log_ratelimit_burst; /* This struct has hidden fields. */ }; ]] -- Functions and wrappers ffi.cdef[[ /* * These functions are meant to help one to investigate issues that * depend on the internals of DPDK. */ void rte_log_set_global_level(uint32_t log_level); uint32_t rte_log_get_global_level(void); int rte_log_set_level(uint32_t type, uint32_t level); int rte_log_get_level(uint32_t type); /* Functions to change the log level of functional blocks. 
*/ int set_log_level_per_block(const char *block_name, uint32_t log_level); int set_log_level_per_lcore(unsigned int lcore_id, uint32_t log_level); int lua_init_iface(struct gatekeeper_if *iface, const char *iface_name, const char **pci_addrs, uint8_t num_pci_addrs, const char **ip_cidrs, uint8_t num_ip_cidrs, uint16_t ipv4_vlan_tag, uint16_t ipv6_vlan_tag); bool ipv4_configured(struct net_config *net_conf); bool ipv6_configured(struct net_config *net_conf); struct net_config *get_net_conf(void); struct gatekeeper_if *get_if_front(struct net_config *net_conf); struct gatekeeper_if *get_if_back(struct net_config *net_conf); int gatekeeper_setup_user(struct net_config *net_conf, const char *user); int gatekeeper_init_network(struct net_config *net_conf); struct gk_config *alloc_gk_conf(void); int gk_load_bpf_flow_handler(struct gk_config *gk_conf, unsigned int index, const char *filename, int jit); int run_gk(struct net_config *net_conf, struct gk_config *gk_conf, struct sol_config *sol_conf); struct ggu_config *alloc_ggu_conf(unsigned int lcore); int run_ggu(struct net_config *net_conf, struct gk_config *gk_conf, struct ggu_config *ggu_conf); int cleanup_ggu(struct ggu_config *ggu_conf); struct lls_config *get_lls_conf(void); int run_lls(struct net_config *net_conf, struct lls_config *lls_conf); struct gt_config *alloc_gt_conf(void); int run_gt(struct net_config *net_conf, struct gt_config *gt_conf, const char *lua_base_directory, const char *lua_policy_file); struct cps_config *get_cps_conf(void); int run_cps(struct net_config *net_conf, struct gk_config *gk_conf, struct gt_config *gt_conf, struct cps_config *cps_conf, struct lls_config *lls_conf); struct dynamic_config *get_dy_conf(void); void set_dyc_timeout(unsigned sec, unsigned usec, struct dynamic_config *dy_conf); int run_dynamic_config(struct net_config *net_conf, struct gk_config *gk_conf, struct gt_config *gt_conf, const char *server_path, const char *lua_dy_base_dir, const char *dynamic_config_file, 
struct dynamic_config *dy_conf, int mode); struct sol_config *alloc_sol_conf(void); int run_sol(struct net_config *net_conf, struct sol_config *sol_conf); ]] c = ffi.C -- -- Network configuration functions -- local ifaces = require("if_map") function check_ifaces(front_ports, back_ports) for i1, v1 in ipairs(front_ports) do pci1 = ifaces[v1] if pci1 == nil then error("There is no map for " .. v1 .. " in the front interface configuration") end for i2, v2 in ipairs(back_ports) do pci2 = ifaces[v2] if pci2 == nil then error("There is no map for " .. v2 .. " in the back interface configuration") end if pci1 == pci2 then error("Configured interfaces on the front [" .. v1 .. " (" .. pci1 .. ")] and back [" .. v2 .. " (" .. pci2 .. ")] are the same") end end end end function init_iface(iface, name, ports, cidrs, ipv4_vlan_tag, ipv6_vlan_tag) local pci_strs = ffi.new("const char *[" .. #ports .. "]") for i, v in ipairs(ports) do local pci_addr = ifaces[v] if pci_addr == nil then error("There is no map for interface " .. v) end for i2, v2 in ipairs(ports) do if i2 > i and pci_addr == ifaces[v2] then error("Duplicate interfaces: " .. v .. " and " .. v2 .. " map to the same PCI address (" .. pci_addr .. ") in the " .. name .. " configuration") end end pci_strs[i - 1] = pci_addr end local ip_cidrs = ffi.new("const char *[" .. #cidrs .. "]") for i, v in ipairs(cidrs) do ip_cidrs[i - 1] = v end local ret = c.lua_init_iface(iface, name, pci_strs, #ports, ip_cidrs, #cidrs, ipv4_vlan_tag, ipv6_vlan_tag) if ret < 0 then error("Failed to initilialize " .. name .. 
" interface") end return ret end function get_front_burst_config(max_pkt_burst_front, net_conf) local front_iface = c.get_if_front(net_conf) return math.max(max_pkt_burst_front, front_iface.num_ports) end function get_back_burst_config(max_pkt_burst_back, net_conf) if not net_conf.back_iface_enabled then error("One can only have max_pkt_burst_back when the back network is enabled") end local back_iface = c.get_if_back(net_conf) return math.max(max_pkt_burst_back, back_iface.num_ports) end ```
/content/code_sandbox/lua/gatekeeper/staticlib.lua
lua
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
4,284
```lua
require "gatekeeper/staticlib"
require "gatekeeper/policylib"

local dyc = staticlib.c.get_dy_conf()
if dyc.gt == nil then
	return "Gatekeeper: failed to run as Grantor server\n"
end

-- The function assumes that the variable lpm, an IPv4 LPM table,
-- is globally available in the policy as in the policy example.
local function report_lpm_params()
	local max_rules, num_tbl8s = lpmlib.lpm_get_paras(lpm)
	return policylib.c.gt_lcore_id() .. ":" ..
		max_rules .. "," .. num_tbl8s .. "\n"
end

-- Run the reporter on every GT lcore and collect the per-lcore replies.
local reply_msg = dylib.update_gt_lua_states_incrementally(dyc.gt,
	report_lpm_params, true)

return "gt: successfully updated the lua states\n" ..
	"The returned message is: " .. reply_msg
```
/content/code_sandbox/lua/examples/example_gt_lpm_params_request.lua
lua
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
178
```lua
require "gatekeeper/staticlib"

-- Reload the Lua policy on all GT lcores of a Grantor server.
local dyc = staticlib.c.get_dy_conf()
if dyc == nil then
	return "No dynamic configuration block available"
end

if dyc.gt == nil then
	-- BUG FIX: the message previously misspelled "Grantor" as "Grator".
	return "No GT block available; not a Grantor server"
end

dylib.update_gt_lua_states(dyc.gt)

return "Successfully reloaded the Lua policy"
```
/content/code_sandbox/gkctl/scripts/reload_policy.lua
lua
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
78
```lua
require "gatekeeper/staticlib"

-- Dump only the IPv4 FIB entries whose action forwards to a Grantor.
local dyc = staticlib.c.get_dy_conf()
if dyc == nil then
	return "No dynamic configuration block available"
end
if dyc.gk == nil then
	return "No GK block available; not a Gatekeeper server"
end

-- Walker callback: skip entries that are not Grantor forwards.
local function print_only_grantor(fib_dump_entry, acc)
	if fib_dump_entry.action == dylib.c.GK_FWD_GRANTOR then
		return dylib.print_fib_dump_entry(fib_dump_entry, acc)
	end
	return false, acc
end

local pieces = dylib.list_gk_fib4(dyc.gk, print_only_grantor, {})
return table.concat(pieces)
```
/content/code_sandbox/gkctl/scripts/show_fib_grantor.lua
lua
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
138
```lua
require "gatekeeper/staticlib"

-- Dump every IPv6 FIB entry of the GK block.
local dyc = staticlib.c.get_dy_conf()
if dyc == nil then
	return "No dynamic configuration block available"
end
if dyc.gk == nil then
	return "No GK block available; not a Gatekeeper server"
end

local pieces = dylib.list_gk_fib6(dyc.gk, dylib.print_fib_dump_entry, {})
return table.concat(pieces)
```
/content/code_sandbox/gkctl/scripts/show_fib6.lua
lua
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
86
```lua
require "gatekeeper/staticlib"

-- Dump the GK block's IPv4 neighbor table.
local dyc = staticlib.c.get_dy_conf()
if dyc == nil then
	return "No dynamic configuration block available"
end
if dyc.gk == nil then
	return "No GK block available; not a Gatekeeper server"
end

local pieces = dylib.list_gk_neighbors4(dyc.gk,
	dylib.print_neighbor_dump_entry, {})
return table.concat(pieces)
```
/content/code_sandbox/gkctl/scripts/show_neigh.lua
lua
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
85
```lua
require "gatekeeper/staticlib"

-- Report FIB IDs that appear more than once in the IPv4/IPv6 FIB dumps.
local dyc = staticlib.c.get_dy_conf()
if dyc == nil then
	return "No dynamic configuration block available"
end
if dyc.gk == nil then
	return "No GK block available; not a Gatekeeper server"
end

-- Fresh accumulator: occurrence count per FIB ID, plus the list of IDs
-- seen more than once.
local function new_summary()
	return { dup_fib_ids = {}, present_fib_ids = {} }
end

-- Walker callback: tally @fib_dump_entry.fib_id into @acc.
local function summarize_fib(fib_dump_entry, acc)
	local counts = acc.present_fib_ids
	local id = fib_dump_entry.fib_id
	local seen = counts[id]
	if seen == nil then
		counts[id] = 1
	else
		counts[id] = seen + 1
		if seen == 1 then
			-- Second sighting: record the duplicate exactly once.
			table.insert(acc.dup_fib_ids, id)
		end
	end
	return false, acc
end

-- Append one "<id>: <count>" line per duplicate FIB ID to @output.
local function report_summary(output, summary)
	table.sort(summary.dup_fib_ids)
	for _, id in ipairs(summary.dup_fib_ids) do
		output[#output + 1] = "\t" .. tostring(id) .. ": " ..
			tostring(summary.present_fib_ids[id]) .. "\n"
	end
end

local output = { "IPv4 summary (Duplicate FIB ID: count):\n" }
report_summary(output,
	dylib.list_gk_fib4(dyc.gk, summarize_fib, new_summary()))
output[#output + 1] = "\nIPv6 summary (Duplicate FIB ID: count):\n"
report_summary(output,
	dylib.list_gk_fib6(dyc.gk, summarize_fib, new_summary()))
return table.concat(output)
```
/content/code_sandbox/gkctl/scripts/check_fibs.lua
lua
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
412
```lua
require "gatekeeper/staticlib"

-- Summarize the IPv4/IPv6 FIBs: entry counts per action and per prefix length.
local dyc = staticlib.c.get_dy_conf()
if dyc == nil then
	return "No dynamic configuration block available"
end
if dyc.gk == nil then
	return "No GK block available; not a Gatekeeper server"
end

-- Fresh accumulator: histograms keyed by action and by prefix length.
local function new_summary()
	return { actions = {}, prefix_lengths = {} }
end

-- Walker callback: bump both histograms for @fib_dump_entry.
local function summarize_fib(fib_dump_entry, acc)
	-- fib_dump_entry.action is of type enum gk_fib_action,
	-- so it must be casted into a number to avoid having
	-- each instance as a unique value.
	local action = tonumber(fib_dump_entry.action)
	acc.actions[action] = (acc.actions[action] or 0) + 1

	local plen = fib_dump_entry.prefix_len
	acc.prefix_lengths[plen] = (acc.prefix_lengths[plen] or 0) + 1

	return false, acc
end

-- Render both histograms of @summary into @output and cross-check that
-- they account for the same total number of entries.
local function report_summary(output, summary)
	local total_by_action = 0
	for action, count in pairs(summary.actions) do
		output[#output + 1] = "\t" .. dylib.fib_action_to_str(action) ..
			": " .. tostring(count) .. "\n"
		total_by_action = total_by_action + count
	end
	output[#output + 1] = "\n"

	local total_by_plen = 0
	for plen, count in pairs(summary.prefix_lengths) do
		output[#output + 1] = "\t" .. tostring(plen) .. ": " ..
			tostring(count) .. "\n"
		total_by_plen = total_by_plen + count
	end

	output[#output + 1] = "Total entries: " .. tostring(total_by_plen) ..
		"\n"
	assert(total_by_action == total_by_plen, "Totals are not equal")
end

local output = { "IPv4 summary:\n" }
report_summary(output,
	dylib.list_gk_fib4(dyc.gk, summarize_fib, new_summary()))
output[#output + 1] = "\nIPv6 summary:\n"
report_summary(output,
	dylib.list_gk_fib6(dyc.gk, summarize_fib, new_summary()))
return table.concat(output)
```
/content/code_sandbox/gkctl/scripts/summarize_fibs.lua
lua
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
594
```lua
require "gatekeeper/staticlib"

-- Dump only the IPv6 FIB entries whose action forwards to a Grantor.
local dyc = staticlib.c.get_dy_conf()
if dyc == nil then
	return "No dynamic configuration block available"
end
if dyc.gk == nil then
	return "No GK block available; not a Gatekeeper server"
end

-- Walker callback: skip entries that are not Grantor forwards.
local function print_only_grantor(fib_dump_entry, acc)
	if fib_dump_entry.action == dylib.c.GK_FWD_GRANTOR then
		return dylib.print_fib_dump_entry(fib_dump_entry, acc)
	end
	return false, acc
end

local pieces = dylib.list_gk_fib6(dyc.gk, print_only_grantor, {})
return table.concat(pieces)
```
/content/code_sandbox/gkctl/scripts/show_fib6_grantor.lua
lua
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
138
```lua
require "gatekeeper/staticlib"

-- Dump the ARP cache of the link layer support (LLS) block.
local llsc = staticlib.c.get_lls_conf()
if llsc == nil then
	return "No link layer support block available"
end

local pieces = dylib.list_lls_arp(llsc, dylib.print_lls_dump_entry, {})
return table.concat(pieces)
```
/content/code_sandbox/gkctl/scripts/show_arp.lua
lua
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
61
```c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/socket.h> #include <sys/types.h> #include <sys/un.h> #include <netinet/in.h> #include <netdb.h> #include <unistd.h> #include <arpa/inet.h> #include <error.h> #include <argp.h> #include <assert.h> static const uint16_t MSG_MAX_LEN = (uint16_t)~0U; /* Argp's global variables. */ const char *argp_program_version = "Gatekeeper dynamic configuration client 1.0"; /* Arguments. */ static char adoc[] = "<PATH>"; static char doc[] = "Gatekeeper Client -- configure Gatekeeper via " "the dynamic configuration functional block"; static struct argp_option options[] = { {"server-path", 's', "FILE", 0, "Path to Gatekeeper/Grantor's UNIX socket", 1}, {"conn-timeout", 't', "TIMEOUT", 0, "UNIX socket connect timeout, in seconds", 1}, { 0 } }; struct args { const char *server_path; const char *lua_script_path; unsigned int connect_timeout; }; static error_t parse_opt(int key, char *arg, struct argp_state *state) { struct args *args = state->input; switch (key) { case 's': args->server_path = arg; break; case 't': { unsigned long timeout; errno = 0; timeout = strtoul(arg, NULL, 10); if (errno != 0) argp_failure(state, 1, errno, "Invalid connect timeout"); if (timeout > UINT_MAX) argp_failure(state, 1, ERANGE, "Invalid connect timeout"); args->connect_timeout = timeout; break; } case ARGP_KEY_INIT: args->lua_script_path = NULL; break; case ARGP_KEY_ARG: if (args->lua_script_path) { argp_error(state, "Wrong number of arguments; only one is allowed"); } args->lua_script_path = arg; break; case ARGP_KEY_END: if 
(!args->lua_script_path) { argp_error(state, "The lua script path was not specified"); } break; default: return ARGP_ERR_UNKNOWN; } return 0; } static struct argp argp = {options, parse_opt, adoc, doc, NULL, NULL, NULL}; static int load_file_to_buffer(const char *file_name, char *buffer, int n) { int ret; FILE *fp = fopen(file_name, "r"); if (fp == NULL) { fprintf(stderr, "Error: Failed to open file %s - %s\n", file_name, strerror(errno)); ret = -1; goto out; } /* * Return value equals the number of bytes transferred * only when size (i.e., second parameter) is 1. */ ret = fread(buffer, 1, n, fp); if (ferror(fp)) { fprintf(stderr, "Error: %s\n", strerror(errno)); ret = -1; } else if (!feof(fp)) { assert(ret == n); fprintf(stderr, "Error: Failed to read the whole file %s (file length exceeds the maximum message size - %d)\n", file_name, n); ret = -1; } fclose(fp); out: return ret; } static int write_all(int conn_fd, const char *msg_buff, int nbytes) { int send_size; int tot_size = 0; if (nbytes <= 0) return 0; while ((send_size = write(conn_fd, msg_buff + tot_size, nbytes - tot_size)) > 0) { tot_size += send_size; if (tot_size >= nbytes) break; } /* The connection with the server is closed. */ if (send_size == 0) { fprintf(stderr, "Server disconnected\n"); return -1; } if (send_size < 0) { fprintf(stderr, "Failed to write data to the socket connection - (%s)\n", strerror(errno)); return -1; } return 0; } static int read_all(int conn_fd, char *msg_buff, int nbytes) { int recv_size; int tot_size = 0; if (nbytes <= 0) return 0; while ((recv_size = read(conn_fd, msg_buff + tot_size, nbytes - tot_size)) > 0) { tot_size += recv_size; if (tot_size >= nbytes) break; } /* The connection with the server is closed. 
*/ if (recv_size == 0) { fprintf(stderr, "Server disconnected\n"); return -1; } if (recv_size < 0) { fprintf(stderr, "Failed to read data from the socket connection - (%s)\n", strerror(errno)); return -1; } return tot_size; } int connect_wait(int sock_fd, const struct sockaddr *addr, socklen_t addrlen, unsigned int timeout) { unsigned int remain = timeout; for (;;) { if (connect(sock_fd, addr, addrlen) == 0) return 0; switch (errno) { /* Retry in case of these expected errors: * 1) Gatekeeper has not yet created the dynamic configuration * socket; * 2) Gatekeeper has created the socket but its permissions * have not yet been changed to allow access to the * unprivileged user (can only happpen if gkctl itself is * running as the unprivileged user); * 3) Gatekeeper is not yet listening on the socket. */ case ENOENT: case EPERM: case ECONNREFUSED: if (remain == 0) return -1; sleep(1); remain--; break; default: return -1; } } } int main(int argc, char *argv[]) { int ret; int sock_fd; char send_buff[MSG_MAX_LEN + sizeof(uint16_t)]; char recv_buff[MSG_MAX_LEN + 1]; size_t len; size_t total_file_len; struct sockaddr_un serv_addr; struct args args = { /* Defaults. */ .server_path = "/var/run/gatekeeper/dyn_cfg.socket", .connect_timeout = 0, }; /* Read parameters. 
*/ argp_parse(&argp, argc, argv, 0, NULL, &args); if (sizeof(serv_addr.sun_path) <= strlen(args.server_path)) { fprintf(stderr, "Error: passing a too long server path (i.e., > %lu) - %s\n", sizeof(serv_addr.sun_path), args.server_path); ret = -1; goto out; } serv_addr.sun_family = AF_UNIX; strcpy(serv_addr.sun_path, args.server_path); ret = load_file_to_buffer(args.lua_script_path, send_buff + sizeof(uint16_t), MSG_MAX_LEN); if (ret < 0) { ret = -1; goto out; } else if (ret == 0) { fprintf(stderr, "Error: the file %s is empty\n", args.lua_script_path); goto out; } total_file_len = ret; *(uint16_t *)send_buff = htons(ret); if ((sock_fd = socket(AF_UNIX, SOCK_STREAM, 0)) < 0) { perror("Error: Could not create socket"); ret = -1; goto out; } if (connect_wait(sock_fd, (struct sockaddr *)&serv_addr, sizeof(serv_addr), args.connect_timeout) < 0) { perror("Error: Connect failed"); ret = -1; goto close_sock; } ret = write_all(sock_fd, send_buff, total_file_len + sizeof(uint16_t)); if (ret != 0) { fprintf(stderr, "Failed to send message\n"); ret = -1; goto close_sock; } ret = read_all(sock_fd, recv_buff, sizeof(uint16_t)); if (ret != sizeof(uint16_t)) { fprintf(stderr, "Failed to receive message length\n"); ret = -1; goto close_sock; } len = ntohs(*(uint16_t *)recv_buff); if (len == 0) { fprintf(stderr, "Received a message with no body\n"); ret = -1; goto close_sock; } ret = read_all(sock_fd, recv_buff, len); if (ret != (int)len) { fprintf(stderr, "Failed to receive message\n"); ret = -1; goto close_sock; } if (recv_buff[ret - 1] != '\0') recv_buff[ret] = '\0'; printf("%s\n", recv_buff); ret = 0; close_sock: close(sock_fd); out: return ret; } ```
/content/code_sandbox/gkctl/main.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
2,062
```lua
require "gatekeeper/staticlib"

-- Dump the ND cache of the link layer support (LLS) block.
local llsc = staticlib.c.get_lls_conf()
if llsc == nil then
	return "No link layer support block available"
end

local pieces = dylib.list_lls_nd(llsc, dylib.print_lls_dump_entry, {})
return table.concat(pieces)
```
/content/code_sandbox/gkctl/scripts/show_nd.lua
lua
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
60
```lua
require "gatekeeper/staticlib"

-- Dump the GK block's IPv6 neighbor table.
local dyc = staticlib.c.get_dy_conf()
if dyc == nil then
	return "No dynamic configuration block available"
end
if dyc.gk == nil then
	return "No GK block available; not a Gatekeeper server"
end

local pieces = dylib.list_gk_neighbors6(dyc.gk,
	dylib.print_neighbor_dump_entry, {})
return table.concat(pieces)
```
/content/code_sandbox/gkctl/scripts/show_neigh6.lua
lua
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
85
```lua
require "gatekeeper/staticlib"

-- Dump every IPv4 FIB entry of the GK block.
local dyc = staticlib.c.get_dy_conf()
if dyc == nil then
	return "No dynamic configuration block available"
end
if dyc.gk == nil then
	return "No GK block available; not a Gatekeeper server"
end

local pieces = dylib.list_gk_fib4(dyc.gk, dylib.print_fib_dump_entry, {})
return table.concat(pieces)
```
/content/code_sandbox/gkctl/scripts/show_fib.lua
lua
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
86
```c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #include <stdio.h> #include <stdbool.h> #include <lualib.h> #include <lauxlib.h> #include <rte_lcore.h> #include <rte_malloc.h> #include <rte_log.h> #include <rte_debug.h> #include "gatekeeper_config.h" #include "gatekeeper_main.h" #include "gatekeeper_gk.h" #include "gatekeeper_gt.h" /* Return a table with all lcore ids. Function to be called from Lua. */ static int l_list_lcores(lua_State *L) { unsigned int i; lua_Integer lua_index = 1; lua_newtable(L); /* Result. */ RTE_LCORE_FOREACH(i) { /* Push lcore id into Lua stack. */ lua_pushinteger(L, i); /* Add lcore id to the table at @lua_index position. */ lua_rawseti(L, -2, lua_index++); } return 1; /* Return the table. */ } static int l_rte_lcore_to_socket_id(lua_State *L) { /* First (and only argument) must be the lcore id. */ lua_Integer lcore_id = luaL_checkinteger(L, 1); if (lcore_id < 0 || lcore_id >= RTE_MAX_LCORE) luaL_error(L, "The first argument of rte_lcore_to_socket_id() must be between %d and %d, inclusive\n", 0, RTE_MAX_LCORE - 1); lua_pushinteger(L, rte_lcore_to_socket_id(lcore_id)); return 1; } static int protected_gk_assign_lcores(lua_State *L) { uint32_t ctypeid; struct gk_config *gk_conf; lua_Integer i, n; unsigned int *lcores; gk_conf = *(struct gk_config **) luaL_checkcdata(L, 1, &ctypeid, CTYPE_STRUCT_GK_CONFIG_PTR); n = lua_objlen(L, 2); lcores = *(unsigned int **)lua_touserdata(L, 3); for (i = 1; i <= n; i++) { lua_pushinteger(L, i); /* Push i. */ lua_gettable(L, 2); /* Pop i, Push t[i]. */ /* Check that t[i] is a number. 
*/ if (!lua_isnumber(L, -1)) luaL_error(L, "Index %d is not a number", i); lcores[i - 1] = lua_tointeger(L, -1); lua_pop(L, 1); /* Pop t[i]. */ } gk_conf->lcores = lcores; gk_conf->num_lcores = n; return 0; /* No results. */ } static int l_gk_assign_lcores(lua_State *L) { uint32_t ctypeid; lua_Integer n; unsigned int *lcores, **ud; uint32_t correct_ctypeid = luaL_get_ctypeid(L, CTYPE_STRUCT_GK_CONFIG_PTR); /* First argument must be of type CTYPE_STRUCT_GK_CONFIG_PTR. */ luaL_checkcdata(L, 1, &ctypeid, CTYPE_STRUCT_GK_CONFIG_PTR); if (ctypeid != correct_ctypeid) luaL_error(L, "Expected `%s' as first argument", CTYPE_STRUCT_GK_CONFIG_PTR); /* Second argument must be a table. */ luaL_checktype(L, 2, LUA_TTABLE); n = lua_objlen(L, 2); /* Get size of the table. */ if (n <= 0) return 0; /* No results. */ ud = lua_newuserdata(L, sizeof(lcores)); lua_pushcfunction(L, protected_gk_assign_lcores); lua_insert(L, 1); lcores = rte_malloc("gk_conf.lcores", n * sizeof(*lcores), 0); if (lcores == NULL) luaL_error(L, "DPDK has run out memory"); *ud = lcores; /* lua_pcall() is used here to avoid leaking @lcores. */ if (lua_pcall(L, 3, 0, 0)) { rte_free(lcores); lua_error(L); } return 0; } static int protected_gk_assign_sol_map(lua_State *L) { uint32_t ctypeid; struct gk_config *gk_conf; lua_Integer i, n; unsigned int *gk_sol_map; gk_conf = *(struct gk_config **) luaL_checkcdata(L, 1, &ctypeid, CTYPE_STRUCT_GK_CONFIG_PTR); n = lua_objlen(L, 2); gk_sol_map = *(unsigned int **)lua_touserdata(L, 3); for (i = 1; i <= n; i++) { lua_pushinteger(L, i); /* Push i. */ lua_gettable(L, 2); /* Pop i, Push t[i]. */ /* Check that t[i] is a number. */ if (!lua_isnumber(L, -1)) luaL_error(L, "Index %d is not a number", i); gk_sol_map[i - 1] = lua_tointeger(L, -1) - 1; lua_pop(L, 1); /* Pop t[i]. */ } gk_conf->gk_sol_map = gk_sol_map; return 0; /* No results. 
*/ } static int l_gk_assign_sol_map(lua_State *L) { uint32_t ctypeid; lua_Integer n; unsigned int *gk_sol_map, **ud; uint32_t correct_ctypeid = luaL_get_ctypeid(L, CTYPE_STRUCT_GK_CONFIG_PTR); /* First argument must be of type CTYPE_STRUCT_GK_CONFIG_PTR. */ luaL_checkcdata(L, 1, &ctypeid, CTYPE_STRUCT_GK_CONFIG_PTR); if (ctypeid != correct_ctypeid) luaL_error(L, "Expected `%s' as first argument", CTYPE_STRUCT_GK_CONFIG_PTR); /* Second argument must be a table. */ luaL_checktype(L, 2, LUA_TTABLE); n = lua_objlen(L, 2); /* Get size of the table. */ if (n <= 0) return 0; /* No results. */ ud = lua_newuserdata(L, sizeof(gk_sol_map)); lua_pushcfunction(L, protected_gk_assign_sol_map); lua_insert(L, 1); gk_sol_map = rte_malloc("gk_conf.gk_sol_map", n * sizeof(*gk_sol_map), 0); if (gk_sol_map == NULL) luaL_error(L, "DPDK has run out memory"); *ud = gk_sol_map; /* lua_pcall() is used here to avoid leaking @gk_sol_map. */ if (lua_pcall(L, 3, 0, 0)) { rte_free(gk_sol_map); lua_error(L); } return 0; } #define CTYPE_STRUCT_GT_CONFIG_PTR "struct gt_config *" static int protected_gt_assign_lcores(lua_State *L) { uint32_t ctypeid; struct gt_config *gt_conf; lua_Integer i, n; unsigned int *lcores; gt_conf = *(struct gt_config **) luaL_checkcdata(L, 1, &ctypeid, CTYPE_STRUCT_GT_CONFIG_PTR); n = lua_objlen(L, 2); lcores = *(unsigned int **)lua_touserdata(L, 3); for (i = 1; i <= n; i++) { lua_pushinteger(L, i); /* Push i. */ lua_gettable(L, 2); /* Pop i, Push t[i]. */ /* Check that t[i] is a number. */ if (!lua_isnumber(L, -1)) luaL_error(L, "Index %d is not a number", i); lcores[i - 1] = lua_tointeger(L, -1); lua_pop(L, 1); /* Pop t[i]. */ } gt_conf->lcores = lcores; gt_conf->num_lcores = n; return 0; /* No results. */ } static int l_gt_assign_lcores(lua_State *L) { uint32_t ctypeid; lua_Integer n; unsigned int *lcores, **ud; uint32_t correct_ctypeid = luaL_get_ctypeid(L, CTYPE_STRUCT_GT_CONFIG_PTR); /* First argument must be of type CTYPE_STRUCT_GT_CONFIG_PTR. 
*/ luaL_checkcdata(L, 1, &ctypeid, CTYPE_STRUCT_GT_CONFIG_PTR); if (ctypeid != correct_ctypeid) luaL_error(L, "Expected `%s' as first argument", CTYPE_STRUCT_GT_CONFIG_PTR); /* Second argument must be a table. */ luaL_checktype(L, 2, LUA_TTABLE); n = lua_objlen(L, 2); /* Get size of the table. */ if (n <= 0) return 0; /* No results. */ ud = lua_newuserdata(L, sizeof(lcores)); lua_pushcfunction(L, protected_gt_assign_lcores); lua_insert(L, 1); lcores = rte_malloc("gt_conf.lcores", n * sizeof(*lcores), 0); if (lcores == NULL) luaL_error(L, "DPDK has run out memory"); *ud = lcores; /* lua_pcall() is used here to avoid leaking @lcores. */ if (lua_pcall(L, 3, 0, 0)) { rte_free(lcores); lua_error(L); } return 0; } #define CTYPE_STRUCT_SOL_CONFIG_PTR "struct sol_config *" static int protected_sol_assign_lcores(lua_State *L) { uint32_t ctypeid; struct sol_config *sol_conf; lua_Integer i, n; unsigned int *lcores; sol_conf = *(struct sol_config **) luaL_checkcdata(L, 1, &ctypeid, CTYPE_STRUCT_SOL_CONFIG_PTR); n = lua_objlen(L, 2); lcores = *(unsigned int **)lua_touserdata(L, 3); for (i = 1; i <= n; i++) { lua_pushinteger(L, i); /* Push i. */ lua_gettable(L, 2); /* Pop i, Push t[i]. */ /* Check that t[i] is a number. */ if (!lua_isnumber(L, -1)) luaL_error(L, "Index %d is not a number", i); lcores[i - 1] = lua_tointeger(L, -1); lua_pop(L, 1); /* Pop t[i]. */ } sol_conf->lcores = lcores; sol_conf->num_lcores = n; return 0; /* No results. */ } static int l_sol_assign_lcores(lua_State *L) { uint32_t ctypeid; lua_Integer n; unsigned int *lcores, **ud; uint32_t correct_ctypeid = luaL_get_ctypeid(L, CTYPE_STRUCT_SOL_CONFIG_PTR); /* First argument must be of type CTYPE_STRUCT_SOL_CONFIG_PTR. */ luaL_checkcdata(L, 1, &ctypeid, CTYPE_STRUCT_SOL_CONFIG_PTR); if (ctypeid != correct_ctypeid) luaL_error(L, "Expected `%s' as first argument", CTYPE_STRUCT_SOL_CONFIG_PTR); /* Second argument must be a table. */ luaL_checktype(L, 2, LUA_TTABLE); n = lua_objlen(L, 2); /* Get size of the table. 
*/ if (n <= 0) return 0; /* No results. */ ud = lua_newuserdata(L, sizeof(lcores)); lua_pushcfunction(L, protected_sol_assign_lcores); lua_insert(L, 1); lcores = rte_malloc("sol_conf.lcores", n * sizeof(*lcores), 0); if (lcores == NULL) luaL_error(L, "DPDK has run out memory"); *ud = lcores; /* lua_pcall() is used here to avoid leaking @lcores. */ if (lua_pcall(L, 3, 0, 0)) { rte_free(lcores); lua_error(L); } return 0; } static const struct luaL_reg staticlib [] = { {"list_lcores", l_list_lcores}, {"rte_lcore_to_socket_id", l_rte_lcore_to_socket_id}, {"gk_assign_lcores", l_gk_assign_lcores}, {"gk_assign_sol_map", l_gk_assign_sol_map}, {"gt_assign_lcores", l_gt_assign_lcores}, {"sol_assign_lcores", l_sol_assign_lcores}, {NULL, NULL} /* Sentinel. */ }; int set_lua_path(lua_State *L, const char *path) { int ret; char new_path[1024]; lua_getglobal(L, "package"); lua_getfield(L, -1, "path"); ret = snprintf(new_path, sizeof(new_path), "%s;%s/?.lua", lua_tostring(L, -1), path); RTE_VERIFY(ret > 0 && ret < (int)sizeof(new_path)); lua_pop(L, 1); lua_pushstring(L, new_path); lua_setfield(L, -2, "path"); lua_pop(L, 1); return ret; } int config_gatekeeper(const char *lua_base_dir, const char *gatekeeper_config_file) { int ret; char lua_entry_path[128]; lua_State *lua_state; ret = snprintf(lua_entry_path, sizeof(lua_entry_path), \ "%s/%s", lua_base_dir, gatekeeper_config_file); RTE_VERIFY(ret > 0 && ret < (int)sizeof(lua_entry_path)); lua_state = luaL_newstate(); if (!lua_state) { G_LOG(ERR, "config: failed to create new Lua state\n"); return -1; } luaL_openlibs(lua_state); luaL_register(lua_state, "staticlib", staticlib); set_lua_path(lua_state, lua_base_dir); ret = luaL_loadfile(lua_state, lua_entry_path); if (ret != 0) { G_LOG(ERR, "config: %s\n", lua_tostring(lua_state, -1)); ret = -1; goto out; } /* * Calls a function in protected mode. * int lua_pcall (lua_State *L, int nargs, int nresults, int errfunc); * @nargs: the number of arguments that you pushed onto the stack. 
* @nresults: the number of results that the funtion will push onto * the stack. * @errfunc: if "0", it represents the error message returned on * the stack is exactly the original error message. * Otherwise, it presents the index of the error handling function. */ ret = lua_pcall(lua_state, 0, 0, 0); if (ret != 0) { G_LOG(ERR, "config: %s\n", lua_tostring(lua_state, -1)); ret = -1; goto out; } /* Function to be called. */ lua_getglobal(lua_state, "gatekeeper_init"); ret = lua_pcall(lua_state, 0, 1, 0); if (ret != 0) { G_LOG(ERR, "config: %s\n", lua_tostring(lua_state, -1)); ret = -1; goto out; } ret = lua_tointeger(lua_state, -1); if (ret != 1) G_LOG(ERR, "config: gatekeeper_init() return value is %d\n", ret); lua_pop(lua_state, 1); out: lua_close(lua_state); return ret; } ```
/content/code_sandbox/config/static.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
3,532
```c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ /* For gettid(). */ #define _GNU_SOURCE #include <unistd.h> #include <arpa/inet.h> #include <sys/un.h> #include <sys/types.h> #include <sys/socket.h> #include <sys/select.h> #include <sys/stat.h> #include <lualib.h> #include <lauxlib.h> #include <rte_log.h> #include <rte_lcore.h> #include <rte_cycles.h> #include <rte_malloc.h> #include "gatekeeper_net.h" #include "gatekeeper_lls.h" #include "gatekeeper_main.h" #include "gatekeeper_config.h" #include "gatekeeper_launch.h" #include "gatekeeper_log_ratelimit.h" /* * The cast "(uint16_t)" is needed because of * the strict compilation check of DPDK, * without uint16_t, it pops an error message * "error: large integer implicitly truncated to * unsigned type [-Werror=overflow]." */ static const uint16_t MSG_MAX_LEN = (uint16_t)~0U; static struct dynamic_config config; /* * Return values: * 0: Write @nbytes successfully. * -1: Connection closed by the client. */ static int write_nbytes(int conn_fd, const char *msg_buff, int nbytes) { int send_size; int tot_size = 0; if (nbytes == 0) return 0; while ((send_size = write(conn_fd, msg_buff + tot_size, nbytes - tot_size)) > 0) { tot_size += send_size; if (tot_size >= nbytes) break; } /* * The connection with the client is closed. * This is unexpected, since the client closed * the connection before getting a response. 
*/ if (send_size == 0) { G_LOG(WARNING, "Client disconnected\n"); return -1; } if (send_size < 0) { G_LOG(ERR, "Failed to write data to the socket connection - (%s)\n", strerror(errno)); return -1; } return 0; } static int reply_client_message(int conn_fd, const char *reply_msg, uint16_t reply_len) { int ret; uint16_t nlen = htons(reply_len); /* The first two bytes: the length of the message in network order. */ ret = write_nbytes(conn_fd, (char *)&nlen, sizeof(nlen)); if (ret != 0) return -1; /* Sending the message. */ ret = write_nbytes(conn_fd, reply_msg, reply_len); if (ret != 0) return -1; return 0; } static int process_client_message(int conn_fd, const char *msg, int msg_len, lua_State *L) { int ret; size_t reply_len; const char *reply_msg; const char *CLIENT_EMPTY_ERROR = "Dynamic configuration cannot process the request: the request is empty."; const char *CLIENT_PROC_ERROR = "Dynamic configuration: the reply was NULL."; if (msg_len == 0) { G_LOG(WARNING, "The received message is an empty string\n"); return reply_client_message(conn_fd, CLIENT_EMPTY_ERROR, strlen(CLIENT_EMPTY_ERROR)); } /* Load the client's Lua chunk, and run it. */ ret = luaL_loadbuffer(L, msg, msg_len, "message") || lua_pcall(L, 0, 1, 0); reply_msg = lua_tolstring(L, -1, &reply_len); if (reply_msg == NULL) { /* * luaL_loadbuffer() and lua_pcall() must have * pushed an error string if they failed. */ RTE_VERIFY(ret == 0); G_LOG(ERR, "The client request script returns a NULL string\n"); lua_pop(L, 1); return reply_client_message(conn_fd, CLIENT_PROC_ERROR, strlen(CLIENT_PROC_ERROR)); } if (reply_len > MSG_MAX_LEN) { G_LOG(WARNING, "The reply message length (%lu) exceeds the limit\n", reply_len); reply_len = MSG_MAX_LEN; } ret = reply_client_message(conn_fd, reply_msg, reply_len); lua_pop(L, 1); return ret; } /* * Return values: * 0: Read @nbytes successfully. * -1: The client closed the connection or an error occurred. 
*/ static int read_nbytes(int conn_fd, char *msg_buff, int nbytes) { int recv_size; int tot_size = 0; while ((recv_size = read(conn_fd, msg_buff + tot_size, nbytes - tot_size)) > 0) { tot_size += recv_size; if (tot_size >= nbytes) break; } /* * The connection with the client is closed. * This is expected for clients that send one * message and then close the connection. */ if (recv_size == 0) { G_LOG(DEBUG, "Client disconnected\n"); return -1; } if (recv_size < 0) { G_LOG(ERR, "Failed to read data from the socket connection - (%s)\n", strerror(errno)); return -1; } return 0; } /* * Return values: * -1: Error happens. * 0: Command successfully processed, may need to process further commands. */ static int process_single_cmd(int conn_fd, lua_State *L) { int ret; uint16_t msg_len; char msg_buff[MSG_MAX_LEN]; /* * The protocol should be rather simple: two-byte, * unsigned integer in network order signal the size, * in bytes, of the message that follows that * first two bytes. */ ret = read_nbytes(conn_fd, (char *)&msg_len, 2); if (ret != 0) return -1; msg_len = ntohs(msg_len); RTE_VERIFY(msg_len <= MSG_MAX_LEN); ret = read_nbytes(conn_fd, msg_buff, msg_len); if (ret != 0) return -1; ret = process_client_message( conn_fd, msg_buff, msg_len, L); if (ret < 0) return -1; return 0; } static void cleanup_dy(struct dynamic_config *dy_conf) { int ret; if (dy_conf->gk != NULL) { gk_conf_put(dy_conf->gk); dy_conf->gk = NULL; } if (dy_conf->gt != NULL) { gt_conf_put(dy_conf->gt); dy_conf->gt = NULL; } if (dy_conf->sock_fd != -1) { ret = close(dy_conf->sock_fd); if (ret < 0) { G_LOG(ERR, "Failed to close the server socket - (%s)\n", strerror(errno)); } dy_conf->sock_fd = -1; } rte_free(dy_conf->dynamic_config_file); dy_conf->dynamic_config_file = NULL; rte_free(dy_conf->lua_dy_base_dir); dy_conf->lua_dy_base_dir = NULL; if (dy_conf->server_path != NULL) { ret = unlink(dy_conf->server_path); if (ret != 0) { G_LOG(WARNING, "Failed to unlink(%s) - (%s)\n", dy_conf->server_path, 
strerror(errno)); } rte_free(dy_conf->server_path); dy_conf->server_path = NULL; } destroy_mailbox(&dy_conf->mb); } static void process_return_message(lua_State *L, struct dynamic_config *dy_conf, int num_succ_sent_inst) { int num_gt_messages = 0; size_t reply_len = 0; char reply_msg[MSG_MAX_LEN]; /* Wait for all GT instances to synchronize. */ while (rte_atomic16_read(&dy_conf->num_returned_instances) < num_succ_sent_inst) rte_pause(); while (num_gt_messages < num_succ_sent_inst) { int i; struct dy_cmd_entry *dy_cmds[dy_conf->mailbox_burst_size]; /* Load a set of commands from its mailbox ring. */ int num_cmd = mb_dequeue_burst(&dy_conf->mb, (void **)dy_cmds, dy_conf->mailbox_burst_size); /* * This condition check deals with the possibility that * the GT blocks incremented dy_conf->num_returned_instances * without sending a message due to not having enough memory * to send the message. */ if (num_cmd == 0) break; for (i = 0; i < num_cmd; i++) { struct dy_cmd_entry *entry = dy_cmds[i]; switch (entry->op) { case GT_UPDATE_POLICY_RETURN: { if (dy_conf->gt == NULL) { G_LOG(ERR, "The command operation %u requires that the server runs as Grantor\n", entry->op); break; } if (unlikely(entry->u.gt.length > RETURN_MSG_MAX_LEN)) G_LOG(ERR, "The return message from GT block is too long\n"); else if (unlikely(reply_len + entry->u.gt.length > MSG_MAX_LEN)) G_LOG(ERR, "The aggregated return message from GT blocks is too long\n"); else { rte_memcpy(reply_msg + reply_len, entry->u.gt.return_msg, entry->u.gt.length); reply_len += entry->u.gt.length; } num_gt_messages++; break; } default: G_LOG(ERR, "Unknown command operation %u\n", entry->op); break; } mb_free_entry(&dy_conf->mb, entry); } } if (dy_conf->gt != NULL && num_gt_messages != dy_conf->gt->num_lcores) { G_LOG(WARNING, "%s(): successfully collected only %d/%d instances\n", __func__, num_gt_messages, dy_conf->gt->num_lcores); } lua_pushlstring(L, reply_msg, reply_len); } static int 
l_update_gt_lua_states_incrementally(lua_State *L) { int i; uint32_t ctypeid; struct gt_config *gt_conf; uint32_t correct_ctypeid_gt_config = luaL_get_ctypeid(L, CTYPE_STRUCT_GT_CONFIG_PTR); size_t len; const char *lua_bytecode; int is_returned; int num_succ_sent_inst = 0; struct dynamic_config *dy_conf = get_dy_conf(); /* First argument must be of type CTYPE_STRUCT_GT_CONFIG_PTR. */ void *cdata = luaL_checkcdata(L, 1, &ctypeid, CTYPE_STRUCT_GT_CONFIG_PTR); if (ctypeid != correct_ctypeid_gt_config) luaL_error(L, "Expected `%s' as first argument", CTYPE_STRUCT_GT_CONFIG_PTR); gt_conf = *(struct gt_config **)cdata; /* Second argument must be a Lua bytecode. */ lua_bytecode = lua_tolstring(L, 2, &len); if (lua_bytecode == NULL || len == 0) luaL_error(L, "gt: invalid lua bytecode\n"); /* Third argument should be a boolean. */ is_returned = lua_toboolean(L, 3); if (lua_gettop(L) != 3) luaL_error(L, "Expected three arguments, however it got %d arguments", lua_gettop(L)); if (is_returned) rte_atomic16_init(&dy_conf->num_returned_instances); for (i = 0; i < gt_conf->num_lcores; i++) { int ret; struct gt_instance *instance = &gt_conf->instances[i]; unsigned int lcore_id = gt_conf->lcores[i]; struct gt_cmd_entry *entry; char *lua_bytecode_buff = rte_malloc_socket("lua_bytecode", len, 0, rte_lcore_to_socket_id(lcore_id)); if (lua_bytecode_buff == NULL) { if (num_succ_sent_inst > 0) { G_LOG(ERR, "gt: failed to send new lua update chunk bytecode to GT block %d at lcore %d due to failure of allocating memory\n", i, lcore_id); continue; } else { luaL_error(L, "gt: failed to send new lua update chunk bytecode to GT block %d at lcore %d due to failure of allocating memory\n", i, lcore_id); } } entry = mb_alloc_entry(&instance->mb); if (entry == NULL) { rte_free(lua_bytecode_buff); if (num_succ_sent_inst > 0) { G_LOG(ERR, "gt: failed to send new lua update chunk bytecode to GT block %d at lcore %d\n", i, lcore_id); continue; } else { luaL_error(L, "gt: failed to send new lua update 
chunk bytecode to GT block %d at lcore %d\n", i, lcore_id); } } entry->op = GT_UPDATE_POLICY_INCREMENTALLY; entry->u.bc.len = len; entry->u.bc.lua_bytecode = lua_bytecode_buff; rte_memcpy(lua_bytecode_buff, lua_bytecode, len); entry->u.bc.is_returned = is_returned; ret = mb_send_entry(&instance->mb, entry); if (ret != 0) { rte_free(lua_bytecode_buff); if (num_succ_sent_inst > 0) { G_LOG(ERR, "gt: failed to send new lua update chunk bytecode to GT block %d at lcore %d\n", i, lcore_id); continue; } else { luaL_error(L, "gt: failed to send new lua update chunk bytecode to GT block %d at lcore %d\n", i, lcore_id); } } num_succ_sent_inst++; } if (is_returned) process_return_message(L, dy_conf, num_succ_sent_inst); return !!is_returned; } const struct luaL_reg dylib_lua_c_funcs [] = { {"update_gt_lua_states", l_update_gt_lua_states}, {"internal_update_gt_lua_states_incrementally", l_update_gt_lua_states_incrementally}, {"list_gk_fib4", l_list_gk_fib4}, {"list_gk_fib6", l_list_gk_fib6}, {"list_gk_neighbors4", l_list_gk_neighbors4}, {"list_gk_neighbors6", l_list_gk_neighbors6}, {"list_lls_arp", l_list_lls_arp}, {"list_lls_nd", l_list_lls_nd}, {"ether_format_addr", l_ether_format_addr}, {"ip_format_addr", l_ip_format_addr}, {"add_grantor_entry_lb", l_add_grantor_entry_lb}, {"update_grantor_entry_lb", l_update_grantor_entry_lb}, {NULL, NULL} /* Sentinel. 
*/ }; static int setup_dy_lua(lua_State *L, struct dynamic_config *dy_conf) { int ret; char lua_entry_path[128]; ret = snprintf(lua_entry_path, sizeof(lua_entry_path), "%s/%s", dy_conf->lua_dy_base_dir, dy_conf->dynamic_config_file); RTE_VERIFY(ret > 0 && ret < (int)sizeof(lua_entry_path)); luaL_openlibs(L); luaL_register(L, "dylib", dylib_lua_c_funcs); set_lua_path(L, dy_conf->lua_dy_base_dir); ret = luaL_loadfile(L, lua_entry_path); if (ret != 0) { G_LOG(ERR, "%s\n", lua_tostring(L, -1)); return -1; } ret = lua_pcall(L, 0, 0, 0); if (ret != 0) { G_LOG(ERR, "%s\n", lua_tostring(L, -1)); return -1; } return 0; } static void handle_client(int server_socket_fd, struct dynamic_config *dy_conf) { int ret; int conn_fd; socklen_t len; int rcv_buff_size; struct sockaddr_un client_addr; /* The lua state used to handle the dynamic configuration files. */ lua_State *lua_state; len = sizeof(client_addr); conn_fd = accept(server_socket_fd, (struct sockaddr *)&client_addr, &len); if (conn_fd < 0) { G_LOG(ERR, "Failed to accept a new connection - (%s)\n", strerror(errno)); return; } if (unlikely(client_addr.sun_family != AF_UNIX)) { G_LOG(WARNING, "Unexpected condition: unknown client type %d at %s\n", client_addr.sun_family, __func__); goto close_fd; } /* * The request must be received under a specified timeout, * or the request is aborted. 
*/ ret = setsockopt(conn_fd, SOL_SOCKET, SO_RCVTIMEO, (const char*)&dy_conf->rcv_time_out, sizeof(struct timeval)); if (ret < 0) { G_LOG(ERR, "Failed to call setsockopt(SO_RCVTIMEO) - (%s)\n", strerror(errno)); goto close_fd; } rcv_buff_size = MSG_MAX_LEN; ret = setsockopt(conn_fd, SOL_SOCKET, SO_RCVBUF, &rcv_buff_size, sizeof(rcv_buff_size)); if (ret < 0) { G_LOG(ERR, "Failed to call setsockopt(SO_RCVBUF) with size = %d - (%s)\n", rcv_buff_size, strerror(errno)); goto close_fd; } lua_state = luaL_newstate(); if (lua_state == NULL) { G_LOG(ERR, "Failed to create new Lua state\n"); goto close_fd; } /* Set up the Lua state while there is a connection. */ ret = setup_dy_lua(lua_state, dy_conf); if (ret < 0) { G_LOG(ERR, "Failed to set up the lua state\n"); goto close_lua; } while (1) { ret = process_single_cmd(conn_fd, lua_state); if (ret != 0) break; } close_lua: lua_close(lua_state); close_fd: ret = close(conn_fd); if (ret < 0) { G_LOG(ERR, "Failed to close the connection socket - (%s)\n", strerror(errno)); } } static void process_dy_cmd(struct dy_cmd_entry *entry) { switch (entry->op) { case GT_UPDATE_POLICY_RETURN: G_LOG(WARNING, "Synchronization timeout: the return message (%s) with command operation %u from GT instance running at lcore %u did not get aggregated\n", entry->u.gt.return_msg, entry->op, entry->u.gt.gt_lcore); break; default: G_LOG(ERR, "Unknown command operation %u\n", entry->op); break; } } static void clear_mailbox(struct dynamic_config *dy_conf) { while (true) { int i; int num_cmd; struct dy_cmd_entry *dy_cmds[dy_conf->mailbox_burst_size]; /* Load a set of commands from its mailbox ring. 
*/ num_cmd = mb_dequeue_burst(&dy_conf->mb, (void **)dy_cmds, dy_conf->mailbox_burst_size); if (num_cmd == 0) break; for (i = 0; i < num_cmd; i++) { process_dy_cmd(dy_cmds[i]); mb_free_entry(&dy_conf->mb, dy_cmds[i]); } } } static int dyn_cfg_proc(void *arg) { int ret = 0; struct dynamic_config *dy_conf = arg; G_LOG(NOTICE, "The Dynamic Config block is running at tid = %u\n", gettid()); if (dy_conf->gt != NULL) { /* * Grantor servers. * * When a client calls dylib.update_gt_lua_states() to * reload the Lua policy of a Grantor server, the policy * may need to request more hugepages from the kernel. * This need can arrise, for example, when a policy allocates * LPM tables. * * In order to obtain more hugepages, DPDK needs to access * a number of control files such as files in /dev/hugepages/, * file /proc/self/pagemap, and potentially more. * Thus, the capability CAP_DAC_OVERRIDE is neccessary. * * The capability CAP_SYS_ADMIN is also needed, so DPDK can * map virtual addresses into physical addresses. * See details in rte_mem_virt2phy(), and * the following function of the Linux kernel: * fs/proc/task_mmu.c:pagemap_read(). * * Notice that the dynamic configuration needs * these capabilities because dylib.update_gt_lua_states() * creates the new Lua states and then send them to * the GT instances. * * A positive side effect of capability CAP_DAC_OVERRIDE is to * allow the dynamic configuration block to remove * its Unix socket while exiting. */ cap_value_t caps[] = {CAP_DAC_OVERRIDE, CAP_SYS_ADMIN}; if (needed_caps(RTE_DIM(caps), caps) < 0) { G_LOG(ERR, "Could not set needed capabilities for Grantor\n"); exiting = true; } } else { if (needed_caps(0, NULL) < 0) { G_LOG(ERR, "Could not set needed capabilities\n"); exiting = true; } } while (likely(!exiting)) { fd_set fds; struct timeval stv; clear_mailbox(dy_conf); FD_ZERO(&fds); FD_SET(dy_conf->sock_fd, &fds); /* * 10000 usecs' timeout for the select() function. 
* This parameter can prevent the select() function * from blocking forever. So, the whole program can * exit when receiving a quitting signal. */ stv.tv_sec = 0; stv.tv_usec = 10000; ret = select(dy_conf->sock_fd + 1, &fds, NULL, NULL, &stv); if (ret < 0 && errno != EINTR) { G_LOG(ERR, "Failed to call the select() function - (%s)\n", strerror(errno)); break; } else if (likely(ret <= 0)) { if (unlikely(ret < 0)) RTE_VERIFY(errno == EINTR); continue; } /* * The config component accepts only one connection at a time. */ RTE_VERIFY(FD_ISSET(dy_conf->sock_fd, &fds)); handle_client(dy_conf->sock_fd, dy_conf); } G_LOG(NOTICE, "The Dynamic Config block is exiting\n"); cleanup_dy(dy_conf); return ret; } struct dynamic_config * get_dy_conf(void) { return &config; } void set_dyc_timeout(unsigned int sec, unsigned int usec, struct dynamic_config *dy_conf) { dy_conf->rcv_time_out.tv_sec = sec; dy_conf->rcv_time_out.tv_usec = usec; } int run_dynamic_config(struct net_config *net_conf, struct gk_config *gk_conf, struct gt_config *gt_conf, const char *server_path, const char *lua_dy_base_dir, const char *dynamic_config_file, struct dynamic_config *dy_conf, int mode) { int ret; struct sockaddr_un server_addr; mode_t socket_umask, saved_umask; /* * When the dynamic configuration is run for Gatekeeper, * the gt_conf should be NULL. * When the dynamic configuration is run for Grantor, * the gk_conf should be NULL. * The code works fine with both being NULL as well. * This way, not only will the dynamic config block work for * the Grantor case, it could work for unforeseen cases as well. 
*/ if (net_conf == NULL || server_path == NULL || lua_dy_base_dir == NULL || dynamic_config_file == NULL || dy_conf == NULL) { ret = -1; goto out; } log_ratelimit_state_init(dy_conf->lcore_id, dy_conf->log_ratelimit_interval_ms, dy_conf->log_ratelimit_burst, dy_conf->log_level, "DYC"); ret = init_mailbox("dy_conf", dy_conf->mailbox_max_entries_exp, sizeof(struct dy_cmd_entry), dy_conf->mailbox_mem_cache_size, dy_conf->lcore_id, &dy_conf->mb); if (ret < 0) goto out; dy_conf->sock_fd = -1; dy_conf->server_path = rte_strdup("server_path", server_path); if (dy_conf->server_path == NULL) { ret = -1; goto free_mb; } /* * Remove any old socket and create an unnamed socket for the server. */ ret = unlink(dy_conf->server_path); if (ret < 0 && errno != ENOENT) { G_LOG(ERR, "%s(): Failed to unlink(%s), errno=%i: %s\n", __func__, dy_conf->server_path, errno, strerror(errno)); goto free_server_path; } dy_conf->lua_dy_base_dir = rte_strdup( "lua_dy_base_dir", lua_dy_base_dir); if (dy_conf->lua_dy_base_dir == NULL) { G_LOG(ERR, "%s(): rte_strdup(%s) out of memory\n", __func__, lua_dy_base_dir); ret = -1; goto free_server_path; } dy_conf->dynamic_config_file = rte_strdup( "dynamic_config_file", dynamic_config_file); if (dy_conf->dynamic_config_file == NULL) { G_LOG(ERR, "%s(): rte_strdup(%s) out of memory\n", __func__, dynamic_config_file); ret = -1; goto free_dy_lua_base_dir; } /* Init the server socket. */ dy_conf->sock_fd = socket(AF_UNIX, SOCK_STREAM, 0); if (dy_conf->sock_fd < 0) { G_LOG(ERR, "%s(): Failed to initialize the server socket, errno=%i: %s\n", __func__, errno, strerror(errno)); ret = -1; goto free_dynamic_config_file; } /* Name the socket. 
*/ memset(&server_addr, 0, sizeof(server_addr)); server_addr.sun_family = AF_UNIX; if (sizeof(server_addr.sun_path) <= strlen(dy_conf->server_path)) { G_LOG(ERR, "%s(): The server path (%s) exceeds the length limit %lu\n", __func__, dy_conf->server_path, sizeof(server_addr.sun_path)); ret = -1; goto free_sock; } strcpy(server_addr.sun_path, dy_conf->server_path); /* * fchmod(2) does not work on sockets, so the safest way to change * the mode of the server socket is through umask(2). */ socket_umask = ~mode & (S_IRWXU | S_IRWXG | S_IRWXO); saved_umask = umask(socket_umask); ret = bind(dy_conf->sock_fd, (struct sockaddr *)&server_addr, sizeof(server_addr)); if (ret < 0) { G_LOG(ERR, "%s(): Failed to bind the server socket (%s), errno=%i: %s\n", __func__, dy_conf->server_path, errno, strerror(errno)); goto free_sock; } /* Restore original umask. */ RTE_VERIFY(umask(saved_umask) == socket_umask); /* Change user and group of the server socket. */ if (net_conf->pw_uid != 0) { /* * fchown(2) does not work on sockets, * so we are left with lchown(2). */ ret = lchown(dy_conf->server_path, net_conf->pw_uid, net_conf->pw_gid); if (ret < 0) { G_LOG(ERR, "%s(): Failed to change the owner of the server socket (%s) to uid=%u and gid=%u, errno=%i: %s\n", __func__, dy_conf->server_path, net_conf->pw_uid, net_conf->pw_gid, errno, strerror(errno)); goto free_sock; } } /* * The Dynamic config component listens to a Unix socket * for request from the local host. 
*/ ret = listen(dy_conf->sock_fd, 10); if (ret < 0) { G_LOG(ERR, "%s(): Failed to listen on the server socket (%s), errno=%i: %s\n", __func__, dy_conf->server_path, errno, strerror(errno)); goto free_sock; } if (gk_conf != NULL) gk_conf_hold(gk_conf); dy_conf->gk = gk_conf; if (gt_conf != NULL) gt_conf_hold(gt_conf); dy_conf->gt = gt_conf; ret = launch_at_stage3("dynamic_conf", dyn_cfg_proc, dy_conf, dy_conf->lcore_id); if (ret < 0) goto put_gk_gt_config; return 0; put_gk_gt_config: dy_conf->gk = NULL; if (gk_conf != NULL) gk_conf_put(gk_conf); dy_conf->gt = NULL; if (gt_conf != NULL) gt_conf_put(gt_conf); free_sock: close(dy_conf->sock_fd); dy_conf->sock_fd = -1; free_dynamic_config_file: rte_free(dy_conf->dynamic_config_file); dy_conf->dynamic_config_file = NULL; free_dy_lua_base_dir: rte_free(dy_conf->lua_dy_base_dir); dy_conf->lua_dy_base_dir = NULL; free_server_path: rte_free(dy_conf->server_path); dy_conf->server_path = NULL; free_mb: destroy_mailbox(&dy_conf->mb); out: return ret; } ```
/content/code_sandbox/config/dynamic.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
6,718
```shell #!/bin/sh if [ "$#" -eq "0" ]; then echo "devbind.sh <interface> [<interface> ...]" fi for iface in $@; do /usr/share/gatekeeper/dpdk-devbind.py --bind=vfio-pci $iface done ```
/content/code_sandbox/debian/devbind.sh
shell
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
66
```objective-c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #ifndef _GATEKEEPER_GK_BPF_H_ #define _GATEKEEPER_GK_BPF_H_ #include "gatekeeper_gk.h" /* * Load the BPF program that handles flows into @gk_conf at * position @index. * * RETURN * Zero on success; * Negative on failure. */ int gk_load_bpf_flow_handler(struct gk_config *gk_conf, unsigned int index, const char *filename, int jit); /* * Unload the BPF program that handles flows into @gk_conf at * position @index. * * RETURN * Zero on success; * Negative on failure. */ int gk_unload_bpf_flow_handler(struct gk_config *gk_conf, unsigned int index); int gk_bpf_decide_pkt(struct gk_config *gk_conf, uint8_t program_index, struct flow_entry *fe, struct ipacket *packet, uint64_t now, uint64_t *p_bpf_ret); #endif /* _GATEKEEPER_GK_BPF_H_ */ ```
/content/code_sandbox/gk/bpf.h
objective-c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
326
```c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #include <rte_ip_frag.h> #define GK_BPF_INTERNAL static #include "gatekeeper_flow_bpf.h" #include "gatekeeper_gk.h" #include "gatekeeper_main.h" #include "gatekeeper_l2.h" #include "gatekeeper_varip.h" #include "bpf.h" struct gk_bpf_init_frame { uint64_t password; struct gk_bpf_cookie *cookie; struct gk_bpf_init_ctx ctx; }; static const uint64_t init_password = 0xe0952bafb0a248f5; static struct gk_bpf_init_frame * init_ctx_to_frame(struct gk_bpf_init_ctx *ctx) { struct gk_bpf_init_frame *frame; if (unlikely(ctx == NULL)) return NULL; frame = container_of(ctx, struct gk_bpf_init_frame, ctx); if (unlikely(frame->password != init_password)) { G_LOG(WARNING, "%s(): password violation\n", __func__); return NULL; } return frame; } static struct gk_bpf_cookie * init_ctx_to_cookie(struct gk_bpf_init_ctx *ctx) { struct gk_bpf_init_frame *frame = init_ctx_to_frame(ctx); if (unlikely(frame == NULL)) return NULL; return frame->cookie; } static const struct rte_bpf_xsym flow_handler_init_xsym[] = { { .name = "cycles_per_sec", .type = RTE_BPF_XTYPE_VAR, .var = { .val = &cycles_per_sec, .desc = { .type = RTE_BPF_ARG_PTR, .size = sizeof(cycles_per_sec), }, }, }, { .name = "cycles_per_ms", .type = RTE_BPF_XTYPE_VAR, .var = { .val = &cycles_per_ms, .desc = { .type = RTE_BPF_ARG_PTR, .size = sizeof(cycles_per_ms), }, }, }, { .name = "init_ctx_to_cookie", .type = RTE_BPF_XTYPE_FUNC, .func = { .val = (void *)init_ctx_to_cookie, .nb_args = 1, .args = { [0] = { .type = RTE_BPF_ARG_PTR, .size = sizeof(struct gk_bpf_init_ctx), }, }, .ret = { .type = RTE_BPF_ARG_PTR, .size = 
sizeof(struct gk_bpf_cookie), }, }, }, }; struct gk_bpf_pkt_frame { uint64_t password; struct flow_entry *fe; struct ipacket *packet; struct gk_config *gk_conf; bool ready_to_tx; struct gk_bpf_pkt_ctx ctx; }; static const uint64_t pkt_password = 0xa2e329ba8b15af05; static struct gk_bpf_pkt_frame * pkt_ctx_to_frame(struct gk_bpf_pkt_ctx *ctx) { struct gk_bpf_pkt_frame *frame; if (unlikely(ctx == NULL)) return NULL; frame = container_of(ctx, struct gk_bpf_pkt_frame, ctx); if (unlikely(frame->password != pkt_password)) { G_LOG(WARNING, "%s(): password violation\n", __func__); return NULL; } return frame; } static struct gk_bpf_cookie * pkt_ctx_to_cookie(struct gk_bpf_pkt_ctx *ctx) { struct gk_bpf_pkt_frame *frame = pkt_ctx_to_frame(ctx); if (unlikely(frame == NULL)) return NULL; return &frame->fe->u.bpf.cookie; } static struct rte_mbuf * pkt_ctx_to_pkt(struct gk_bpf_pkt_ctx *ctx) { struct gk_bpf_pkt_frame *frame = pkt_ctx_to_frame(ctx); if (unlikely(frame == NULL)) return NULL; return frame->packet->pkt; } /* * One's complement sum. * * Notice that if @a and @b are little-endian, the result is also * little-endian. The same is true for big-endian. In order words, * this function preserves endianness. * * The endianness preservation is independent of the endianness of the host. */ static inline uint16_t onec_add(uint16_t a, uint16_t b) { uint16_t res = a + b; return res + (res < b); } /* * The result has the same endianness of the inputs as long as * all inputs have the same endianness. * The endianness preservation is independent of the endianness of the host. */ static inline uint16_t new_ip_csum(uint16_t old_csum, uint16_t old16, uint16_t new16) { /* According to RFC1624 [Eqn. 3]. 
*/ return ~onec_add(onec_add(~old_csum, ~old16), new16); } static int update_pkt_priority(struct ipacket *packet, int priority, struct gatekeeper_if *iface) { uint32_t mask; struct rte_ether_hdr *eth_hdr = adjust_pkt_len(packet->pkt, iface, 0); if (eth_hdr == NULL) { G_LOG(ERR, "gk: could not adjust the packet length at %s\n", __func__); return -1; } RTE_VERIFY(pkt_out_skip_l2(iface, eth_hdr) == packet->l3_hdr); if (packet->flow.proto == RTE_ETHER_TYPE_IPV4) { struct rte_ipv4_hdr *ip4hdr = packet->l3_hdr; uint16_t old_val = *(uint16_t *)ip4hdr; uint16_t new_val; mask = (1 << 2) - 1; ip4hdr->type_of_service = (priority << 2) | (ip4hdr->type_of_service & mask); new_val = *(uint16_t *)ip4hdr; /* According to RFC1624 [Eqn. 4]. */ ip4hdr->hdr_checksum = new_ip_csum(ip4hdr->hdr_checksum, old_val, new_val); } else if (likely(packet->flow.proto == RTE_ETHER_TYPE_IPV6)) { struct rte_ipv6_hdr *ip6hdr = packet->l3_hdr; mask = (((1 << 4) - 1) << 28) + (1 << 22) - 1; ip6hdr->vtc_flow = rte_cpu_to_be_32( (priority << 22) | (rte_be_to_cpu_32(ip6hdr->vtc_flow) & mask)); } else return -1; return 0; } static int gk_bpf_prep_for_tx(struct gk_bpf_pkt_ctx *ctx, int priority, int direct_if_possible) { int ret; struct gk_bpf_pkt_frame *frame = pkt_ctx_to_frame(ctx); if (unlikely(frame == NULL)) return -EINVAL; if (unlikely(frame->ready_to_tx)) return -EINVAL; if (unlikely(priority < 0 || priority > PRIORITY_MAX)) return -EINVAL; ret = (direct_if_possible != 0 && priority == PRIORITY_GRANTED) ? 
update_pkt_priority(frame->packet, priority, &frame->gk_conf->net->back) : encapsulate(frame->packet->pkt, priority, &frame->gk_conf->net->back, &choose_grantor_per_flow(frame->fe)->gt_addr); frame->ready_to_tx = ret == 0; return ret; } static const struct rte_bpf_xsym flow_handler_pkt_xsym[] = { { .name = "cycles_per_sec", .type = RTE_BPF_XTYPE_VAR, .var = { .val = &cycles_per_sec, .desc = { .type = RTE_BPF_ARG_PTR, .size = sizeof(cycles_per_sec), }, }, }, { .name = "cycles_per_ms", .type = RTE_BPF_XTYPE_VAR, .var = { .val = &cycles_per_ms, .desc = { .type = RTE_BPF_ARG_PTR, .size = sizeof(cycles_per_ms), }, }, }, { .name = "pkt_ctx_to_cookie", .type = RTE_BPF_XTYPE_FUNC, .func = { .val = (void *)pkt_ctx_to_cookie, .nb_args = 1, .args = { [0] = { .type = RTE_BPF_ARG_PTR, .size = sizeof(struct gk_bpf_pkt_ctx), }, }, .ret = { .type = RTE_BPF_ARG_PTR, .size = sizeof(struct gk_bpf_cookie), }, }, }, { .name = "pkt_ctx_to_pkt", .type = RTE_BPF_XTYPE_FUNC, .func = { .val = (void *)pkt_ctx_to_pkt, .nb_args = 1, .args = { [0] = { .type = RTE_BPF_ARG_PTR, .size = sizeof(struct gk_bpf_pkt_ctx), }, }, .ret = { .type = RTE_BPF_ARG_PTR_MBUF, .size = sizeof(struct rte_mbuf), .buf_size = RTE_MBUF_DEFAULT_BUF_SIZE, }, }, }, { .name = "gk_bpf_prep_for_tx", .type = RTE_BPF_XTYPE_FUNC, .func = { .val = (void *)gk_bpf_prep_for_tx, .nb_args = 3, .args = { [0] = { .type = RTE_BPF_ARG_PTR, .size = sizeof(struct gk_bpf_pkt_ctx), }, [1] = { .type = RTE_BPF_ARG_RAW, .size = sizeof(int), }, [2] = { .type = RTE_BPF_ARG_RAW, .size = sizeof(int), }, }, .ret = { .type = RTE_BPF_ARG_RAW, .size = sizeof(int), }, }, }, }; static int __bpf_jit_if_possible(struct rte_bpf *bpf, rte_bpf_jitted_func_t *ret_f, unsigned int index, const char *name) { struct rte_bpf_jit jit; int rc = rte_bpf_get_jit(bpf, &jit); if (unlikely(rc != 0)) { G_LOG(ERR, "%s() failed to get JIT program %s at index %u, error code: %i\n", __func__, name, index, rc); return rc; } if (unlikely(jit.func == NULL)) { G_LOG(WARNING, 
"%s(): BPF JIT is not available\n", __func__); return -ENOTSUP; } *ret_f = jit.func; return 0; } #define bpf_jit_if_possible(bpf, ret, index) \ __bpf_jit_if_possible(bpf, ret, index, #bpf) int gk_load_bpf_flow_handler(struct gk_config *gk_conf, unsigned int index, const char *filename, int jit) { struct gk_bpf_flow_handler *handler; struct rte_bpf_prm prm; struct rte_bpf *bpf_f_init; if (gk_conf == NULL) { G_LOG(ERR, "%s(): parameter gk_conf cannot be NULL\n", __func__); return -1; } if (index >= GK_MAX_BPF_FLOW_HANDLERS) { G_LOG(ERR, "%s(): parameter index must be in [0, %i], received %u\n", __func__, GK_MAX_BPF_FLOW_HANDLERS, index); return -1; } handler = &gk_conf->flow_handlers[index]; if (handler->f_init != NULL || handler->f_pkt != NULL) { G_LOG(ERR, "%s(): index %i is already in use\n", __func__, index); return -1; } memset(&prm, 0, sizeof(prm)); prm.xsym = flow_handler_init_xsym; prm.nb_xsym = RTE_DIM(flow_handler_init_xsym); prm.prog_arg.type = RTE_BPF_ARG_PTR; prm.prog_arg.size = sizeof(struct gk_bpf_init_ctx); bpf_f_init = rte_bpf_elf_load(&prm, filename, "init"); if (bpf_f_init == NULL) { G_LOG(ERR, "%s(): file \"%s\" does not have the BPF program \"init\"; rte_errno = %i: %s\n", __func__, filename, rte_errno, rte_strerror(rte_errno)); return -1; } prm.xsym = flow_handler_pkt_xsym; prm.nb_xsym = RTE_DIM(flow_handler_pkt_xsym); prm.prog_arg.size = sizeof(struct gk_bpf_pkt_ctx); handler->f_pkt = rte_bpf_elf_load(&prm, filename, "pkt"); if (handler->f_pkt == NULL) { G_LOG(ERR, "%s(): file \"%s\" does not have the BPF program \"pkt\"; rte_errno = %i: %s\n", __func__, filename, rte_errno, rte_strerror(rte_errno)); goto f_init; } if (jit && bpf_jit_if_possible(bpf_f_init, &handler->f_init_jit, index) == 0) bpf_jit_if_possible(handler->f_pkt, &handler->f_pkt_jit, index); /* * Guarantee that @handler has all its field but f_init properly set * in memory. This is important because the Dynamic Configuration * Block may call this function during runtime. 
*/ rte_mb(); handler->f_init = bpf_f_init; return 0; f_init: rte_bpf_destroy(bpf_f_init); return -1; } static void fill_in_cmd_entry(struct gk_cmd_entry *entry, rte_atomic32_t *done_counter, void *arg) { entry->op = GK_FLUSH_BPF; entry->u.flush_bpf.program_index = (uintptr_t)arg; entry->u.flush_bpf.done_counter = done_counter; } int gk_unload_bpf_flow_handler(struct gk_config *gk_conf, unsigned int index) { struct gk_bpf_flow_handler *handler; struct rte_bpf *bpf; if (gk_conf == NULL) { G_LOG(ERR, "%s(): parameter gk_conf cannot be NULL\n", __func__); return -1; } if (index >= GK_MAX_BPF_FLOW_HANDLERS) { G_LOG(ERR, "%s(): parameter index must be in [0, %i], received %u\n", __func__, GK_MAX_BPF_FLOW_HANDLERS, index); return -1; } handler = &gk_conf->flow_handlers[index]; bpf = handler->f_init; if (bpf == NULL || handler->f_pkt == NULL) { G_LOG(ERR, "%s(): index %i is NOT in use\n", __func__, index); return -1; } /* Stop new flow entries of refering to this BPF program. */ handler->f_init = NULL; handler->f_init_jit = NULL; rte_mb(); /* * Flush all flow entries in all flow tables that refer to * this BPF program. */ synchronize_gk_instances(gk_conf, fill_in_cmd_entry, (void *)(uintptr_t)index); /* * Free the BPF program. */ rte_bpf_destroy(bpf); bpf = handler->f_pkt; handler->f_pkt = NULL; handler->f_pkt_jit = NULL; rte_bpf_destroy(bpf); return 0; } int gk_init_bpf_cookie(const struct gk_config *gk_conf, uint8_t program_index, struct gk_bpf_cookie *cookie) { const struct gk_bpf_flow_handler *handler = &gk_conf->flow_handlers[program_index]; struct rte_bpf *bpf; struct gk_bpf_init_frame frame; rte_bpf_jitted_func_t jit; uint64_t bpf_ret; bpf = handler->f_init; if (bpf == NULL || handler->f_pkt == NULL) { G_LOG(ERR, "The GK BPF program at index %u is not available\n", program_index); return -1; } frame.password = init_password; frame.cookie = cookie; frame.ctx.now = rte_rdtsc(); jit = handler->f_init_jit; bpf_ret = likely(jit != NULL) ? 
jit(&frame.ctx) : rte_bpf_exec(bpf, &frame.ctx); if (bpf_ret != GK_BPF_INIT_RET_OK) { G_LOG(ERR, "The function init of the GK BPF program at index %u returned an error\n", program_index); return -1; } return 0; } static int parse_packet_further(struct ipacket *packet, struct gk_bpf_pkt_ctx *ctx) { struct rte_mbuf *pkt = packet->pkt; uint16_t parsed_len = pkt_in_l2_hdr_len(pkt); pkt->l2_len = parsed_len; ctx->l3_proto = packet->flow.proto; /* * extract_packet_info() guarantees that the L2 header and * the L3 headers without extensions are in the packet. */ switch (packet->flow.proto) { case RTE_ETHER_TYPE_IPV4: { struct rte_ipv4_hdr *ipv4_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv4_hdr *, parsed_len); pkt->l3_len = ipv4_hdr_len(ipv4_hdr); parsed_len += pkt->l3_len; ctx->fragmented = rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr); ctx->l4_proto = ipv4_hdr->next_proto_id; break; } case RTE_ETHER_TYPE_IPV6: { struct rte_ipv6_hdr *ipv6_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv6_hdr *, parsed_len); int l3_len = ipv6_skip_exthdr(ipv6_hdr, pkt->data_len - parsed_len, &ctx->l4_proto); if (l3_len < 0) { G_LOG(NOTICE, "%s: Failed to parse IPv6 extension headers\n", __func__); return -1; } pkt->l3_len = l3_len; parsed_len += l3_len; ctx->fragmented = rte_ipv6_frag_get_ipv6_fragment_header( ipv6_hdr) != NULL; break; } default: G_LOG(ERR, "%s: Unknown L3 header %hu\n", __func__, packet->flow.proto); return -1; } pkt->l4_len = RTE_MIN(pkt->data_len - parsed_len, /* Maximum value that @pkt->l4_len can hold. 
*/ ((1 << RTE_MBUF_L4_LEN_BITS) - 1)); return 0; } int gk_bpf_decide_pkt(struct gk_config *gk_conf, uint8_t program_index, struct flow_entry *fe, struct ipacket *packet, uint64_t now, uint64_t *p_bpf_ret) { struct gk_bpf_pkt_frame frame = { .password = pkt_password, .fe = fe, .packet = packet, .gk_conf = gk_conf, .ready_to_tx = false, .ctx = { .now = now, .expire_at = fe->expire_at, }, }; const struct gk_bpf_flow_handler *handler = &gk_conf->flow_handlers[program_index]; struct rte_bpf *bpf = handler->f_pkt; rte_bpf_jitted_func_t jit; if (unlikely(bpf == NULL)) { G_LOG(WARNING, "The BPF program at index %u does not have function pkt\n", program_index); return -EINVAL; } if (unlikely(parse_packet_further(packet, &frame.ctx) < 0)) return -EINVAL; jit = handler->f_pkt_jit; *p_bpf_ret = likely(jit != NULL) ? jit(&frame.ctx) : rte_bpf_exec(bpf, &frame.ctx); if (unlikely(*p_bpf_ret == GK_BPF_PKT_RET_FORWARD && !frame.ready_to_tx)) { G_LOG(ERR, "The BPF program at index %u has a bug: it returned GK_BPF_PKT_RET_FORWARD without successfully calling gk_bpf_prep_for_tx()\n", program_index); return -EIO; } return 0; } ```
/content/code_sandbox/gk/bpf.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
4,667
```c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #include <stddef.h> #include <arpa/inet.h> #include <linux/rtnetlink.h> #include "gatekeeper_config.h" #include "gatekeeper_gk.h" #include "gatekeeper_l2.h" #include "gatekeeper_lls.h" #include "gatekeeper_main.h" #include "gatekeeper_rt.h" void destroy_neigh_hash_table(struct neighbor_hash_table *neigh) { if (neigh->cache_tbl != NULL) { rte_free(neigh->cache_tbl); neigh->cache_tbl = NULL; } if (neigh->hash_table != NULL) { rte_hash_free(neigh->hash_table); neigh->hash_table = NULL; } neigh->tbl_size = 0; } /* * This function is only called on cache entries that are not being used, * so we don't need a concurrencty mechanism here. However, * callers must ensure that the entry is not being used. */ int clear_ether_cache(struct ether_cache *eth_cache) { int ref_cnt; memset(eth_cache->fields_to_clear, 0, sizeof(*eth_cache) - offsetof(struct ether_cache, fields_to_clear)); if ((ref_cnt = rte_atomic32_read(&eth_cache->ref_cnt)) != 1) { G_LOG(WARNING, "%s() the value of ref_cnt field in Ethernet cache entry is %d rather than 1\n", __func__, ref_cnt); } rte_atomic32_init(&eth_cache->ref_cnt); return 0; } static void gk_arp_and_nd_req_cb(const struct lls_map *map, void *arg, __attribute__((unused))enum lls_reply_ty ty, int *pcall_again) { struct ether_cache *eth_cache = arg; if (pcall_again == NULL) { clear_ether_cache(eth_cache); return; } /* * Deal with concurrency control by sequential lock * on the nexthop entry. 
*/ write_seqlock(&eth_cache->lock); rte_ether_addr_copy(&map->ha, &eth_cache->l2_hdr.eth_hdr.dst_addr); eth_cache->stale = map->stale; write_sequnlock(&eth_cache->lock); *pcall_again = true; } /* Get a new Ethernet cached header, and fill up the header accordingly. */ static struct ether_cache * get_new_ether_cache_locked(struct neighbor_hash_table *neigh, struct ipaddr *addr, struct gatekeeper_if *iface) { int i; struct ether_cache *eth_cache = NULL; for (i = 0; i < neigh->tbl_size; i++) { if (rte_atomic32_read(&neigh->cache_tbl[i].ref_cnt) == 0) { eth_cache = &neigh->cache_tbl[i]; break; } } if (eth_cache == NULL) return NULL; /* * We are initializing @eth_cache, no one but us should be * reading/writing to @eth_cache, so it doesn't need a sequential lock * to protect the operations here. */ eth_cache->stale = true; rte_memcpy(&eth_cache->ip_addr, addr, sizeof(eth_cache->ip_addr)); if (iface->vlan_insert) { uint16_t vlan_tag_be = addr->proto == RTE_ETHER_TYPE_IPV4 ? iface->ipv4_vlan_tag_be : iface->ipv6_vlan_tag_be; fill_vlan_hdr(&eth_cache->l2_hdr.eth_hdr, vlan_tag_be, addr->proto); } else { eth_cache->l2_hdr.eth_hdr.ether_type = rte_cpu_to_be_16(addr->proto); } rte_ether_addr_copy(&iface->eth_addr, &eth_cache->l2_hdr.eth_hdr.src_addr); rte_atomic32_set(&eth_cache->ref_cnt, 1); return eth_cache; } static struct ether_cache * neigh_get_ether_cache_locked(struct neighbor_hash_table *neigh, struct ipaddr *addr, struct gatekeeper_if *iface, int lcore_id) { int ret; struct ether_cache *eth_cache = lookup_ether_cache(neigh, &addr->ip); if (eth_cache != NULL) { rte_atomic32_inc(&eth_cache->ref_cnt); return eth_cache; } eth_cache = get_new_ether_cache_locked(neigh, addr, iface); if (eth_cache == NULL) return NULL; if (addr->proto == RTE_ETHER_TYPE_IPV4) { ret = hold_arp(gk_arp_and_nd_req_cb, eth_cache, &addr->ip.v4, lcore_id); } else if (likely(addr->proto == RTE_ETHER_TYPE_IPV6)) { ret = hold_nd(gk_arp_and_nd_req_cb, eth_cache, &addr->ip.v6, lcore_id); } else { 
G_LOG(CRIT, "%s(): bug: unknown IP type %hu\n", __func__, addr->proto); ret = -EINVAL; } if (ret < 0) goto eth_cache_cleanup; ret = rte_hash_add_key_data(neigh->hash_table, &addr->ip, eth_cache); if (ret == 0) { /* * Function get_new_ether_cache_locked() already * sets @ref_cnt to 1. */ return eth_cache; } G_LOG(ERR, "%s(): failed to add a cache entry to the neighbor hash table\n", __func__); if (addr->proto == RTE_ETHER_TYPE_IPV4) put_arp(&addr->ip.v4, lcore_id); else put_nd(&addr->ip.v6, lcore_id); /* * By calling put_xxx(), the LLS block will call * gk_arp_and_nd_req_cb(), which, in turn, will call * clear_ether_cache(), so we can return directly here. */ return NULL; eth_cache_cleanup: clear_ether_cache(eth_cache); return NULL; } int parse_ip_prefix(const char *ip_prefix, struct ipaddr *res) { /* Need to make copy to tokenize. */ size_t ip_prefix_len = ip_prefix != NULL ? strlen(ip_prefix) : 0; char ip_prefix_copy[ip_prefix_len + 1]; char *ip_addr; char *saveptr; char *prefix_len_str; char *end; long prefix_len; int ip_type; if (ip_prefix == NULL) return -EINVAL; strcpy(ip_prefix_copy, ip_prefix); ip_addr = strtok_r(ip_prefix_copy, "/", &saveptr); if (ip_addr == NULL) { G_LOG(ERR, "%s(%s): failed to parse IP address in prefix\n", __func__, ip_prefix); return -EINVAL; } ip_type = get_ip_type(ip_addr); if (ip_type != AF_INET && ip_type != AF_INET6) return -EINVAL; prefix_len_str = strtok_r(NULL, "\0", &saveptr); if (prefix_len_str == NULL) { G_LOG(ERR, "%s(%s): failed to parse prefix length in prefix\n", __func__, ip_prefix); return -EINVAL; } prefix_len = strtol(prefix_len_str, &end, 10); if (prefix_len_str == end || !*prefix_len_str || *end) { G_LOG(ERR, "%s(%s): prefix length \"%s\" is not a number\n", __func__, ip_prefix, prefix_len_str); return -EINVAL; } if ((prefix_len == LONG_MAX || prefix_len == LONG_MIN) && errno == ERANGE) { G_LOG(ERR, "%s(%s): prefix length \"%s\" caused underflow or overflow\n", __func__, ip_prefix, prefix_len_str); return -EINVAL; } 
if (prefix_len < 0 || prefix_len > max_prefix_len(ip_type)) { G_LOG(ERR, "%s(%s): prefix length \"%s\" is out of range\n", __func__, ip_prefix, prefix_len_str); return -EINVAL; } if (convert_str_to_ip(ip_addr, res) < 0) { G_LOG(ERR, "%s(%s): the IP address of the prefix is not valid\n", __func__, ip_prefix); return -EINVAL; } RTE_VERIFY((ip_type == AF_INET && res->proto == RTE_ETHER_TYPE_IPV4) || (ip_type == AF_INET6 && res->proto == RTE_ETHER_TYPE_IPV6)); return prefix_len; } /* This function will return an empty FIB entry. */ static int get_empty_fib_id(uint16_t ip_proto, struct gk_config *gk_conf, struct gk_fib **p_fib, struct qid **p_qid, uint32_t *p_id) { struct gk_lpm *ltbl = &gk_conf->lpm_tbl; int ret; /* Find an empty FIB entry. */ if (ip_proto == RTE_ETHER_TYPE_IPV4) { ret = qid_pop(&ltbl->qid, p_id); if (unlikely(ret < 0)) { G_LOG(WARNING, "%s(): cannot find an empty fib entry in the IPv4 FIB table (errno=%i): %s\n", __func__, -ret, strerror(-ret)); } else { *p_fib = &ltbl->fib_tbl[*p_id]; if (unlikely((*p_fib)->action != GK_FIB_MAX)) { G_LOG(CRIT, "%s(): bug: empty IPv4 FIB entry marked with action %d\n", __func__, (*p_fib)->action); return -EFAULT; } if (p_qid != NULL) *p_qid = &ltbl->qid; } return ret; } if (likely(ip_proto == RTE_ETHER_TYPE_IPV6)) { ret = qid_pop(&ltbl->qid6, p_id); if (unlikely(ret < 0)) { G_LOG(WARNING, "%s(): cannot find an empty fib entry in the IPv6 FIB table (errno=%i): %s\n", __func__, -ret, strerror(-ret)); } else { *p_fib = &ltbl->fib_tbl6[*p_id]; if (unlikely((*p_fib)->action != GK_FIB_MAX)) { G_LOG(CRIT, "%s(): bug: empty IPv6 FIB entry marked with action %d\n", __func__, (*p_fib)->action); return -EFAULT; } if (p_qid != NULL) *p_qid = &ltbl->qid6; } return ret; } G_LOG(CRIT, "%s(): bug: unknown Ethernet type %hu\n", __func__, ip_proto); *p_fib = NULL; if (p_qid != NULL) *p_qid = NULL; return -EINVAL; } /* Add a prefix into the LPM table. 
*/ static int lpm_add_route(const struct ipaddr *ip_addr, int prefix_len, int fib_id, struct gk_lpm *ltbl) { if (ip_addr->proto == RTE_ETHER_TYPE_IPV4) { return fib_add(&ltbl->fib, (uint8_t *)&ip_addr->ip.v4.s_addr, prefix_len, fib_id); } if (likely(ip_addr->proto == RTE_ETHER_TYPE_IPV6)) { return fib_add(&ltbl->fib6, ip_addr->ip.v6.s6_addr, prefix_len, fib_id); } G_LOG(CRIT, "%s(): bug: unknown IP type %hu\n", __func__, ip_addr->proto); return -EINVAL; } /* Delete a prefix from the LPM table. */ static int lpm_del_route(const struct ipaddr *ip_addr, int prefix_len, struct gk_lpm *ltbl) { if (ip_addr->proto == RTE_ETHER_TYPE_IPV4) { return fib_delete(&ltbl->fib, (uint8_t *)&ip_addr->ip.v4.s_addr, prefix_len); } if (likely(ip_addr->proto == RTE_ETHER_TYPE_IPV6)) { return fib_delete(&ltbl->fib6, ip_addr->ip.v6.s6_addr, prefix_len); } G_LOG(CRIT, "%s(): bug: unknown IP type %hu\n", __func__, ip_addr->proto); return -EINVAL; } /* * For IPv4, the hash table key (i.e., IPv4 address) used is * in network byte order. Moreover, the DPDK's hash table * implementation takes a mod over the hash. * We convert the key to host order to make sure * that the most important bits of the hash function are * the least significant bits of the IP address. */ uint32_t custom_ipv4_hash_func(const void *key, __attribute__((unused)) uint32_t length, __attribute__((unused)) uint32_t initval) { return ntohl(*(const uint32_t *)key); } int setup_neighbor_tbl(unsigned int socket_id, int identifier, int ip_ver, int ht_size, struct neighbor_hash_table *neigh, rte_hash_function hash_func) { int i, ret; char ht_name[64]; int key_len = ip_ver == RTE_ETHER_TYPE_IPV4 ? sizeof(struct in_addr) : sizeof(struct in6_addr); struct rte_hash_parameters neigh_hash_params = { .entries = ht_size < HASH_TBL_MIN_SIZE ? 
HASH_TBL_MIN_SIZE : ht_size, .key_len = key_len, .hash_func = hash_func, .hash_func_init_val = 0, .socket_id = socket_id, }; ret = snprintf(ht_name, sizeof(ht_name), "neighbor_hash_%u", identifier); RTE_VERIFY(ret > 0 && ret < (int)sizeof(ht_name)); /* Setup the neighbor hash table. */ neigh_hash_params.name = ht_name; neigh->hash_table = rte_hash_create(&neigh_hash_params); if (neigh->hash_table == NULL) { G_LOG(ERR, "%s(): cannot create hash table for neighbor FIB\n", __func__); ret = -ENOMEM; goto out; } /* Setup the Ethernet header cache table. */ neigh->cache_tbl = rte_calloc_socket(NULL, ht_size, sizeof(struct ether_cache), 0, socket_id); if (neigh->cache_tbl == NULL) { G_LOG(ERR, "%s(): cannot create Ethernet header cache table\n", __func__); ret = -ENOMEM; goto neigh_hash; } /* Initialize the sequential lock for each Ethernet cache entry. */ for (i = 0; i < ht_size; i++) seqlock_init(&neigh->cache_tbl[i].lock); neigh->tbl_size = ht_size; ret = 0; goto out; neigh_hash: rte_hash_free(neigh->hash_table); neigh->hash_table = NULL; out: return ret; } /* * The caller is responsible for releasing any resource associated to @fib. * For example, if the FIB entry has action GK_FWD_NEIGHBOR_*_NET, * then the caller needs to first destroy the neighbor hash table before * calling this function. */ static inline void initialize_fib_entry(struct gk_fib *fib) { /* Reset the fields of the deleted FIB entry. 
*/ fib->action = GK_FIB_MAX; memset(&fib->u, 0, sizeof(fib->u)); } static int reset_fib_entry(struct gk_fib *fib_entry, struct qid *qid, uint32_t fib_id) { int ret = qid_push(qid, fib_id); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(): failed to push QID %"PRIu32" for FIB (errno=%i): %s\n", __func__, fib_id, -ret, strerror(-ret)); return ret; } initialize_fib_entry(fib_entry); return 0; } static inline int get_fib_id_from_entry(struct gk_fib *fib_tbl, struct gk_fib *fib_entry, struct qid *qid, uint32_t *p_id) { if (unlikely(fib_entry < &fib_tbl[0] || fib_entry > &fib_tbl[qid->len - 1])) { G_LOG(CRIT, "%s(): bug: invalid pointers: fib_tbl=%p, fib_entry=%p\n", __func__, fib_tbl, fib_entry); return -EFAULT; } *p_id = fib_entry - fib_tbl; return 0; } static int find_reset_fib_entry(struct gk_fib *fib_tbl, struct gk_fib *fib_entry, struct qid *qid) { uint32_t fib_id = qid->len; /* Invalid ID. */ int ret = get_fib_id_from_entry(fib_tbl, fib_entry, qid, &fib_id); if (unlikely(ret < 0)) return ret; return reset_fib_entry(fib_entry, qid, fib_id); } /* * Setup the FIB entries for the network prefixes, for which @iface * is responsible. * These prefixes are configured when the Gatekeeper server starts. */ static int setup_net_prefix_fib(int identifier, struct gk_fib **neigh_fib, struct gk_fib **neigh6_fib, struct gatekeeper_if *iface, struct gk_config *gk_conf) { int ret, ret2; uint32_t fib_id, fib6_id; unsigned int socket_id = rte_lcore_to_socket_id(gk_conf->lcores[0]); struct net_config *net_conf = gk_conf->net; struct gk_fib *neigh_fib_ipv4 = NULL; struct gk_fib *neigh_fib_ipv6 = NULL; struct gk_lpm *ltbl = &gk_conf->lpm_tbl; /* Set up the FIB entry for the IPv4 network prefix. 
*/ if (ipv4_if_configured(iface)) { ret = get_empty_fib_id(RTE_ETHER_TYPE_IPV4, gk_conf, &neigh_fib_ipv4, NULL, &fib_id); if (unlikely(ret < 0)) goto out; ret = setup_neighbor_tbl(socket_id, (identifier * 2), RTE_ETHER_TYPE_IPV4, (1 << (32 - iface->ip4_addr_plen)), &neigh_fib_ipv4->u.neigh, custom_ipv4_hash_func); if (ret < 0) goto init_fib_ipv4; if (iface == &net_conf->front) neigh_fib_ipv4->action = GK_FWD_NEIGHBOR_FRONT_NET; else if (likely(iface == &net_conf->back)) neigh_fib_ipv4->action = GK_FWD_NEIGHBOR_BACK_NET; else { G_LOG(CRIT, "%s(): bug: invalid interface %s\n", __func__, iface->name); ret = -EINVAL; goto free_fib_ipv4_ht; } ret = fib_add(&ltbl->fib, (uint8_t *)&iface->ip4_addr.s_addr, iface->ip4_addr_plen, fib_id); if (ret < 0) goto free_fib_ipv4_ht; *neigh_fib = neigh_fib_ipv4; } /* Set up the FIB entry for the IPv6 network prefix. */ if (ipv6_if_configured(iface)) { ret = get_empty_fib_id(RTE_ETHER_TYPE_IPV6, gk_conf, &neigh_fib_ipv6, NULL, &fib6_id); if (unlikely(ret < 0)) goto free_fib_ipv4; ret = setup_neighbor_tbl(socket_id, (identifier * 2 + 1), RTE_ETHER_TYPE_IPV6, gk_conf->max_num_ipv6_neighbors, &neigh_fib_ipv6->u.neigh, DEFAULT_HASH_FUNC); if (ret < 0) goto init_fib_ipv6; if (iface == &net_conf->front) neigh_fib_ipv6->action = GK_FWD_NEIGHBOR_FRONT_NET; else if (likely(iface == &net_conf->back)) neigh_fib_ipv6->action = GK_FWD_NEIGHBOR_BACK_NET; else { G_LOG(CRIT, "%s(): bug: invalid interface %s\n", __func__, iface->name); ret = -EINVAL; goto free_fib_ipv6_ht; } ret = fib_add(&ltbl->fib6, iface->ip6_addr.s6_addr, iface->ip6_addr_plen, fib6_id); if (ret < 0) goto free_fib_ipv6_ht; *neigh6_fib = neigh_fib_ipv6; } return 0; free_fib_ipv6_ht: destroy_neigh_hash_table(&neigh_fib_ipv6->u.neigh); init_fib_ipv6: ret2 = reset_fib_entry(neigh_fib_ipv6, &ltbl->qid6, fib6_id); if (unlikely(ret2 < 0)) { G_LOG(CRIT, "%s(): bug: failed to reset IPv6 FIB entry (errno=%i): %s\n", __func__, -ret2, strerror(-ret2)); } free_fib_ipv4: if (neigh_fib_ipv4 == 
NULL) goto out; *neigh_fib = NULL; RTE_VERIFY(fib_delete(&ltbl->fib, (uint8_t *)&iface->ip4_addr.s_addr, iface->ip4_addr_plen) == 0); free_fib_ipv4_ht: destroy_neigh_hash_table(&neigh_fib_ipv4->u.neigh); init_fib_ipv4: ret2 = reset_fib_entry(neigh_fib_ipv4, &ltbl->qid, fib_id); if (unlikely(ret2 < 0)) { G_LOG(CRIT, "%s(): bug: failed to reset IPv4 FIB entry (errno=%i): %s\n", __func__, -ret2, strerror(-ret2)); } out: return ret; } static int init_fib_tbl(struct gk_config *gk_conf) { int ret; unsigned int i; struct gk_lpm *ltbl = &gk_conf->lpm_tbl; struct gk_fib *neigh_fib_front = NULL, *neigh6_fib_front = NULL; struct gk_fib *neigh_fib_back = NULL, *neigh6_fib_back = NULL; rte_spinlock_init(&ltbl->lock); if (ltbl->fib_tbl != NULL) { for (i = 0; i < gk_conf->max_num_ipv4_rules; i++) initialize_fib_entry(&ltbl->fib_tbl[i]); } if (ltbl->fib_tbl6 != NULL) { for (i = 0; i < gk_conf->max_num_ipv6_rules; i++) initialize_fib_entry(&ltbl->fib_tbl6[i]); } /* Set up the FIB entry for the front network prefixes. */ ret = setup_net_prefix_fib(0, &neigh_fib_front, &neigh6_fib_front, &gk_conf->net->front, gk_conf); if (ret < 0) { G_LOG(ERR, "%s(): failed to setup the FIB entry for the front network prefixes\n", __func__); goto out; } /* Set up the FIB entry for the back network prefixes. 
*/ RTE_VERIFY(gk_conf->net->back_iface_enabled); ret = setup_net_prefix_fib(1, &neigh_fib_back, &neigh6_fib_back, &gk_conf->net->back, gk_conf); if (ret < 0) { G_LOG(ERR, "%s(): failed to setup the FIB entry for the back network prefixes\n", __func__); goto free_front_fibs; } return 0; free_front_fibs: if (neigh_fib_front != NULL) { int ret2; struct gatekeeper_if *iface = &gk_conf->net->front; RTE_VERIFY(fib_delete(&gk_conf->lpm_tbl.fib, (uint8_t *)&iface->ip4_addr.s_addr, iface->ip4_addr_plen) == 0); destroy_neigh_hash_table(&neigh_fib_front->u.neigh); ret2 = find_reset_fib_entry(gk_conf->lpm_tbl.fib_tbl, neigh_fib_front, &ltbl->qid); if (unlikely(ret2 < 0)) { G_LOG(CRIT, "%s(): bug: failed to reset IPv4 FIB entry (errno=%i): %s\n", __func__, -ret2, strerror(-ret2)); } neigh_fib_front = NULL; } if (neigh6_fib_front != NULL) { int ret2; struct gatekeeper_if *iface = &gk_conf->net->front; RTE_VERIFY(fib_delete(&gk_conf->lpm_tbl.fib6, iface->ip6_addr.s6_addr, iface->ip6_addr_plen) == 0); destroy_neigh_hash_table(&neigh6_fib_front->u.neigh); ret2 = find_reset_fib_entry(gk_conf->lpm_tbl.fib_tbl6, neigh6_fib_front, &ltbl->qid6); if (unlikely(ret2 < 0)) { G_LOG(CRIT, "%s(): bug: failed to reset IPv6 FIB entry (errno=%i): %s\n", __func__, -ret2, strerror(-ret2)); } neigh6_fib_front = NULL; } out: return ret; } int setup_gk_lpm(struct gk_config *gk_conf, unsigned int socket_id) { struct gk_lpm *ltbl = &gk_conf->lpm_tbl; int ret; if (ipv4_configured(gk_conf->net)) { ret = fib_create(&ltbl->fib, "IPv4-FIB", socket_id, 32, gk_conf->max_num_ipv4_rules, gk_conf->num_ipv4_tbl8s); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(): failed to create the IPv4 FIB (errno=%i): %s\n", __func__, -ret, strerror(-ret)); goto out; } /* * Don't need to zero memory during allocation because * initialize_fib_entry() does that for us. 
*/ ltbl->fib_tbl = rte_malloc_socket("IPv4-FIB-table", gk_conf->max_num_ipv4_rules * sizeof(struct gk_fib), 0, socket_id); if (unlikely(ltbl->fib_tbl == NULL)) { G_LOG(ERR, "%s(): failed to create the IPv4 FIB table\n", __func__); ret = -ENOMEM; goto free_fib; } ret = qid_init(&ltbl->qid, gk_conf->max_num_ipv4_rules, "rt_qid", socket_id); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(): failed to create IPv4 QID for managing FIB entries (errno=%i): %s\n", __func__, -ret, strerror(-ret)); goto free_lpm_tbl; } } else if (gk_conf->max_num_ipv4_rules != 0 || gk_conf->num_ipv4_tbl8s != 0) { G_LOG(WARNING, "%s(): IPv4 is not configured, but the parameters max_num_ipv4_rules=%u and num_ipv4_tbl8s=%u are not both zero\n", __func__, gk_conf->max_num_ipv4_rules, gk_conf->num_ipv4_tbl8s); } if (ipv6_configured(gk_conf->net)) { ret = fib_create(&ltbl->fib6, "IPv6-FIB", socket_id, 128, gk_conf->max_num_ipv6_rules, gk_conf->num_ipv6_tbl8s); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(): failed to create the IPv6 FIB (errno=%i): %s\n", __func__, -ret, strerror(-ret)); goto free_qid; } /* * Don't need to zero memory during allocation because * initialize_fib_entry() does that for us. 
*/ ltbl->fib_tbl6 = rte_malloc_socket("IPv6-FIB-table", gk_conf->max_num_ipv6_rules * sizeof(struct gk_fib), 0, socket_id); if (unlikely(ltbl->fib_tbl6 == NULL)) { G_LOG(ERR, "%s(): failed to create the IPv6 FIB table\n", __func__); ret = -ENOMEM; goto free_fib6; } ret = qid_init(&ltbl->qid6, gk_conf->max_num_ipv6_rules, "rt_qid6", socket_id); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(): failed to create IPv6 QID for managing FIB entries (errno=%i): %s\n", __func__, -ret, strerror(-ret)); goto free_lpm_tbl6; } } else if (gk_conf->max_num_ipv6_rules != 0 || gk_conf->num_ipv6_tbl8s != 0) { G_LOG(WARNING, "%s(): IPv6 is not configured, but the parameters max_num_ipv6_rules=%u and num_ipv6_tbl8s=%u are not both zero\n", __func__, gk_conf->max_num_ipv6_rules, gk_conf->num_ipv6_tbl8s); } ret = init_fib_tbl(gk_conf); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(): failed to initialize the FIB table (errno=%i): %s\n", __func__, -ret, strerror(-ret)); goto free_qid6; } ret = 0; goto out; free_qid6: if (!ipv6_configured(gk_conf->net)) goto free_qid; qid_free(&ltbl->qid6); free_lpm_tbl6: rte_free(ltbl->fib_tbl6); ltbl->fib_tbl6 = NULL; free_fib6: fib_free(&ltbl->fib6); free_qid: if (!ipv4_configured(gk_conf->net)) goto out; qid_free(&ltbl->qid); free_lpm_tbl: rte_free(ltbl->fib_tbl); ltbl->fib_tbl = NULL; free_fib: fib_free(&ltbl->fib); out: return ret; } static void fill_in_cmd_entry(struct gk_cmd_entry *entry, rte_atomic32_t *done_counter, void *arg) { struct gk_synch_request *req_template = arg; entry->op = GK_SYNCH_WITH_LPM; entry->u.synch = *req_template; entry->u.synch.done_counter = done_counter; } static void synchronize_gk_instances_with_fib(struct gk_config *gk_conf, struct gk_fib *fib, bool update_only) { struct gk_synch_request req_template = { .fib = fib, .update_only = update_only, .done_counter = NULL, }; synchronize_gk_instances(gk_conf, fill_in_cmd_entry, &req_template); } /* * Note that, @action should be either GK_FWD_GATEWAY_FRONT_NET * or 
GK_FWD_GATEWAY_BACK_NET. */ static struct gk_fib * find_fib_entry_for_neighbor_locked(const struct ipaddr *gw_addr, enum gk_fib_action action, struct gk_config *gk_conf) { int ret; uint32_t fib_id; struct gk_fib *neigh_fib; struct gk_lpm *ltbl = &gk_conf->lpm_tbl; struct gatekeeper_if *iface; if (action == GK_FWD_GATEWAY_FRONT_NET) iface = &gk_conf->net->front; else if (likely(action == GK_FWD_GATEWAY_BACK_NET)) iface = &gk_conf->net->back; else { G_LOG(ERR, "%s(): action = %d is not expected\n", __func__, action); return NULL; } if (gw_addr->proto == RTE_ETHER_TYPE_IPV4 && ipv4_if_configured(iface)) { ret = rib_lookup(rib4_from_ltbl(ltbl), (uint8_t *)&gw_addr->ip.v4.s_addr, &fib_id); /* * Invalid gateway entry, since at least we should * obtain the FIB entry for the neighbor table. */ if (unlikely(ret < 0)) return NULL; neigh_fib = &ltbl->fib_tbl[fib_id]; } else if (likely(gw_addr->proto == RTE_ETHER_TYPE_IPV6) && ipv6_if_configured(iface)) { ret = rib_lookup(rib6_from_ltbl(ltbl), gw_addr->ip.v6.s6_addr, &fib_id); /* * Invalid gateway entry, since at least we should * obtain the FIB entry for the neighbor table. */ if (unlikely(ret < 0)) return NULL; neigh_fib = &ltbl->fib_tbl6[fib_id]; } else { G_LOG(ERR, "%s(): Unconfigued IP type %hu at interface %s\n", __func__, gw_addr->proto, iface->name); return NULL; } /* * Invalid gateway entry, since the neighbor entry * and the gateway entry should be in the same network. 
*/ if ((action == GK_FWD_GATEWAY_FRONT_NET && neigh_fib->action != GK_FWD_NEIGHBOR_FRONT_NET) || (action == GK_FWD_GATEWAY_BACK_NET && neigh_fib->action != GK_FWD_NEIGHBOR_BACK_NET)) return NULL; return neigh_fib; } static int ether_cache_put(struct gk_fib *neigh_fib, enum gk_fib_action action, struct ether_cache *eth_cache, struct gk_config *gk_conf) { int ret, ref_cnt; struct gk_fib *neighbor_fib = neigh_fib; struct ipaddr addr; while ((ref_cnt = rte_atomic32_read(&eth_cache->ref_cnt)) >= 2) { if (likely(rte_atomic32_cmpset((volatile uint32_t *) &eth_cache->ref_cnt.cnt, ref_cnt, ref_cnt - 1) != 0)) return 0; } if (ref_cnt < 1) { rte_panic("%s(): bug: the ref_cnt of the ether cache should be 1, but it is %d\n", __func__, ref_cnt); } /* * We need a copy of the IP address of the nexthop, * because after calling put_xxx(), it's possible that * gk_arp_and_nd_req_cb() is called before rte_hash_del_key(). * In this case, the 'eth_cache->ip_addr' (hash key) will be reset, * so that the hash key becomes invalid. */ addr = eth_cache->ip_addr; /* * Find the FIB entry for the @addr. * We need to release the @eth_cache * Ethernet header entry from the neighbor hash table. 
*/ if (neighbor_fib == NULL) { neighbor_fib = find_fib_entry_for_neighbor_locked( &addr, action, gk_conf); if (neighbor_fib == NULL) { G_LOG(ERR, "%s(): could not find neighbor FIB to release Ethernet header entry\n", __func__); return -EINVAL; } } if (addr.proto == RTE_ETHER_TYPE_IPV4) { ret = put_arp(&addr.ip.v4, gk_conf->lcores[0]); if (ret < 0) return ret; ret = rte_hash_del_key(neighbor_fib->u.neigh.hash_table, &addr.ip.v4.s_addr); if (ret < 0) { G_LOG(CRIT, "%s(): failed to delete an Ethernet cache entry from the IPv4 neighbor table; we are NOT trying to recover from this failure\n", __func__); } return ret; } if (likely(addr.proto == RTE_ETHER_TYPE_IPV6)) { ret = put_nd(&addr.ip.v6, gk_conf->lcores[0]); if (ret < 0) return ret; ret = rte_hash_del_key(neighbor_fib->u.neigh.hash_table, addr.ip.v6.s6_addr); if (ret < 0) { G_LOG(CRIT, "%s(): failed to delete an Ethernet cache entry from the IPv6 neighbor table; we are NOT trying to recover from this failure\n", __func__); } return ret; } G_LOG(ERR, "%s(): remove an invalid FIB entry with IP type %hu\n", __func__, addr.proto); return -EINVAL; } /* * This function is called by del_fib_entry_numerical_locked(). * Notice that, it doesn't stand on its own, and it's only * a construct to make del_fib_entry_numerical_locked() readable. 
*/
static int
del_gateway_from_neigh_table_locked(const struct ip_prefix *ip_prefix,
	enum gk_fib_action action, struct ether_cache *eth_cache,
	struct gk_config *gk_conf)
{
	/*
	 * Release the reference that the FIB entry being deleted holds on
	 * its cached Ethernet header. Failures are only logged; the caller
	 * proceeds with the deletion regardless.
	 */
	int ret = ether_cache_put(NULL, action, eth_cache, gk_conf);
	if (unlikely(ret < 0)) {
		G_LOG(ERR, "%s(%s): failed to release the Ethernet cached header of the Grantor FIB entry\n",
			__func__, ip_prefix->str);
	}
	return ret;
}

/*
 * Release every gateway of the Grantor set @set and free @set itself.
 * Returns the first error observed (if any) while releasing entries.
 */
static int
clear_grantor_set(const struct ip_prefix *ip_prefix, struct grantor_set *set,
	struct gk_config *gk_conf)
{
	int final_ret = 0;
	unsigned int i;

	for (i = 0; i < set->num_entries; i++) {
		int ret = del_gateway_from_neigh_table_locked(ip_prefix,
			GK_FWD_GATEWAY_BACK_NET, set->entries[i].eth_cache,
			gk_conf);
		/* Keep going on error so that every entry is released. */
		if (unlikely(final_ret == 0 && ret < 0))
			final_ret = ret;
	}

	rte_free(set);
	return final_ret;
}

/*
 * Returns:
 *	>= 0 if the prefix already exists, the return is the FIB ID.
 *	-ENOENT if the prefix does not exist.
 *	< 0 if an error occurred.
 *
 * When @p_fib is not NULL, *@p_fib receives a pointer to the FIB entry
 * of the prefix if it exists, and NULL otherwise.
 */
static int
check_prefix_exists_locked(const struct ip_prefix *prefix,
	struct gk_config *gk_conf, struct gk_fib **p_fib)
{
	struct gk_lpm *ltbl = &gk_conf->lpm_tbl;
	uint32_t fib_id;
	int ret;

	if (prefix->addr.proto == RTE_ETHER_TYPE_IPV4) {
		ret = rib_is_rule_present(rib4_from_ltbl(ltbl),
			(uint8_t *)&prefix->addr.ip.v4.s_addr, prefix->len,
			&fib_id);
		if (ret == 1 && p_fib != NULL)
			*p_fib = &ltbl->fib_tbl[fib_id];
	} else if (likely(prefix->addr.proto == RTE_ETHER_TYPE_IPV6)) {
		ret = rib_is_rule_present(rib6_from_ltbl(ltbl),
			prefix->addr.ip.v6.s6_addr, prefix->len, &fib_id);
		if (ret == 1 && p_fib != NULL)
			*p_fib = &ltbl->fib_tbl6[fib_id];
	} else {
		G_LOG(WARNING, "%s(%s): Unknown IP type %hu\n",
			__func__, prefix->str, prefix->addr.proto);
		if (p_fib != NULL)
			*p_fib = NULL;
		return -EINVAL;
	}

	if (ret == 1)
		return fib_id;

	if (p_fib != NULL)
		*p_fib = NULL;
	if (likely(ret == 0))
		return -ENOENT;
	/* rib_is_rule_present() must only return 1, 0, or a negative. */
	RTE_VERIFY(ret < 0 && ret != -ENOENT);
	return ret;
}

/* Sanity check a prefix before it is added to, or removed from, the FIB. */
static inline int
check_prefix(const struct ip_prefix *prefix_info)
{
	if (unlikely(prefix_info->len <
0))
		return -EINVAL;
	return 0;
}

/*
 * For removing FIB entries, it needs to notify the GK instances
 * about the removal of the FIB entry.
 */
int
del_fib_entry_numerical_locked(const struct ip_prefix *prefix_info,
	struct gk_config *gk_conf)
{
	struct gk_fib *fib_tbl, *prefix_fib;
	struct qid *qid;
	int ret, ret2;

	ret = check_prefix(prefix_info);
	if (unlikely(ret < 0))
		return ret;

	ret = check_prefix_exists_locked(prefix_info, gk_conf, &prefix_fib);
	if (unlikely(ret == -ENOENT)) {
		G_LOG(WARNING, "%s(%s): tried to delete a non-existent IP prefix\n",
			__func__, prefix_info->str);
		return -ENOENT;
	}
	if (unlikely(ret < 0)) {
		G_LOG(ERR, "%s(%s): check_prefix_exists_locked() failed (errno=%i): %s\n",
			__func__, prefix_info->str, -ret, strerror(-ret));
		return ret;
	}
	RTE_VERIFY(prefix_fib != NULL);

	/*
	 * GK_FWD_NEIGHBOR_*_NET FIB entries are initialized when
	 * Gatekeeper starts. These FIB entries are only reserved
	 * for the network prefixes which Gatekeeper is responsible.
	 * Changing these network prefixes requires restarting Gatekeeper,
	 * so one can ignore the deletion of these FIB entries.
	 */
	if (unlikely(prefix_fib->action == GK_FWD_NEIGHBOR_FRONT_NET ||
			prefix_fib->action == GK_FWD_NEIGHBOR_BACK_NET)) {
		G_LOG(WARNING, "%s(%s) cannot delete a LAN prefix of Gatekeeper\n",
			__func__, prefix_info->str);
		return -EPERM;
	}

	/* Pick the FIB table and QID that match the IP version. */
	if (prefix_info->addr.proto == RTE_ETHER_TYPE_IPV4) {
		fib_tbl = gk_conf->lpm_tbl.fib_tbl;
		qid = &gk_conf->lpm_tbl.qid;
	} else if (likely(prefix_info->addr.proto == RTE_ETHER_TYPE_IPV6)) {
		fib_tbl = gk_conf->lpm_tbl.fib_tbl6;
		qid = &gk_conf->lpm_tbl.qid6;
	} else {
		G_LOG(WARNING, "%s(%s, %"PRIu16"): prefix with unknown IP protocol when deleting a FIB entry\n",
			__func__, prefix_info->str, prefix_info->addr.proto);
		return -EINVAL;
	}

	ret = lpm_del_route(&prefix_info->addr, prefix_info->len,
		&gk_conf->lpm_tbl);
	if (ret < 0) {
		G_LOG(ERR, "%s(%s) failed to remove the IP prefix (errno=%i): %s\n",
			__func__, prefix_info->str, -ret, strerror(-ret));
		return ret;
	}

	/*
	 * We need to notify the GK blocks whenever we remove
	 * a FIB entry that is accessible through a prefix.
	 */
	synchronize_gk_instances_with_fib(gk_conf, prefix_fib, false);

	/*
	 * From now on, GK blocks must not have a reference
	 * to @prefix_fib.
	 */
	switch (prefix_fib->action) {
	case GK_FWD_GRANTOR:
		ret = clear_grantor_set(prefix_info, prefix_fib->u.grantor.set,
			gk_conf);
		break;
	case GK_FWD_GATEWAY_FRONT_NET:
		/* FALLTHROUGH */
	case GK_FWD_GATEWAY_BACK_NET:
		ret = del_gateway_from_neigh_table_locked(
			prefix_info, prefix_fib->action,
			prefix_fib->u.gateway.eth_cache, gk_conf);
		break;
	case GK_DROP:
		/* Drop entries hold no extra resources. */
		break;
	case GK_FWD_NEIGHBOR_FRONT_NET:
		/* FALLTHROUGH */
	case GK_FWD_NEIGHBOR_BACK_NET:
		rte_panic("%s(%s): GK_FWD_NEIGHBOR_FRONT_NET and GK_FWD_NEIGHBOR_BACK_NET (action = %u) should have been handled above\n",
			__func__, prefix_info->str, prefix_fib->action);
		ret = -EFAULT;
		break;
	default:
		rte_panic("%s(%s): bug: unsupported action %u\n",
			__func__, prefix_info->str, prefix_fib->action);
		ret = -ENOTSUP;
		break;
	}

	/* Return the FIB entry to the pool of free entries. */
	ret2 = find_reset_fib_entry(fib_tbl, prefix_fib, qid);
	if (unlikely(ret2 < 0)) {
		G_LOG(CRIT, "%s(ipproto=%"PRIu16"): bug: failed to reset FIB entry (errno=%i): %s\n",
			__func__, prefix_info->addr.proto,
			-ret2, strerror(-ret2));
	}

	return ret;
}

/*
 * Initialize a gateway FIB entry.
 *
 * @gw_addr the gateway address information.
 * @ip_prefix the IP prefix for which the gateway is responsible.
 *
 * add_fib_entry_numerical() already ensured that the gateway
 * and the prefix have the same IP version.
*/
static int
init_gateway_fib_locked(const struct ip_prefix *ip_prefix,
	enum gk_fib_action action, const struct route_properties *props,
	struct ipaddr *gw_addr, struct gk_config *gk_conf)
{
	int ret, ret2;
	uint32_t fib_id;
	struct gk_lpm *ltbl = &gk_conf->lpm_tbl;
	struct gk_fib *gw_fib, *neigh_fib;
	struct qid *qid;
	struct ether_cache *eth_cache;
	struct neighbor_hash_table *neigh_ht;
	struct gatekeeper_if *iface;

	/* The prefix and its gateway must be of the same IP version. */
	if (unlikely(ip_prefix->addr.proto != gw_addr->proto)) {
		G_LOG(ERR, "%s(%s): IP prefix protocol (%hu) does not match the gateway address protocol (%hu)\n",
			__func__, ip_prefix->str, ip_prefix->addr.proto,
			gw_addr->proto);
		return -EINVAL;
	}

	/* The action selects the interface behind which the gateway lives. */
	if (action == GK_FWD_GATEWAY_FRONT_NET)
		iface = &gk_conf->net->front;
	else if (likely(action == GK_FWD_GATEWAY_BACK_NET))
		iface = &gk_conf->net->back;
	else {
		G_LOG(ERR, "%s(%s): failed to initialize a fib entry for gateway because it has invalid action %d\n",
			__func__, ip_prefix->str, action);
		return -EINVAL;
	}

	/* Find the neighbor FIB entry for this gateway. */
	neigh_fib = find_fib_entry_for_neighbor_locked(
		gw_addr, action, gk_conf);
	if (neigh_fib == NULL) {
		G_LOG(ERR, "%s(%s): invalid gateway entry; could not find neighbor FIB\n",
			__func__, ip_prefix->str);
		return -EINVAL;
	}

	/* Find the Ethernet cached header entry for this gateway. */
	neigh_ht = &neigh_fib->u.neigh;
	eth_cache = neigh_get_ether_cache_locked(
		neigh_ht, gw_addr, iface, gk_conf->lcores[0]);
	if (eth_cache == NULL)
		return -EINVAL;

	/* Find an empty FIB entry for the Gateway. */
	ret = get_empty_fib_id(ip_prefix->addr.proto, gk_conf, &gw_fib,
		&qid, &fib_id);
	if (unlikely(ret < 0))
		goto put_ether_cache;

	/* Fills up the Gateway FIB entry for the IP prefix. */
	gw_fib->action = action;
	gw_fib->u.gateway.eth_cache = eth_cache;
	gw_fib->u.gateway.props = *props;

	ret = lpm_add_route(&ip_prefix->addr, ip_prefix->len, fib_id, ltbl);
	if (ret < 0)
		goto init_fib;

	return 0;

init_fib:
	/* Undo get_empty_fib_id() before releasing the Ethernet cache. */
	ret2 = reset_fib_entry(gw_fib, qid, fib_id);
	if (unlikely(ret2 < 0)) {
		G_LOG(CRIT, "%s(ipproto=%"PRIu16"): bug: failed to reset FIB entry (errno=%i): %s\n",
			__func__, ip_prefix->addr.proto,
			-ret2, strerror(-ret2));
	}
put_ether_cache:
	ether_cache_put(neigh_fib, action, eth_cache, gk_conf);
	return ret;
}

/*
 * Return 0 when @gw_addr is not included in @prefix.
 * Otherwise, or if there is an error, return a negative number.
 */
static int
check_gateway_prefix(const struct ip_prefix *prefix, struct ipaddr *gw_addr)
{
	if (unlikely(prefix->addr.proto != gw_addr->proto)) {
		G_LOG(ERR, "%s(%s): IP prefix protocol (%hu) does not match the gateway address protocol (%hu)\n",
			__func__, prefix->str, prefix->addr.proto,
			gw_addr->proto);
		return -EINVAL;
	}

	if (gw_addr->proto == RTE_ETHER_TYPE_IPV4) {
		uint32_t ip4_mask =
			rte_cpu_to_be_32(~0ULL << (32 - prefix->len));
		if ((prefix->addr.ip.v4.s_addr ^ gw_addr->ip.v4.s_addr) &
				ip4_mask)
			return 0;
	} else if (likely(gw_addr->proto == RTE_ETHER_TYPE_IPV6)) {
		uint64_t ip6_mask;
		uint64_t *pf = (uint64_t *)prefix->addr.ip.v6.s6_addr;
		uint64_t *gw = (uint64_t *)gw_addr->ip.v6.s6_addr;

		if (prefix->len == 0) {
			/* Do nothing.
*/
		} else if (prefix->len <= 64) {
			/* Only the first 64 address bits are compared. */
			ip6_mask = rte_cpu_to_be_64(
				~0ULL << (64 - prefix->len));
			if ((pf[0] ^ gw[0]) & ip6_mask)
				return 0;
		} else {
			/* Whole first word plus the masked second word. */
			ip6_mask = rte_cpu_to_be_64(
				~0ULL << (128 - prefix->len));
			if ((pf[0] != gw[0]) ||
					((pf[1] ^ gw[1]) & ip6_mask))
				return 0;
		}
	} else {
		G_LOG(CRIT, "%s(%s): bug: unknown IP type %hu\n",
			__func__, prefix->str, gw_addr->proto);
		return -EINVAL;
	}

	/* All compared bits match: the gateway falls within the prefix. */
	G_LOG(ERR, "%s(%s): the gateway address is included in the prefix, but gateways of Grantor entries cannot be neighbors of Gatekeeper servers (see issue #267 for details)\n",
		__func__, prefix->str);
	return -EPERM;
}

/* Upper bound imposed by the width of the num_entries field. */
#define MAX_NUM_GRANTORS_PER_ENTRY \
	((1 << (RTE_SIZEOF_FIELD(struct gk_fib, u.grantor.set->num_entries) * 8)) - 1)

/*
 * Initialize a Grantor FIB entry.
 *
 * @gt_addr the Grantor address information.
 * @gw_addr the gateway address information.
 * @ip_prefix the IP prefix for which the gateway is responsible.
 *
 * add_fib_entry_numerical() already ensured that the gateway
 * and the prefix have the same IP version.
*/ static int init_grantor_fib_locked(const struct ip_prefix *ip_prefix, struct ipaddr *gt_addrs, struct ipaddr *gw_addrs, unsigned int num_addrs, struct gk_config *gk_conf, struct gk_fib *gt_fib) { int ret, ret2; struct gk_fib *neigh_fibs[num_addrs]; struct ether_cache *eth_caches[num_addrs]; struct gatekeeper_if *iface = &gk_conf->net->back; struct gk_lpm *ltbl = &gk_conf->lpm_tbl; struct qid *qid; struct grantor_set *new_set; unsigned int i, num_cache_holds = 0; uint32_t fib_id; bool replace_gt = true; if (unlikely(num_addrs > MAX_NUM_GRANTORS_PER_ENTRY)) { G_LOG(ERR, "%s(%s): number of Grantor/gateway address pairs (%u) is greater than the max number of entries allowed (%d)\n", __func__, ip_prefix->str, num_addrs, MAX_NUM_GRANTORS_PER_ENTRY); return -EINVAL; } for (i = 0; i < num_addrs; i++) { struct neighbor_hash_table *neigh_ht; if (unlikely(ip_prefix->addr.proto != gt_addrs[i].proto)) { G_LOG(ERR, "%s(%s): IP prefix protocol (%hu) does not match the Grantor address protocol (%hu)\n", __func__, ip_prefix->str, ip_prefix->addr.proto, gt_addrs[i].proto); ret = -EINVAL; goto put_ether_cache; } /* * Verify that the gateway IP address @gw_addrs[i] is NOT * included in the prefix. * * This verification is needed because when a Gatekeeper * server forwards a packet directly to a protected * destination, it always forwards the packet to * the gateway of the associated Grantor server * (see gk_process_bpf() for details). Thus, the gateway * cannot be a neighbor, otherwise the packets are not sent * directly to the protected destination. * * Issue #267 discusses the assumptions behind this * verification. */ ret = check_gateway_prefix(ip_prefix, &gw_addrs[i]); if (unlikely(ret < 0)) goto put_ether_cache; /* Find the neighbor FIB entry for this gateway. 
*/ neigh_fibs[i] = find_fib_entry_for_neighbor_locked( &gw_addrs[i], GK_FWD_GATEWAY_BACK_NET, gk_conf); if (unlikely(neigh_fibs[i]== NULL)) { G_LOG(ERR, "%s(%s): invalid gateway entry; could not find neighbor FIB\n", __func__, ip_prefix->str); ret = -EINVAL; goto put_ether_cache; } /* Find the Ethernet cached header entry for this gateway. */ neigh_ht = &neigh_fibs[i]->u.neigh; eth_caches[i] = neigh_get_ether_cache_locked( neigh_ht, &gw_addrs[i], iface, gk_conf->lcores[0]); if (unlikely(eth_caches[i] == NULL)) { ret = -EINVAL; goto put_ether_cache; } num_cache_holds++; } if (gt_fib == NULL) { ret = get_empty_fib_id(ip_prefix->addr.proto, gk_conf, &gt_fib, &qid, &fib_id); if (unlikely(ret < 0)) goto put_ether_cache; replace_gt = false; } new_set = rte_malloc_socket("gk_fib.grantor.set", sizeof(*new_set) + num_addrs * sizeof(*(new_set->entries)), 0, rte_lcore_to_socket_id(gk_conf->lcores[0])); if (unlikely(new_set == NULL)) { G_LOG(ERR, "%s(%s): could not allocate set of Grantor entries\n", __func__, ip_prefix->str); ret = -ENOMEM; goto init_fib; } new_set->proto = ip_prefix->addr.proto; new_set->num_entries = num_addrs; for (i = 0; i < num_addrs; i++) { new_set->entries[i].gt_addr = gt_addrs[i]; new_set->entries[i].eth_cache = eth_caches[i]; } if (replace_gt) { /* Replace old set of Grantors in existing entry. */ struct grantor_set *old_set = gt_fib->u.grantor.set; gt_fib->u.grantor.set = new_set; synchronize_gk_instances_with_fib(gk_conf, gt_fib, true); clear_grantor_set(ip_prefix, old_set, gk_conf); } else { /* Add new entry. */ gt_fib->action = GK_FWD_GRANTOR; gt_fib->u.grantor.set = new_set; ret = lpm_add_route(&ip_prefix->addr, ip_prefix->len, fib_id, ltbl); if (ret < 0) goto init_fib; } return 0; init_fib: if (replace_gt) goto put_ether_cache; /* Next label. 
*/
	ret2 = reset_fib_entry(gt_fib, qid, fib_id);
	if (unlikely(ret2 < 0)) {
		G_LOG(CRIT, "%s(ipproto=%"PRIu16"): bug: failed to reset FIB entry (errno=%i): %s\n",
			__func__, ip_prefix->addr.proto,
			-ret2, strerror(-ret2));
	}
	rte_free(new_set);
put_ether_cache:
	/* Drop the references taken on all Ethernet cache entries. */
	for (i = 0; i < num_cache_holds; i++) {
		ether_cache_put(neigh_fibs[i], GK_FWD_GATEWAY_BACK_NET,
			eth_caches[i], gk_conf);
	}
	return ret;
}

/* Add a FIB entry that drops all packets matching @ip_prefix. */
static int
init_drop_fib_locked(const struct ip_prefix *ip_prefix,
	const struct route_properties *props, struct gk_config *gk_conf)
{
	uint32_t fib_id;
	struct gk_fib *ip_prefix_fib;
	struct gk_lpm *ltbl = &gk_conf->lpm_tbl;
	struct qid *qid;

	/* Initialize the fib entry for the IP prefix. */
	int ret = get_empty_fib_id(ip_prefix->addr.proto, gk_conf,
		&ip_prefix_fib, &qid, &fib_id);
	if (unlikely(ret < 0))
		return ret;

	ip_prefix_fib->action = GK_DROP;
	ip_prefix_fib->u.drop.props = *props;

	ret = lpm_add_route(&ip_prefix->addr, ip_prefix->len, fib_id, ltbl);
	if (unlikely(ret < 0)) {
		/* Return the FIB entry to the free pool before bailing out. */
		int ret2 = reset_fib_entry(ip_prefix_fib, qid, fib_id);
		if (unlikely(ret2 < 0)) {
			G_LOG(CRIT, "%s(ipproto=%"PRIu16"): bug: failed to reset FIB entry (errno=%i): %s\n",
				__func__, ip_prefix->addr.proto,
				-ret2, strerror(-ret2));
		}
		return ret;
	}
	return 0;
}

/*
 * If a FIB entry already exists for @prefix, then @cur_fib points to it.
 * Otherwise, @cur_fib is NULL.
 */
static int
add_fib_entry_locked(const struct ip_prefix *prefix, struct ipaddr *gt_addrs,
	struct ipaddr *gw_addrs, unsigned int num_addrs,
	enum gk_fib_action action, const struct route_properties *props,
	struct gk_config *gk_conf, struct gk_fib *cur_fib)
{
	int ret;

	/* Overwriting is only allowed when the action stays the same. */
	if (cur_fib != NULL && cur_fib->action != action) {
		G_LOG(ERR, "%s(%s): attempt to overwrite prefix whose action is %u with a new FIB entry of action %u; delete current FIB entry and add the new one\n",
			__func__, prefix->str, cur_fib->action, action);
		return -EINVAL;
	}

	switch (action) {
	case GK_FWD_GRANTOR:
		/* Grantor entries need at least one Grantor/gateway pair. */
		if (num_addrs < 1 || gt_addrs == NULL || gw_addrs == NULL)
			return -EINVAL;
		ret = init_grantor_fib_locked(prefix, gt_addrs, gw_addrs,
			num_addrs, gk_conf, cur_fib);
		if (ret < 0)
			return ret;
		break;
	case GK_FWD_GATEWAY_FRONT_NET:
		/* FALLTHROUGH */
	case GK_FWD_GATEWAY_BACK_NET:
		/* Gateway entries take exactly one gateway and no Grantor. */
		if (num_addrs != 1 || gt_addrs != NULL || gw_addrs == NULL ||
				cur_fib != NULL)
			return -EINVAL;
		ret = init_gateway_fib_locked(prefix, action, props,
			&gw_addrs[0], gk_conf);
		if (ret < 0)
			return ret;
		break;
	case GK_DROP:
		/* Drop entries take no addresses at all. */
		if (num_addrs != 0 || gt_addrs != NULL || gw_addrs != NULL ||
				cur_fib != NULL)
			return -EINVAL;
		ret = init_drop_fib_locked(prefix, props, gk_conf);
		if (ret < 0)
			return ret;
		break;
	case GK_FWD_NEIGHBOR_FRONT_NET:
		/* FALLTHROUGH */
	case GK_FWD_NEIGHBOR_BACK_NET:
		/* FALLTHROUGH */
	default:
		G_LOG(ERR, "%s(%s): invalid FIB action %u\n",
			__func__, prefix->str, action);
		return -EINVAL;
	}
	return 0;
}

/*
 * Verify that every rule in @rib whose prefix is longer than @ip/@depth
 * is either a Grantor or a drop entry; see
 * check_prefix_security_hole_locked() for the rationale.
 */
static int
check_longer_prefixes(const char *context, const struct rib_head *rib,
	const void *ip, uint8_t depth, const struct gk_fib *fib_table,
	const char *prefix_str, enum gk_fib_action prefix_action)
{
	struct rib_longer_iterator_state state;
	/*
	 * @stop_at_children can be true because
	 *
	 * (1) if a child is not GK_FWD_GRANTOR, nor GK_DROP,
	 * the test will already fail;
	 *
	 * (2) if a child is either GK_FWD_GRANTOR, or GK_DROP,
	 * the grand-children (if they exist) must be safe.
	 * Otherwise, the child would not have been added.
*/ int ret = rib_longer_iterator_state_init(&state, rib, ip, depth, true); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(%s): failed to initialize the %s RIB iterator (errno=%i): %s\n", __func__, prefix_str, context, -ret, strerror(-ret)); return ret; } while (true) { struct rib_iterator_rule rule; const struct gk_fib *fib; ret = rib_longer_iterator_next(&state, &rule); if (unlikely(ret < 0)) { if (unlikely(ret != -ENOENT)) { G_LOG(ERR, "%s(%s): %s RIB iterator failed (errno=%i): %s\n", __func__, prefix_str, context, -ret, strerror(-ret)); goto out; } ret = 0; goto out; } fib = &fib_table[rule.next_hop]; if (fib->action != GK_FWD_GRANTOR && fib->action != GK_DROP) { G_LOG(WARNING, "%s(%s): adding the %s rule with action %u would add a security hole since there already exists an entry of %u length with action %u\n", __func__, prefix_str, context, prefix_action, rule.depth, fib->action); ret = -EPERM; goto out; } } out: rib_longer_iterator_end(&state); return ret; } static int check_shorter_prefixes(const char *context, const struct rib_head *rib, const void *ip, uint8_t depth, const struct gk_fib *fib_table, const char *prefix_str, enum gk_fib_action prefix_action) { struct rib_shorter_iterator_state state; int ret = rib_shorter_iterator_state_init(&state, rib, ip, depth); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(%s): failed to initialize the %s RIB iterator (errno=%i): %s\n", __func__, prefix_str, context, -ret, strerror(-ret)); return ret; } while (true) { struct rib_iterator_rule rule; const struct gk_fib *fib; ret = rib_shorter_iterator_next(&state, &rule); if (unlikely(ret < 0)) { if (unlikely(ret != -ENOENT)) { G_LOG(ERR, "%s(%s): %s RIB iterator failed (errno=%i): %s\n", __func__, prefix_str, context, -ret, strerror(-ret)); goto out; } ret = 0; goto out; } fib = &fib_table[rule.next_hop]; if (fib->action == GK_FWD_GRANTOR || fib->action == GK_DROP) { G_LOG(WARNING, "%s(%s): adding the %s rule with action %u would add a security hole since there already exists an 
entry of %u length with action %u\n",
				__func__, prefix_str, context, prefix_action,
				rule.depth, fib->action);
			ret = -EPERM;
			goto out;
		}
	}

out:
	rib_shorter_iterator_end(&state);
	return ret;
}

/*
 * This function makes sure that only a drop or another Grantor entry
 * can have a longer prefix than a drop or Grantor entry.
 *
 * The importance of this sanity check is illustrated in the following example:
 * assume that the prefix 10.1.1.0/24 forwards to a gateway and
 * the prefix 10.1.0.0/16 being added forwards to a Grantor.
 * Although the prefix 10.1.0.0/16 is intended to protect every host in that
 * destination, the prefix 10.1.1.0/24 is a longer match and leaves some of
 * those hosts unprotected. Without this sanity check, variations of this
 * example could go unnoticed until it is too late.
 */
static int
check_prefix_security_hole_locked(const struct ip_prefix *prefix,
	enum gk_fib_action action, struct gk_config *gk_conf)
{
	struct gk_lpm *ltbl = &gk_conf->lpm_tbl;

	if (action == GK_DROP || action == GK_FWD_GRANTOR) {
		/* Ensure that all prefixes longer than @prefix are safe. */
		if (prefix->addr.proto == RTE_ETHER_TYPE_IPV4) {
			return check_longer_prefixes("IPv4",
				rib4_from_ltbl(ltbl),
				&prefix->addr.ip.v4.s_addr, prefix->len,
				ltbl->fib_tbl, prefix->str, action);
		}
		if (likely(prefix->addr.proto == RTE_ETHER_TYPE_IPV6)) {
			return check_longer_prefixes("IPv6",
				rib6_from_ltbl(ltbl),
				prefix->addr.ip.v6.s6_addr, prefix->len,
				ltbl->fib_tbl6, prefix->str, action);
		}
		goto unknown;
	}

	/* Ensure that all prefixes shorter than @prefix are safe.
*/ if (prefix->addr.proto == RTE_ETHER_TYPE_IPV4) { return check_shorter_prefixes("IPv4", rib4_from_ltbl(ltbl), &prefix->addr.ip.v4.s_addr, prefix->len, ltbl->fib_tbl, prefix->str, action); } if (likely(prefix->addr.proto == RTE_ETHER_TYPE_IPV6)) { return check_shorter_prefixes("IPv6", rib6_from_ltbl(ltbl), prefix->addr.ip.v6.s6_addr, prefix->len, ltbl->fib_tbl6, prefix->str, action); } unknown: G_LOG(WARNING, "%s(%s): unknown IP type %hu with action %u\n", __func__, prefix->str, prefix->addr.proto, action); return -EINVAL; } /* * Add a FIB entry for a binary IP address prefix. * * GK_FWD_GRANTOR entries use both @gt_addrs and @gw_addrs, * and @num_addrs represents the number of such Grantor and * gateway pairs for the FIB entry. * * GK_DROP uses neither @gt_addrs nor @gw_addrs. * * All other entry types only use @gw_addrs, and should only * have one gateway (@num_addrs == 1). */ int add_fib_entry_numerical_locked(const struct ip_prefix *prefix_info, struct ipaddr *gt_addrs, struct ipaddr *gw_addrs, unsigned int num_addrs, enum gk_fib_action action, const struct route_properties *props, struct gk_config *gk_conf) { struct gk_fib *neigh_fib; int ret = check_prefix(prefix_info); if (unlikely(ret < 0)) return ret; neigh_fib = find_fib_entry_for_neighbor_locked( &prefix_info->addr, GK_FWD_GATEWAY_FRONT_NET, gk_conf); if (unlikely(neigh_fib != NULL)) { G_LOG(ERR, "%s(%s): invalid prefix; prefix lookup found existing neighbor FIB on front interface\n", __func__, prefix_info->str); return -EINVAL; } else { /* Clarify LPM lookup miss that will occur in log. 
*/ G_LOG(INFO, "%s(%s): prefix lookup did not find existing neighbor FIB on front interface, as expected\n", __func__, prefix_info->str); } neigh_fib = find_fib_entry_for_neighbor_locked( &prefix_info->addr, GK_FWD_GATEWAY_BACK_NET, gk_conf); if (unlikely(neigh_fib != NULL)) { G_LOG(ERR, "%s(%s): invalid prefix; prefix lookup found existing neighbor FIB on back interface\n", __func__, prefix_info->str); return -EINVAL; } else { /* Clarify LPM lookup miss that will occur in log. */ G_LOG(INFO, "%s(%s): prefix lookup did not find existing neighbor FIB on back interface, as expected\n", __func__, prefix_info->str); } ret = check_prefix_exists_locked(prefix_info, gk_conf, NULL); if (ret != -ENOENT) { G_LOG(ERR, "%s(%s): prefix already exists or error occurred\n", __func__, prefix_info->str); if (ret >= 0) return -EEXIST; return ret; } ret = check_prefix_security_hole_locked(prefix_info, action, gk_conf); if (ret < 0) return ret; return add_fib_entry_locked(prefix_info, gt_addrs, gw_addrs, num_addrs, action, props, gk_conf, NULL); } int add_fib_entry_numerical(const struct ip_prefix *prefix_info, struct ipaddr *gt_addrs, struct ipaddr *gw_addrs, unsigned int num_addrs, enum gk_fib_action action, const struct route_properties *props, struct gk_config *gk_conf) { int ret; rte_spinlock_lock_tm(&gk_conf->lpm_tbl.lock); ret = add_fib_entry_numerical_locked(prefix_info, gt_addrs, gw_addrs, num_addrs, action, props, gk_conf); rte_spinlock_unlock_tm(&gk_conf->lpm_tbl.lock); return ret; } static int update_fib_entry_numerical(const struct ip_prefix *prefix_info, struct ipaddr *gt_addrs, struct ipaddr *gw_addrs, unsigned int num_addrs, enum gk_fib_action action, const struct route_properties *props, struct gk_config *gk_conf) { int fib_id; struct gk_fib *cur_fib; int ret = check_prefix(prefix_info); if (unlikely(ret < 0)) return ret; rte_spinlock_lock_tm(&gk_conf->lpm_tbl.lock); fib_id = check_prefix_exists_locked(prefix_info, gk_conf, &cur_fib); if (fib_id < 0) { G_LOG(ERR, 
"%s(%s): cannot update set of Grantors; prefix does not already exist or error occurred\n", __func__, prefix_info->str); rte_spinlock_unlock_tm(&gk_conf->lpm_tbl.lock); return fib_id; } ret = add_fib_entry_locked(prefix_info, gt_addrs, gw_addrs, num_addrs, action, props, gk_conf, cur_fib); rte_spinlock_unlock_tm(&gk_conf->lpm_tbl.lock); return ret; } static const struct route_properties default_route_properties = { .rt_proto = RTPROT_STATIC, .priority = 0, }; int add_fib_entry(const char *prefix, const char *gt_ip, const char *gw_ip, enum gk_fib_action action, struct gk_config *gk_conf) { int ret; struct ip_prefix prefix_info; struct ipaddr gt_addr, gw_addr; struct ipaddr *gt_para = NULL, *gw_para = NULL; if (gt_ip != NULL) { ret = convert_str_to_ip(gt_ip, &gt_addr); if (ret < 0) return ret; gt_para = &gt_addr; } if (gw_ip != NULL) { ret = convert_str_to_ip(gw_ip, &gw_addr); if (ret < 0) return ret; gw_para = &gw_addr; } prefix_info.str = prefix; prefix_info.len = parse_ip_prefix(prefix, &prefix_info.addr); return add_fib_entry_numerical(&prefix_info, gt_para, gw_para, gt_ip != NULL || gw_ip != NULL ? 
1 : 0, action, &default_route_properties, gk_conf);
}

/* Thread-safe wrapper around del_fib_entry_numerical_locked(). */
int
del_fib_entry_numerical(const struct ip_prefix *prefix_info,
	struct gk_config *gk_conf)
{
	int ret;

	rte_spinlock_lock_tm(&gk_conf->lpm_tbl.lock);
	ret = del_fib_entry_numerical_locked(prefix_info, gk_conf);
	rte_spinlock_unlock_tm(&gk_conf->lpm_tbl.lock);
	return ret;
}

/* Parse the string @ip_prefix and remove its FIB entry. */
int
del_fib_entry(const char *ip_prefix, struct gk_config *gk_conf)
{
	struct ip_prefix prefix_info;

	prefix_info.str = ip_prefix;
	/* A parse failure yields a negative length, caught down the line. */
	prefix_info.len = parse_ip_prefix(ip_prefix, &prefix_info.addr);

	return del_fib_entry_numerical(&prefix_info, gk_conf);
}

/*
 * Stack when function starts:
 *
 *	5 | gw_addrs   | (passed as parameter)
 *	4 | gt_addrs   | (passed as parameter)
 *	3 | gk_conf    | (unused in this function)
 *	2 | table      |
 *	1 | prefix_str | (unused in this function)
 *	  |____________|
 */
static void
read_grantor_lb_entries(lua_State *L, lua_Integer tbl_size,
	struct ipaddr *gt_addrs, struct ipaddr *gw_addrs)
{
	lua_Integer i;

	/* Iterate over a table of tables. */
	for (i = 1; i <= tbl_size; i++) {
		const char *gt_ip, *gw_ip;
		int ret;

		/* Get the table at index i. */
		lua_rawgeti(L, 2, i);

		/*
		 * Make sure that the item inside
		 * the table is a table itself.
		 */
		if (!lua_istable(L, 6))
			luaL_error(L, "%s(): Grantor entry %ld is not a table",
				__func__, i);

		lua_getfield(L, 6, "gt_ip");
		lua_getfield(L, 6, "gw_ip");
		gt_ip = luaL_checkstring(L, 7);
		gw_ip = luaL_checkstring(L, 8);

		ret = convert_str_to_ip(gt_ip, &gt_addrs[i - 1]);
		if (ret < 0) {
			luaL_error(L, "%s(): cannot convert Grantor IP %s to bytes",
				__func__, gt_ip);
		}

		ret = convert_str_to_ip(gw_ip, &gw_addrs[i - 1]);
		if (ret < 0) {
			luaL_error(L, "%s(): cannot convert gateway IP %s to bytes",
				__func__, gw_ip);
		}

		/* Pop the Grantor/gateway and their table from Lua stack.
*/ lua_pop(L, 3); } } static void add_grantor_entry_lb_verify_params(lua_State *L, const char **prefix, lua_Integer *tbl_size, struct gk_config **gk_conf) { uint32_t ctypeid; uint32_t correct_ctypeid_gk_config = luaL_get_ctypeid(L, CTYPE_STRUCT_GK_CONFIG_PTR); void *cdata; size_t len; if (lua_gettop(L) != 3) { luaL_error(L, "%s(): expected three arguments, however it received %d arguments", __func__, lua_gettop(L)); } /* First argument must be a prefix string. */ *prefix = lua_tolstring(L, 1, &len); if (*prefix == NULL || len == 0) luaL_error(L, "%s(): could not read prefix for adding load balanced Grantor set", __func__); /* Second argument must be a table. */ luaL_checktype(L, 2, LUA_TTABLE); *tbl_size = lua_objlen(L, 2); if (*tbl_size <= 0) luaL_error(L, "%s(): table must have a positive number of Grantor entries", __func__); /* Third argument must be of type CTYPE_STRUCT_GK_CONFIG_PTR. */ cdata = luaL_checkcdata(L, 3, &ctypeid, CTYPE_STRUCT_GK_CONFIG_PTR); if (ctypeid != correct_ctypeid_gk_config) { luaL_error(L, "%s(): expected '%s' as the third argument", __func__, CTYPE_STRUCT_GK_CONFIG_PTR); } *gk_conf = *(struct gk_config **)cdata; } static int __add_grantor_entry_lb(lua_State *L, int overwrite) { const char *prefix; struct ip_prefix prefix_info; lua_Integer tbl_size; struct gk_config *gk_conf; struct ipaddr *gt_addrs; struct ipaddr *gw_addrs; int ret; /* Verify presence and types of parameters and read them in. */ add_grantor_entry_lb_verify_params(L, &prefix, &tbl_size, &gk_conf); gt_addrs = lua_newuserdata(L, tbl_size * sizeof(*gt_addrs)); gw_addrs = lua_newuserdata(L, tbl_size * sizeof(*gw_addrs)); read_grantor_lb_entries(L, tbl_size, gt_addrs, gw_addrs); /* Set up prefix info. 
*/ prefix_info.str = prefix; prefix_info.len = parse_ip_prefix(prefix, &prefix_info.addr); if (overwrite) { ret = update_fib_entry_numerical(&prefix_info, gt_addrs, gw_addrs, tbl_size, GK_FWD_GRANTOR, &default_route_properties, gk_conf); } else { ret = add_fib_entry_numerical(&prefix_info, gt_addrs, gw_addrs, tbl_size, GK_FWD_GRANTOR, &default_route_properties, gk_conf); } if (ret < 0) luaL_error(L, "%s(): could not add or update FIB entry; check Gatekeeper log", __func__); return 0; } int l_add_grantor_entry_lb(lua_State *L) { return __add_grantor_entry_lb(L, false); } int l_update_grantor_entry_lb(lua_State *L) { return __add_grantor_entry_lb(L, true); } static void fillup_gk_fib_dump_entry_ether(struct fib_dump_addr_set *addr_set, struct ether_cache *eth_cache) { addr_set->stale = eth_cache->stale; addr_set->nexthop_ip = eth_cache->ip_addr; rte_ether_addr_copy(&eth_cache->l2_hdr.eth_hdr.dst_addr, &addr_set->d_addr); } /* * CAUTION: fields @dentry->addr and @dentry->prefix_len must be filled in * before calling this function. */ static void fillup_gk_fib_dump_entry(struct gk_fib_dump_entry *dentry, const struct gk_fib *fib) { dentry->action = fib->action; switch (dentry->action) { case GK_FWD_GRANTOR: { unsigned int i; for (i = 0; i < dentry->num_addr_sets; i++) { dentry->addr_sets[i].grantor_ip = fib->u.grantor.set->entries[i].gt_addr; fillup_gk_fib_dump_entry_ether(&dentry->addr_sets[i], fib->u.grantor.set->entries[i].eth_cache); } break; } case GK_FWD_GATEWAY_FRONT_NET: /* FALLTHROUGH */ case GK_FWD_GATEWAY_BACK_NET: fillup_gk_fib_dump_entry_ether(&dentry->addr_sets[0], fib->u.gateway.eth_cache); break; case GK_FWD_NEIGHBOR_FRONT_NET: /* FALLTHROUGH */ case GK_FWD_NEIGHBOR_BACK_NET: /* FALLTHROUGH */ case GK_DROP: break; default: { /* * Things went bad, but keep going. 
*/ char str_prefix[INET6_ADDRSTRLEN]; RTE_BUILD_BUG_ON(INET6_ADDRSTRLEN < INET_ADDRSTRLEN); if (unlikely(convert_ip_to_str(&dentry->addr, str_prefix, sizeof(str_prefix)) < 0)) strcpy(str_prefix, "<ERROR>"); G_LOG(CRIT, "%s(%s/%i): invalid FIB action (%u) in FIB", __func__, str_prefix, dentry->prefix_len, fib->action); break; } } } #define CTYPE_STRUCT_FIB_DUMP_ENTRY_PTR "struct gk_fib_dump_entry *" static inline unsigned int num_addrs_entry_type(const struct gk_fib *fib) { switch (fib->action) { case GK_FWD_GRANTOR: return fib->u.grantor.set->num_entries; case GK_DROP: return 0; default: /* All other entry types have a single Gateway. */ return 1; } } typedef void (*set_addr_t)(struct ipaddr *addr, rib_address_t address_no); static void list_fib_entries(lua_State *L, const char *context, const struct rib_head *rib, const struct gk_fib *fib_table, rte_spinlock_t *lock, set_addr_t setf, uint8_t batch_size) { struct gk_fib_dump_entry *dentry = NULL; size_t dentry_size = 0; uint32_t correct_ctypeid_fib_dump_entry = luaL_get_ctypeid(L, CTYPE_STRUCT_FIB_DUMP_ENTRY_PTR); uint8_t current_batch_size = 0; struct rib_longer_iterator_state state; int ret; rte_spinlock_lock_tm(lock); ret = rib_longer_iterator_state_init(&state, rib, NULL, 0, false); if (unlikely(ret < 0)) { rte_spinlock_unlock_tm(lock); luaL_error(L, "%s(): failed to initialize the %s RIB iterator (errno=%d): %s", __func__, context, -ret, strerror(-ret)); } while (true) { struct rib_iterator_rule rule; const struct gk_fib *fib; unsigned int num_addrs; size_t new_dentry_size; int done; void *cdata; ret = rib_longer_iterator_next(&state, &rule); if (unlikely(ret < 0)) { rte_free(dentry); rib_longer_iterator_end(&state); rte_spinlock_unlock_tm(lock); if (unlikely(ret != -ENOENT)) { luaL_error(L, "%s(): %s RIB iterator failed (errno=%d): %s", __func__, context, -ret, strerror(-ret)); } return; } fib = &fib_table[rule.next_hop]; if (unlikely(fib->action == GK_FWD_NEIGHBOR_FRONT_NET || fib->action == 
GK_FWD_NEIGHBOR_BACK_NET)) continue; num_addrs = num_addrs_entry_type(fib); new_dentry_size = sizeof(*dentry) + num_addrs * sizeof(*dentry->addr_sets); if (new_dentry_size > dentry_size) { dentry_size = new_dentry_size; rte_free(dentry); /* * We don't need rte_zmalloc_socket() here because * the memory is not being used by the GK block. */ dentry = rte_zmalloc("fib_dump", dentry_size, 0); if (unlikely(dentry == NULL)) { rib_longer_iterator_end(&state); rte_spinlock_unlock_tm(lock); luaL_error(L, "%s(): failed to allocate memory for the %s FIB dump", __func__, context); } } else memset(dentry, 0, new_dentry_size); setf(&dentry->addr, rule.address_no); dentry->prefix_len = rule.depth; dentry->fib_id = rule.next_hop; dentry->num_addr_sets = num_addrs; fillup_gk_fib_dump_entry(dentry, fib); lua_pushvalue(L, 2); lua_insert(L, 3); cdata = luaL_pushcdata(L, correct_ctypeid_fib_dump_entry, sizeof(struct gk_fib_dump_entry *)); *(struct gk_fib_dump_entry **)cdata = dentry; lua_insert(L, 4); if (lua_pcall(L, 2, 2, 0) != 0) { rte_free(dentry); rib_longer_iterator_end(&state); rte_spinlock_unlock_tm(lock); lua_error(L); } done = lua_toboolean(L, -2); lua_remove(L, -2); if (unlikely(done)) { rte_free(dentry); rib_longer_iterator_end(&state); rte_spinlock_unlock_tm(lock); return; } if (++current_batch_size >= batch_size) { /* Release the lock after dumping the full batch. */ rte_spinlock_unlock_tm(lock); current_batch_size = 0; /* Give other lcores a chance to acquire the lock. */ rte_pause(); /* * Obtain the lock when starting a new dumping batch. * For the last batch, the lock will be released at * the end. 
*/ rte_spinlock_lock_tm(lock); } } } static void set_addr4(struct ipaddr *addr, rib_address_t address_no) { addr->proto = RTE_ETHER_TYPE_IPV4; addr->ip.v4.s_addr = ipv4_from_rib_addr(address_no); } static void set_addr6(struct ipaddr *addr, rib_address_t address_no) { addr->proto = RTE_ETHER_TYPE_IPV6; rte_memcpy(&addr->ip.v6, &address_no, sizeof(addr->ip.v6)); } #define CTYPE_STRUCT_GK_CONFIG_PTR "struct gk_config *" static int list_fib_for_lua(lua_State *L, bool list_ipv4) { struct gk_config *gk_conf; uint32_t ctypeid; uint32_t correct_ctypeid_gk_config = luaL_get_ctypeid(L, CTYPE_STRUCT_GK_CONFIG_PTR); struct gk_lpm *ltbl; /* First argument must be of type CTYPE_STRUCT_GK_CONFIG_PTR. */ void *cdata = luaL_checkcdata(L, 1, &ctypeid, CTYPE_STRUCT_GK_CONFIG_PTR); if (ctypeid != correct_ctypeid_gk_config) luaL_error(L, "%s(): expected `%s' as first argument", __func__, CTYPE_STRUCT_GK_CONFIG_PTR); /* Second argument must be a Lua function. */ luaL_checktype(L, 2, LUA_TFUNCTION); /* Third argument should be a Lua value. 
*/ if (lua_gettop(L) != 3) luaL_error(L, "%s(): expected three arguments, however it got %d arguments", __func__, lua_gettop(L)); gk_conf = *(struct gk_config **)cdata; ltbl = &gk_conf->lpm_tbl; if (list_ipv4) { list_fib_entries(L, "IPv4", rib4_from_ltbl(ltbl), ltbl->fib_tbl, &ltbl->lock, set_addr4, gk_conf->fib_dump_batch_size); } else { list_fib_entries(L, "IPv6", rib6_from_ltbl(ltbl), ltbl->fib_tbl6, &ltbl->lock, set_addr6, gk_conf->fib_dump_batch_size); } lua_remove(L, 1); lua_remove(L, 1); return 1; } int l_list_gk_fib4(lua_State *L) { return list_fib_for_lua(L, true); } int l_list_gk_fib6(lua_State *L) { return list_fib_for_lua(L, false); } static void fillup_gk_neighbor_dump_entry(struct gk_neighbor_dump_entry *dentry, struct ether_cache *eth_cache) { dentry->stale = eth_cache->stale; rte_memcpy(&dentry->neigh_ip, &eth_cache->ip_addr, sizeof(dentry->neigh_ip)); rte_memcpy(&dentry->d_addr, &eth_cache->l2_hdr.eth_hdr.dst_addr, sizeof(dentry->d_addr)); } #define CTYPE_STRUCT_NEIGHBOR_DUMP_ENTRY_PTR "struct gk_neighbor_dump_entry *" static void list_hash_table_neighbors_unlock(lua_State *L, enum gk_fib_action action, struct neighbor_hash_table *neigh_ht, struct gk_lpm *ltbl) { uint32_t next = 0; const void *key; void *data; void *cdata; uint32_t correct_ctypeid_neighbor_dentry = luaL_get_ctypeid(L, CTYPE_STRUCT_NEIGHBOR_DUMP_ENTRY_PTR); int32_t index = rte_hash_iterate(neigh_ht->hash_table, (void *)&key, &data, &next); while (index >= 0) { struct gk_neighbor_dump_entry dentry; struct ether_cache *eth_cache = data; dentry.action = action; fillup_gk_neighbor_dump_entry(&dentry, eth_cache); lua_pushvalue(L, 2); lua_insert(L, 3); cdata = luaL_pushcdata(L, correct_ctypeid_neighbor_dentry, sizeof(struct gk_neighbor_dump_entry *)); *(struct gk_neighbor_dump_entry **)cdata = &dentry; lua_insert(L, 4); if (lua_pcall(L, 2, 1, 0) != 0) { rte_spinlock_unlock_tm(&ltbl->lock); lua_error(L); } index = rte_hash_iterate(neigh_ht->hash_table, (void *)&key, &data, &next); } 
rte_spinlock_unlock_tm(&ltbl->lock); } static void list_ipv4_if_neighbors(lua_State *L, struct gatekeeper_if *iface, enum gk_fib_action action, struct gk_lpm *ltbl) { int ret; uint32_t fib_id; struct gk_fib *neigh_fib; rte_spinlock_lock_tm(&ltbl->lock); ret = rib_lookup(rib4_from_ltbl(ltbl), (uint8_t *)&iface->ip4_addr.s_addr, &fib_id); /* * Invalid gateway entry, since at least we should * obtain the FIB entry for the neighbor table. */ if (unlikely(ret < 0)) { rte_spinlock_unlock_tm(&ltbl->lock); luaL_error(L, "%s(): failed to lookup the lpm table (errno=%d): %s", __func__, -ret, strerror(-ret)); } neigh_fib = &ltbl->fib_tbl[fib_id]; RTE_VERIFY(neigh_fib->action == action); list_hash_table_neighbors_unlock(L, action, &neigh_fib->u.neigh, ltbl); } static void list_ipv6_if_neighbors(lua_State *L, struct gatekeeper_if *iface, enum gk_fib_action action, struct gk_lpm *ltbl) { int ret; uint32_t fib_id; struct gk_fib *neigh_fib; rte_spinlock_lock_tm(&ltbl->lock); ret = rib_lookup(rib6_from_ltbl(ltbl), iface->ip6_addr.s6_addr, &fib_id); /* * Invalid gateway entry, since at least we should * obtain the FIB entry for the neighbor table. 
*/ if (unlikely(ret < 0)) { rte_spinlock_unlock_tm(&ltbl->lock); luaL_error(L, "%s(): failed to lookup the lpm6 table (errno=%d): %s", __func__, -ret, strerror(-ret)); } neigh_fib = &ltbl->fib_tbl6[fib_id]; RTE_VERIFY(neigh_fib->action == action); list_hash_table_neighbors_unlock(L, action, &neigh_fib->u.neigh, ltbl); } static void list_ipv4_neighbors(lua_State *L, struct net_config *net_conf, struct gk_lpm *ltbl) { if (!ipv4_configured(net_conf)) return; list_ipv4_if_neighbors(L, &net_conf->front, GK_FWD_NEIGHBOR_FRONT_NET, ltbl); if (net_conf->back_iface_enabled) list_ipv4_if_neighbors(L, &net_conf->back, GK_FWD_NEIGHBOR_BACK_NET, ltbl); } static void list_ipv6_neighbors(lua_State *L, struct net_config *net_conf, struct gk_lpm *ltbl) { if (!ipv6_configured(net_conf)) return; list_ipv6_if_neighbors(L, &net_conf->front, GK_FWD_NEIGHBOR_FRONT_NET, ltbl); if (net_conf->back_iface_enabled) list_ipv6_if_neighbors(L, &net_conf->back, GK_FWD_NEIGHBOR_BACK_NET, ltbl); } typedef void (*list_neighbors)(lua_State *L, struct net_config *net_conf, struct gk_lpm *ltbl); static void list_neighbors_for_lua(lua_State *L, list_neighbors f) { struct gk_config *gk_conf; uint32_t ctypeid; uint32_t correct_ctypeid_gk_config = luaL_get_ctypeid(L, CTYPE_STRUCT_GK_CONFIG_PTR); /* First argument must be of type CTYPE_STRUCT_GK_CONFIG_PTR. */ void *cdata = luaL_checkcdata(L, 1, &ctypeid, CTYPE_STRUCT_GK_CONFIG_PTR); if (ctypeid != correct_ctypeid_gk_config) luaL_error(L, "%s(): expected `%s' as first argument", __func__, CTYPE_STRUCT_GK_CONFIG_PTR); /* Second argument must be a Lua function. */ luaL_checktype(L, 2, LUA_TFUNCTION); /* Third argument should be a Lua value. 
*/ if (lua_gettop(L) != 3) luaL_error(L, "%s(): expected three arguments, however it got %d arguments", __func__, lua_gettop(L)); gk_conf = *(struct gk_config **)cdata; f(L, gk_conf->net, &gk_conf->lpm_tbl); lua_remove(L, 1); lua_remove(L, 1); } int l_list_gk_neighbors4(lua_State *L) { list_neighbors_for_lua(L, list_ipv4_neighbors); return 1; } int l_list_gk_neighbors6(lua_State *L) { list_neighbors_for_lua(L, list_ipv6_neighbors); return 1; } #define CTYPE_STRUCT_ETHER_ADDR_REF "struct rte_ether_addr &" int l_ether_format_addr(lua_State *L) { struct rte_ether_addr *d_addr; char d_buf[RTE_ETHER_ADDR_FMT_SIZE]; uint32_t ctypeid; uint32_t correct_ctypeid_ether_addr = luaL_get_ctypeid(L, CTYPE_STRUCT_ETHER_ADDR_REF); /* First argument must be of type CTYPE_STRUCT_ETHER_ADDR_REF. */ void *cdata = luaL_checkcdata(L, 1, &ctypeid, CTYPE_STRUCT_ETHER_ADDR_REF); if (ctypeid != correct_ctypeid_ether_addr) luaL_error(L, "%s(): expected `%s' as first argument", __func__, CTYPE_STRUCT_ETHER_ADDR_REF); d_addr = *(struct rte_ether_addr **)cdata; rte_ether_format_addr(d_buf, sizeof(d_buf), d_addr); lua_pushstring(L, d_buf); return 1; } #define CTYPE_STRUCT_IP_ADDR_REF "struct ipaddr &" int l_ip_format_addr(lua_State *L) { struct ipaddr *ip_addr; char ip[MAX_INET_ADDRSTRLEN]; int ret; uint32_t ctypeid; uint32_t correct_ctypeid_ip_addr = luaL_get_ctypeid(L, CTYPE_STRUCT_IP_ADDR_REF); /* First argument must be of type CTYPE_STRUCT_IP_ADDR_REF. */ void *cdata = luaL_checkcdata(L, 1, &ctypeid, CTYPE_STRUCT_IP_ADDR_REF); if (ctypeid != correct_ctypeid_ip_addr) luaL_error(L, "%s(): expected `%s' as first argument", __func__, CTYPE_STRUCT_IP_ADDR_REF); ip_addr = *(struct ipaddr **)cdata; ret = convert_ip_to_str(ip_addr, ip, sizeof(ip)); if (ret < 0) luaL_error(L, "%s(): failed to convert an IP address to string", __func__); lua_pushstring(L, ip); return 1; } ```
/content/code_sandbox/gk/rt.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
21,753
```c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ /* For gettid(). */ #define _GNU_SOURCE #include <string.h> #include <stdbool.h> #include <math.h> #include <linux/icmp.h> #include <linux/icmpv6.h> #include <unistd.h> #include <time.h> #include <rte_ip.h> #include <rte_log.h> #include <rte_lcore.h> #include <rte_ethdev.h> #include <rte_memcpy.h> #include <rte_cycles.h> #include <rte_malloc.h> #include <rte_icmp.h> #include <rte_common.h> #include "gatekeeper_acl.h" #include "gatekeeper_gk.h" #include "gatekeeper_main.h" #include "gatekeeper_lls.h" #include "gatekeeper_config.h" #include "gatekeeper_launch.h" #include "gatekeeper_l2.h" #include "gatekeeper_sol.h" #include "gatekeeper_flow_bpf.h" #include "gatekeeper_hash.h" #include "bpf.h" #define START_PRIORITY (38) /* Set @START_ALLOWANCE as the double size of a large DNS reply. */ #define START_ALLOWANCE (8) /* We should avoid calling integer_log_base_2() with zero. */ static inline uint8_t integer_log_base_2(uint64_t delta_time) { #if __WORDSIZE == 64 return (8 * sizeof(uint64_t) - 1) - __builtin_clzl(delta_time); #else return (8 * sizeof(uint64_t) - 1) - __builtin_clzll(delta_time); #endif } /* * It converts the difference of time between the current packet and * the last seen packet into a given priority. */ static uint8_t priority_from_delta_time(uint64_t present, uint64_t past) { uint64_t delta_time; if (unlikely(present < past)) { /* * This should never happen, but we handle it gracefully here * in order to keep going. 
*/ G_LOG(ERR, "The present time smaller than the past time\n"); return 0; } delta_time = (present - past) * picosec_per_cycle; if (unlikely(delta_time < 1)) return 0; return integer_log_base_2(delta_time); } static inline bool is_addr6_mc(const struct in6_addr *addr6) { /* * @addr6 is multicast. * See RFC 4291 section "2.7. Multicast Addresses". */ return addr6->s6_addr[0] == 0xFF; } static inline bool is_addr6_ll(const struct in6_addr *addr6) { const uint8_t ll_prefix[] = {0xFE, 0x80, 0, 0, 0, 0, 0, 0}; const uint64_t *pa64 = (const uint64_t *)addr6->s6_addr; const uint64_t *pb64 = (const uint64_t *)ll_prefix; RTE_BUILD_BUG_ON(sizeof(ll_prefix) != sizeof(uint64_t)); /* * @addr6 is link-local. * See RFC 4291 section "2.5.6. Link-Local IPv6 Unicast * Addresses". */ return *pa64 == *pb64; } static int extract_packet_info(struct rte_mbuf *pkt, struct ipacket *packet, struct rte_mbuf **arp_bufs, uint16_t *num_arp, struct acl_search *acl6) { uint16_t ether_type; size_t ether_len; uint16_t pkt_len = rte_pktmbuf_data_len(pkt); struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *); ether_type = rte_be_to_cpu_16(pkt_in_skip_l2(pkt, eth_hdr, &packet->l3_hdr)); ether_len = pkt_in_l2_hdr_len(pkt); switch (ether_type) { case RTE_ETHER_TYPE_IPV4: { struct rte_ipv4_hdr *ip4_hdr; if (unlikely(pkt_len < ether_len + sizeof(*ip4_hdr))) { G_LOG(DEBUG, "%s(): packet is too short to be IPv4 (%i)\n", __func__, pkt_len); return -EINVAL; } ip4_hdr = packet->l3_hdr; packet->flow.proto = RTE_ETHER_TYPE_IPV4; packet->flow.f.v4.src.s_addr = ip4_hdr->src_addr; packet->flow.f.v4.dst.s_addr = ip4_hdr->dst_addr; break; } case RTE_ETHER_TYPE_IPV6: { struct rte_ipv6_hdr *ip6_hdr; if (unlikely(pkt_len < ether_len + sizeof(*ip6_hdr))) { G_LOG(DEBUG, "%s(): packet is too short to be IPv6 (%i)\n", __func__, pkt_len); return -EINVAL; } ip6_hdr = packet->l3_hdr; packet->flow.proto = RTE_ETHER_TYPE_IPV6; rte_memcpy(packet->flow.f.v6.src.s6_addr, ip6_hdr->src_addr, 
sizeof(packet->flow.f.v6.src.s6_addr)); rte_memcpy(packet->flow.f.v6.dst.s6_addr, ip6_hdr->dst_addr, sizeof(packet->flow.f.v6.dst.s6_addr)); if (unlikely(is_addr6_mc(&packet->flow.f.v6.dst) || is_addr6_ll(&packet->flow.f.v6.dst))) { add_pkt_acl(acl6, pkt); return -ENOENT; } break; } case RTE_ETHER_TYPE_ARP: arp_bufs[(*num_arp)++] = pkt; return -ENOENT; default: /* Drop non-IP and non-ARP packets. */ log_unknown_l2("gk", ether_type); return -EINVAL; } packet->pkt = pkt; return 0; } static inline uint64_t calc_request_expire_at(uint8_t priority, uint64_t now) { uint8_t above_priority = priority + 4; RTE_BUILD_BUG_ON(PRIORITY_MAX >= (sizeof(uint64_t) * 8)); if (unlikely(above_priority > PRIORITY_MAX)) { /* Avoid overflow of the left shift operator below. */ above_priority = PRIORITY_MAX; } /* * TCP waits for 2^i seconds between each retransmitted SYN packet, * where i is greater or equal to 0. Thus, the corresponding * priority p for each retransmitted packet i is: * * floor(log_2(2^i * 10^12)) = floor(i + log_2(10^12)) = i + 39 * * If one sets above_priority = p + 4 and waits for the amount * of time corresponding for the above_priority priority, * TCP can transmit two more SYN packets: * * (2^(i+1)+2^(i+2)) * 10^12 <= 2 ^ above_priority => * 2^(i+1) * 10^12 + 2^(i+2) * 10^12 <= 2^(i+2) * 2^41 => * 2^(i+2) * 5 * 10^11 + 2^(i+2) * 10^12 <= 2^(i+2) * 2^41 => * 5 * 10^11 + 10^12 <= 2^41 (TRUE) */ /* * The cast `(uint64_t)` is needed to force the compiler * to use the 64-bit version of `<<`. */ return now + (((uint64_t)1 << above_priority) / picosec_per_cycle); } static inline void initialize_flow_entry(struct flow_entry *fe, struct ip_flow *flow, uint32_t flow_hash_val, struct gk_fib *grantor_fib) { uint64_t now = rte_rdtsc(); /* * The flow table is a critical data structure, so, * whenever the size of entries grow too much, * one must look for alternatives before increasing * the limit below. 
*/ RTE_BUILD_BUG_ON(sizeof(*fe) > 128); rte_memcpy(&fe->flow, flow, sizeof(*flow)); fe->in_use = true; fe->flow_hash_val = flow_hash_val; fe->state = GK_REQUEST; fe->expire_at = calc_request_expire_at(START_PRIORITY, now); fe->u.request.last_packet_seen_at = now; fe->u.request.last_priority = START_PRIORITY; fe->u.request.allowance = START_ALLOWANCE - 1; fe->grantor_fib = grantor_fib; } static inline void reinitialize_flow_entry(struct flow_entry *fe, uint64_t now) { fe->state = GK_REQUEST; fe->expire_at = calc_request_expire_at(START_PRIORITY, now); fe->u.request.last_packet_seen_at = now; fe->u.request.last_priority = START_PRIORITY; fe->u.request.allowance = START_ALLOWANCE - 1; } typedef int (*packet_drop_cb_func)(struct rte_mbuf *pkt, struct gk_instance *instance); static int drop_packet_front(struct rte_mbuf *pkt, struct gk_instance *instance) { instance->traffic_stats.tot_pkts_num_dropped++; instance->traffic_stats.tot_pkts_size_dropped += rte_pktmbuf_pkt_len(pkt); return drop_packet(pkt); } static inline int drop_packet_back(struct rte_mbuf *pkt, __attribute__((unused)) struct gk_instance *instance) { return drop_packet(pkt); } /* * Return value indicates whether the cached Ethernet header is stale or not. */ int pkt_copy_cached_eth_header(struct rte_mbuf *pkt, struct ether_cache *eth_cache, size_t l2_hdr_len) { unsigned seq; bool stale; do { seq = read_seqbegin(&eth_cache->lock); stale = eth_cache->stale; if (!stale) { struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *); rte_memcpy(eth_hdr, &eth_cache->l2_hdr, l2_hdr_len); pkt->l2_len = l2_hdr_len; } } while (read_seqretry(&eth_cache->lock, seq)); return stale; } /* * When a flow entry is at request state, all the GK block processing * that entry does is to: * (1) compute the priority of the packet. * (2) encapsulate the packet as a request. * (3) put this encapsulated packet in the request queue. 
* * Returns a negative integer on error, or EINPROGRESS to indicate * that the request is being processed by another lcore, and should * not be forwarded or dropped on returning from this function. */ static int gk_process_request(struct flow_entry *fe, struct ipacket *packet, struct rte_mbuf **req_bufs, uint16_t *num_reqs, struct sol_config *sol_conf) { int ret; uint64_t now = rte_rdtsc(); uint8_t priority = priority_from_delta_time(now, fe->u.request.last_packet_seen_at); struct ether_cache *eth_cache; struct grantor_entry *grantor; fe->u.request.last_packet_seen_at = now; /* * The reason for using "<" instead of "<=" is that the equal case * means that the source has waited enough time to have the same * last priority, so it should be awarded with the allowance. */ if (priority < fe->u.request.last_priority && fe->u.request.allowance > 0) { fe->u.request.allowance--; priority = fe->u.request.last_priority; } else { fe->u.request.last_priority = priority; fe->u.request.allowance = START_ALLOWANCE - 1; fe->expire_at = calc_request_expire_at(priority, now); } /* * Adjust @priority for the DSCP field. * DSCP 0 for legacy packets; 1 for granted packets; * 2 for capability renew; 3-63 for requests. */ priority += PRIORITY_REQ_MIN; if (unlikely(priority > PRIORITY_MAX)) priority = PRIORITY_MAX; /* The assigned priority is @priority. */ grantor = choose_grantor_per_flow(fe); /* Encapsulate the packet as a request. */ ret = encapsulate(packet->pkt, priority, &sol_conf->net->back, &grantor->gt_addr); if (ret < 0) return ret; eth_cache = grantor->eth_cache; RTE_VERIFY(eth_cache != NULL); /* If needed, packet header space was adjusted by encapsulate(). 
*/ if (pkt_copy_cached_eth_header(packet->pkt, eth_cache, sol_conf->net->back.l2_len_out)) return -1; req_bufs[*num_reqs] = packet->pkt; set_prio(req_bufs[*num_reqs], priority); (*num_reqs)++; return EINPROGRESS; } /* * Returns: * * zero on success; the granted packet can be enqueued and forwarded * * a negative number on error or when the packet needs to be * otherwise dropped because it has exceeded its budget * * EINPROGRESS to indicate that the packet is now a request that * is being processed by another lcore, and should not * be forwarded or dropped on returning from this function. */ static int gk_process_granted(struct flow_entry *fe, struct ipacket *packet, struct rte_mbuf **req_bufs, uint16_t *num_reqs, struct sol_config *sol_conf, struct gk_measurement_metrics *stats) { int ret; bool renew_cap; uint8_t priority = PRIORITY_GRANTED; uint64_t now = rte_rdtsc(); struct rte_mbuf *pkt = packet->pkt; struct ether_cache *eth_cache; struct grantor_entry *grantor; uint32_t pkt_len; if (now >= fe->expire_at) { reinitialize_flow_entry(fe, now); return gk_process_request(fe, packet, req_bufs, num_reqs, sol_conf); } if (now >= fe->u.granted.budget_renew_at) { fe->u.granted.budget_renew_at = now + cycles_per_sec; fe->u.granted.budget_byte = (uint64_t)fe->u.granted.tx_rate_kib_cycle * 1024; } pkt_len = rte_pktmbuf_pkt_len(pkt); if (pkt_len > fe->u.granted.budget_byte) { stats->pkts_num_declined++; stats->pkts_size_declined += pkt_len; return -1; } fe->u.granted.budget_byte -= pkt_len; renew_cap = now >= fe->u.granted.send_next_renewal_at; if (renew_cap) { fe->u.granted.send_next_renewal_at = now + fe->u.granted.renewal_step_cycle; priority = PRIORITY_RENEW_CAP; } grantor = choose_grantor_per_flow(fe); /* * Encapsulate packet as a granted packet, * mark it as a capability renewal request if @renew_cap is true, * enter destination according to @fe->grantor_fib. 
*/ ret = encapsulate(packet->pkt, priority, &sol_conf->net->back, &grantor->gt_addr); if (ret < 0) return ret; eth_cache = grantor->eth_cache; RTE_VERIFY(eth_cache != NULL); /* If needed, packet header space was adjusted by encapsulate(). */ if (pkt_copy_cached_eth_header(packet->pkt, eth_cache, sol_conf->net->back.l2_len_out)) return -1; stats->pkts_num_granted++; stats->pkts_size_granted += pkt_len; return 0; } /* * Returns: * * a negative number on error or when the packet needs to be * otherwise dropped because it is declined * * EINPROGRESS to indicate that the packet is now a request that * is being processed by another lcore, and should not * be forwarded or dropped on returning from this function. */ static int gk_process_declined(struct flow_entry *fe, struct ipacket *packet, struct rte_mbuf **req_bufs, uint16_t *num_reqs, struct sol_config *sol_conf, struct gk_measurement_metrics *stats) { uint64_t now = rte_rdtsc(); if (unlikely(now >= fe->expire_at)) { reinitialize_flow_entry(fe, now); return gk_process_request(fe, packet, req_bufs, num_reqs, sol_conf); } stats->pkts_num_declined++; stats->pkts_size_declined += rte_pktmbuf_pkt_len(packet->pkt); return -1; } /* * Returns: * * zero on success; the packet can be enqueued and forwarded * * a negative number on error or when the packet needs to be * otherwise dropped because it has exceeded a limit * * EINPROGRESS to indicate that the packet is now a request that * is being processed by another lcore, and should not * be forwarded or dropped on returning from this function. 
*/ static int gk_process_bpf(struct flow_entry *fe, struct ipacket *packet, struct rte_mbuf **req_bufs, uint16_t *num_reqs, struct gk_config *gk_conf, struct gk_measurement_metrics *stats) { uint64_t bpf_ret; int program_index, rc; uint64_t now = rte_rdtsc(); if (unlikely(now >= fe->expire_at)) goto expired; program_index = fe->program_index; rc = gk_bpf_decide_pkt(gk_conf, program_index, fe, packet, now, &bpf_ret); if (unlikely(rc != 0)) { G_LOG(WARNING, "The BPF program at index %u failed to run its function pkt\n", program_index); goto expired; } switch (bpf_ret) { case GK_BPF_PKT_RET_FORWARD: { struct ether_cache *eth_cache = choose_grantor_per_flow(fe)->eth_cache; RTE_VERIFY(eth_cache != NULL); /* * If needed, encapsulate() already adjusted * packet header space. */ if (pkt_copy_cached_eth_header(packet->pkt, eth_cache, gk_conf->net->back.l2_len_out)) return -1; stats->pkts_num_granted++; stats->pkts_size_granted += rte_pktmbuf_pkt_len(packet->pkt); return 0; } case GK_BPF_PKT_RET_DECLINE: stats->pkts_num_declined++; stats->pkts_size_declined += rte_pktmbuf_pkt_len(packet->pkt); return -1; case GK_BPF_PKT_RET_ERROR: G_LOG(WARNING, "The function pkt of the BPF program at index %u returned GK_BPF_PKT_RET_ERROR\n", program_index); return -1; default: G_LOG(WARNING, "The function pkt of the BPF program at index %u returned an invalid return: %" PRIu64 "\n", program_index, bpf_ret); return -1; } rte_panic("Unexpected condition at %s()", __func__); expired: reinitialize_flow_entry(fe, now); return gk_process_request(fe, packet, req_bufs, num_reqs, gk_conf->sol_conf); } static int get_block_idx(struct gk_config *gk_conf, unsigned int lcore_id) { int i; for (i = 0; i < gk_conf->num_lcores; i++) if (gk_conf->lcores[i] == lcore_id) return i; rte_panic("Unexpected condition: lcore %u is not running a gk block\n", lcore_id); return 0; } static void print_flow_state(struct flow_entry *fe, int32_t index) { int ret; char grantor_ip[RTE_MAX(MAX_INET_ADDRSTRLEN, 128)]; char 
state_msg[1024]; const char *s_in_use = likely(fe->in_use) ? "" : "NOT in use "; if (unlikely(fe->grantor_fib == NULL)) { ret = snprintf(grantor_ip, sizeof(grantor_ip), "NULL FIB entry"); goto fib_error; } if (unlikely(fe->grantor_fib->action != GK_FWD_GRANTOR)) { ret = snprintf(grantor_ip, sizeof(grantor_ip), "INVALID FIB entry [FIB action: %hhu]", fe->grantor_fib->action); goto fib_error; } ret = convert_ip_to_str(&choose_grantor_per_flow(fe)->gt_addr, grantor_ip, sizeof(grantor_ip)); if (ret < 0) { ret = snprintf(grantor_ip, sizeof(grantor_ip), "GRANTOR FIB entry with INVALID IP address"); goto fib_error; } goto dump; fib_error: RTE_VERIFY(ret > 0 && ret < (int)sizeof(grantor_ip)); dump: switch (fe->state) { case GK_REQUEST: ret = snprintf(state_msg, sizeof(state_msg), "%s[state: GK_REQUEST (%hhu), flow_hash_value: 0x%x, expire_at: 0x%"PRIx64", last_packet_seen_at: 0x%"PRIx64", last_priority: %hhu, allowance: %hhu, grantor_ip: %s]", s_in_use, fe->state, fe->flow_hash_val, fe->expire_at, fe->u.request.last_packet_seen_at, fe->u.request.last_priority, fe->u.request.allowance, grantor_ip); break; case GK_GRANTED: ret = snprintf(state_msg, sizeof(state_msg), "%s[state: GK_GRANTED (%hhu), flow_hash_value: 0x%x, expire_at: 0x%"PRIx64", budget_renew_at: 0x%"PRIx64", tx_rate_kib_cycle: %u, budget_byte: %"PRIu64", send_next_renewal_at: 0x%"PRIx64", renewal_step_cycle: 0x%"PRIx64", grantor_ip: %s]", s_in_use, fe->state, fe->flow_hash_val, fe->expire_at, fe->u.granted.budget_renew_at, fe->u.granted.tx_rate_kib_cycle, fe->u.granted.budget_byte, fe->u.granted.send_next_renewal_at, fe->u.granted.renewal_step_cycle, grantor_ip); break; case GK_DECLINED: ret = snprintf(state_msg, sizeof(state_msg), "%s[state: GK_DECLINED (%hhu), flow_hash_value: 0x%x, expire_at: 0x%"PRIx64", grantor_ip: %s]", s_in_use, fe->state, fe->flow_hash_val, fe->expire_at, grantor_ip); break; case GK_BPF: { uint64_t *c = fe->u.bpf.cookie.mem; RTE_BUILD_BUG_ON(RTE_DIM(fe->u.bpf.cookie.mem) != 8); ret = 
snprintf(state_msg, sizeof(state_msg), "%s[state: GK_BPF (%hhu), flow_hash_value: 0x%x, expire_at: 0x%"PRIx64", program_index=%u, cookie=%016"PRIx64", %016"PRIx64", %016"PRIx64", %016"PRIx64", %016"PRIx64", %016"PRIx64", %016"PRIx64", %016"PRIx64", grantor_ip: %s]", s_in_use, fe->state, fe->flow_hash_val, fe->expire_at, fe->program_index, rte_be_to_cpu_64(c[0]), rte_be_to_cpu_64(c[1]), rte_be_to_cpu_64(c[2]), rte_be_to_cpu_64(c[3]), rte_be_to_cpu_64(c[4]), rte_be_to_cpu_64(c[5]), rte_be_to_cpu_64(c[6]), rte_be_to_cpu_64(c[7]), grantor_ip); break; } default: ret = snprintf(state_msg, sizeof(state_msg), "%s[state: UNKNOWN (%hhu), flow_hash_value: 0x%x, expire_at: 0x%"PRIx64", grantor_ip: %s]", s_in_use, fe->state, fe->flow_hash_val, fe->expire_at, grantor_ip); break; } RTE_VERIFY(ret > 0 && ret < (int)sizeof(state_msg)); print_flow_err_msg(&fe->flow, index, state_msg); } static inline void reset_fe(struct gk_instance *instance, struct flow_entry *fe) { memset(fe, 0, sizeof(*fe)); if (instance->num_scan_del > 0) instance->num_scan_del--; } /* * Delete a flow entry at a given index and verify the deleted entry. * * Returns: * * zero on success. * * a negative number if the given flow entry was not not able * to be deleted. */ static int gk_del_flow_entry_at_pos(struct gk_instance *instance, uint32_t entry_idx) { struct hs_hash *h = &instance->ip_flow_hash_table; struct flow_entry *fe = &instance->ip_flow_entry_table[entry_idx]; struct flow_entry *fe2; int ret, ret2; uint32_t fe_index; char err_msg[512]; ret = hs_hash_del_key_with_hash(h, &fe->flow, fe->flow_hash_val, &fe_index); if (likely(ret == 0)) { instance->ip_flow_ht_num_items--; if (likely(entry_idx == fe_index)) { /* This is the ONLY normal outcome of this function. 
*/ reset_fe(instance, fe); return 0; } ret2 = snprintf(err_msg, sizeof(err_msg), "%s(): there are two flow entries for the same flow; the main entry is at position %i and the duplicate at position %u; logging both entries and removing the main entry...", __func__, fe_index, entry_idx); RTE_VERIFY(ret2 > 0 && ret2 < (int)sizeof(err_msg)); print_flow_err_msg(&fe->flow, fe_index, err_msg); fe2 = &instance->ip_flow_entry_table[fe_index]; print_flow_state(fe2, fe_index); reset_fe(instance, fe2); print_flow_state(fe, entry_idx); /* * We can't reset @fe because the hash table doesn't have * a copy of the key. While not ideal, we leave @fe untouched, * so Gatekeeper keeps going. */ return 0; } ret2 = snprintf(err_msg, sizeof(err_msg), "%s(): failed to delete a key from hash table (errno=%i): %s\n", __func__, -ret, strerror(-ret)); RTE_VERIFY(ret2 > 0 && ret2 < (int)sizeof(err_msg)); print_flow_err_msg(&fe->flow, entry_idx, err_msg); print_flow_state(fe, entry_idx); return ret; } static uint32_t rss_ip_flow_hf(const void *key, __attribute__((unused)) uint32_t key_len, __attribute__((unused)) uint32_t init_val, const void *data) { return rss_flow_hash(data, key); } static const void * ip_flow_addr(uint32_t entry_idx, const void *data) { const struct flow_entry *fe_table = data; return &fe_table[entry_idx].flow; } static int ip_flow_cmp_eq(const void *key1, const void *key2, __attribute__((unused)) size_t key_len, __attribute__((unused)) const void *data) { return flow_cmp(key1, key2); } static int setup_gk_instance(unsigned int lcore_id, struct gk_config *gk_conf) { int ret; char ht_name[64]; unsigned int block_idx = get_block_idx(gk_conf, lcore_id); unsigned int socket_id = rte_lcore_to_socket_id(lcore_id); struct gk_instance *instance = &gk_conf->instances[block_idx]; struct hs_hash_parameters ip_flow_hash_params = { .name = ht_name, .num_entries = gk_conf->flow_ht_size, .max_probes = gk_conf->flow_ht_max_probes, .scale_num_bucket = gk_conf->flow_ht_scale_num_bucket, 
		.socket_id = socket_id,
		.key_len = sizeof(struct ip_flow),
		.hash_func = rss_ip_flow_hf,
		.hash_func_init_val = 0,
		.hash_func_data = &gk_conf->net->front,
		.key_cmp_fn = ip_flow_cmp_eq,
		.key_cmp_fn_data = NULL,
		.key_addr_fn = ip_flow_addr,
		/* Filled in below once the entry table is allocated. */
		.key_addr_fn_data = NULL
	};

	ret = snprintf(ht_name, sizeof(ht_name), "gk_%u_flow_table", lcore_id);
	RTE_VERIFY(ret > 0 && ret < (int)sizeof(ht_name));

	/* Setup the flow entry table for GK block @block_idx. */
	instance->ip_flow_entry_table = rte_calloc_socket(
		NULL, gk_conf->flow_ht_size, sizeof(struct flow_entry), 0,
		socket_id);
	if (unlikely(instance->ip_flow_entry_table == NULL)) {
		G_LOG(ERR, "%s(lcore=%u): rte_calloc_socket() failed to allocate flow entry table\n",
			__func__, lcore_id);
		ret = -ENOMEM;
		goto out;
	}
	ip_flow_hash_params.key_addr_fn_data = instance->ip_flow_entry_table;

	/* Setup the flow hash table for GK block @block_idx. */
	ret = hs_hash_create(&instance->ip_flow_hash_table,
		&ip_flow_hash_params);
	if (unlikely(ret < 0)) {
		G_LOG(ERR, "%s(lcore=%u): hs_hash_create() failed: (errno=%i): %s\n",
			__func__, lcore_id, -ret, strerror(-ret));
		goto flow_entry;
	}

	ret = init_mailbox("gk", gk_conf->mailbox_max_entries_exp,
		sizeof(struct gk_cmd_entry), gk_conf->mailbox_mem_cache_size,
		lcore_id, &instance->mb);
	if (ret < 0)
		goto flow_hash;

	/* Rate limiters for ICMP replies on each interface. */
	tb_ratelimit_state_init(&instance->front_icmp_rs,
		gk_conf->front_icmp_msgs_per_sec,
		gk_conf->front_icmp_msgs_burst);
	tb_ratelimit_state_init(&instance->back_icmp_rs,
		gk_conf->back_icmp_msgs_per_sec,
		gk_conf->back_icmp_msgs_burst);

	instance->ip_flow_entry_table_size = gk_conf->flow_ht_size;
	instance->ip_flow_ht_num_items = 0;
	ret = 0;
	goto out;

flow_hash:
	hs_hash_free(&instance->ip_flow_hash_table);
flow_entry:
	rte_free(instance->ip_flow_entry_table);
	instance->ip_flow_entry_table = NULL;
out:
	return ret;
}

/*
 * If the table is full at a given batch, there's no reason to risk trying
 * another flow in the current batch because the table only has a chance
 * to free entries in between batches.
 */
static int
gk_hash_add_flow_entry(struct gk_instance *instance, struct ip_flow *flow,
	uint32_t rss_hash_val, struct gk_config *gk_conf, uint32_t *p_val_idx)
{
	int ret;

	/* A scan deletion is pending; refuse additions until it is done. */
	if (instance->num_scan_del > 0)
		return -ENOSPC;

	ret = hs_hash_add_key_with_hash(
		&instance->ip_flow_hash_table, flow, rss_hash_val, p_val_idx);
	if (likely(ret == 0))
		instance->ip_flow_ht_num_items++;
	else if (likely(ret == -ENOSPC))
		instance->num_scan_del = gk_conf->scan_del_thresh;

	return ret;
}

/*
 * If the test can be done only on @flow, do not access @fe to minimize
 * pressure on the processor cache of the lcore.
 */
typedef bool (*test_flow_entry_t)(void *arg, struct flow_entry *fe);

/*
 * Delete every in-use flow entry for which @test(@arg, entry) returns
 * true, and log how many entries were flushed under @context.
 */
static void
flush_flow_table(struct gk_instance *instance,
	test_flow_entry_t test, void *arg, const char *context)
{
	uint32_t num_flushed_flows = 0;
	uint32_t index;

	/*
	 * Scan the entry array directly instead of iterating over
	 * the hash table buckets. The entry array is going to be
	 * significantly smaller than the array of buckets, and
	 * sequentially scanning ONLY the entry array is going to
	 * be more efficient than sequentially scanning the larger
	 * array of buckets AND randomly reading the entries.
	 */
	for (index = 0; index < instance->ip_flow_entry_table_size; index++) {
		struct flow_entry *fe = &instance->ip_flow_entry_table[index];
		if (fe->in_use && test(arg, fe) &&
				(gk_del_flow_entry_at_pos(instance,
					index) == 0)) {
			num_flushed_flows++;
		}
	}

	G_LOG(NOTICE, "%s(%s): flushed %" PRIu32 " flows of the flow table\n",
		__func__, context, num_flushed_flows);
}

/*
 * Argument bundle for test_net_prefixes(): the Ethernet type (@proto),
 * the source/destination prefixes, and precomputed network masks
 * (only the pair that matches @proto is filled with meaningful values).
 */
struct flush_net_prefixes {
	uint16_t proto;
	struct ip_prefix *src;
	struct ip_prefix *dst;
	struct in_addr ip4_src_mask;
	struct in_addr ip4_dst_mask;
	struct in6_addr ip6_src_mask;
	struct in6_addr ip6_dst_mask;
};

/*
 * flush_flow_table() callback: does @fe's flow fall inside the source
 * and destination prefixes in @arg? A prefix of length zero matches
 * any address.
 */
static bool
test_net_prefixes(void *arg, struct flow_entry *fe)
{
	struct flush_net_prefixes *info = arg;
	const struct ip_flow *flow = &fe->flow;
	bool matched = true;

	if (info->proto != flow->proto)
		return false;

	if (info->proto == RTE_ETHER_TYPE_IPV4) {
		if (info->src->len != 0) {
			matched = ip4_same_subnet(
				info->src->addr.ip.v4.s_addr,
				flow->f.v4.src.s_addr,
				info->ip4_src_mask.s_addr);
		}
		if (matched && info->dst->len != 0) {
			matched = ip4_same_subnet(
				info->dst->addr.ip.v4.s_addr,
				flow->f.v4.dst.s_addr,
				info->ip4_dst_mask.s_addr);
		}
		return matched;
	}

	/* IPv6 flow. */
	if (info->src->len != 0) {
		matched = ip6_same_subnet(&info->src->addr.ip.v6,
			&flow->f.v6.src, &info->ip6_src_mask);
	}
	if (matched && info->dst->len != 0) {
		matched = ip6_same_subnet(&info->dst->addr.ip.v6,
			&flow->f.v6.dst, &info->ip6_dst_mask);
	}
	return matched;
}

/*
 * Flush every flow whose source address falls in prefix @src AND whose
 * destination address falls in prefix @dst. Both prefixes must carry
 * the same protocol.
 */
static void
flush_net_prefixes(struct ip_prefix *src, struct ip_prefix *dst,
	struct gk_instance *instance)
{
	struct flush_net_prefixes arg;

	RTE_VERIFY(src->addr.proto == dst->addr.proto);

	arg.proto = src->addr.proto;
	arg.src = src;
	arg.dst = dst;

	/* Zero the masks of the other family so @arg is fully defined. */
	if (arg.proto == RTE_ETHER_TYPE_IPV4) {
		ip4_prefix_mask(src->len, &arg.ip4_src_mask);
		ip4_prefix_mask(dst->len, &arg.ip4_dst_mask);
		memset(&arg.ip6_src_mask, 0, sizeof(arg.ip6_src_mask));
		memset(&arg.ip6_dst_mask, 0, sizeof(arg.ip6_dst_mask));
	} else if (likely(arg.proto == RTE_ETHER_TYPE_IPV6)) {
		memset(&arg.ip4_src_mask, 0, sizeof(arg.ip4_src_mask));
		memset(&arg.ip4_dst_mask, 0, sizeof(arg.ip4_dst_mask));
		ip6_prefix_mask(src->len, &arg.ip6_src_mask);
		ip6_prefix_mask(dst->len, &arg.ip6_dst_mask);
	} else
		rte_panic("Unexpected protocol: %i\n", src->addr.proto);

	flush_flow_table(instance, test_net_prefixes, &arg, __func__);
}

/*
 * Look up the flow named in @log and print the state of its entry;
 * logs an error if the flow is not in the flow table.
 */
static void
log_flow_state(struct gk_log_flow *log, struct gk_instance *instance)
{
	struct flow_entry *fe;
	uint32_t flow_idx;
	int ret = hs_hash_lookup_with_hash(&instance->ip_flow_hash_table,
		&log->flow, log->flow_hash_val, &flow_idx);
	if (ret < 0) {
		char err_msg[128];
		ret = snprintf(err_msg, sizeof(err_msg),
			"%s(): flow does not exist\n", __func__);
		RTE_VERIFY(ret > 0 && ret < (int)sizeof(err_msg));
		print_flow_err_msg(&log->flow, -ENOENT, err_msg);
		return;
	}
	fe = &instance->ip_flow_entry_table[flow_idx];
	print_flow_state(fe, flow_idx);
}

/*
 * flush_flow_table() callback: does @fe point to the FIB entry @arg?
 */
static bool
test_fib(void *arg, struct flow_entry *fe)
{
	return fe->grantor_fib == arg;
}

/*
 * Handle a GK_SYNCH_WITH_LPM request: when a grantor FIB entry is being
 * removed (and this is not an update-only request), flush the flows that
 * reference it, then signal completion through @req->done_counter.
 */
static void
gk_synchronize(struct gk_synch_request *req, struct gk_instance *instance)
{
	if (req->update_only)
		goto done;

	switch (req->fib->action) {
	case GK_FWD_GRANTOR:
		/* Flush the grantor @fib in the flow table. */
		flush_flow_table(instance, test_fib, req->fib, __func__);
		break;

	case GK_FWD_GATEWAY_FRONT_NET:
		/* FALLTHROUGH */
	case GK_FWD_GATEWAY_BACK_NET:
		/* FALLTHROUGH */
	case GK_DROP:
		/* FALLTHROUGH */
	case GK_FWD_NEIGHBOR_FRONT_NET:
		/* FALLTHROUGH */
	case GK_FWD_NEIGHBOR_BACK_NET:
		/*
		 * Do nothing because at this point we do not
		 * have a reference to @fib.
		 */
		break;
	default:
		rte_panic("%s() at lcore %u: invalid FIB action (%u)\n",
			__func__, rte_lcore_id(), req->fib->action);
		break;
	}

done:
	/* Tell the requester that this GK instance is done. */
	rte_atomic32_inc(req->done_counter);
}

/*
 * flush_flow_table() callback: is @fe a GK_BPF flow running the BPF
 * program whose index is encoded in @arg?
 */
static bool
test_bpf(void *arg, struct flow_entry *fe)
{
	return fe->state == GK_BPF && fe->program_index == (uintptr_t)arg;
}

/*
 * Dispatch one mailbox command. Policy decisions are only collected
 * into @policies (incrementing *@num_policies) for the caller to apply;
 * every other operation is executed here.
 */
static void
process_gk_cmd(struct gk_cmd_entry *entry,
	struct gk_add_policy **policies, int *num_policies,
	struct gk_instance *instance)
{
	switch (entry->op) {
	case GK_ADD_POLICY_DECISION:
		policies[(*num_policies)++] = &entry->u.ggu;
		break;

	case GK_SYNCH_WITH_LPM:
		gk_synchronize(&entry->u.synch, instance);
		break;

	case GK_FLUSH_FLOW_TABLE:
		flush_net_prefixes(&entry->u.flush.src, &entry->u.flush.dst,
			instance);
		break;

	case GK_LOG_FLOW_STATE:
		log_flow_state(&entry->u.log, instance);
		break;

	case GK_FLUSH_BPF:
		/*
		 * Release the message sender now because we already have
		 * a local copy of entry->u.flush_bpf.program_index.
		 */
		rte_atomic32_inc(entry->u.flush_bpf.done_counter);
		flush_flow_table(instance, test_bpf,
			(void *)(uintptr_t)entry->u.flush_bpf.program_index,
			"GK_FLUSH_BPF");
		break;

	default:
		G_LOG(ERR, "Unknown command operation %u\n", entry->op);
		break;
	}
}

/*
 * Configure RSS on the front and back ports (when each has RSS enabled)
 * so that received packets are spread over the GK instances' RX queues,
 * and save the resulting RSS configuration in @gk_conf.
 * Returns 0 on success or a negative value from the setup helpers.
 */
static int
gk_setup_rss(struct gk_config *gk_conf)
{
	int i, ret;

	if (gk_conf->net->front.rss) {
		uint16_t port_front = gk_conf->net->front.id;
		uint16_t gk_queues_front[gk_conf->num_lcores];

		for (i = 0; i < gk_conf->num_lcores; i++) {
			gk_queues_front[i] =
				gk_conf->instances[i].rx_queue_front;
		}

		ret = gatekeeper_setup_rss(port_front, gk_queues_front,
			gk_conf->num_lcores);
		if (ret < 0)
			goto out;

		ret = gatekeeper_get_rss_config(port_front,
			&gk_conf->rss_conf_front);
		if (ret < 0)
			goto out;
	}

	if (gk_conf->net->back.rss) {
		uint16_t port_back = gk_conf->net->back.id;
		uint16_t gk_queues_back[gk_conf->num_lcores];

		for (i = 0; i < gk_conf->num_lcores; i++) {
			gk_queues_back[i] =
				gk_conf->instances[i].rx_queue_back;
		}

		ret = gatekeeper_setup_rss(port_back, gk_queues_back,
			gk_conf->num_lcores);
		if (ret < 0)
			goto out;

		ret = gatekeeper_get_rss_config(port_back,
			&gk_conf->rss_conf_back);
		if (ret < 0)
			goto out;
	}

	ret = 0;

out:
	return ret;
}

/*
 * Turn @packet's mbuf into an ICMP Time Exceeded reply addressed back
 * to the packet's source and append it to @icmp_bufs (incrementing
 * *@num_pkts). On failure the mbuf is handed to @cb_f instead.
 * The mbuf is resized in place to exactly L2 + IPv4 + ICMP headers.
 */
static void
xmit_icmp(struct gatekeeper_if *iface, struct ipacket *packet,
	uint16_t *num_pkts, struct rte_mbuf **icmp_bufs,
	struct gk_instance *instance, packet_drop_cb_func cb_f)
{
	struct rte_ether_addr eth_addr_tmp;
	struct rte_ether_hdr *icmp_eth;
	struct rte_ipv4_hdr *icmp_ipv4;
	struct rte_icmp_hdr *icmph;
	struct rte_mbuf *pkt = packet->pkt;
	int icmp_pkt_len = iface->l2_len_out + sizeof(struct rte_ipv4_hdr) +
		sizeof(struct rte_icmp_hdr);

	/* Shrink or grow the mbuf to the exact reply length. */
	if (pkt->data_len >= icmp_pkt_len) {
		int ret = rte_pktmbuf_trim(pkt, pkt->data_len - icmp_pkt_len);
		if (ret < 0) {
			G_LOG(ERR, "Failed to remove %d bytes of data at the end of the mbuf at %s",
				pkt->data_len - icmp_pkt_len, __func__);
			cb_f(pkt, instance);
			return;
		}

		icmp_eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	} else {
		icmp_eth = (struct rte_ether_hdr *)rte_pktmbuf_append(pkt,
			icmp_pkt_len - pkt->data_len);
		if (icmp_eth == NULL) {
			G_LOG(ERR, "Failed to append %d bytes of new data: not enough headroom space in the first segment at %s\n",
				icmp_pkt_len - pkt->data_len, __func__);
			cb_f(pkt, instance);
			return;
		}
	}

	/* Swap the Ethernet addresses to send the reply back. */
	rte_ether_addr_copy(&icmp_eth->src_addr, &eth_addr_tmp);
	rte_ether_addr_copy(&icmp_eth->dst_addr, &icmp_eth->src_addr);
	rte_ether_addr_copy(&eth_addr_tmp, &icmp_eth->dst_addr);
	if (iface->vlan_insert) {
		fill_vlan_hdr(icmp_eth, iface->ipv4_vlan_tag_be,
			RTE_ETHER_TYPE_IPV4);
	}

	/* Build the IPv4 header; source/destination are swapped. */
	icmp_ipv4 = (struct rte_ipv4_hdr *)pkt_out_skip_l2(iface, icmp_eth);
	icmp_ipv4->version_ihl = IP_VHL_DEF;
	icmp_ipv4->type_of_service = 0;
	icmp_ipv4->packet_id = 0;
	icmp_ipv4->fragment_offset = IP_DN_FRAGMENT_FLAG;
	icmp_ipv4->time_to_live = IP_DEFTTL;
	icmp_ipv4->next_proto_id = IPPROTO_ICMP;
	icmp_ipv4->src_addr = packet->flow.f.v4.dst.s_addr;
	icmp_ipv4->dst_addr = packet->flow.f.v4.src.s_addr;
	icmp_ipv4->total_length = rte_cpu_to_be_16(pkt->data_len -
		iface->l2_len_out);

	pkt->l2_len = iface->l2_len_out;
	pkt->l3_len = sizeof(struct
		rte_ipv4_hdr);
	set_ipv4_checksum(iface, pkt, icmp_ipv4);

	/* Build the ICMP Time Exceeded (TTL exceeded in transit) header. */
	icmph = (struct rte_icmp_hdr *)&icmp_ipv4[1];
	icmph->icmp_type = ICMP_TIME_EXCEEDED;
	icmph->icmp_code = ICMP_EXC_TTL;
	icmph->icmp_cksum = 0;	/* Computed below over the final header. */
	icmph->icmp_ident = 0;
	icmph->icmp_seq_nb = 0;
	icmph->icmp_cksum = icmp_cksum(icmph,
		pkt->pkt_len - (pkt->l2_len + pkt->l3_len));

	icmp_bufs[*num_pkts] = pkt;
	(*num_pkts)++;
}

/*
 * IPv6 counterpart of xmit_icmp(): turn @packet's mbuf into an ICMPv6
 * Time Exceeded (hop limit exceeded) reply addressed back to the
 * packet's source and append it to @icmp_bufs (incrementing
 * *@num_pkts). On failure the mbuf is handed to @cb_f instead.
 */
static void
xmit_icmpv6(struct gatekeeper_if *iface, struct ipacket *packet,
	uint16_t *num_pkts, struct rte_mbuf **icmp_bufs,
	struct gk_instance *instance, packet_drop_cb_func cb_f)
{
	struct rte_ether_addr eth_addr_tmp;
	struct rte_ether_hdr *icmp_eth;
	struct rte_ipv6_hdr *icmp_ipv6;
	struct icmpv6_hdr *icmpv6_hdr;
	struct rte_mbuf *pkt = packet->pkt;
	int icmpv6_pkt_len = iface->l2_len_out + sizeof(struct rte_ipv6_hdr) +
		sizeof(struct icmpv6_hdr);

	/* Shrink or grow the mbuf to the exact reply length. */
	if (pkt->data_len >= icmpv6_pkt_len) {
		int ret = rte_pktmbuf_trim(pkt,
			pkt->data_len - icmpv6_pkt_len);
		if (ret < 0) {
			G_LOG(ERR, "Failed to remove %d bytes of data at the end of the mbuf at %s",
				pkt->data_len - icmpv6_pkt_len, __func__);
			cb_f(pkt, instance);
			return;
		}

		icmp_eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	} else {
		icmp_eth = (struct rte_ether_hdr *)rte_pktmbuf_append(pkt,
			icmpv6_pkt_len - pkt->data_len);
		if (icmp_eth == NULL) {
			G_LOG(ERR, "Failed to append %d bytes of new data: not enough headroom space in the first segment at %s\n",
				icmpv6_pkt_len - pkt->data_len, __func__);
			cb_f(pkt, instance);
			return;
		}
	}

	/* Swap the Ethernet addresses to send the reply back. */
	rte_ether_addr_copy(&icmp_eth->src_addr, &eth_addr_tmp);
	rte_ether_addr_copy(&icmp_eth->dst_addr, &icmp_eth->src_addr);
	rte_ether_addr_copy(&eth_addr_tmp, &icmp_eth->dst_addr);
	if (iface->vlan_insert) {
		fill_vlan_hdr(icmp_eth, iface->ipv6_vlan_tag_be,
			RTE_ETHER_TYPE_IPV6);
	}

	/* Set-up IPv6 header; source/destination are swapped. */
	icmp_ipv6 = (struct rte_ipv6_hdr *)pkt_out_skip_l2(iface, icmp_eth);
	icmp_ipv6->vtc_flow = rte_cpu_to_be_32(IPv6_DEFAULT_VTC_FLOW);
	icmp_ipv6->payload_len = rte_cpu_to_be_16(sizeof(*icmpv6_hdr));
	icmp_ipv6->proto = IPPROTO_ICMPV6;
	/*
	 * The IP Hop Limit field must be 255 as required by
	 * RFC 4861, sections 7.1.1 and 7.1.2.
	 */
	icmp_ipv6->hop_limits = 255;
	rte_memcpy(icmp_ipv6->src_addr, packet->flow.f.v6.dst.s6_addr,
		sizeof(icmp_ipv6->src_addr));
	rte_memcpy(icmp_ipv6->dst_addr, packet->flow.f.v6.src.s6_addr,
		sizeof(icmp_ipv6->dst_addr));

	/* Set-up ICMPv6 header. */
	icmpv6_hdr = (struct icmpv6_hdr *)&icmp_ipv6[1];
	icmpv6_hdr->type = ICMPV6_TIME_EXCEED;
	icmpv6_hdr->code = ICMPV6_EXC_HOPLIMIT;
	icmpv6_hdr->cksum = 0; /* Calculated below. */
	icmpv6_hdr->cksum = rte_ipv6_icmpv6_cksum(icmp_ipv6, icmpv6_hdr);

	icmp_bufs[*num_pkts] = pkt;
	(*num_pkts)++;
}

/*
 * For IPv4, according to the RFC 1812 section 5.3.1 Time to Live (TTL),
 * if the TTL is reduced to zero (or less), the packet MUST be
 * discarded, and if the destination is not a multicast address the
 * router MUST send an ICMP Time Exceeded message, Code 0 (TTL Exceeded
 * in Transit) message to the source.
 *
 * For IPv6, according to the RFC 1883 section 4.4,
 * if the IPv6 Hop Limit is less than or equal to 1, then the router needs to
 * send an ICMP Time Exceeded -- Hop Limit Exceeded in Transit message to
 * the Source Address and discard the packet.
 */
static int
update_ip_hop_count(struct gatekeeper_if *iface, struct ipacket *packet,
	uint16_t *num_pkts, struct rte_mbuf **icmp_bufs,
	struct token_bucket_ratelimit_state *rs, struct gk_instance *instance,
	packet_drop_cb_func cb_f)
{
	if (packet->flow.proto == RTE_ETHER_TYPE_IPV4) {
		struct rte_ipv4_hdr *ipv4_hdr = packet->l3_hdr;
		if (ipv4_hdr->time_to_live <= 1) {
			/*
			 * TTL expired: answer with a rate-limited ICMP
			 * Time Exceeded; otherwise hand the mbuf to @cb_f.
			 */
			if (tb_ratelimit_allow(rs)) {
				xmit_icmp(iface, packet, num_pkts,
					icmp_bufs, instance, cb_f);
			} else
				cb_f(packet->pkt, instance);
			return -ETIMEDOUT;
		}

		--(ipv4_hdr->time_to_live);
		/*
		 * Incremental checksum update for the TTL decrement
		 * (same technique as DPDK's l3fwd; see RFC 1141/1624).
		 */
		++(ipv4_hdr->hdr_checksum);
	} else if (likely(packet->flow.proto == RTE_ETHER_TYPE_IPV6)) {
		struct rte_ipv6_hdr *ipv6_hdr = packet->l3_hdr;
		if (ipv6_hdr->hop_limits <= 1) {
			if (tb_ratelimit_allow(rs)) {
				xmit_icmpv6(iface, packet, num_pkts,
					icmp_bufs, instance, cb_f);
			} else
				cb_f(packet->pkt, instance);
			return -ETIMEDOUT;
		}

		/* No checksum in the IPv6 header to update. */
		--(ipv6_hdr->hop_limits);
	} else {
		G_LOG(WARNING,
			"Unexpected condition at %s: unknown flow type %hu\n",
			__func__, packet->flow.proto);
		cb_f(packet->pkt, instance);
		return -EINVAL;
	}

	return 0;
}

/*
 * Bulk IPv4 LPM lookup: for each flow, resolve the destination address
 * to a FIB entry in @fibs[] (NULL when there is no matching route) and
 * prefetch each hit for the callers that will dereference it.
 */
static void
lookup_fib_bulk(struct gk_lpm *ltbl, struct ip_flow *flows[],
	int num_flows, struct gk_fib *fibs[])
{
	const uint8_t *addresses[num_flows];
	uint32_t next_hops[num_flows];
	int i;

	RTE_BUILD_BUG_ON(sizeof(*fibs[0]) > RTE_CACHE_LINE_SIZE);

	if (unlikely(num_flows == 0))
		return;

	for (i = 0; i < num_flows; i++)
		addresses[i] = (const uint8_t *)&flows[i]->f.v4.dst.s_addr;

	fib_lookup_bulk(&ltbl->fib, addresses, next_hops, num_flows);

	for (i = 0; i < num_flows; i++) {
		uint32_t fib_id = next_hops[i];
		if (fib_id != FIB_NO_NH) {
			fibs[i] = &ltbl->fib_tbl[fib_id];
			rte_prefetch0(fibs[i]);
		} else
			fibs[i] = NULL;
	}
}

/* IPv6 counterpart of lookup_fib_bulk(). */
static void
lookup_fib6_bulk(struct gk_lpm *ltbl, struct ip_flow *flows[],
	int num_flows, struct gk_fib *fibs[])
{
	const uint8_t *addresses[num_flows];
	uint32_t next_hops[num_flows];
	int i;

	RTE_BUILD_BUG_ON(sizeof(*fibs[0]) > RTE_CACHE_LINE_SIZE);

	if (unlikely(num_flows == 0))
		return;

	for (i = 0; i < num_flows; i++)
			 * successfully adding a new flow entry; and
			 * 2. a flow already exists but it's not in use
			 *    due to a bug.
			 */
			initialize_flow_entry(fe, &packet->flow,
				ip_flow_hash_val, fib);
		}

		return fe;
	}

	case GK_FWD_GATEWAY_FRONT_NET:
		/* Gatekeeper does not intermediate neighbors. */
		/*
		 * Although this is the GK block, print_flow_err_msg() uses
		 * G_LOG, so test log level at the Gatekeeper level.
		 */
		if (unlikely(G_LOG_CHECK(DEBUG)))
			print_flow_err_msg(&packet->flow, -ENOENT,
				"Dropping packet that arrived at the front interface and is destined to a front gateway");
		drop_packet_front(pkt, instance);
		break;

	case GK_FWD_GATEWAY_BACK_NET:
		/*
		 * The entry instructs to forward
		 * its packets to the gateway in
		 * the back network, forward accordingly.
		 *
		 * BP block bypasses from the front to the
		 * back interface are expected to bypass
		 * ranges of IP addresses that should not
		 * go through Gatekeeper.
		 *
		 * Notice that one needs to update
		 * the Ethernet header.
		 */
		eth_cache = fib->u.gateway.eth_cache;
		RTE_VERIFY(eth_cache != NULL);

		if (adjust_pkt_len(pkt, back, 0) == NULL ||
				pkt_copy_cached_eth_header(pkt, eth_cache,
					back->l2_len_out)) {
			drop_packet_front(pkt, instance);
			break;
		}

		if (update_ip_hop_count(front, packet, num_pkts, icmp_bufs,
				&instance->front_icmp_rs, instance,
				drop_packet_front) < 0)
			break;

		tx_bufs[(*num_tx)++] = pkt;
		break;

	case GK_FWD_NEIGHBOR_FRONT_NET:
		/* The caller is expected to have diverted these to the ACLs. */
		G_LOG(CRIT, "%s(): bug: GK_FWD_NEIGHBOR_FRONT_NET should have been already handled; dropping packet...\n",
			__func__);
		drop_packet_front(pkt, instance);
		break;

	case GK_FWD_NEIGHBOR_BACK_NET:
		/*
		 * The entry instructs to forward
		 * its packets to the neighbor in
		 * the back network, forward accordingly.
		 */
		if (packet->flow.proto == RTE_ETHER_TYPE_IPV4) {
			eth_cache = lookup_ether_cache(&fib->u.neigh,
				&packet->flow.f.v4.dst);
		} else {
			eth_cache = lookup_ether_cache(&fib->u.neigh,
				&packet->flow.f.v6.dst);
		}

		if (eth_cache == NULL) {
			/*
			 * Although this is the GK block, print_flow_err_msg()
			 * uses G_LOG, so test log level at the Gatekeeper
			 * level.
			 *
			 * NOTICE that the unknown back neighbor that the log
			 * entry below refers to could be the address of
			 * our back interface as well. We cannot just send
			 * the packet to the filter of the back interface
			 * because the target filter may be implemented in
			 * the hardware of the back interface.
			 */
			if (unlikely(G_LOG_CHECK(DEBUG)))
				print_flow_err_msg(&packet->flow, -ENOENT,
					"Dropping packet that arrived at the front interface and is destined to an unknown back neighbor");
			drop_packet_front(pkt, instance);
			break;
		}

		if (adjust_pkt_len(pkt, back, 0) == NULL ||
				pkt_copy_cached_eth_header(pkt, eth_cache,
					back->l2_len_out)) {
			drop_packet_front(pkt, instance);
			break;
		}

		if (update_ip_hop_count(front, packet, num_pkts, icmp_bufs,
				&instance->front_icmp_rs, instance,
				drop_packet_front) < 0)
			break;

		tx_bufs[(*num_tx)++] = pkt;
		break;

	case GK_DROP:
		/* FALLTHROUGH */
	default:
		drop_packet_front(pkt, instance);
		break;
	}

no_fe:
	/* No flow entry was created for this packet. */
	*fe_index = HS_HASH_MISS;
	return NULL;
}

/*
 * Apply the per-state policy of flow entry @fe to @packet: requests are
 * queued into @req_bufs for the solicitor, granted/declined/BPF flows
 * are handled by their dedicated processors. Returns the processor's
 * result (<0 drop, 0 forward, EINPROGRESS handed off) and deletes @fe
 * when its state is unknown (corrupted table).
 */
static int
process_flow_entry(struct flow_entry *fe, int32_t fe_index,
	struct ipacket *packet, struct rte_mbuf **req_bufs,
	uint16_t *num_reqs, struct gk_config *gk_conf,
	struct gk_instance *instance)
{
	int ret;

	/*
	 * Some notes regarding flow rates and units:
	 *
	 * Flows in the GK_REQUEST state are bandwidth limited
	 * to an overall rate relative to the link. Therefore,
	 * the Ethernet frame overhead is counted toward the
	 * credits used by requests. The request channel rate
	 * is measured in megabits (base 10) per second to
	 * match the units used by hardware specifications.
	 *
	 * Granted flows (in state GK_GRANTED or sometimes
	 * GK_BPF) are allocated budgets that are intended
	 * to reflect the max throughput of the flow, and
	 * therefore do not include the Ethernet frame overhead.
	 * The budgets of granted flows are measured in
	 * kibibytes (base 2).
	 */

	switch (fe->state) {
	case GK_REQUEST:
		ret = gk_process_request(fe, packet, req_bufs, num_reqs,
			gk_conf->sol_conf);
		break;

	case GK_GRANTED:
		ret = gk_process_granted(fe, packet, req_bufs, num_reqs,
			gk_conf->sol_conf, &instance->traffic_stats);
		break;

	case GK_DECLINED:
		ret = gk_process_declined(fe, packet, req_bufs, num_reqs,
			gk_conf->sol_conf, &instance->traffic_stats);
		break;

	case GK_BPF:
		ret = gk_process_bpf(fe, packet, req_bufs, num_reqs,
			gk_conf, &instance->traffic_stats);
		break;

	default: {
		char err_msg[256];
		int ret2;

		ret = -1;

		/*
		 * The flow table is corrupted.
		 *
		 * The ideal solution would be to move the flow into
		 * the GK_REQUEST state and to process it as such.
		 * The corresponding fib entry, however, is not available
		 * to change the state, and finding the fib entry is too
		 * expensive to do here.
		 *
		 * The second best solution, done below, is to remove
		 * the flow entry.
		 */
		ret2 = snprintf(err_msg, sizeof(err_msg),
			"%s(): Unknown flow state: %i; logging and dropping flow entry...\n",
			__func__, fe->state);
		RTE_VERIFY(ret2 > 0 && ret2 < (int)sizeof(err_msg));
		print_flow_err_msg(&fe->flow, fe_index, err_msg);
		print_flow_state(fe, fe_index);

		/* Ignore return value; nothing further to do with it. */
		gk_del_flow_entry_at_pos(instance, fe_index);
		break;
	}
	}

	return ret;
}

/*
 * Prefetch the cache line(s) that hold @fe; the build-time checks pin
 * how many lines a flow entry occupies for each cache line size.
 */
static inline void
prefetch_flow_entry(struct flow_entry *fe)
{
#if RTE_CACHE_LINE_SIZE == 64
	RTE_BUILD_BUG_ON(sizeof(*fe) <= RTE_CACHE_LINE_SIZE);
	RTE_BUILD_BUG_ON(sizeof(*fe) > 2 * RTE_CACHE_LINE_SIZE);
	rte_prefetch0(fe);
	rte_prefetch0(((char *)fe) + RTE_CACHE_LINE_SIZE);
#elif RTE_CACHE_LINE_SIZE == 128
	RTE_BUILD_BUG_ON(sizeof(*fe) > RTE_CACHE_LINE_SIZE);
	rte_prefetch0(fe);
#else
#error "Unsupported cache line size"
#endif
}

/*
 * Classify one received mbuf: account its size, extract its flow
 * (diverting ARP to @arp_bufs and, via extract_packet_info(), other
 * non-IP traffic), drop packets of an unconfigured IP family, and
 * append the flow and its hash value to @flow_arr/@flow_hash_val_arr
 * (incrementing *@num_ip_flows). The RSS hash computed by the NIC is
 * reused when the front interface has RSS; otherwise it is computed
 * in software.
 */
static void
parse_packet(struct ipacket *packet, struct rte_mbuf *pkt,
	struct rte_mbuf **arp_bufs, uint16_t *num_arp,
	struct acl_search *acl6, bool ipv4_configured_front,
	bool ipv6_configured_front, struct ip_flow **flow_arr,
	uint32_t *flow_hash_val_arr, int *num_ip_flows,
	struct gatekeeper_if *front, struct gk_instance *instance)
{
	int ret;
	struct gk_measurement_metrics *stats = &instance->traffic_stats;

	stats->tot_pkts_size += rte_pktmbuf_pkt_len(pkt);

	ret = extract_packet_info(pkt, packet, arp_bufs, num_arp, acl6);
	if (unlikely(ret < 0)) {
		stats->tot_pkts_num_distributed++;
		stats->tot_pkts_size_distributed += rte_pktmbuf_pkt_len(pkt);
		/* Only -EINVAL means the packet was not taken elsewhere. */
		if (unlikely(ret == -EINVAL))
			drop_packet_front(pkt, instance);
		return;
	}

	if (unlikely((packet->flow.proto == RTE_ETHER_TYPE_IPV4 &&
			!ipv4_configured_front) ||
			(packet->flow.proto == RTE_ETHER_TYPE_IPV6 &&
			!ipv6_configured_front))) {
		drop_packet_front(pkt, instance);
		return;
	}

	flow_arr[*num_ip_flows] = &packet->flow;
	flow_hash_val_arr[*num_ip_flows] = likely(front->rss)
		? pkt->hash.rss
		: rss_flow_hash(front, &packet->flow);
	(*num_ip_flows)++;
}

/* How many packets ahead of the current one to prefetch. */
#define PREFETCH_OFFSET (4)

/* Process the packets on the front interface.
 */
static void
process_pkts_front(uint16_t port_front, uint16_t rx_queue_front,
	unsigned int lcore, uint16_t *tx_front_num_pkts,
	struct rte_mbuf **tx_front_pkts, uint16_t *tx_back_num_pkts,
	struct rte_mbuf **tx_back_pkts, struct gk_instance *instance,
	struct gk_config *gk_conf)
{
	int i;
	int ret;
	uint16_t num_rx;
	uint16_t num_arp = 0;
	uint16_t num_reqs = 0;
	uint16_t front_max_pkt_burst = gk_conf->front_max_pkt_burst;
	struct rte_mbuf *rx_bufs[front_max_pkt_burst];
	struct rte_mbuf *arp_bufs[front_max_pkt_burst];
	struct rte_mbuf *req_bufs[front_max_pkt_burst];
	DEFINE_ACL_SEARCH(acl4, front_max_pkt_burst);
	DEFINE_ACL_SEARCH(acl6, front_max_pkt_burst);
	struct gatekeeper_if *front = &gk_conf->net->front;
	struct gatekeeper_if *back = &gk_conf->net->back;
	struct gk_measurement_metrics *stats = &instance->traffic_stats;
	bool ipv4_configured_front = ipv4_if_configured(&gk_conf->net->front);
	bool ipv6_configured_front = ipv6_if_configured(&gk_conf->net->front);
	/* Per-burst working arrays, indexed by IP flow position. */
	int num_ip_flows = 0;
	struct ipacket pkt_arr[front_max_pkt_burst];
	struct ip_flow *flow_arr[front_max_pkt_burst];
	uint32_t flow_hash_val_arr[front_max_pkt_burst];
	int num_lpm_lookups = 0;
	int num_lpm6_lookups = 0;
	struct ip_flow *flows[front_max_pkt_burst];
	struct ip_flow *flows6[front_max_pkt_burst];
	int32_t lpm_lookup_pos[front_max_pkt_burst];
	int32_t lpm6_lookup_pos[front_max_pkt_burst];
	uint32_t pos_arr[front_max_pkt_burst];
	struct gk_fib *fibs[front_max_pkt_burst];
	struct gk_fib *fibs6[front_max_pkt_burst];
	struct flow_entry *fe_arr[front_max_pkt_burst];

	/* Load a set of packets from the front NIC. */
	num_rx = rte_eth_rx_burst(port_front, rx_queue_front, rx_bufs,
		front_max_pkt_burst);
	if (unlikely(num_rx == 0))
		return;

	stats->tot_pkts_num += num_rx;

	/*
	 * This prefetch is enough to load Ethernet header (14 bytes),
	 * optional Ethernet VLAN header (8 bytes), and either
	 * an IPv4 header without options (20 bytes), or
	 * an IPv6 header without options (40 bytes).
	 * IPv4: 14 + 8 + 20 = 42
	 * IPv6: 14 + 8 + 40 = 62
	 */
	for (i = 0; i < PREFETCH_OFFSET && i < num_rx; i++)
		rte_prefetch0(rte_pktmbuf_mtod_offset(rx_bufs[i], void *, 0));

	/* Extract packet and flow information. */
	for (i = 0; i < (num_rx - PREFETCH_OFFSET); i++) {
		/* Stay PREFETCH_OFFSET packets ahead of the parser. */
		rte_prefetch0(rte_pktmbuf_mtod_offset(
			rx_bufs[i + PREFETCH_OFFSET], void *, 0));
		parse_packet(&pkt_arr[num_ip_flows], rx_bufs[i], arp_bufs,
			&num_arp, &acl6, ipv4_configured_front,
			ipv6_configured_front, flow_arr, flow_hash_val_arr,
			&num_ip_flows, front, instance);
	}

	/* Extract the remaining packet and flow information. */
	for (; i < num_rx; i++) {
		parse_packet(&pkt_arr[num_ip_flows], rx_bufs[i], arp_bufs,
			&num_arp, &acl6, ipv4_configured_front,
			ipv6_configured_front, flow_arr, flow_hash_val_arr,
			&num_ip_flows, front, instance);
	}

	/* Look up all flows in the flow table in one bulk operation. */
	ret = hs_hash_lookup_with_hash_bulk(
		&instance->ip_flow_hash_table, (const void **)&flow_arr,
		flow_hash_val_arr, num_ip_flows, pos_arr);
	if (unlikely(ret < 0)) {
		G_LOG(NOTICE, "%s(): hs_hash_lookup_with_hash_bulk() failed (errno=%i): %s\n",
			__func__, -ret, strerror(-ret));
	}

	/*
	 * Split the batch: hits are prefetched for processing below;
	 * misses are queued for an LPM lookup per IP family.
	 */
	for (i = 0; i < num_ip_flows; i++) {
		if (pos_arr[i] != HS_HASH_MISS) {
			fe_arr[i] = &instance->ip_flow_entry_table[pos_arr[i]];
			prefetch_flow_entry(fe_arr[i]);
		} else {
			fe_arr[i] = NULL;
			if (flow_arr[i]->proto == RTE_ETHER_TYPE_IPV4) {
				lpm_lookup_pos[num_lpm_lookups] = i;
				flows[num_lpm_lookups] = flow_arr[i];
				num_lpm_lookups++;
			} else {
				lpm6_lookup_pos[num_lpm6_lookups] = i;
				flows6[num_lpm6_lookups] = flow_arr[i];
				num_lpm6_lookups++;
			}
		}
	}

	/* The remaining flows need LPM lookups. */
	lookup_fib_bulk(&gk_conf->lpm_tbl, flows, num_lpm_lookups, fibs);
	lookup_fib6_bulk(&gk_conf->lpm_tbl, flows6, num_lpm6_lookups, fibs6);

	for (i = 0; i < num_lpm_lookups; i++) {
		int fidx = lpm_lookup_pos[i];
		fe_arr[fidx] = lookup_fe_from_lpm(&pkt_arr[fidx],
			flow_hash_val_arr[fidx], fibs[i], &pos_arr[fidx],
			tx_back_num_pkts, tx_back_pkts, &acl4, &acl6,
			tx_front_num_pkts, tx_front_pkts, front, back,
			instance, gk_conf);
	}

	for (i = 0; i < num_lpm6_lookups; i++) {
		int fidx = lpm6_lookup_pos[i];
		fe_arr[fidx] = lookup_fe_from_lpm(&pkt_arr[fidx],
			flow_hash_val_arr[fidx], fibs6[i], &pos_arr[fidx],
			tx_back_num_pkts, tx_back_pkts, &acl4, &acl6,
			tx_front_num_pkts, tx_front_pkts, front, back,
			instance, gk_conf);
	}

	/* Run the per-flow policy on every packet that has a flow entry. */
	for (i = 0; i < num_ip_flows; i++) {
		if (fe_arr[i] == NULL)
			continue;

		ret = process_flow_entry(fe_arr[i], pos_arr[i], &pkt_arr[i],
			req_bufs, &num_reqs, gk_conf, instance);
		if (ret < 0)
			drop_packet_front(pkt_arr[i].pkt, instance);
		else if (ret == EINPROGRESS) {
			/* Request will be serviced by another lcore.
			 */
			continue;
		} else if (likely(ret == 0))
			tx_back_pkts[(*tx_back_num_pkts)++] = pkt_arr[i].pkt;
		else
			rte_panic("Invalid return value (%d) from processing a packet in a flow with state %d",
				ret, fe_arr[i]->state);
	}

	/* Hand the request packets to the solicitor block. */
	if (num_reqs > 0) {
		/*
		 * Prefix sums of request sizes so that the bytes of only
		 * the successfully enqueued requests are accounted.
		 */
		uint64_t acc_size_request[num_reqs + 1];

		acc_size_request[0] = 0;
		for (i = 1; i <= num_reqs; i++) {
			acc_size_request[i] = acc_size_request[i - 1] +
				rte_pktmbuf_pkt_len(req_bufs[i - 1]);
		}

		ret = RTE_MAX(gk_solicitor_enqueue_bulk(instance->sol_inst,
			req_bufs, num_reqs), 0);
		if (ret < num_reqs) {
			/* Drop the requests that could not be enqueued. */
			for (i = ret; i < num_reqs; i++)
				drop_packet_front(req_bufs[i], instance);
		}

		stats->pkts_num_request += ret;
		stats->pkts_size_request += acc_size_request[ret];
	}

	if (num_arp > 0)
		submit_arp(arp_bufs, num_arp, &gk_conf->net->front);

	process_pkts_acl(&gk_conf->net->front, lcore, &acl4,
		RTE_ETHER_TYPE_IPV4);
	process_pkts_acl(&gk_conf->net->front, lcore, &acl6,
		RTE_ETHER_TYPE_IPV6);
}

/*
 * Apply the FIB entry @fib to one packet received on the back
 * interface: forward to a front gateway/neighbor (into @tx_bufs),
 * divert to the ACLs when no FIB entry applies, or drop. Mirrors
 * lookup_fe_from_lpm() for the back-to-front direction, except that
 * no flow entries are created here.
 */
static void
process_fib_back(struct ipacket *packet, struct gk_fib *fib,
	uint16_t *num_tx, struct rte_mbuf **tx_bufs, struct acl_search *acl4,
	struct acl_search *acl6, uint16_t *num_pkts,
	struct rte_mbuf **icmp_bufs, struct gatekeeper_if *front,
	struct gatekeeper_if *back, struct gk_instance *instance)
{
	struct rte_mbuf *pkt = packet->pkt;
	struct ether_cache *eth_cache;
	char err_msg[128];
	int ret;

	if (fib == NULL || fib->action == GK_FWD_NEIGHBOR_BACK_NET) {
		if (packet->flow.proto == RTE_ETHER_TYPE_IPV4)
			add_pkt_acl(acl4, pkt);
		else if (likely(packet->flow.proto == RTE_ETHER_TYPE_IPV6))
			add_pkt_acl(acl6, pkt);
		else {
			ret = snprintf(err_msg, sizeof(err_msg), "%s(): failed to find the fib entry or it is not an IP packet; dropping packet...",
				__func__);
			RTE_VERIFY(ret > 0 && ret < (int)sizeof(err_msg));
			print_flow_err_msg(&packet->flow, -ENOENT, err_msg);
			drop_packet(pkt);
		}
		return;
	}

	switch (fib->action) {
	case GK_FWD_GATEWAY_FRONT_NET:
		/*
		 * The entry instructs to forward
		 * its packets to the gateway in
		 * the front network, forward accordingly.
		 *
		 * BP bypasses from the back to the front interface
		 * are expected to bypass the outgoing traffic
		 * from the AS to its peers.
		 *
		 * Notice that one needs to update
		 * the Ethernet header.
		 */
		eth_cache = fib->u.gateway.eth_cache;
		RTE_VERIFY(eth_cache != NULL);

		if (adjust_pkt_len(pkt, front, 0) == NULL ||
				pkt_copy_cached_eth_header(pkt, eth_cache,
					front->l2_len_out)) {
			drop_packet(pkt);
			return;
		}

		if (update_ip_hop_count(back, packet, num_pkts, icmp_bufs,
				&instance->back_icmp_rs, instance,
				drop_packet_back) < 0)
			return;

		tx_bufs[(*num_tx)++] = pkt;
		return;

	case GK_FWD_GATEWAY_BACK_NET:
		/* Gatekeeper does not intermediate neighbors. */
		/* print_flow_err_msg() uses G_LOG, so test log level. */
		if (unlikely(G_LOG_CHECK(DEBUG))) {
			ret = snprintf(err_msg, sizeof(err_msg), "%s(): packet arrived at the back interface and is destined to a back gateway; dropping packet...",
				__func__);
			RTE_VERIFY(ret > 0 && ret < (int)sizeof(err_msg));
			print_flow_err_msg(&packet->flow, -ENOENT, err_msg);
		}
		drop_packet(pkt);
		return;

	case GK_FWD_NEIGHBOR_FRONT_NET:
		/*
		 * The entry instructs to forward its packets to
		 * the neighbor in the front network, forward accordingly.
		 */
		if (packet->flow.proto == RTE_ETHER_TYPE_IPV4) {
			eth_cache = lookup_ether_cache(&fib->u.neigh,
				&packet->flow.f.v4.dst);
		} else {
			eth_cache = lookup_ether_cache(&fib->u.neigh,
				&packet->flow.f.v6.dst);
		}

		if (eth_cache == NULL) {
			/*
			 * print_flow_err_msg() uses G_LOG, so test log level.
			 *
			 * NOTICE that the unknown front neighbor that the log
			 * entry below refers to could be the address of
			 * our front interface as well. We cannot just send
			 * the packet to the filter of the front interface
			 * because the target filter may be implemented in
			 * the hardware of the front interface.
			 */
			if (unlikely(G_LOG_CHECK(DEBUG))) {
				ret = snprintf(err_msg, sizeof(err_msg), "%s(): packet arrived at the back interface and is destined to unknown front neighbor; dropping packet...",
					__func__);
				RTE_VERIFY(ret > 0 &&
					ret < (int)sizeof(err_msg));
				print_flow_err_msg(&packet->flow, -ENOENT,
					err_msg);
			}
			drop_packet(pkt);
			return;
		}

		if (adjust_pkt_len(pkt, front, 0) == NULL ||
				pkt_copy_cached_eth_header(pkt, eth_cache,
					front->l2_len_out)) {
			drop_packet(pkt);
			return;
		}

		if (update_ip_hop_count(back, packet, num_pkts, icmp_bufs,
				&instance->back_icmp_rs, instance,
				drop_packet_back) < 0)
			return;

		tx_bufs[(*num_tx)++] = pkt;
		return;

	case GK_FWD_NEIGHBOR_BACK_NET:
		/* Handled by the guard at the top of this function. */
		ret = snprintf(err_msg, sizeof(err_msg), "%s(): bug: GK_FWD_NEIGHBOR_BACK_NET should have been already handled; dropping packet...",
			__func__);
		RTE_VERIFY(ret > 0 && ret < (int)sizeof(err_msg));
		/*
		 * XXX The log priority should be CRIT, but
		 * print_flow_err_msg() does not have a priority parameter.
		 */
		print_flow_err_msg(&packet->flow, -ENOENT, err_msg);
		drop_packet(pkt);
		return;

	case GK_DROP:
		drop_packet(pkt);
		return;

	default:
		/* All other actions should log an error. */
		ret = snprintf(err_msg, sizeof(err_msg), "%s(): a FIB entry has the unknown action %u; dropping packet...",
			__func__, fib->action);
		RTE_VERIFY(ret > 0 && ret < (int)sizeof(err_msg));
		print_flow_err_msg(&packet->flow, -ENOENT, err_msg);
		drop_packet(pkt);
		return;
	}
}

/* Process the packets on the back interface.
/*
 * RX path for the back interface: receive a burst, classify each
 * packet, perform the FIB lookups in bulk (per IP family), and
 * dispatch every IP packet through process_fib_back().  Packets to
 * transmit are appended to the front/back TX arrays owned by the
 * caller (gk_proc()).
 */
static void
process_pkts_back(uint16_t port_back, uint16_t rx_queue_back,
	unsigned int lcore,
	uint16_t *tx_front_num_pkts, struct rte_mbuf **tx_front_pkts,
	uint16_t *tx_back_num_pkts, struct rte_mbuf **tx_back_pkts,
	struct gk_instance *instance, struct gk_config *gk_conf)
{
	int i;
	int ret;
	uint16_t num_rx;
	uint16_t num_arp = 0;
	uint16_t back_max_pkt_burst = gk_conf->back_max_pkt_burst;
	struct rte_mbuf *rx_bufs[back_max_pkt_burst];
	struct rte_mbuf *arp_bufs[back_max_pkt_burst];
	DEFINE_ACL_SEARCH(acl4, back_max_pkt_burst);
	DEFINE_ACL_SEARCH(acl6, back_max_pkt_burst);
	struct gatekeeper_if *front = &gk_conf->net->front;
	struct gatekeeper_if *back = &gk_conf->net->back;
	bool ipv4_configured_back = ipv4_if_configured(&gk_conf->net->back);
	bool ipv6_configured_back = ipv6_if_configured(&gk_conf->net->back);
	int num_ip_flows = 0;
	struct ipacket pkt_arr[back_max_pkt_burst];
	int num_lpm_lookups = 0;
	int num_lpm6_lookups = 0;
	/* Positions into pkt_arr[] for each family's bulk lookup. */
	int lpm_lookup_pos[back_max_pkt_burst];
	int lpm6_lookup_pos[back_max_pkt_burst];
	struct ip_flow *flows[back_max_pkt_burst];
	struct ip_flow *flows6[back_max_pkt_burst];
	struct gk_fib *fibs[back_max_pkt_burst];
	struct gk_fib *fibs6[back_max_pkt_burst];

	/* Load a set of packets from the back NIC. */
	num_rx = rte_eth_rx_burst(port_back, rx_queue_back, rx_bufs,
		back_max_pkt_burst);

	if (unlikely(num_rx == 0))
		return;

	/*
	 * This prefetch is enough to load Ethernet header (14 bytes),
	 * optional Ethernet VLAN header (8 bytes), and either
	 * an IPv4 header without options (20 bytes), or
	 * an IPv6 header without options (40 bytes).
	 * IPv4: 14 + 8 + 20 = 42
	 * IPv6: 14 + 8 + 40 = 62
	 */
	for (i = 0; i < num_rx; i++)
		rte_prefetch0(rte_pktmbuf_mtod_offset(rx_bufs[i], void *, 0));

	for (i = 0; i < num_rx; i++) {
		struct ipacket *packet = &pkt_arr[num_ip_flows];
		struct rte_mbuf *pkt = rx_bufs[i];

		/* ARP packets are diverted into arp_bufs. */
		ret = extract_packet_info(pkt, packet, arp_bufs, &num_arp,
			&acl6);
		if (unlikely(ret < 0)) {
			if (unlikely(ret == -EINVAL))
				drop_packet(pkt);
			continue;
		}

		/* Drop IP families not configured on the back interface. */
		if (unlikely((packet->flow.proto == RTE_ETHER_TYPE_IPV4 &&
				!ipv4_configured_back) ||
				(packet->flow.proto == RTE_ETHER_TYPE_IPV6 &&
				!ipv6_configured_back))) {
			drop_packet_back(pkt, instance);
			continue;
		}

		/* Partition the flows by family for the bulk lookups. */
		if (packet->flow.proto == RTE_ETHER_TYPE_IPV4) {
			lpm_lookup_pos[num_lpm_lookups] = num_ip_flows;
			flows[num_lpm_lookups] = &packet->flow;
			num_lpm_lookups++;
		} else {
			lpm6_lookup_pos[num_lpm6_lookups] = num_ip_flows;
			flows6[num_lpm6_lookups] = &packet->flow;
			num_lpm6_lookups++;
		}
		num_ip_flows++;
	}

	lookup_fib_bulk(&gk_conf->lpm_tbl, flows, num_lpm_lookups, fibs);
	lookup_fib6_bulk(&gk_conf->lpm_tbl, flows6, num_lpm6_lookups, fibs6);

	for (i = 0; i < num_lpm_lookups; i++) {
		int fidx = lpm_lookup_pos[i];
		process_fib_back(&pkt_arr[fidx], fibs[i],
			tx_front_num_pkts, tx_front_pkts, &acl4, &acl6,
			tx_back_num_pkts, tx_back_pkts, front, back, instance);
	}

	for (i = 0; i < num_lpm6_lookups; i++) {
		int fidx = lpm6_lookup_pos[i];
		process_fib_back(&pkt_arr[fidx], fibs6[i],
			tx_front_num_pkts, tx_front_pkts, &acl4, &acl6,
			tx_back_num_pkts, tx_back_pkts, front, back, instance);
	}

	if (num_arp > 0)
		submit_arp(arp_bufs, num_arp, &gk_conf->net->back);

	process_pkts_acl(&gk_conf->net->back, lcore, &acl4,
		RTE_ETHER_TYPE_IPV4);
	process_pkts_acl(&gk_conf->net->back, lcore, &acl6,
		RTE_ETHER_TYPE_IPV6);
}
policy->params.granted.tx_rate_kib_sec; fe->u.granted.send_next_renewal_at = now + policy->params.granted.next_renewal_ms * cycles_per_ms; fe->u.granted.renewal_step_cycle = policy->params.granted.renewal_step_ms * cycles_per_ms; fe->u.granted.budget_renew_at = now + cycles_per_sec; fe->u.granted.budget_byte = (uint64_t)fe->u.granted.tx_rate_kib_cycle * 1024; break; case GK_DECLINED: fe->state = GK_DECLINED; fe->expire_at = now + policy->params.declined.expire_sec * cycles_per_sec; break; case GK_BPF: fe->state = GK_BPF; fe->expire_at = now + policy->params.bpf.expire_sec * cycles_per_sec; fe->program_index = policy->params.bpf.program_index; fe->u.bpf.cookie = policy->params.bpf.cookie; break; default: G_LOG(ERR, "%s(): unknown flow state %u\n", __func__, policy->state); break; } } static void update_flow_table(struct gk_fib *fib, struct ggu_policy *policy, struct gk_instance *instance, struct gk_config *gk_conf, uint32_t rss_hash_val) { int ret; struct flow_entry *fe; uint32_t fe_index; if (fib == NULL || fib->action != GK_FWD_GRANTOR) { /* Drop this solicitation to add a policy decision. */ char err_msg[128]; ret = snprintf(err_msg, sizeof(err_msg), "%s(): error while initializing flow entry", __func__); RTE_VERIFY(ret > 0 && ret < (int)sizeof(err_msg)); print_flow_err_msg(&policy->flow, -ENOENT, err_msg); return; } ret = gk_hash_add_flow_entry(instance, &policy->flow, rss_hash_val, gk_conf, &fe_index); if (unlikely(ret == -EEXIST)) { /* * This rare case happens when two or more policy updates of * a new flow are present in the same batch of policy updates * being processed. 
* * DO_NOTHING; */ } else if (unlikely(ret < 0)) return; fe = &instance->ip_flow_entry_table[fe_index]; initialize_flow_entry(fe, &policy->flow, rss_hash_val, fib); update_flow_entry(fe, policy); } static void add_ggu_policy_bulk(struct gk_add_policy **policies, int num_policies, struct gk_instance *instance, struct gk_config *gk_conf) { int i; int ret; struct ip_flow *flow_arr[num_policies]; hash_sig_t flow_hash_val_arr[num_policies]; uint32_t pos_arr[num_policies]; int num_lpm_lookups = 0; int num_lpm6_lookups = 0; int32_t lpm_lookup_pos[num_policies]; int32_t lpm6_lookup_pos[num_policies]; struct ip_flow *flows[num_policies]; struct ip_flow *flows6[num_policies]; struct gk_fib *fibs[num_policies]; struct gk_fib *fibs6[num_policies]; for (i = 0; i < num_policies; i++) { flow_arr[i] = &policies[i]->policy.flow; flow_hash_val_arr[i] = policies[i]->flow_hash_val; } ret = hs_hash_lookup_with_hash_bulk( &instance->ip_flow_hash_table, (const void **)&flow_arr, flow_hash_val_arr, num_policies, pos_arr); if (unlikely(ret < 0)) { G_LOG(NOTICE, "%s(): hs_hash_lookup_with_hash_bulk() failed (errno=%i): %s\n", __func__, -ret, strerror(-ret)); } for (i = 0; i < num_policies; i++) { if (likely(pos_arr[i] != HS_HASH_MISS)) { struct ggu_policy *policy = &policies[i]->policy; struct flow_entry *fe = &instance->ip_flow_entry_table[pos_arr[i]]; update_flow_entry(fe, policy); } else if (likely(flow_arr[i]->proto == RTE_ETHER_TYPE_IPV4)) { lpm_lookup_pos[num_lpm_lookups] = i; flows[num_lpm_lookups] = flow_arr[i]; num_lpm_lookups++; } else { lpm6_lookup_pos[num_lpm6_lookups] = i; flows6[num_lpm6_lookups] = flow_arr[i]; num_lpm6_lookups++; } } if (instance->num_scan_del > 0) return; /* The remaining flows need LPM lookups. 
*/ lookup_fib_bulk(&gk_conf->lpm_tbl, flows, num_lpm_lookups, fibs); lookup_fib6_bulk(&gk_conf->lpm_tbl, flows6, num_lpm6_lookups, fibs6); for (i = 0; i < num_lpm_lookups; i++) { int fidx = lpm_lookup_pos[i]; update_flow_table(fibs[i], &policies[fidx]->policy, instance, gk_conf, policies[fidx]->flow_hash_val); } for (i = 0; i < num_lpm6_lookups; i++) { int fidx = lpm6_lookup_pos[i]; update_flow_table(fibs6[i], &policies[fidx]->policy, instance, gk_conf, policies[fidx]->flow_hash_val); } } static void process_cmds_from_mailbox( struct gk_instance *instance, struct gk_config *gk_conf) { int i; int num_cmd; int num_policies = 0; unsigned int mailbox_burst_size = gk_conf->mailbox_burst_size; struct gk_cmd_entry *gk_cmds[mailbox_burst_size]; struct gk_add_policy *policies[mailbox_burst_size]; /* Load a set of commands from its mailbox ring. */ num_cmd = mb_dequeue_burst(&instance->mb, (void **)gk_cmds, mailbox_burst_size); for (i = 0; i < num_cmd; i++) process_gk_cmd(gk_cmds[i], policies, &num_policies, instance); if (num_policies > 0) add_ggu_policy_bulk(policies, num_policies, instance, gk_conf); mb_free_entry_bulk(&instance->mb, (void * const *)gk_cmds, num_cmd); } static void log_stats(const struct gk_instance *instance, const struct gk_measurement_metrics *stats) { G_LOG(NOTICE, "Basic measurements [tot_pkts_num = %"PRIu64", tot_pkts_size = %"PRIu64", pkts_num_granted = %"PRIu64", pkts_size_granted = %"PRIu64", pkts_num_request = %"PRIu64", pkts_size_request = %"PRIu64", pkts_num_declined = %"PRIu64", pkts_size_declined = %"PRIu64", tot_pkts_num_dropped = %"PRIu64", tot_pkts_size_dropped = %"PRIu64", tot_pkts_num_distributed = %"PRIu64", tot_pkts_size_distributed = %"PRIu64", flow_table_occupancy = %"PRIu32"/%u=%.1f%%]\n", stats->tot_pkts_num, stats->tot_pkts_size, stats->pkts_num_granted, stats->pkts_size_granted, stats->pkts_num_request, stats->pkts_size_request, stats->pkts_num_declined, stats->pkts_size_declined, stats->tot_pkts_num_dropped, 
/*
 * Main loop of one GK instance (one per lcore).
 *
 * Each iteration: optionally pick the next flow-table entry to scan
 * for expiration, process an RX burst from each interface, transmit
 * the accumulated TX bursts, service mailbox commands, delete the
 * scanned entry if it expired, and periodically log and reset the
 * basic measurements.
 */
static int
gk_proc(void *arg)
{
	unsigned int lcore = rte_lcore_id();
	struct gk_config *gk_conf = (struct gk_config *)arg;
	unsigned int block_idx = get_block_idx(gk_conf, lcore);
	struct gk_instance *instance = &gk_conf->instances[block_idx];

	uint16_t port_front = gk_conf->net->front.id;
	uint16_t port_back = gk_conf->net->back.id;
	uint16_t rx_queue_front = instance->rx_queue_front;
	uint16_t tx_queue_front = instance->tx_queue_front;
	uint16_t rx_queue_back = instance->rx_queue_back;
	uint16_t tx_queue_back = instance->tx_queue_back;

	uint16_t tx_front_num_pkts;
	uint16_t tx_back_num_pkts;
	/* Both interfaces can feed either TX array, so size for the sum. */
	uint16_t tx_max_num_pkts = gk_conf->front_max_pkt_burst +
		gk_conf->back_max_pkt_burst;
	struct rte_mbuf *tx_front_pkts[tx_max_num_pkts];
	struct rte_mbuf *tx_back_pkts[tx_max_num_pkts];

	uint32_t entry_idx = 0;
	uint64_t last_measure_tsc = rte_rdtsc();
	uint64_t basic_measurement_logging_cycles =
		gk_conf->basic_measurement_logging_ms *
		rte_get_tsc_hz() / 1000;
	uint32_t scan_iter = gk_conf->flow_table_scan_iter;
	uint32_t iter_count = 0;

	G_LOG(NOTICE, "The GK block is running at tid = %u\n", gettid());

	if (needed_caps(0, NULL) < 0) {
		G_LOG(ERR, "Could not set needed capabilities\n");
		exiting = true;
	}

	gk_conf_hold(gk_conf);

	while (likely(!exiting)) {
		struct flow_entry *fe = NULL;

		tx_front_num_pkts = 0;
		tx_back_num_pkts = 0;

		/* Every @scan_iter iterations, scan one flow entry. */
		if (iter_count >= scan_iter) {
			entry_idx = (entry_idx + 1) % gk_conf->flow_ht_size;
			fe = &instance->ip_flow_entry_table[entry_idx];
			/*
			 * Only one prefetch is needed here because we only
			 * need the beginning of a struct flow_entry to
			 * check if it's expired.
			 */
			rte_prefetch_non_temporal(fe);
			iter_count = 0;
		} else
			iter_count++;

		process_pkts_front(port_front, rx_queue_front, lcore,
			&tx_front_num_pkts, tx_front_pkts,
			&tx_back_num_pkts, tx_back_pkts,
			instance, gk_conf);

		process_pkts_back(port_back, rx_queue_back, lcore,
			&tx_front_num_pkts, tx_front_pkts,
			&tx_back_num_pkts, tx_back_pkts,
			instance, gk_conf);

		/*
		 * If the scanned entry expired, prefetch its hash bucket
		 * now; the actual deletion happens below so the TX and
		 * mailbox work overlaps the memory access.
		 */
		if (fe != NULL && fe->in_use && rte_rdtsc() >= fe->expire_at) {
			hs_hash_prefetch_bucket_non_temporal(
				&instance->ip_flow_hash_table,
				fe->flow_hash_val);
		} else
			fe = NULL;

		send_pkts(port_front, tx_queue_front,
			tx_front_num_pkts, tx_front_pkts);

		send_pkts(port_back, tx_queue_back,
			tx_back_num_pkts, tx_back_pkts);

		process_cmds_from_mailbox(instance, gk_conf);

		if (fe != NULL && fe->in_use && rte_rdtsc() >= fe->expire_at) {
			/* Ignore return value; nothing further to do. */
			gk_del_flow_entry_at_pos(instance, entry_idx);
		}

		/* Periodically log and reset the basic measurements. */
		if (rte_rdtsc() - last_measure_tsc >=
				basic_measurement_logging_cycles) {
			struct gk_measurement_metrics *stats =
				&instance->traffic_stats;

			log_stats(instance, stats);

			memset(stats, 0, sizeof(*stats));
			last_measure_tsc = rte_rdtsc();
		}
	}

	G_LOG(NOTICE, "The GK block is exiting\n");

	return gk_conf_put(gk_conf);
}
ip_flow_entry_table); } destroy_mailbox(&gk_conf->instances[i].mb); } if (gk_conf->lpm_tbl.fib_tbl != NULL) { for (ui = 0; ui < gk_conf->max_num_ipv4_rules; ui++) { struct gk_fib *fib = &gk_conf->lpm_tbl.fib_tbl[ui]; if (fib->action == GK_FWD_NEIGHBOR_FRONT_NET || fib->action == GK_FWD_NEIGHBOR_BACK_NET) { destroy_neigh_hash_table(&fib->u.neigh); } } } if (gk_conf->lpm_tbl.fib_tbl6 != NULL) { for (ui = 0; ui < gk_conf->max_num_ipv6_rules; ui++) { struct gk_fib *fib = &gk_conf->lpm_tbl.fib_tbl6[ui]; if (fib->action == GK_FWD_NEIGHBOR_FRONT_NET || fib->action == GK_FWD_NEIGHBOR_BACK_NET) { destroy_neigh_hash_table(&fib->u.neigh); } } } destroy_gk_lpm(&gk_conf->lpm_tbl); rte_free(gk_conf->queue_id_to_instance); rte_free(gk_conf->instances); rte_free(gk_conf->lcores); sol_conf_put(gk_conf->sol_conf); gk_conf->sol_conf = NULL; rte_free(gk_conf); return 0; } int gk_conf_put(struct gk_config *gk_conf) { /* * Atomically decrements the atomic counter by one and returns true * if the result is 0, or false in all other cases. */ if (rte_atomic32_dec_and_test(&gk_conf->ref_cnt)) return cleanup_gk(gk_conf); return 0; } static int gk_stage1(void *arg) { struct gk_config *gk_conf = arg; int num_rx_queues = gk_conf->net->front.num_rx_queues; int ret, i; unsigned int num_mbuf; unsigned int socket_id = rte_lcore_to_socket_id(gk_conf->lcores[0]); struct sol_config *sol_conf; gk_conf->instances = rte_calloc_socket(__func__, gk_conf->num_lcores, sizeof(struct gk_instance), 0, socket_id); if (gk_conf->instances == NULL) goto cleanup; gk_conf->queue_id_to_instance = rte_malloc_socket(__func__, num_rx_queues * sizeof(*gk_conf->queue_id_to_instance), 0, socket_id); if (gk_conf->queue_id_to_instance == NULL) goto cleanup; for(i = 0; i < num_rx_queues; i++) gk_conf->queue_id_to_instance[i] = -1; /* * Set up the GK LPM table. We assume that * all the GK instances are running on the same socket. 
*/ ret = setup_gk_lpm(gk_conf, socket_id); if (ret < 0) goto cleanup; num_mbuf = calculate_mempool_config_para("gk", gk_conf->net, gk_conf->front_max_pkt_burst + gk_conf->back_max_pkt_burst + /* * One cannot divide the sum below per gk_conf->num_lcores * because, though unlikely, it might happen that * all packets go to a single instance. */ (gk_conf->net->front.total_pkt_burst + gk_conf->net->back.total_pkt_burst)); sol_conf = gk_conf->sol_conf; for (i = 0; i < gk_conf->num_lcores; i++) { unsigned int lcore = gk_conf->lcores[i]; struct gk_instance *inst_ptr = &gk_conf->instances[i]; inst_ptr->mp = create_pktmbuf_pool("gk", lcore, num_mbuf); if (inst_ptr->mp == NULL) goto cleanup; /* Set up queue identifiers for RSS. */ ret = get_queue_id(&gk_conf->net->front, QUEUE_TYPE_RX, lcore, inst_ptr->mp); if (ret < 0) { G_LOG(ERR, "Cannot assign an RX queue for the front interface for lcore %u\n", lcore); goto cleanup; } inst_ptr->rx_queue_front = ret; gk_conf->queue_id_to_instance[ret] = i; ret = get_queue_id(&gk_conf->net->front, QUEUE_TYPE_TX, lcore, NULL); if (ret < 0) { G_LOG(ERR, "Cannot assign a TX queue for the front interface for lcore %u\n", lcore); goto cleanup; } inst_ptr->tx_queue_front = ret; ret = get_queue_id(&gk_conf->net->back, QUEUE_TYPE_RX, lcore, inst_ptr->mp); if (ret < 0) { G_LOG(ERR, "Cannot assign an RX queue for the back interface for lcore %u\n", lcore); goto cleanup; } inst_ptr->rx_queue_back = ret; ret = get_queue_id(&gk_conf->net->back, QUEUE_TYPE_TX, lcore, NULL); if (ret < 0) { G_LOG(ERR, "Cannot assign a TX queue for the back interface for lcore %u\n", lcore); goto cleanup; } inst_ptr->tx_queue_back = ret; if (gk_conf->gk_sol_map[i] >= (unsigned int)sol_conf->num_lcores) { G_LOG(ERR, "Invalid index (%u) of sol_conf->instances[] for lcore %u\n", gk_conf->gk_sol_map[i], lcore); goto cleanup; } inst_ptr->sol_inst = &sol_conf->instances[gk_conf->gk_sol_map[i]]; /* Setup the GK instance at @lcore. 
*/ ret = setup_gk_instance(lcore, gk_conf); if (ret < 0) { G_LOG(ERR, "Failed to setup gk instances for GK block at lcore %u\n", lcore); goto cleanup; } } return 0; cleanup: cleanup_gk(gk_conf); return -1; } static int gk_stage2(void *arg) { struct gk_config *gk_conf = arg; int ret = gk_setup_rss(gk_conf); if (ret < 0) goto cleanup; return 0; cleanup: cleanup_gk(gk_conf); return ret; } int run_gk(struct net_config *net_conf, struct gk_config *gk_conf, struct sol_config *sol_conf) { int ret, i; if (net_conf == NULL || gk_conf == NULL || sol_conf == NULL) { ret = -1; goto out; } for (i = 0; i < gk_conf->num_lcores; i++) { log_ratelimit_state_init(gk_conf->lcores[i], gk_conf->log_ratelimit_interval_ms, gk_conf->log_ratelimit_burst, gk_conf->log_level, "GK"); } if (!net_conf->back_iface_enabled) { G_LOG(ERR, "Back interface is required\n"); ret = -1; goto out; } if (!(gk_conf->front_max_pkt_burst > 0 && gk_conf->back_max_pkt_burst > 0)) { ret = -1; goto out; } if (gk_conf->gk_sol_map == NULL) { G_LOG(ERR, "GK-to-SOL mapping is required for initialization\n"); ret = -1; goto out; } gk_conf->net = net_conf; sol_conf_hold(sol_conf); gk_conf->sol_conf = sol_conf; if (gk_conf->num_lcores <= 0) goto success; ret = net_launch_at_stage1( net_conf, gk_conf->num_lcores, gk_conf->num_lcores, gk_conf->num_lcores, gk_conf->num_lcores, gk_stage1, gk_conf); if (ret < 0) goto put_sol; ret = launch_at_stage2(gk_stage2, gk_conf); if (ret < 0) goto stage1; for (i = 0; i < gk_conf->num_lcores; i++) { unsigned int lcore = gk_conf->lcores[i]; ret = launch_at_stage3("gk", gk_proc, gk_conf, lcore); if (ret < 0) { pop_n_at_stage3(i); goto stage2; } } goto success; stage2: pop_n_at_stage2(1); stage1: pop_n_at_stage1(1); put_sol: gk_conf->sol_conf = NULL; sol_conf_put(sol_conf); out: return ret; success: rte_atomic32_init(&gk_conf->ref_cnt); return 0; } struct mailbox * get_responsible_gk_mailbox(uint32_t flow_hash_val, const struct gk_config *gk_conf) { /* * Calculate the RSS hash value for the 
* pair <Src, Dst> in the decision. */ uint32_t rss_hash_val; uint32_t idx; uint32_t shift; uint16_t queue_id; int block_idx; if (unlikely(!gk_conf->net->front.rss)) { block_idx = 0; goto done; } RTE_VERIFY(gk_conf->rss_conf_front.reta_size > 0); rss_hash_val = flow_hash_val % gk_conf->rss_conf_front.reta_size; /* * Identify which GK block is responsible for the * pair <Src, Dst> in the decision. */ idx = rss_hash_val / RTE_ETH_RETA_GROUP_SIZE; shift = rss_hash_val % RTE_ETH_RETA_GROUP_SIZE; queue_id = gk_conf->rss_conf_front.reta_conf[idx].reta[shift]; block_idx = gk_conf->queue_id_to_instance[queue_id]; if (block_idx == -1) G_LOG(ERR, "Wrong RSS configuration for GK blocks\n"); done: return &gk_conf->instances[block_idx].mb; } int gk_flush_flow_table(const char *src_prefix, const char *dst_prefix, struct gk_config *gk_conf) { int i; uint16_t proto = 0; struct gk_flush_request flush; if (src_prefix == NULL && dst_prefix == NULL) { G_LOG(ERR, "Failed to flush flow table: both source and destination prefixes are NULL\n"); return -1; } memset(&flush, 0, sizeof(flush)); /* * Field .str is only meant to help logging and debugging, * but we cannot pass src_prefix or dst_prefix along * because they go away soon after this function returns. 
*/ flush.src.str = __func__; flush.dst.str = __func__; if (src_prefix != NULL) { flush.src.len = parse_ip_prefix(src_prefix, &flush.src.addr); if (flush.src.len < 0) return -1; proto = flush.src.addr.proto; } if (dst_prefix != NULL) { flush.dst.len = parse_ip_prefix(dst_prefix, &flush.dst.addr); if (flush.dst.len < 0 || (src_prefix != NULL && flush.dst.addr.proto != proto)) return -1; proto = flush.dst.addr.proto; } if (src_prefix == NULL) flush.src.addr.proto = proto; if (dst_prefix == NULL) flush.dst.addr.proto = proto; for (i = 0; i < gk_conf->num_lcores; i++) { struct gk_cmd_entry *entry = mb_alloc_entry(&gk_conf->instances[i].mb); if (entry == NULL) { G_LOG(WARNING, "Cannot allocate an entry for the mailbox of the GK block at lcore %u to flush flows that match src_prefix=%s and dst_prefix=%s\n", gk_conf->lcores[i], src_prefix, dst_prefix); continue; } entry->op = GK_FLUSH_FLOW_TABLE; entry->u.flush = flush; mb_send_entry(&gk_conf->instances[i].mb, entry); } return 0; } int gk_log_flow_state(const char *src_addr, const char *dst_addr, struct gk_config *gk_conf) { int ret; uint32_t flow_hash_val; struct ipaddr src; struct ipaddr dst; struct ip_flow flow; struct mailbox *mb; struct gk_cmd_entry *entry; if (src_addr == NULL) { G_LOG(ERR, "gk: failed to log flow state - source address is NULL\n"); return -1; } if (dst_addr == NULL) { G_LOG(ERR, "gk: failed to log flow state - destination address is NULL\n"); return -1; } if (gk_conf == NULL) { G_LOG(ERR, "gk: failed to log flow state - gk_conf is NULL\n"); return -1; } ret = convert_str_to_ip(src_addr, &src); if (ret < 0) { G_LOG(ERR, "gk: failed to log flow state - source address (%s) is invalid\n", src_addr); return -1; } ret = convert_str_to_ip(dst_addr, &dst); if (ret < 0) { G_LOG(ERR, "gk: failed to log flow state - destination address (%s) is invalid\n", dst_addr); return -1; } if (unlikely(src.proto != dst.proto)) { G_LOG(ERR, "gk: failed to log flow state - source (%s) and destination (%s) addresses don't 
/*
 * Broadcast a command (built by @fill_f) to every GK instance and
 * busy-wait until all successfully notified instances have executed
 * it (each one increments @done_counter when done).  Notification is
 * retried up to MAX_NUM_NOTIFY_TRY times per instance; instances that
 * still cannot be notified are skipped with a warning.
 *
 * XXX #70 What we are doing here is analogous to RCU's synchronize_rcu(),
 * what suggests that we may be able to profit from RCU. But we are going
 * to postpone that until we have a better case to bring RCU to Gatekeeper.
 */
void
synchronize_gk_instances(struct gk_config *gk_conf,
	fill_in_gk_cmd_entry_t fill_f, void *arg)
{
	int loop, num_succ_notified_inst = 0;
	bool is_succ_notified[gk_conf->num_lcores];
	rte_atomic32_t done_counter = RTE_ATOMIC32_INIT(0);

	/* The maximum number of times to try to notify the GK instances. */
	const int MAX_NUM_NOTIFY_TRY = 3;

	memset(is_succ_notified, false, sizeof(is_succ_notified));

	for (loop = 0; loop < MAX_NUM_NOTIFY_TRY; loop++) {
		int i;

		/* Notify all GK instances. */
		for (i = 0; i < gk_conf->num_lcores; i++) {
			int ret;

			/* Skip instances notified in an earlier round. */
			if (is_succ_notified[i])
				continue;

			ret = notify_gk_instance(&gk_conf->instances[i],
				&done_counter, fill_f, arg);
			if (unlikely(ret < 0))
				continue;

			is_succ_notified[i] = true;
			num_succ_notified_inst++;
			if (num_succ_notified_inst >= gk_conf->num_lcores)
				goto finish_notify;
		}
	}

finish_notify:

	if (num_succ_notified_inst != gk_conf->num_lcores) {
		G_LOG(WARNING,
			"%s() successfully notified only GK %d/%d instances\n",
			__func__, num_succ_notified_inst,
			gk_conf->num_lcores);
	}

	/* Wait for all GK instances to synchronize. */
	while (rte_atomic32_read(&done_counter) < num_succ_notified_inst)
		rte_pause();
}
/content/code_sandbox/gk/main.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
25,744
```c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ /* For gettid(). */ #define _GNU_SOURCE #include <net/if.h> #include <unistd.h> #include "gatekeeper_cps.h" #include "gatekeeper_l2.h" #include "gatekeeper_launch.h" #include "gatekeeper_lls.h" #include "gatekeeper_log_ratelimit.h" #include "gatekeeper_varip.h" #include "kni.h" #include "rd.h" static struct cps_config cps_conf; struct cps_config * get_cps_conf(void) { return &cps_conf; } static int cleanup_cps(void) { /* * From cps_stage2() */ /* * rd_event_sock_close() can be called even when the netlink * socket is not open. */ rd_event_sock_close(&cps_conf); kni_free(&cps_conf.back_kni); kni_free(&cps_conf.front_kni); /* * From cps_stage1() -> assign_cps_queue_ids() */ destroy_mempool(cps_conf.mp); /* * From run_cps() */ if (cps_conf.gt != NULL) gt_conf_put(cps_conf.gt); cps_conf.gt = NULL; if (cps_conf.gk != NULL) gk_conf_put(cps_conf.gk); cps_conf.gk = NULL; rte_timer_stop(&cps_conf.scan_timer); rd_free_coro(&cps_conf); destroy_mailbox(&cps_conf.mailbox); rte_mempool_free(cps_conf.nd_mp); cps_conf.nd_mp = NULL; rte_mempool_free(cps_conf.arp_mp); cps_conf.arp_mp = NULL; return 0; } /* * Responding to ARP and ND packets from the KNI. If responding to * an ARP/ND packet fails, we remove the request from the linked list * anyway, forcing the KNI to issue another resolution request. 
/*
 * Build and transmit an ARP reply into the KNI, answering the
 * kernel's earlier ARP request with the resolution carried by @arp.
 *
 * NOTE(review): the Ethernet/ARP fields are deliberately "reversed"
 * relative to a wire reply -- the KNI shares the interface's MAC, so
 * the frame is addressed to @iface->eth_addr while @arp->ha/@arp->ip
 * appear as the sender; confirm against the LLS request path.
 */
static void
send_arp_reply_kni(struct cps_config *cps_conf, struct cps_arp_req *arp)
{
	struct gatekeeper_if *iface = arp->iface;
	struct rte_mbuf *created_pkt;
	struct rte_ether_hdr *eth_hdr;
	struct rte_arp_hdr *arp_hdr;
	size_t pkt_size;
	struct cps_kni *kni;
	int ret;

	created_pkt = rte_pktmbuf_alloc(cps_conf->mp);
	if (unlikely(created_pkt == NULL)) {
		G_LOG(ERR, "%s(%s): could not allocate an ARP reply\n",
			__func__, iface->name);
		return;
	}

	pkt_size = sizeof(struct rte_ether_hdr) + sizeof(struct rte_arp_hdr);
	created_pkt->data_len = pkt_size;
	created_pkt->pkt_len = pkt_size;

	/*
	 * Set-up Ethernet header. The Ethernet address of the KNI is the
	 * same as that of the Gatekeeper interface, so we use that in
	 * the Ethernet and ARP headers.
	 */
	eth_hdr = rte_pktmbuf_mtod(created_pkt, struct rte_ether_hdr *);
	rte_ether_addr_copy(&iface->eth_addr, &eth_hdr->dst_addr);
	rte_ether_addr_copy(&arp->ha, &eth_hdr->src_addr);
	eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP);

	/* Set-up ARP header. */
	arp_hdr = (struct rte_arp_hdr *)&eth_hdr[1];
	arp_hdr->arp_hardware = rte_cpu_to_be_16(RTE_ARP_HRD_ETHER);
	arp_hdr->arp_protocol = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
	arp_hdr->arp_hlen = RTE_ETHER_ADDR_LEN;
	arp_hdr->arp_plen = sizeof(struct in_addr);
	arp_hdr->arp_opcode = rte_cpu_to_be_16(RTE_ARP_OP_REPLY);
	rte_ether_addr_copy(&arp->ha, &arp_hdr->arp_data.arp_sha);
	rte_memcpy(&arp_hdr->arp_data.arp_sip, &arp->ip,
		sizeof(arp_hdr->arp_data.arp_sip));
	rte_ether_addr_copy(&iface->eth_addr, &arp_hdr->arp_data.arp_tha);
	arp_hdr->arp_data.arp_tip = iface->ip4_addr.s_addr;

	/* Pick the KNI that belongs to @iface. */
	kni = iface == &cps_conf->net->front ?
		&cps_conf->front_kni : &cps_conf->back_kni;
	ret = kni_tx_burst(kni, &created_pkt, 1);
	if (unlikely(ret != 1)) {
		rte_pktmbuf_free(created_pkt);
		G_LOG(ERR, "%s(%s): could not transmit an ARP reply (ret=%i)\n",
			__func__, iface->name, ret);
	}
}
/*
 * Hand a burst of packets to the KNI.  When the interface inserts
 * VLAN tags, the VLAN header is stripped first by sliding the
 * Ethernet header forward over it, since the kernel device does not
 * expect VLAN frames.  Packets that cannot be delivered are freed.
 */
static void
tx_to_kni(struct gatekeeper_if *iface, struct cps_kni *kni,
	struct rte_mbuf **pkts, const uint16_t num_pkts)
{
	uint16_t num_kni;
	uint16_t num_tx;
	uint16_t i;

	if (unlikely(num_pkts == 0))
		return;

	if (!iface->vlan_insert) {
		num_kni = num_pkts;
		goto kni_tx;
	}

	/* Remove VLAN headers before passing to the KNI. */
	num_kni = 0;
	for (i = 0; i < num_pkts; i++) {
		struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(pkts[i],
			struct rte_ether_hdr *);
		struct rte_vlan_hdr *vlan_hdr;
		if (unlikely(eth_hdr->ether_type !=
				rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))) {
			G_LOG(WARNING,
				"%s iface is configured for VLAN but received a non-VLAN packet\n",
				iface->name);
			/* Pass the packet through unmodified. */
			goto to_kni;
		}

		/* Copy Ethernet header over VLAN header. */
		vlan_hdr = (struct rte_vlan_hdr *)&eth_hdr[1];
		eth_hdr->ether_type = vlan_hdr->eth_proto;
		memmove(RTE_PTR_ADD(eth_hdr, sizeof(struct rte_vlan_hdr)),
			eth_hdr, sizeof(*eth_hdr));

		/* Remove the unneeded bytes from the front of the buffer. */
		if (unlikely(rte_pktmbuf_adj(pkts[i],
				sizeof(struct rte_vlan_hdr)) == NULL)) {
			G_LOG(ERR, "Can't remove VLAN header\n");
			rte_pktmbuf_free(pkts[i]);
			continue;
		}
to_kni:
		/* Compact the array in place over any freed packets. */
		pkts[num_kni++] = pkts[i];
	}
kni_tx:
	num_tx = kni_tx_burst(kni, pkts, num_kni);
	if (unlikely(num_tx < num_kni))
		rte_pktmbuf_free_bulk(&pkts[num_tx], num_kni - num_tx);
}
*rx_bufs[cps_max_pkt_burst]; uint16_t num_rx = rte_eth_rx_burst(iface->id, rx_queue, rx_bufs, cps_max_pkt_burst); tx_to_kni(iface, kni, rx_bufs, num_rx); } static int cps_pkt_is_nd_neighbor(struct gatekeeper_if *iface, struct rte_ether_hdr *eth_hdr, uint16_t pkt_len) { struct rte_ipv6_hdr *ipv6_hdr; struct icmpv6_hdr *icmpv6_hdr; if (pkt_len < (sizeof(*eth_hdr) + sizeof(*ipv6_hdr) + sizeof(*icmpv6_hdr))) return false; ipv6_hdr = (struct rte_ipv6_hdr *)&eth_hdr[1]; if (ipv6_hdr->proto != IPPROTO_ICMPV6) return false; /* * Make sure this is an ND neighbor message and that it was * sent by us (our global address, link-local address, or * either of the solicited-node multicast addresses). */ icmpv6_hdr = (struct icmpv6_hdr *)&ipv6_hdr[1]; return pkt_is_nd_neighbor(icmpv6_hdr->type, icmpv6_hdr->code) && (ipv6_addrs_equal(ipv6_hdr->src_addr, iface->ll_ip6_addr.s6_addr) || ipv6_addrs_equal(ipv6_hdr->src_addr, iface->ip6_addr.s6_addr) || ipv6_addrs_equal(ipv6_hdr->src_addr, iface->ip6_mc_addr.s6_addr) || ipv6_addrs_equal(ipv6_hdr->src_addr, iface->ll_ip6_mc_addr.s6_addr)); } static void cps_arp_cb(const struct lls_map *map, void *arg, __attribute__((unused)) enum lls_reply_ty ty, int *pcall_again) { struct cps_config *cps_conf = get_cps_conf(); struct cps_request *req; int ret; if (pcall_again != NULL) *pcall_again = false; else { /* * Destination didn't reply, so this callback * is the result of a call to put_arp(). */ return; } RTE_VERIFY(!map->stale); /* * If this allocation or queueing of an entry fails, the * resolution request will time out after two iterations * of the timer and be removed in cps_scan() anyway. 
*/ req = mb_alloc_entry(&cps_conf->mailbox); if (req == NULL) { G_LOG(ERR, "%s: allocation of mailbox message failed\n", __func__); return; } req->ty = CPS_REQ_ARP; req->u.arp.ip = map->addr.ip.v4.s_addr; rte_memcpy(&req->u.arp.ha, &map->ha, sizeof(req->u.arp.ha)); req->u.arp.iface = arg; ret = mb_send_entry(&cps_conf->mailbox, req); if (ret < 0) { G_LOG(ERR, "%s: failed to enqueue message to mailbox\n", __func__); return; } } static void process_arp(struct cps_config *cps_conf, struct gatekeeper_if *iface, struct rte_mbuf *buf, const struct rte_ether_hdr *eth_hdr) { int ret; struct rte_arp_hdr *arp_hdr; uint16_t pkt_len = rte_pktmbuf_data_len(buf); struct arp_request *arp_req = NULL; struct arp_request *entry; if (unlikely(!arp_enabled(cps_conf->lls))) { G_LOG(NOTICE, "KNI for %s iface received ARP packet, but the interface is not configured for ARP\n", iface->name); goto out; } if (unlikely(pkt_len < sizeof(*eth_hdr) + sizeof(*arp_hdr))) { G_LOG(ERR, "KNI received ARP packet of size %hu bytes, but it should be at least %zu bytes\n", pkt_len, sizeof(*eth_hdr) + sizeof(*arp_hdr)); goto out; } arp_hdr = rte_pktmbuf_mtod_offset(buf, struct rte_arp_hdr *, sizeof(*eth_hdr)); /* If it's a Gratuitous ARP or reply, then no action is needed. */ if (unlikely(rte_be_to_cpu_16(arp_hdr->arp_opcode) != RTE_ARP_OP_REQUEST || is_garp_pkt(arp_hdr))) goto out; list_for_each_entry(entry, &cps_conf->arp_requests, list) { /* There's already a resolution request for this address. 
*/ if (arp_hdr->arp_data.arp_tip == entry->addr) goto out; } ret = rte_mempool_get(cps_conf->arp_mp, (void **)&arp_req); if (unlikely(ret < 0)) { G_LOG(ERR, "Failed to get a new entry from the ARP request mempool - %s\n", rte_strerror(-ret)); goto out; } arp_req->addr = arp_hdr->arp_data.arp_tip; arp_req->stale = false; list_add_tail(&arp_req->list, &cps_conf->arp_requests); hold_arp(cps_arp_cb, iface, (struct in_addr *)&arp_hdr->arp_data.arp_tip, cps_conf->lcore_id); out: rte_pktmbuf_free(buf); } static void cps_nd_cb(const struct lls_map *map, void *arg, __attribute__((unused)) enum lls_reply_ty ty, int *pcall_again) { struct cps_config *cps_conf = get_cps_conf(); struct cps_request *req; int ret; if (pcall_again != NULL) *pcall_again = false; else { /* * Destination didn't reply, so this callback * is the result of a call to put_nd(). */ return; } RTE_VERIFY(!map->stale); /* * If this allocation or queueing of an entry fails, the * resolution request will time out after two iterations * of the timer and be removed anyway. 
*/ req = mb_alloc_entry(&cps_conf->mailbox); if (req == NULL) { G_LOG(ERR, "%s: allocation of mailbox message failed\n", __func__); return; } req->ty = CPS_REQ_ND; rte_memcpy(req->u.nd.ip, map->addr.ip.v6.s6_addr, sizeof(req->u.nd.ip)); rte_memcpy(&req->u.nd.ha, &map->ha, sizeof(req->u.nd.ha)); req->u.nd.iface = arg; ret = mb_send_entry(&cps_conf->mailbox, req); if (ret < 0) { G_LOG(ERR, "%s: failed to enqueue message to mailbox\n", __func__); return; } } static void process_nd(struct cps_config *cps_conf, struct gatekeeper_if *iface, struct rte_mbuf *buf, const struct rte_ether_hdr *eth_hdr, uint16_t pkt_len) { int ret; struct icmpv6_hdr *icmpv6_hdr; struct nd_neigh_msg *nd_msg; struct nd_request *nd_req = NULL; struct nd_request *entry; if (unlikely(!nd_enabled(cps_conf->lls))) { G_LOG(NOTICE, "KNI for %s iface received ND packet, but the interface is not configured for ND\n", iface->name); goto out; } if (pkt_len < ND_NEIGH_PKT_MIN_LEN(sizeof(*eth_hdr))) { G_LOG(NOTICE, "ND packet received is %"PRIx16" bytes but should be at least %lu bytes\n", pkt_len, ND_NEIGH_PKT_MIN_LEN(sizeof(*eth_hdr))); goto out; } icmpv6_hdr = rte_pktmbuf_mtod_offset(buf, struct icmpv6_hdr *, sizeof(*eth_hdr) + sizeof(struct rte_ipv6_hdr)); if (icmpv6_hdr->type == ND_NEIGHBOR_ADVERTISEMENT_TYPE && icmpv6_hdr->code == ND_NEIGHBOR_ADVERTISEMENT_CODE) { G_LOG(NOTICE, "ND Advertisement packet received from KNI attached to %s iface\n", iface->name); goto out; } nd_msg = (struct nd_neigh_msg *)&icmpv6_hdr[1]; list_for_each_entry(entry, &cps_conf->nd_requests, list) { /* There's already a resolution request for this address. 
*/ if (ipv6_addrs_equal(nd_msg->target, entry->addr)) goto out; } ret = rte_mempool_get(cps_conf->nd_mp, (void **)&nd_req); if (unlikely(ret < 0)) { G_LOG(ERR, "Failed to get a new entry from the ND request mempool - %s\n", rte_strerror(-ret)); goto out; } rte_memcpy(nd_req->addr, nd_msg->target, sizeof(nd_req->addr)); nd_req->stale = false; list_add_tail(&nd_req->list, &cps_conf->nd_requests); hold_nd(cps_nd_cb, iface, (struct in6_addr *)nd_msg->target, cps_conf->lcore_id); out: rte_pktmbuf_free(buf); } static void process_egress(struct cps_config *cps_conf, struct gatekeeper_if *iface, struct cps_kni *kni, uint16_t tx_queue, uint16_t cps_max_pkt_burst) { struct rte_mbuf *bufs[cps_max_pkt_burst]; struct rte_mbuf *forward_bufs[cps_max_pkt_burst]; uint16_t num_rx = kni_rx_burst(kni, bufs, cps_max_pkt_burst); uint16_t num_forward = 0; unsigned int num_tx; unsigned int i; if (num_rx == 0) return; for (i = 0; i < num_rx; i++) { /* Packets sent by the KNI do not have VLAN headers. */ struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(bufs[i], struct rte_ether_hdr *); uint16_t ether_type = rte_be_to_cpu_16(eth_hdr->ether_type); switch (ether_type) { case RTE_ETHER_TYPE_ARP: /* Intercept ARP packet and handle it. */ process_arp(cps_conf, iface, bufs[i], eth_hdr); break; case RTE_ETHER_TYPE_IPV6: { uint16_t pkt_len = rte_pktmbuf_data_len(bufs[i]); if (cps_pkt_is_nd_neighbor(iface, eth_hdr, pkt_len)) { /* Intercept ND packet and handle it. */ process_nd(cps_conf, iface, bufs[i], eth_hdr, pkt_len); break; } } /* FALLTHROUGH */ default: { /* * Forward all other packets to the interface, * adding a VLAN header if necessary. */ struct rte_ether_hdr *new_eth_hdr; uint16_t vlan_tag_be; if (!iface->vlan_insert) goto to_eth; /* Need to make room for a VLAN header. 
*/ new_eth_hdr = (struct rte_ether_hdr *) rte_pktmbuf_prepend(bufs[i], sizeof(struct rte_vlan_hdr)); if (unlikely(new_eth_hdr == NULL)) { G_LOG(ERR, "Can't add a VLAN header\n"); rte_pktmbuf_free(bufs[i]); continue; } memmove(new_eth_hdr, eth_hdr, sizeof(*new_eth_hdr)); vlan_tag_be = ether_type == RTE_ETHER_TYPE_IPV4 ? iface->ipv4_vlan_tag_be : iface->ipv6_vlan_tag_be; fill_vlan_hdr(new_eth_hdr, vlan_tag_be, ether_type); to_eth: forward_bufs[num_forward++] = bufs[i]; break; } } } num_tx = rte_eth_tx_burst(iface->id, tx_queue, forward_bufs, num_forward); if (unlikely(num_tx < num_forward)) rte_pktmbuf_free_bulk(&forward_bufs[num_tx], num_forward - num_tx); } static int cps_proc(void *arg) { struct cps_config *cps_conf = (struct cps_config *)arg; struct net_config *net_conf = cps_conf->net; struct gatekeeper_if *front_iface = &net_conf->front; struct gatekeeper_if *back_iface = &net_conf->back; struct cps_kni *front_kni = &cps_conf->front_kni; struct cps_kni *back_kni = &cps_conf->back_kni; /* * CAP_NET_ADMIN: allow RTNetlink communication between the CPS and * routing daemons. * CAP_SYS_MODULE: remove the rte_kni kernel module while exiting. */ cap_value_t caps[] = {CAP_NET_ADMIN, CAP_SYS_MODULE}; G_LOG(NOTICE, "The CPS block is running at tid = %u\n", gettid()); if (needed_caps(RTE_DIM(caps), caps) < 0) { G_LOG(ERR, "Could not set needed capabilities\n"); exiting = true; } while (likely(!exiting)) { /* * Read in IPv4 TCP packets that arrive directly * on the Gatekeeper interfaces. */ if (cps_conf->rx_method_front & RX_METHOD_NIC) { process_ingress(front_iface, front_kni, cps_conf->rx_queue_front, cps_conf->front_max_pkt_burst); } if (net_conf->back_iface_enabled && cps_conf->rx_method_back & RX_METHOD_NIC) { process_ingress(back_iface, back_kni, cps_conf->rx_queue_back, cps_conf->back_max_pkt_burst); } /* * Process any requests made to the CPS block. * The mailbox is used regardless of what RX * methods are used, since it handles requests * from the KNI. 
*/ process_reqs(cps_conf); /* * Read in packets from KNI interfaces, and * transmit to respective Gatekeeper interfaces. */ process_egress(cps_conf, front_iface, front_kni, cps_conf->tx_queue_front, cps_conf->front_max_pkt_burst); if (net_conf->back_iface_enabled) process_egress(cps_conf, back_iface, back_kni, cps_conf->tx_queue_back, cps_conf->back_max_pkt_burst); /* Periodically scan resolution requests from KNIs. */ rte_timer_manage(); /* Read in routing table updates and update LPM table. */ rd_process_events(cps_conf); } G_LOG(NOTICE, "The CPS block is exiting\n"); return cleanup_cps(); } int cps_submit_direct(struct rte_mbuf **pkts, unsigned int num_pkts, struct gatekeeper_if *iface) { struct cps_config *cps_conf = get_cps_conf(); struct cps_request *req = mb_alloc_entry(&cps_conf->mailbox); int ret; RTE_VERIFY(num_pkts <= cps_conf->mailbox_max_pkt_burst); if (req == NULL) { G_LOG(ERR, "%s: allocation of mailbox message failed\n", __func__); ret = -ENOMEM; goto free_pkts; } req->ty = CPS_REQ_DIRECT; req->u.direct.num_pkts = num_pkts; req->u.direct.iface = iface; rte_memcpy(req->u.direct.pkts, pkts, sizeof(*req->u.direct.pkts) * num_pkts); ret = mb_send_entry(&cps_conf->mailbox, req); if (ret < 0) { G_LOG(ERR, "%s: failed to enqueue message to mailbox\n", __func__); goto free_pkts; } return 0; free_pkts: rte_pktmbuf_free_bulk(pkts, num_pkts); return ret; } static int assign_cps_queue_ids(struct cps_config *cps_conf) { int ret; /* * Take the packets created for processing requests from mailbox * as well as the packets in the KNI into account. */ unsigned int total_pkt_burst = 2 * cps_conf->total_pkt_burst + cps_conf->mailbox_burst_size; unsigned int num_mbuf; /* The front NIC doesn't have hardware support. */ if (!cps_conf->net->front.rss) total_pkt_burst -= cps_conf->front_max_pkt_burst; /* The back NIC is enabled but doesn't have hardware support. 
*/ if (cps_conf->net->back_iface_enabled && !cps_conf->net->back.rss) total_pkt_burst -= cps_conf->back_max_pkt_burst; /* * Each KNI interface needs at least (cps_conf->kni_queue_size) packets * per queue. There are two queues per interface: 1 RX and 1 TX queues. */ total_pkt_burst += 2 * cps_conf->kni_queue_size; if (cps_conf->net->back_iface_enabled) total_pkt_burst += 2 * cps_conf->kni_queue_size; num_mbuf = calculate_mempool_config_para("cps", cps_conf->net, total_pkt_burst); cps_conf->mp = create_pktmbuf_pool("cps", cps_conf->lcore_id, num_mbuf); if (cps_conf->mp == NULL) { ret = -1; goto fail; } /* * CPS should only get its own RX queue if RSS is enabled, * even if ntuple filter is not enabled. * * If RSS is disabled, then the network configuration can * tell that it should ignore all other blocks' requests * for queues and just allocate one RX queue. * * If RSS is enabled, then CPS has already informed the * network configuration that it will be using a queue. * The network configuration will crash if CPS doesn't * configure that queue, so it still should, even if * ntuple filter is not supported and CPS will not use it. 
*/ if (cps_conf->net->front.rss) { ret = get_queue_id(&cps_conf->net->front, QUEUE_TYPE_RX, cps_conf->lcore_id, cps_conf->mp); if (ret < 0) goto fail; cps_conf->rx_queue_front = ret; } ret = get_queue_id(&cps_conf->net->front, QUEUE_TYPE_TX, cps_conf->lcore_id, NULL); if (ret < 0) goto fail; cps_conf->tx_queue_front = ret; if (cps_conf->net->back_iface_enabled) { if (cps_conf->net->back.rss) { ret = get_queue_id(&cps_conf->net->back, QUEUE_TYPE_RX, cps_conf->lcore_id, cps_conf->mp); if (ret < 0) goto fail; cps_conf->rx_queue_back = ret; } ret = get_queue_id(&cps_conf->net->back, QUEUE_TYPE_TX, cps_conf->lcore_id, NULL); if (ret < 0) goto fail; cps_conf->tx_queue_back = ret; } return 0; fail: G_LOG(ERR, "Cannot assign queues\n"); return ret; } static void cps_scan(__attribute__((unused)) struct rte_timer *timer, void *arg) { struct cps_config *cps_conf = (struct cps_config *)arg; if (arp_enabled(cps_conf->lls)) { struct arp_request *entry, *next; list_for_each_entry_safe(entry, next, &cps_conf->arp_requests, list) { if (entry->stale) { /* * It's possible that if this request * was recently satisfied the callback * has already been disabled, but it's * safe to issue an extra put_arp() here. */ put_arp((struct in_addr *)&entry->addr, cps_conf->lcore_id); list_del(&entry->list); rte_mempool_put(cps_conf->arp_mp, entry); } else entry->stale = true; } } if (nd_enabled(cps_conf->lls)) { struct nd_request *entry, *next; list_for_each_entry_safe(entry, next, &cps_conf->nd_requests, list) { if (entry->stale) { /* Same as above -- this may be unnecessary. */ put_nd((struct in6_addr *)entry->addr, cps_conf->lcore_id); list_del(&entry->list); rte_mempool_put(cps_conf->nd_mp, entry); } else entry->stale = true; } } } static int cps_stage1(void *arg) { struct cps_config *cps_conf = arg; int ret = assign_cps_queue_ids(cps_conf); if (unlikely(ret < 0)) cleanup_cps(); return ret; } /* * Match the packet if it fails to be classifed by ACL rules. 
* If it's a TCP packet, then submit it to the CPS block. * * Return values: 0 for successful match, and -ENOENT for no matching. */ static int match_tcp4(struct rte_mbuf *pkt, struct gatekeeper_if *iface) { const uint16_t BE_ETHER_TYPE_IPv4 = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *); struct rte_ipv4_hdr *ip4hdr; uint16_t ether_type_be = pkt_in_skip_l2(pkt, eth_hdr, (void **)&ip4hdr); size_t l2_len = pkt_in_l2_hdr_len(pkt); uint16_t minimum_size = l2_len + sizeof(struct rte_ipv4_hdr); if (unlikely(ether_type_be != BE_ETHER_TYPE_IPv4)) return -ENOENT; if (pkt->data_len < minimum_size) return -ENOENT; if (ip4hdr->dst_addr != iface->ip4_addr.s_addr) return -ENOENT; if (ip4hdr->next_proto_id != IPPROTO_TCP) return -ENOENT; return 0; } /* * Match the packet if it fails to be classifed by ACL rules. * If it's a TCP packet, then submit it to the CPS block. * * Return values: 0 for successful match, and -ENOENT for no matching. */ static int match_tcp6(struct rte_mbuf *pkt, struct gatekeeper_if *iface) { /* * The TCP header offset in terms of the * beginning of the IPv6 header. 
*/ int tcp_offset; uint8_t nexthdr; const uint16_t BE_ETHER_TYPE_IPv6 = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6); struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *); struct rte_ipv6_hdr *ip6hdr; uint16_t ether_type_be = pkt_in_skip_l2(pkt, eth_hdr, (void **)&ip6hdr); size_t l2_len = pkt_in_l2_hdr_len(pkt); uint16_t minimum_size = l2_len + sizeof(struct rte_ipv6_hdr); if (unlikely(ether_type_be != BE_ETHER_TYPE_IPv6)) return -ENOENT; if (pkt->data_len < minimum_size) return -ENOENT; if ((memcmp(ip6hdr->dst_addr, &iface->ip6_addr, sizeof(iface->ip6_addr)) != 0)) return -ENOENT; tcp_offset = ipv6_skip_exthdr(ip6hdr, pkt->data_len - l2_len, &nexthdr); if (tcp_offset < 0 || nexthdr != IPPROTO_TCP) return -ENOENT; return 0; } static int add_tcp_filters(struct gatekeeper_if *iface, uint16_t rx_queue, uint8_t *rx_method) { int ret; if (ipv4_if_configured(iface)) { ret = ipv4_pkt_filter_add(iface, iface->ip4_addr.s_addr, 0, 0, 0, 0, IPPROTO_TCP, rx_queue, cps_submit_direct, match_tcp4, rx_method); if (ret < 0) { G_LOG(ERR, "Could not add IPv4 TCP filter on %s iface\n", iface->name); return ret; } } if (ipv6_if_configured(iface)) { ret = ipv6_pkt_filter_add(iface, (rte_be32_t *)&iface->ip6_addr.s6_addr, 0, 0, 0, 0, IPPROTO_TCP, rx_queue, cps_submit_direct, match_tcp6, rx_method); if (ret < 0) { G_LOG(ERR, "Could not add IPv6 TCP filter on %s iface\n", iface->name); return ret; } } return 0; } static int cps_stage2(void *arg) { struct cps_config *cps_conf = arg; int ret; ret = add_tcp_filters(&cps_conf->net->front, cps_conf->rx_queue_front, &cps_conf->rx_method_front); if (ret < 0) { G_LOG(ERR, "Failed to add TCP filters on the front iface"); goto error; } ret = kni_create(&cps_conf->front_kni, &cps_conf->net->front, cps_conf->mp, cps_conf->kni_queue_size); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(): failed to create KNI for \"%s\" interface (errno=%i): %s\n", __func__, cps_conf->net->front.name, -ret, strerror(-ret)); goto error; } if 
(cps_conf->net->back_iface_enabled) { ret = add_tcp_filters(&cps_conf->net->back, cps_conf->rx_queue_back, &cps_conf->rx_method_back); if (ret < 0) { G_LOG(ERR, "Failed to add TCP filters on the back iface"); goto error; } ret = kni_create(&cps_conf->back_kni, &cps_conf->net->back, cps_conf->mp, cps_conf->kni_queue_size); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(): failed to create KNI for \"%s\" interface (errno=%i): %s\n", __func__, cps_conf->net->back.name, -ret, strerror(-ret)); goto error; } } ret = rd_event_sock_open(cps_conf); if (ret < 0) { G_LOG(ERR, "Failed to open routing daemon event socket\n"); goto error; } return 0; error: cleanup_cps(); return ret; } int run_cps(struct net_config *net_conf, struct gk_config *gk_conf, struct gt_config *gt_conf, struct cps_config *cps_conf, struct lls_config *lls_conf) { int ret; int ele_size; uint16_t front_inc, back_inc = 0; unsigned int socket_id = rte_lcore_to_socket_id(cps_conf->lcore_id); if (net_conf == NULL || (gk_conf == NULL && gt_conf == NULL) || cps_conf == NULL || lls_conf == NULL) { ret = -1; goto out; } log_ratelimit_state_init(cps_conf->lcore_id, cps_conf->log_ratelimit_interval_ms, cps_conf->log_ratelimit_burst, cps_conf->log_level, "CPS"); front_inc = cps_conf->front_max_pkt_burst; net_conf->front.total_pkt_burst += front_inc; if (net_conf->back_iface_enabled) { back_inc = cps_conf->back_max_pkt_burst; net_conf->back.total_pkt_burst += back_inc; } cps_conf->total_pkt_burst = front_inc + back_inc; ret = net_launch_at_stage1(net_conf, 1, 1, 1, 1, cps_stage1, cps_conf); if (ret < 0) goto burst; ret = launch_at_stage2(cps_stage2, cps_conf); if (ret < 0) goto stage1; ret = launch_at_stage3("cps", cps_proc, cps_conf, cps_conf->lcore_id); if (ret < 0) goto stage2; cps_conf->net = net_conf; cps_conf->lls = lls_conf; if (cps_conf->nl_pid == 0) { G_LOG(ERR, "Option nl_pid must be greater than 0\n"); goto stage3; } cps_conf->arp_mp = rte_mempool_create( "arp_request_pool", (1 << cps_conf->arp_max_entries_exp) - 
1, sizeof(struct arp_request), 0, 0, NULL, NULL, NULL, NULL, socket_id, MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET); if (cps_conf->arp_mp == NULL) { G_LOG(ERR, "Can't create mempool arp_request_pool at lcore %u\n", cps_conf->lcore_id); ret = -1; goto stage3; } cps_conf->nd_mp = rte_mempool_create( "nd_request_pool", (1 << cps_conf->nd_max_entries_exp) - 1, sizeof(struct nd_request), 0, 0, NULL, NULL, NULL, NULL, socket_id, MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET); if (cps_conf->nd_mp == NULL) { G_LOG(ERR, "Can't create mempool nd_request_pool at lcore %u\n", cps_conf->lcore_id); ret = -1; goto arp_mp; } if (gk_conf != NULL) { cps_conf->mailbox_max_pkt_burst = RTE_MAX(gk_conf->front_max_pkt_burst, gk_conf->back_max_pkt_burst); } if (gt_conf != NULL) { cps_conf->mailbox_max_pkt_burst = RTE_MAX(cps_conf->mailbox_max_pkt_burst, gt_conf->max_pkt_burst); } ele_size = RTE_MAX(sizeof(struct cps_request), offsetof(struct cps_request, end_of_header) + sizeof(struct cps_direct_req) + sizeof(struct rte_mbuf *) * cps_conf->mailbox_max_pkt_burst); ret = init_mailbox("cps_mb", cps_conf->mailbox_max_entries_exp, ele_size, cps_conf->mailbox_mem_cache_size, cps_conf->lcore_id, &cps_conf->mailbox); if (ret < 0) goto nd_mp; ret = rd_alloc_coro(cps_conf); if (ret < 0) { G_LOG(ERR, "Failed to allocate coroutines\n"); goto mailbox; } if (arp_enabled(cps_conf->lls)) INIT_LIST_HEAD(&cps_conf->arp_requests); if (nd_enabled(cps_conf->lls)) INIT_LIST_HEAD(&cps_conf->nd_requests); rte_timer_init(&cps_conf->scan_timer); ret = rte_timer_reset(&cps_conf->scan_timer, cps_conf->scan_interval_sec * rte_get_timer_hz(), PERIODICAL, cps_conf->lcore_id, cps_scan, cps_conf); if (ret < 0) { G_LOG(ERR, "Cannot set CPS scan timer\n"); goto coro; } if (gk_conf != NULL) gk_conf_hold(gk_conf); cps_conf->gk = gk_conf; if (gt_conf != NULL) gt_conf_hold(gt_conf); cps_conf->gt = gt_conf; return 0; coro: rd_free_coro(cps_conf); mailbox: destroy_mailbox(&cps_conf->mailbox); nd_mp: rte_mempool_free(cps_conf->nd_mp); arp_mp: 
rte_mempool_free(cps_conf->arp_mp); stage3: pop_n_at_stage3(1); stage2: pop_n_at_stage2(1); stage1: pop_n_at_stage1(1); burst: net_conf->front.total_pkt_burst -= front_inc; net_conf->back.total_pkt_burst -= back_inc; out: return ret; } ```
/content/code_sandbox/cps/main.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
10,093
```objective-c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #ifndef _GATEKEEPER_CPS_NETLINK_H_ #define _GATEKEEPER_CPS_NETLINK_H_ #include "gatekeeper_cps.h" /* Functions to handle interactions with the routing daemon. */ int rd_alloc_coro(struct cps_config *cps_conf); void rd_free_coro(struct cps_config *cps_conf); int rd_event_sock_open(struct cps_config *cps_conf); void rd_event_sock_close(struct cps_config *cps_conf); void rd_process_events(struct cps_config *cps_conf); #endif /* _GATEKEEPER_CPS_NETLINK_H_ */ ```
/content/code_sandbox/cps/rd.h
objective-c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
213
```objective-c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #ifndef _GATEKEEPER_CPS_KNI_H_ #define _GATEKEEPER_CPS_KNI_H_ #include "gatekeeper_net.h" #include "gatekeeper_cps.h" int kni_create(struct cps_kni *kni, const struct gatekeeper_if *iface, struct rte_mempool *mp, uint16_t queue_size); void kni_free(struct cps_kni *kni); static inline uint16_t kni_rx_burst(const struct cps_kni *kni, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) { return rte_eth_rx_burst(kni->cps_portid, 0, rx_pkts, nb_pkts); } static inline uint16_t kni_tx_burst(const struct cps_kni *kni, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { return rte_eth_tx_burst(kni->cps_portid, 0, tx_pkts, nb_pkts); } static inline const char * kni_get_krnname(const struct cps_kni *kni) { return kni->krn_name; } static inline unsigned int kni_get_ifindex(const struct cps_kni *kni) { return kni->krn_ifindex; } #endif /* _GATEKEEPER_CPS_KNI_H_ */ ```
/content/code_sandbox/cps/kni.h
objective-c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
380
```c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #include <libmnl/libmnl.h> #include <linux/rtnetlink.h> #include "gatekeeper_main.h" #include "kni.h" #define KNI_BUS_NAME "vdev" void kni_free(struct cps_kni *kni) { int ret; if (unlikely(kni->cps_name[0] == '\0')) return; ret = rte_eal_hotplug_remove(KNI_BUS_NAME, kni->cps_name); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(%s): failed to remove virtio_user port (errno=%i): %s\n", __func__, kni->cps_name, -ret, rte_strerror(-ret)); } kni->cps_name[0] = '\0'; } static int setup_dpdk_interface(struct cps_kni *kni, const struct gatekeeper_if *iface, struct rte_mempool *mp, uint16_t queue_size) { struct rte_eth_conf port_conf = { .rxmode = { .mtu = iface->mtu, .offloads = RTE_ETH_RX_OFFLOAD_SCATTER, }, }; int ret = rte_eth_dev_get_port_by_name(kni->cps_name, &kni->cps_portid); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(%s): cannot get port ID of \"%s\" (errno=%i): %s\n", __func__, iface->name, kni->cps_name, -ret, rte_strerror(-ret)); return ret; } ret = rte_eth_dev_configure(kni->cps_portid, 1, 1, &port_conf); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(%s): failed to configure port (errno=%i): %s\n", __func__, iface->name, -ret, rte_strerror(-ret)); return ret; } ret = rte_eth_rx_queue_setup(kni->cps_portid, 0, queue_size, mp->socket_id, NULL, mp); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(%s): failed to set up rx queue (errno=%i): %s\n", __func__, iface->name, -ret, rte_strerror(-ret)); return ret; } ret = rte_eth_tx_queue_setup(kni->cps_portid, 0, queue_size, mp->socket_id, NULL); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(%s): failed to set up tx queue (errno=%i): %s\n", 
__func__, iface->name, -ret, rte_strerror(-ret)); return ret; } ret = rte_eth_dev_start(kni->cps_portid); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(%s): failed to start port (errno=%i): %s\n", __func__, iface->name, -ret, rte_strerror(-ret)); return ret; } return 0; } static int modify_ipaddr(struct mnl_socket *nl, unsigned int cmd, int flags, int family, const void *ipaddr, uint8_t prefixlen, const char *kni_name, unsigned int kni_index) { char buf[MNL_SOCKET_BUFFER_SIZE]; struct nlmsghdr *nlh; struct ifaddrmsg *ifa; unsigned int seq; unsigned int portid = mnl_socket_get_portid(nl); int ret; nlh = mnl_nlmsg_put_header(buf); nlh->nlmsg_type = cmd; nlh->nlmsg_flags = flags|NLM_F_ACK; nlh->nlmsg_seq = seq = time(NULL); ifa = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifa)); ifa->ifa_family = family; ifa->ifa_prefixlen = prefixlen; ifa->ifa_scope = RT_SCOPE_UNIVERSE; ifa->ifa_index = kni_index; if (ifa->ifa_family == AF_INET) mnl_attr_put_u32(nlh, IFA_LOCAL, *(const uint32_t *)ipaddr); else if (likely(ifa->ifa_family == AF_INET6)) mnl_attr_put(nlh, IFA_LOCAL, 16, ipaddr); else { G_LOG(CRIT, "%s(): bug: address family (%i) not recognized\n", __func__, family); return -EINVAL; } ret = mnl_socket_sendto(nl, nlh, nlh->nlmsg_len); if (unlikely(ret < 0)) { ret = -errno; G_LOG(ERR, "%s(%s): mnl_socket_sendto() failed, cannot update IP address (family=%i, operation=%u, errno=%i): %s\n", __func__, kni_name, family, cmd, errno, strerror(errno)); return ret; } /* * We specified NLM_F_ACK to get an acknowledgement, so receive the * ACK and verify that the interface configuration message was valid * using the default libmnl callback for doing message verification. 
*/ ret = mnl_socket_recvfrom(nl, buf, sizeof(buf)); if (unlikely(ret == -1)) { ret = -errno; G_LOG(ERR, "%s(%s): mnl_socket_recvfrom() failed, cannot update IP address (family=%i, operation=%u, errno=%i): %s\n", __func__, kni_name, family, cmd, errno, strerror(errno)); return ret; } ret = mnl_cb_run(buf, ret, seq, portid, NULL, NULL); if (unlikely(ret == MNL_CB_ERROR)) { ret = -errno; G_LOG(ERR, "%s(%s): mnl_cb_run() failed, cannot update IP address (family=%i, operation=%u, errno=%i): %s\n", __func__, kni_name, family, cmd, errno, strerror(errno)); return ret; } return ret; } static inline int add_ipaddr(struct mnl_socket *nl, int family, const void *ipaddr, uint8_t prefixlen, const char *kni_name, unsigned int kni_index) { return modify_ipaddr(nl, RTM_NEWADDR, NLM_F_CREATE|NLM_F_REQUEST|NLM_F_EXCL, family, ipaddr, prefixlen, kni_name, kni_index); } /* Add global and link-local IPv4 and IPv6 addresses. */ static int config_ip_addrs(struct mnl_socket *nl, const char *kni_name, unsigned int kni_index, const struct gatekeeper_if *iface) { int ret; if (ipv4_if_configured(iface)) { ret = add_ipaddr(nl, AF_INET, &iface->ip4_addr, iface->ip4_addr_plen, kni_name, kni_index); if (unlikely(ret < 0)) return ret; } if (ipv6_if_configured(iface)) { ret = add_ipaddr(nl, AF_INET6, &iface->ip6_addr, iface->ip6_addr_plen, kni_name, kni_index); if (unlikely(ret < 0)) return ret; } return 0; } static int modify_link(struct mnl_socket *nl, const char *kni_name, unsigned int kni_index, uint32_t mtu, int if_up) { char buf[MNL_SOCKET_BUFFER_SIZE]; struct nlmsghdr *nlh; struct ifinfomsg *ifm; unsigned int seq, flags = 0; unsigned int portid = mnl_socket_get_portid(nl); int ret; if (if_up) flags |= IFF_UP; else flags &= ~IFF_UP; nlh = mnl_nlmsg_put_header(buf); nlh->nlmsg_type = RTM_NEWLINK; nlh->nlmsg_flags = NLM_F_REQUEST|NLM_F_ACK; nlh->nlmsg_seq = seq = time(NULL); ifm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifm)); ifm->ifi_family = AF_UNSPEC; ifm->ifi_index = kni_index; 
/* Only the IFF_UP bit is declared as changing (see ifi_change below). */ ifm->ifi_flags = flags; ifm->ifi_change = IFF_UP; mnl_attr_put_u32(nlh, IFLA_MTU, mtu); ret = mnl_socket_sendto(nl, nlh, nlh->nlmsg_len); if (unlikely(ret < 0)) { ret = -errno; G_LOG(ERR, "%s(%s): mnl_socket_sendto() failed, cannot bring KNI %s (errno=%i): %s\n", __func__, kni_name, if_up ? "up" : "down", errno, strerror(errno)); return ret; } ret = mnl_socket_recvfrom(nl, buf, sizeof(buf)); if (unlikely(ret == -1)) { ret = -errno; G_LOG(ERR, "%s(%s): mnl_socket_recvfrom() failed, cannot bring KNI %s (errno=%i): %s\n", __func__, kni_name, if_up ? "up" : "down", errno, strerror(errno)); return ret; } ret = mnl_cb_run(buf, ret, seq, portid, NULL, NULL); if (unlikely(ret == MNL_CB_ERROR)) { ret = -errno; G_LOG(ERR, "%s(%s): mnl_cb_run() failed, cannot bring KNI %s (errno=%i): %s\n", __func__, kni_name, if_up ? "up" : "down", errno, strerror(errno)); return ret; } return 0; }

/*
 * Configure the kernel side of the KNI: open a NETLINK_ROUTE socket,
 * assign the interface's IPv4/IPv6 addresses, set the MTU, and bring
 * the interface up. Returns 0 on success, negative value on failure.
 */
static int setup_kernel_interface(const struct cps_kni *kni, const struct gatekeeper_if *iface) { struct mnl_socket *nl; int ret; nl = mnl_socket_open(NETLINK_ROUTE); if (unlikely(nl == NULL)) { ret = -errno; G_LOG(ERR, "%s(%s): mnl_socket_open() failed (errno=%i): %s\n", __func__, kni->krn_name, errno, strerror(errno)); return ret; } /* NOTE(review): on failure this returns mnl_socket_bind()'s -1 as-is, not -errno like the other paths — confirm intended. */ ret = mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(%s): mnl_socket_bind() failed (errno=%i): %s\n", __func__, kni->krn_name, errno, strerror(errno)); goto close; } ret = config_ip_addrs(nl, kni->krn_name, kni->krn_ifindex, iface); if (unlikely(ret < 0)) goto close; /* Set MTU and bring interface up.
*/ ret = modify_link(nl, kni->krn_name, kni->krn_ifindex, iface->mtu, true); close: mnl_socket_close(nl); return ret; }

/*
 * Write the DPDK virtio_user port name for @port_id into @port_name
 * (at least IF_NAMESIZE bytes). Returns 0 on success, a negative value
 * on snprintf() failure, or -ENOSPC if the name would be truncated.
 */
static int cps_port_name(char *port_name, uint8_t port_id) { int ret = snprintf(port_name, IF_NAMESIZE, "virtio_user%u", port_id); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(port_id=%u): snprintf() failed (errno=%i): %s\n", __func__, port_id, -ret, strerror(-ret)); return ret; } if (unlikely(ret >= IF_NAMESIZE)) { G_LOG(ERR, "%s(port_id=%u): port name is too long (len=%i)\n", __func__, port_id, ret); return -ENOSPC; } return 0; }

/*
 * Write the kernel interface name ("kni_" + @origin_port_name) into
 * @port_name (at least IF_NAMESIZE bytes). Returns 0 on success,
 * a negative value on snprintf() failure, or -ENOSPC on truncation.
 */
static int kernel_port_name(char *port_name, const char *origin_port_name) { int ret = snprintf(port_name, IF_NAMESIZE, "kni_%s", origin_port_name); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(%s): snprintf() failed (errno=%i): %s\n", __func__, origin_port_name, -ret, strerror(-ret)); return ret; } if (unlikely(ret >= IF_NAMESIZE)) { G_LOG(ERR, "%s(%s): port name is too long (len=%i)\n", __func__, origin_port_name, ret); return -ENOSPC; } return 0; }

/*
 * Create the KNI: hot-plug a virtio-user port backed by /dev/vhost-net
 * (which creates the companion kernel interface), then configure both
 * the DPDK side and the kernel side of the device pair.
 * Returns 0 on success, negative value on failure.
 */
int kni_create(struct cps_kni *kni, const struct gatekeeper_if *iface, struct rte_mempool *mp, uint16_t queue_size) { char cps_name[IF_NAMESIZE], port_args[256]; int ret = cps_port_name(cps_name, iface->id); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(%s): cannot name virtio_user port (errno=%i): %s\n", __func__, iface->name, -ret, strerror(-ret)); goto out; } ret = kernel_port_name(kni->krn_name, iface->name); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(%s): cannot name kernel port (errno=%i): %s\n", __func__, iface->name, -ret, strerror(-ret)); goto out; } /* Get @port_args.
*/ ret = snprintf(port_args, sizeof(port_args), "path=/dev/vhost-net,queues=%u,queue_size=%u,iface=%s,mac=" RTE_ETHER_ADDR_PRT_FMT, 1, queue_size, kni->krn_name, RTE_ETHER_ADDR_BYTES(&iface->eth_addr)); if (unlikely(ret < 0)) { G_LOG(CRIT, "%s(%s): bug: snprintf() failed (errno=%i): %s\n", __func__, iface->name, -ret, strerror(-ret)); goto out; } if (unlikely(ret >= (int)sizeof(port_args))) { G_LOG(CRIT, "%s(%s): bug: port argument is too long (len=%i)\n", __func__, iface->name, ret); ret = -ENOSPC; goto out; } /* Hot-plug the virtio-user port; this also creates the kernel interface named @kni->krn_name. */ ret = rte_eal_hotplug_add(KNI_BUS_NAME, cps_name, port_args); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(%s): failed to initialize virtio-user port (errno=%i): %s\n", __func__, iface->name, -ret, rte_strerror(-ret)); goto out; } RTE_BUILD_BUG_ON(sizeof(kni->cps_name) != sizeof(cps_name)); strcpy(kni->cps_name, cps_name); /* * DPDK does not return the index of the kernel interface, and * the kernel allows other applications to rename any interface. * Therefore, there is potentially a race condition here. * To minimize the chance of being affected by this race condition, * obtain the index of the kernel interface as soon as possible. */ kni->krn_ifindex = if_nametoindex(kni->krn_name); if (unlikely(kni->krn_ifindex == 0)) { ret = -errno; G_LOG(ERR, "%s(%s): cannot get index for interface \"%s\" (errno=%i): %s\n", __func__, iface->name, kni->krn_name, errno, strerror(errno)); goto free_kni; } ret = setup_dpdk_interface(kni, iface, mp, queue_size); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(%s): failed to set up DPDK interface (errno=%i): %s\n", __func__, iface->name, -ret, strerror(-ret)); goto free_kni; } ret = setup_kernel_interface(kni, iface); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(%s): failed to set up kernel interface (errno=%i): %s\n", __func__, iface->name, -ret, strerror(-ret)); goto free_kni; } return 0; /* Undo the hot-plug on any failure past port creation. */ free_kni: kni_free(kni); out: return ret; } ```
/content/code_sandbox/cps/kni.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
3,628
```c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ #include <arpa/inet.h> #include <libmnl/libmnl.h> #include <linux/rtnetlink.h> #include <net/if.h> #include <net/if_arp.h> #include "kni.h" #include "rd.h" /* Defined in the kernel headers, but not included in net/if.h. */ #define IFF_LOWER_UP (1<<16) /* One parsed rtnetlink route message from the routing daemon. */ struct route_update { /* Type of route update: RTM_NEWROUTE or RTM_DELROUTE. */ int type; /* Address family of route: AF_INET or AF_INET6. */ int family; /* * Whether this update has all the fields and attributes * necessary to update the LPM table. */ int valid; /* Route type. See field rtm_type of struct rtmsg. */ uint8_t rt_type; /* Properties of the route to be saved in the FIB. */ struct route_properties rt_props; /* * Flags over the update request. * See field nlmsg_flags of struct nlmsghdr. */ unsigned int rt_flags; /* Output interface index of route. */ uint32_t oif_index; /* The network prefix (destination) of route. */ char ip_px_buf[INET6_ADDRSTRLEN + 4]; struct ip_prefix prefix_info; /* IP address of route gateway. */ char gw_buf[INET6_ADDRSTRLEN]; struct ipaddr gw; };

/* Close the Netlink socket shared with the routing daemon; safe to call when the socket is already closed. */
void rd_event_sock_close(struct cps_config *cps_conf) { if (cps_conf->rd_nl != NULL) { mnl_socket_close(cps_conf->rd_nl); cps_conf->rd_nl = NULL; } }

/*
 * Open and bind the NETLINK_ROUTE socket used to exchange rtnetlink
 * messages with the routing daemon.
 * Returns 0 on success, a negative value on failure.
 */
int rd_event_sock_open(struct cps_config *cps_conf) { struct mnl_socket *nl; int ret; nl = mnl_socket_open(NETLINK_ROUTE); if (nl == NULL) { G_LOG(ERR, "%s: mnl_socket_open: %s\n", __func__, strerror(errno)); return -1; } cps_conf->rd_nl = nl; /* * This binds the Netlink socket to port @nl_pid, * so the routing daemon may interact with Gatekeeper.
*/ ret = mnl_socket_bind(nl, 0, cps_conf->nl_pid); if (ret < 0) { G_LOG(ERR, "%s: mnl_socket_bind: %s\n", __func__, strerror(errno)); goto close; } return 0; close: rd_event_sock_close(cps_conf); return ret; }

/*
 * Look up the FIB entry for @update's exact prefix.
 * Caller must hold @ltbl->lock.
 * On success sets *@prefix_fib to the entry (or NULL if the prefix is
 * absent) and returns 0; returns a negative errno on failure.
 */
static int get_prefix_fib_locked(struct route_update *update, struct gk_lpm *ltbl, struct gk_fib **prefix_fib) { uint32_t fib_id; int ret; if (update->family == AF_INET) { ret = rib_is_rule_present(rib4_from_ltbl(ltbl), (uint8_t *)&update->prefix_info.addr.ip.v4.s_addr, update->prefix_info.len, &fib_id); if (ret < 0) { G_LOG(ERR, "%s(): IPv4 rib_is_rule_present(%s) failed (errno=%i): %s\n", __func__, update->prefix_info.str, -ret, strerror(-ret)); return ret; } if (ret == 1) { *prefix_fib = &ltbl->fib_tbl[fib_id]; return 0; } } else if (likely(update->family == AF_INET6)) { ret = rib_is_rule_present(rib6_from_ltbl(ltbl), update->prefix_info.addr.ip.v6.s6_addr, update->prefix_info.len, &fib_id); if (ret < 0) { G_LOG(ERR, "%s(): IPv6 rib_is_rule_present(%s) failed (errno=%i): %s\n", __func__, update->prefix_info.str, -ret, strerror(-ret)); return ret; } if (ret == 1) { *prefix_fib = &ltbl->fib_tbl6[fib_id]; return 0; } } else { G_LOG(ERR, "%s(): unknown address family %i\n", __func__, update->family); return -EAFNOSUPPORT; } RTE_VERIFY(ret == 0); *prefix_fib = NULL; return 0; }

/* Transfer control from the routing-daemon coroutine back to the root coroutine. */
static inline void rd_yield(struct cps_config *cps_conf) { coro_transfer(&cps_conf->coro_rd, &cps_conf->coro_root); }

/* Acquire @sl without stalling the CPS block: yield to the root coroutine while the lock is busy. */
static void spinlock_lock_with_yield(rte_spinlock_t *sl, struct cps_config *cps_conf) { int ret; while ((ret = rte_spinlock_trylock_tm(sl)) == 0) rd_yield(cps_conf); RTE_VERIFY(ret == 1); }

/* Return 0 if @prefix_fib may be deleted via rtnetlink, -EPERM otherwise. */
static int can_rd_del_route(struct route_update *update, struct gk_fib *prefix_fib) { /* * Protect grantor entries from configuration mistakes * in routing daemons.
*/ if (prefix_fib->action == GK_FWD_GRANTOR) { G_LOG(ERR, "Prefix %s cannot be updated via RTNetlink because it is a grantor entry; use the dynamic configuration block to update grantor entries\n", update->prefix_info.str); return -EPERM; } return 0; }

/*
 * Handle an RTM_NEWROUTE update: honor the NLM_F_EXCL/REPLACE/CREATE
 * semantics against the current FIB, support blackhole routes, and,
 * when RTA_OIF was absent (oif_index == 0), derive the output interface
 * from the gateway's neighbor entry.
 * Returns 0 on success, a negative errno on failure.
 */
static int new_route(struct route_update *update, struct cps_config *cps_conf) { struct gk_lpm *ltbl = &cps_conf->gk->lpm_tbl; struct gk_fib *prefix_fib; int ret; spinlock_lock_with_yield(&ltbl->lock, cps_conf); ret = get_prefix_fib_locked(update, ltbl, &prefix_fib); if (ret < 0) goto out; if (prefix_fib != NULL) { /* Prefix already exists: only a replace request may proceed. */ if ((update->rt_flags & NLM_F_EXCL) || !(update->rt_flags & NLM_F_REPLACE)) { ret = -EEXIST; goto out; } ret = can_rd_del_route(update, prefix_fib); if (ret < 0) goto out; /* Gatekeeper does not currently support multipath. */ if (update->rt_flags & NLM_F_APPEND) { G_LOG(WARNING, "%s(%s): flag NLM_F_APPEND is NOT supported\n", __func__, update->ip_px_buf); ret = -EOPNOTSUPP; goto out; } /* Replace = delete the old entry, then add the new one below. */ ret = del_fib_entry_numerical_locked(&update->prefix_info, cps_conf->gk); if (unlikely(ret < 0)) { G_LOG(ERR, "%s(%s): failed to remove prefix after successful lookup (errno=%i): %s\n", __func__, update->ip_px_buf, -ret, strerror(-ret)); goto out; } } else if (!(update->rt_flags & NLM_F_CREATE)) { ret = -ENOENT; goto out; } if (update->rt_type == RTN_BLACKHOLE) { ret = add_fib_entry_numerical_locked(&update->prefix_info, NULL, NULL, 0, GK_DROP, &update->rt_props, cps_conf->gk); goto out; } if (update->oif_index == 0) { /* * Find out where the gateway is a neighbor: * front or back network. */ struct gk_fib *gw_fib = NULL; uint32_t next_hop; /* * Obtain @gw_fib.
*/ if (update->family == AF_INET) { ret = rib_lookup(rib4_from_ltbl(ltbl), (uint8_t *)&update->gw.ip.v4.s_addr, &next_hop); if (ret < 0) { if (ret == -ENOENT) { G_LOG(WARNING, "%s(%s): there is no route to the gateway %s\n", __func__, update->ip_px_buf, update->gw_buf); } goto out; } gw_fib = &ltbl->fib_tbl[next_hop]; } else if (likely(update->family == AF_INET6)) { ret = rib_lookup(rib6_from_ltbl(ltbl), update->gw.ip.v6.s6_addr, &next_hop); if (ret < 0) { if (ret == -ENOENT) { G_LOG(WARNING, "%s(%s): there is no route to the gateway %s\n", __func__, update->ip_px_buf, update->gw_buf); } goto out; } gw_fib = &ltbl->fib_tbl6[next_hop]; } else { /* The execution should never reach here. */ G_LOG(CRIT, "%s(%s): bug: unknown family = %i\n", __func__, update->ip_px_buf, update->family); ret = -EINVAL; goto out; } RTE_VERIFY(gw_fib != NULL); /* Derive the output interface from the network on which the gateway is a neighbor. */ if (gw_fib->action == GK_FWD_NEIGHBOR_FRONT_NET) { update->oif_index = kni_get_ifindex(&cps_conf->front_kni); } else if (likely(gw_fib->action == GK_FWD_NEIGHBOR_BACK_NET)) { update->oif_index = kni_get_ifindex(&cps_conf->back_kni); } else { G_LOG(ERR, "%s(%s): the gateway %s is NOT a neighbor\n", __func__, update->ip_px_buf, update->gw_buf); ret = -EINVAL; goto out; } } /* Install a gateway route on whichever network the interface belongs to. */ if (update->oif_index == kni_get_ifindex(&cps_conf->front_kni)) { ret = add_fib_entry_numerical_locked(&update->prefix_info, NULL, &update->gw, 1, GK_FWD_GATEWAY_FRONT_NET, &update->rt_props, cps_conf->gk); goto out; } if (likely(update->oif_index == kni_get_ifindex(&cps_conf->back_kni))) { ret = add_fib_entry_numerical_locked(&update->prefix_info, NULL, &update->gw, 1, GK_FWD_GATEWAY_BACK_NET, &update->rt_props, cps_conf->gk); goto out; } G_LOG(ERR, "%s(%s): interface %u is neither the KNI front (%u) or KNI back (%u) interface\n", __func__, update->ip_px_buf, update->oif_index, kni_get_ifindex(&cps_conf->front_kni), kni_get_ifindex(&cps_conf->back_kni)); ret = -EINVAL; out: rte_spinlock_unlock_tm(&ltbl->lock); return ret; }

/*
 * Handle an RTM_DELROUTE update: remove the prefix's FIB entry,
 * refusing to touch grantor entries. Returns 0 on success, -ESRCH when
 * the prefix does not exist, or another negative errno on failure.
 */
static int del_route(struct route_update
*update, struct cps_config *cps_conf) { int ret; struct gk_fib *prefix_fib; struct gk_lpm *ltbl = &cps_conf->gk->lpm_tbl; spinlock_lock_with_yield(&ltbl->lock, cps_conf); ret = get_prefix_fib_locked(update, ltbl, &prefix_fib); if (ret < 0) goto out; if (prefix_fib == NULL) { ret = -ENOENT; goto out; } ret = can_rd_del_route(update, prefix_fib); if (ret < 0) goto out; ret = del_fib_entry_numerical_locked(&update->prefix_info, cps_conf->gk); out: rte_spinlock_unlock_tm(&ltbl->lock); /* * Although the Linux kernel uses ENOENT for similar situations (e.g. * when RTM_NEWROUTE tries to replace an entry that does not exist), * it uses ESRCH for RTM_DELROUTE. */ if (ret == -ENOENT) return -ESRCH; return ret; }

/*
 * Extract the IP address carried by attribute @attr_type of @tb into
 * @addr (when non-NULL) and render it as a string into @addr_buf.
 *
 * @addr_buf must be at least INET6_ADDRSTRLEN bytes long.
 * @addr can be NULL.
 * Returns 0 on success, negative errno on failure.
 */
static int __convert_ip_attr(int family, struct nlattr *tb[], enum rtattr_type_t attr_type, const char *attr_name, struct ipaddr *addr, char *addr_buf) { if (family == AF_INET) { const struct in_addr *addr4 = mnl_attr_get_payload(tb[attr_type]); if (addr) { addr->proto = RTE_ETHER_TYPE_IPV4; addr->ip.v4 = *addr4; } if (unlikely(inet_ntop(AF_INET, addr4, addr_buf, INET_ADDRSTRLEN) == NULL)) { int saved_errno = errno; G_LOG(ERR, "%s(%s): failed to convert an IPv4 address: %s\n", __func__, attr_name, strerror(errno)); return -saved_errno; } return 0; } if (likely(family == AF_INET6)) { const struct in6_addr *addr6 = mnl_attr_get_payload(tb[attr_type]); if (addr) { addr->proto = RTE_ETHER_TYPE_IPV6; addr->ip.v6 = *addr6; } if (unlikely(inet_ntop(AF_INET6, addr6, addr_buf, INET6_ADDRSTRLEN) == NULL)) { int saved_errno = errno; G_LOG(ERR, "%s(%s): failed to convert an IPv6 address: %s\n", __func__, attr_name, strerror(errno)); return -saved_errno; } return 0; } G_LOG(WARNING, "%s(%s): unknown address family %d\n", __func__, attr_name, family); return -EAFNOSUPPORT; } /* Wrapper that passes the stringified attribute type as @attr_name for log messages. */ #define convert_ip_attr(family, tb, attr_type, addr, addr_buf) \ __convert_ip_attr(family, tb, attr_type, #attr_type, addr,
addr_buf)

/*
 * Parse the rtnetlink attributes collected in @tb into @update.
 * Logs and ignores attributes Gatekeeper does not honor (RTA_MULTIPATH,
 * RTA_SRC, RTA_FLOW, RTA_PREFSRC). Sets @update->valid when the message
 * carries everything needed to update the LPM table.
 * Returns 0 on success, negative errno on failure.
 */
static int attr_get(struct route_update *update, int family, struct nlattr *tb[]) { char addr_buf[INET6_ADDRSTRLEN]; int ret; bool dst_present = false; bool gw_present = false; if (tb[RTA_MULTIPATH]) { /* * XXX #75 This is the attribute used to implement ECMP. * We should more closely parse this attribute and * return the appropriate information through * @update to Grantor, if we're running Grantor. * * Example usage: * * struct rtnexthop *rt = * mnl_attr_get_payload(tb[RTA_MULTIPATH]); */ G_LOG(WARNING, "cps update: the rtnetlink command has information (RTA_MULTIPATH) that we don't need or don't honor\n"); } if (tb[RTA_DST]) { ret = convert_ip_attr(family, tb, RTA_DST, &update->prefix_info.addr, addr_buf); if (ret < 0) return ret; /* Fill in prefix string. */ ret = snprintf(update->ip_px_buf, sizeof(update->ip_px_buf), "%s/%hhu", addr_buf, update->prefix_info.len); RTE_VERIFY(ret > 0 && ret < (int)sizeof(update->ip_px_buf)); update->prefix_info.str = update->ip_px_buf; G_LOG(DEBUG, "cps update: dst: %s\n", update->ip_px_buf); dst_present = true; } if (tb[RTA_SRC]) { ret = convert_ip_attr(family, tb, RTA_SRC, NULL, addr_buf); if (ret < 0) return ret; G_LOG(WARNING, "cps update: the rtnetlink command has information (RTA_SRC with IP address %s) that we don't need or don't honor\n", addr_buf); } if (tb[RTA_OIF]) { update->oif_index = mnl_attr_get_u32(tb[RTA_OIF]); G_LOG(DEBUG, "cps update: oif=%u\n", update->oif_index); } if (tb[RTA_FLOW]) { G_LOG(WARNING, "cps update: the rtnetlink command has information (RTA_FLOW with flow=%u) that we don't need or don't honor\n", mnl_attr_get_u32(tb[RTA_FLOW])); } if (tb[RTA_PREFSRC]) { ret = convert_ip_attr(family, tb, RTA_PREFSRC, NULL, addr_buf); if (ret < 0) return ret; G_LOG(WARNING, "cps update: the rtnetlink command has information (RTA_PREFSRC with IP address %s) that we don't need or don't honor\n", addr_buf); } if (tb[RTA_GATEWAY]) { ret = convert_ip_attr(family, tb, RTA_GATEWAY, &update->gw,
update->gw_buf); if (ret < 0) return ret; G_LOG(DEBUG, "cps update: gw: %s\n", update->gw_buf); gw_present = true; } if (tb[RTA_PRIORITY]) { update->rt_props.priority = mnl_attr_get_u32(tb[RTA_PRIORITY]); G_LOG(DEBUG, "cps update: priority = %u\n", update->rt_props.priority); } /* A delete only needs the destination; an add also needs a gateway unless it is a blackhole route. */ update->valid = dst_present && ( (update->type == RTM_DELROUTE) || (update->type == RTM_NEWROUTE && gw_present) || (update->type == RTM_NEWROUTE && update->rt_type == RTN_BLACKHOLE) ); return 0; }

/* mnl_attr_parse() callback: validate and collect the IPv4 route attributes Gatekeeper understands into @data (an attribute table). */
static int data_ipv4_attr_cb(const struct nlattr *attr, void *data) { const struct nlattr **tb = data; int type = mnl_attr_get_type(attr); /* Skip unsupported attribute in user-space. */ if (mnl_attr_type_valid(attr, RTA_MAX) < 0) return MNL_CB_OK; switch (type) { case RTA_MULTIPATH: if (mnl_attr_validate(attr, MNL_TYPE_NESTED) < 0) return MNL_CB_ERROR; break; case RTA_TABLE: case RTA_DST: case RTA_SRC: case RTA_OIF: case RTA_FLOW: case RTA_PREFSRC: case RTA_GATEWAY: case RTA_PRIORITY: if (mnl_attr_validate(attr, MNL_TYPE_U32) < 0) return MNL_CB_ERROR; break; default: /* Skip attributes we don't know about. */ return MNL_CB_OK; } tb[type] = attr; return MNL_CB_OK; }

/* mnl_attr_parse() callback: like data_ipv4_attr_cb(), but address-bearing attributes are validated as 16-byte IPv6 payloads. */
static int data_ipv6_attr_cb(const struct nlattr *attr, void *data) { const struct nlattr **tb = data; int type = mnl_attr_get_type(attr); /* Skip unsupported attribute in user-space. */ if (mnl_attr_type_valid(attr, RTA_MAX) < 0) return MNL_CB_OK; switch (type) { case RTA_MULTIPATH: if (mnl_attr_validate(attr, MNL_TYPE_NESTED) < 0) return MNL_CB_ERROR; break; case RTA_TABLE: case RTA_OIF: case RTA_FLOW: case RTA_PRIORITY: if (mnl_attr_validate(attr, MNL_TYPE_U32) < 0) return MNL_CB_ERROR; break; case RTA_DST: case RTA_SRC: case RTA_PREFSRC: case RTA_GATEWAY: if (mnl_attr_validate2(attr, MNL_TYPE_BINARY, sizeof(struct in6_addr)) < 0) return MNL_CB_ERROR; break; default: /* Skip attributes we don't know about.
*/ return MNL_CB_OK; } tb[type] = attr; return MNL_CB_OK; }

/* Non-blocking sendto() that yields to the root coroutine while the socket would block; returns sendto()'s result. */
static ssize_t sendto_with_yield(int sockfd, const void *buf, size_t len, const struct sockaddr *dest_addr, socklen_t addrlen, struct cps_config *cps_conf) { ssize_t ret; while ( ((ret = sendto(sockfd, buf, len, MSG_DONTWAIT, dest_addr, addrlen)) == -1) && (errno == EAGAIN || errno == EWOULDBLOCK) ) rd_yield(cps_conf); return ret; }

/*
 * Reply to routing-daemon request @req with an NLMSG_ERROR message
 * carrying @err (@err == 0 is a plain acknowledgement).
 * Failures are logged but not propagated.
 */
static void rd_send_err(const struct nlmsghdr *req, struct cps_config *cps_conf, int err) { struct nlmsghdr *rep; struct nlmsgerr *errmsg; char buf[MNL_SOCKET_BUFFER_SIZE]; struct sockaddr_nl rd_sa; unsigned int payload_len; unsigned int errmsg_len; memset(&rd_sa, 0, sizeof(rd_sa)); rd_sa.nl_family = AF_NETLINK; rd_sa.nl_pid = req->nlmsg_pid; rep = mnl_nlmsg_put_header(buf); rep->nlmsg_type = NLMSG_ERROR; rep->nlmsg_flags = 0; rep->nlmsg_seq = req->nlmsg_seq; rep->nlmsg_pid = cps_conf->nl_pid; /* * For acknowledgements, just send the struct nlmsgerr. * For errors, send the struct nlmsgerr and the payload. */ payload_len = 0; errmsg_len = sizeof(*errmsg); if (err) { payload_len += mnl_nlmsg_get_payload_len(req); errmsg_len += payload_len; } errmsg = mnl_nlmsg_put_extra_header(rep, errmsg_len); errmsg->error = err; /* Echo the offending request (header, plus payload on error) back to the daemon. */ memcpy(&errmsg->msg, req, sizeof(errmsg->msg) + payload_len); if (sendto_with_yield(mnl_socket_get_fd(cps_conf->rd_nl), rep, sizeof(*rep) + errmsg_len, (struct sockaddr *)&rd_sa, sizeof(rd_sa), cps_conf) < 0) { G_LOG(ERR, "sendto_with_yield: cannot send NLMSG_ERROR to daemon (pid=%u seq=%u): %s\n", req->nlmsg_pid, req->nlmsg_seq, strerror(errno)); } }

/* Attach RTA_PRIORITY to @reply when @priority is non-default. */
static inline void put_priority(struct nlmsghdr *reply, uint32_t priority) { /* * Not only is the default priority very common, * it does not need to be reported.
*/ if (likely(priority == 0)) return; mnl_attr_put_u32(reply, RTA_PRIORITY, priority); }

/*
 * Fill @reply with an RTM_NEWROUTE message describing @fib for prefix
 * @prefix/@prefix_len. On return, *@gw_addr points to the gateway to be
 * attached by the caller, or is NULL when the entry has no single
 * gateway (grantor, neighbor, drop, or unknown actions).
 */
static void rd_fill_getroute_reply(const void *prefix, const struct cps_config *cps_conf, struct nlmsghdr *reply, const struct gk_fib *fib, int family, uint32_t seq, uint8_t prefix_len, const struct ipaddr **gw_addr) { struct rtmsg *rm; reply->nlmsg_type = RTM_NEWROUTE; reply->nlmsg_flags = NLM_F_MULTI; reply->nlmsg_seq = seq; reply->nlmsg_pid = cps_conf->nl_pid; rm = mnl_nlmsg_put_extra_header(reply, sizeof(*rm)); rm->rtm_family = family; rm->rtm_dst_len = prefix_len; rm->rtm_src_len = 0; rm->rtm_tos = 0; rm->rtm_table = RT_TABLE_MAIN; rm->rtm_scope = RT_SCOPE_UNIVERSE; rm->rtm_flags = 0; switch (fib->action) { case GK_FWD_GRANTOR: mnl_attr_put_u32(reply, RTA_OIF, kni_get_ifindex(&cps_conf->back_kni)); rm->rtm_protocol = RTPROT_STATIC; rm->rtm_type = RTN_UNICAST; /* * Gateway will be filled in by the caller, since Grantor * entries can have multiple corresponding Grantors, each * with their own gateway.
*/ *gw_addr = NULL; break; case GK_FWD_GATEWAY_FRONT_NET: mnl_attr_put_u32(reply, RTA_OIF, kni_get_ifindex(&cps_conf->front_kni)); put_priority(reply, fib->u.gateway.props.priority); rm->rtm_protocol = fib->u.gateway.props.rt_proto; rm->rtm_type = RTN_UNICAST; *gw_addr = &fib->u.gateway.eth_cache->ip_addr; break; case GK_FWD_GATEWAY_BACK_NET: mnl_attr_put_u32(reply, RTA_OIF, kni_get_ifindex(&cps_conf->back_kni)); put_priority(reply, fib->u.gateway.props.priority); rm->rtm_protocol = fib->u.gateway.props.rt_proto; rm->rtm_type = RTN_UNICAST; *gw_addr = &fib->u.gateway.eth_cache->ip_addr; break; case GK_FWD_NEIGHBOR_FRONT_NET: mnl_attr_put_u32(reply, RTA_OIF, kni_get_ifindex(&cps_conf->front_kni)); rm->rtm_protocol = RTPROT_STATIC; rm->rtm_type = RTN_UNICAST; *gw_addr = NULL; break; case GK_FWD_NEIGHBOR_BACK_NET: mnl_attr_put_u32(reply, RTA_OIF, kni_get_ifindex(&cps_conf->back_kni)); rm->rtm_protocol = RTPROT_STATIC; rm->rtm_type = RTN_UNICAST; *gw_addr = NULL; break; case GK_DROP: put_priority(reply, fib->u.drop.props.priority); rm->rtm_protocol = fib->u.drop.props.rt_proto; rm->rtm_type = RTN_BLACKHOLE; *gw_addr = NULL; break; default: { /* * Things went bad, but keep going. */ char str_prefix[INET6_ADDRSTRLEN]; RTE_BUILD_BUG_ON(INET6_ADDRSTRLEN < INET_ADDRSTRLEN); /* Enter some generic values. */ rm->rtm_protocol = RTPROT_STATIC; rm->rtm_type = RTN_UNICAST; *gw_addr = NULL; if (unlikely(inet_ntop(family, prefix, str_prefix, sizeof(str_prefix)) == NULL)) { G_LOG(ERR, "%s(): failed to convert address of family=%i to a string (errno=%i): %s\n", __func__, family, errno, strerror(errno)); strcpy(str_prefix, "<ERROR>"); } G_LOG(CRIT, "%s(%s/%i): invalid FIB action (%u) in FIB", __func__, str_prefix, prefix_len, fib->action); break; } } }

/*
 * Send the Netlink messages accumulated in @batch to the routing daemon
 * at port @pid, appending an NLMSG_DONE message first when @done is
 * true; @batch is reset afterwards. Returns 0 on success, negative
 * errno on failure.
 */
static int rd_send_batch(struct cps_config *cps_conf, struct mnl_nlmsg_batch *batch, const char *daemon, uint32_t seq, uint32_t pid, int done) { /* Address of routing daemon.
*/ struct sockaddr_nl rd_sa; int ret = 0; /* NOTE(review): the local @done below shadows the int parameter @done. */ if (done) { struct nlmsghdr *done = mnl_nlmsg_put_header(mnl_nlmsg_batch_current(batch)); done->nlmsg_type = NLMSG_DONE; done->nlmsg_flags = NLM_F_MULTI; done->nlmsg_seq = seq; done->nlmsg_pid = cps_conf->nl_pid; if (!mnl_nlmsg_batch_next(batch)) { /* Send the *full* batch without the DONE message. */ ret = rd_send_batch(cps_conf, batch, daemon, seq, pid, false); if (ret < 0) return ret; /* Go on to send the DONE message. */ } } memset(&rd_sa, 0, sizeof(rd_sa)); rd_sa.nl_family = AF_NETLINK; rd_sa.nl_pid = pid; if (sendto_with_yield(mnl_socket_get_fd(cps_conf->rd_nl), mnl_nlmsg_batch_head(batch), mnl_nlmsg_batch_size(batch), (struct sockaddr *)&rd_sa, sizeof(rd_sa), cps_conf) < 0) { ret = -errno; G_LOG(ERR, "sendto_with_yield: cannot dump route batch to %s daemon (pid=%u seq=%u): %s\n", daemon, pid, seq, strerror(errno)); } mnl_nlmsg_batch_reset(batch); return ret; }

/* Attach @addr to @nlh as attribute @type, sized by the address's protocol. */
static void attr_put_ipaddr(struct nlmsghdr *nlh, uint16_t type, const struct ipaddr *addr) { if (addr->proto == RTE_ETHER_TYPE_IPV4) return mnl_attr_put_u32(nlh, type, addr->ip.v4.s_addr); if (likely(addr->proto == RTE_ETHER_TYPE_IPV6)) return mnl_attr_put(nlh, type, sizeof(addr->ip.v6), &addr->ip.v6); G_LOG(CRIT, "%s(): bug: unknown protocol %i\n", __func__, addr->proto); }

/* Attach a RIB address to @nlh as attribute @type, sized by @family. */
static void attr_put_rib_addr(struct nlmsghdr *nlh, uint16_t type, int family, rib_address_t address_no) { if (family == AF_INET) return mnl_attr_put_u32(nlh, type, ipv4_from_rib_addr(address_no)); if (likely(family == AF_INET6)) return mnl_attr_put(nlh, type, sizeof(struct in6_addr), &address_no); G_LOG(CRIT, "%s(): bug: unknown family %i\n", __func__, family); }

/*
 * Dump every rule of @rib (with its FIB entry from @fib_table) to the
 * routing daemon as RTM_NEWROUTE messages, batching replies and holding
 * @lock only while touching RIB/FIB state. The final partial batch is
 * left in @batch for the caller to send (with NLMSG_DONE).
 * Returns 0 on success, negative errno on failure.
 */
static int rd_getroute_family(const char *daemon, struct cps_config *cps_conf, const struct rib_head *rib, const struct gk_fib *fib_table, rte_spinlock_t *lock, int family, struct mnl_nlmsg_batch *batch, const struct nlmsghdr *req) { struct rib_longer_iterator_state state; int ret; spinlock_lock_with_yield(lock, cps_conf); ret =
rib_longer_iterator_state_init(&state, rib, NULL, 0, false); if (unlikely(ret < 0)) { rte_spinlock_unlock_tm(lock); G_LOG(ERR, "%s(): failed to initialize the %s RIB iterator (errno=%i): %s\n", __func__, daemon, -ret, strerror(-ret)); return ret; } while (true) { struct rib_iterator_rule rule; struct nlmsghdr *reply; const struct gk_fib *fib; const struct ipaddr *gw_addr; ret = rib_longer_iterator_next(&state, &rule); if (unlikely(ret < 0)) { rib_longer_iterator_end(&state); rte_spinlock_unlock_tm(lock); /* -ENOENT means the iterator is exhausted: normal termination. */ if (unlikely(ret != -ENOENT)) { G_LOG(ERR, "%s(): %s RIB iterator failed (errno=%i): %s\n", __func__, daemon, -ret, strerror(-ret)); return ret; } return 0; } reply = mnl_nlmsg_put_header(mnl_nlmsg_batch_current(batch)); fib = &fib_table[rule.next_hop]; rd_fill_getroute_reply(&rule.address_no, cps_conf, reply, fib, family, req->nlmsg_seq, rule.depth, &gw_addr); /* Add address. */ attr_put_rib_addr(reply, RTA_DST, family, rule.address_no); if (fib->action == GK_FWD_GRANTOR) { unsigned int i; /* Grantor entries report one gateway per grantor in the set. */ for (i = 0; i < fib->u.grantor.set->num_entries; i++) { gw_addr = &fib->u.grantor.set->entries[i] .eth_cache->ip_addr; attr_put_ipaddr(reply, RTA_GATEWAY, gw_addr); } } else if (gw_addr != NULL) { /* Only report gateway for main routes. */ attr_put_ipaddr(reply, RTA_GATEWAY, gw_addr); } if (!mnl_nlmsg_batch_next(batch)) { /* * Do not access @fib or any FIB-related variable * without the lock. */ rte_spinlock_unlock_tm(lock); ret = rd_send_batch(cps_conf, batch, daemon, req->nlmsg_seq, req->nlmsg_pid, false); if (unlikely(ret < 0)) { rib_longer_iterator_end(&state); return ret; } /* * Obtain the lock when starting a new Netlink batch. * For the last batch, which won't be sent in * this function, the lock will be released at the end.
*/ spinlock_lock_with_yield(lock, cps_conf); } } }

/*
 * Handle an RTM_GETROUTE dump request from the routing daemon: reply
 * with every IPv4 and/or IPv6 FIB entry (per the requested family),
 * terminated by NLMSG_DONE. The request outcome is stored in *@err;
 * MNL_CB_OK is always returned so callback processing continues.
 */
static int rd_getroute(const struct nlmsghdr *req, struct cps_config *cps_conf, int *err) { /* * Buffer length set according to libmnl documentation: * the buffer that you have to use to store the batch must be * double of MNL_SOCKET_BUFFER_SIZE to ensure that the last * message (message N+1) that did not fit into the batch is * written inside valid memory boundaries. */ char buf[2 * MNL_SOCKET_BUFFER_SIZE]; struct mnl_nlmsg_batch *batch; struct gk_lpm *ltbl = &cps_conf->gk->lpm_tbl; const char *family_str; int family; if (mnl_nlmsg_get_payload_len(req) < sizeof(struct rtgenmsg)) { G_LOG(ERR, "Not enough room in CPS GETROUTE message from routing daemon in %s\n", __func__); *err = -EINVAL; goto out; } family = ((struct rtgenmsg *)mnl_nlmsg_get_payload(req))->rtgen_family; switch (family) { case AF_INET: family_str = "IPv4"; break; case AF_INET6: family_str = "IPv6"; break; case AF_UNSPEC: family_str = "IPV4/IPv6"; break; case AF_MPLS: family_str = "MPLS"; break; default: G_LOG(ERR, "Unsupported address family type (%d) in %s\n", family, __func__); *err = -EAFNOSUPPORT; goto out; } batch = mnl_nlmsg_batch_start(buf, MNL_SOCKET_BUFFER_SIZE); if (batch == NULL) { G_LOG(ERR, "Failed to allocate a batch for a GETROUTE reply\n"); *err = -ENOMEM; goto out; } if (family == AF_INET || family == AF_UNSPEC) { if (!ipv4_configured(cps_conf->net)) { if (family == AF_UNSPEC) goto ipv6; else { *err = -EAFNOSUPPORT; goto free_batch; } } *err = rd_getroute_family("IPv4", cps_conf, rib4_from_ltbl(ltbl), ltbl->fib_tbl, &ltbl->lock, AF_INET, batch, req); if (*err < 0) goto free_batch; } ipv6: if (family == AF_INET6 || family == AF_UNSPEC) { if (!ipv6_configured(cps_conf->net)) { if (family == AF_UNSPEC) goto send; else { *err = -EAFNOSUPPORT; goto free_batch; } } *err = rd_getroute_family("IPv6", cps_conf, rib6_from_ltbl(ltbl), ltbl->fib_tbl6, &ltbl->lock, AF_INET6, batch, req); if (*err < 0) goto free_batch; } send: /* In the case of
no entries, the only message sent is NLMSG_DONE. */ *err = rd_send_batch(cps_conf, batch, family_str, req->nlmsg_seq, req->nlmsg_pid, true); free_batch: mnl_nlmsg_batch_stop(batch); out: return MNL_CB_OK; }

/*
 * Append one RTM_NEWLINK reply describing the KNI @kni_name
 * (index @kni_index, MTU @kni_mtu, up and running) to @batch.
 */
static void rd_fill_getlink_reply(const struct cps_config *cps_conf, struct mnl_nlmsg_batch *batch, const char *kni_name, unsigned int kni_index, unsigned int kni_mtu, uint32_t seq) { struct nlmsghdr *reply; struct ifinfomsg *ifim; reply = mnl_nlmsg_put_header(mnl_nlmsg_batch_current(batch)); reply->nlmsg_type = RTM_NEWLINK; reply->nlmsg_flags = NLM_F_MULTI; reply->nlmsg_seq = seq; reply->nlmsg_pid = cps_conf->nl_pid; ifim = mnl_nlmsg_put_extra_header(reply, sizeof(*ifim)); ifim->ifi_family = AF_UNSPEC; ifim->ifi_type = ARPHRD_ETHER; ifim->ifi_index = kni_index; ifim->ifi_flags = IFF_UP|IFF_LOWER_UP; ifim->ifi_change = 0xFFFFFFFF; mnl_attr_put_strz(reply, IFLA_IFNAME, kni_name); mnl_attr_put_u32(reply, IFLA_MTU, kni_mtu); }

/*
 * Handle an RTM_GETLINK dump request: reply with the front KNI and,
 * when enabled, the back KNI, terminated by NLMSG_DONE. The request
 * outcome is stored in *@err; MNL_CB_OK is always returned.
 */
static int rd_getlink(const struct nlmsghdr *req, struct cps_config *cps_conf, int *err) { char buf[2 * MNL_SOCKET_BUFFER_SIZE]; struct mnl_nlmsg_batch *batch; batch = mnl_nlmsg_batch_start(buf, MNL_SOCKET_BUFFER_SIZE); if (batch == NULL) { G_LOG(ERR, "Failed to allocate a batch for a GETLINK reply\n"); *err = -ENOMEM; goto out; } rd_fill_getlink_reply(cps_conf, batch, kni_get_krnname(&cps_conf->front_kni), kni_get_ifindex(&cps_conf->front_kni), cps_conf->net->front.mtu, req->nlmsg_seq); if (!mnl_nlmsg_batch_next(batch)) { /* Send whatever was in the batch, if anything.
*/ *err = rd_send_batch(cps_conf, batch, "LINK", req->nlmsg_seq, req->nlmsg_pid, false); if (*err < 0) goto free_batch; } if (cps_conf->net->back_iface_enabled) { rd_fill_getlink_reply(cps_conf, batch, kni_get_krnname(&cps_conf->back_kni), kni_get_ifindex(&cps_conf->back_kni), cps_conf->net->back.mtu, req->nlmsg_seq); if (!mnl_nlmsg_batch_next(batch)) { *err = rd_send_batch(cps_conf, batch, "LINK", req->nlmsg_seq, req->nlmsg_pid, false); if (*err < 0) goto free_batch; } } *err = rd_send_batch(cps_conf, batch, "LINK", req->nlmsg_seq, req->nlmsg_pid, true); free_batch: mnl_nlmsg_batch_stop(batch); out: return MNL_CB_OK; }

/*
 * Handle an RTM_NEWROUTE/RTM_DELROUTE request from the routing daemon:
 * parse header and attributes into a struct route_update and apply it
 * via new_route()/del_route(). The request outcome is stored in *@err;
 * MNL_CB_OK is always returned.
 */
static int rd_modroute(const struct nlmsghdr *req, struct cps_config *cps_conf, int *err) { struct nlattr *tb[__RTA_MAX] = {}; struct rtmsg *rm = mnl_nlmsg_get_payload(req); struct route_update update; if (unlikely(cps_conf->gk == NULL)) { /* * Grantor only runs CPS for ECMP support and * shouldn't be receiving route updates. */ G_LOG(WARNING, "The system is running as Grantor, and there shouldn't be any rtnetlink message processed under this configuration while receiving route update messages\n"); *err = -EOPNOTSUPP; goto out; } G_LOG(DEBUG, "cps update: [%s] family=%u dst_len=%u src_len=%u tos=%u table=%u protocol=%u scope=%u type=%u flags=%x\n", req->nlmsg_type == RTM_NEWROUTE ? "NEW" : "DEL", rm->rtm_family, rm->rtm_dst_len, rm->rtm_src_len, rm->rtm_tos, rm->rtm_table, rm->rtm_protocol, rm->rtm_scope, rm->rtm_type, rm->rtm_flags); memset(&update, 0, sizeof(update)); update.valid = false; update.type = req->nlmsg_type; update.family = rm->rtm_family; /* Destination prefix length, e.g., 24 or 32 for IPv4. */ update.prefix_info.len = rm->rtm_dst_len; /* Default to an invalid index number. */ update.oif_index = 0; /* Route type. */ update.rt_type = rm->rtm_type; /* Route origin (routing daemon). */ update.rt_props.rt_proto = rm->rtm_protocol; /* Default route priority. */ update.rt_props.priority = 0; /* * Flags over the update request.
* Example: NLM_F_REQUEST|NLM_F_ACK|NLM_F_REPLACE|NLM_F_CREATE */ update.rt_flags = req->nlmsg_flags; switch (rm->rtm_family) { case AF_INET: if (!ipv4_configured(cps_conf->net)) { *err = -EAFNOSUPPORT; goto out; } mnl_attr_parse(req, sizeof(*rm), data_ipv4_attr_cb, tb); *err = attr_get(&update, rm->rtm_family, tb); if (*err) goto out; break; case AF_INET6: if (!ipv6_configured(cps_conf->net)) { *err = -EAFNOSUPPORT; goto out; } mnl_attr_parse(req, sizeof(*rm), data_ipv6_attr_cb, tb); *err = attr_get(&update, rm->rtm_family, tb); if (*err) goto out; break; default: G_LOG(NOTICE, "Unrecognized family in netlink event: %u\n", rm->rtm_family); *err = -EAFNOSUPPORT; goto out; } if (update.valid) { if (update.type == RTM_NEWROUTE) { *err = new_route(&update, cps_conf); } else if (likely(update.type == RTM_DELROUTE)) { *err = del_route(&update, cps_conf); } else { G_LOG(WARNING, "Receiving an unexpected update rule with type = %d\n", update.type); *err = -EOPNOTSUPP; } } else *err = -EINVAL; out: return MNL_CB_OK; } static void rd_fill_getaddr_reply(const struct cps_config *cps_conf, struct mnl_nlmsg_batch *batch, struct gatekeeper_if *iface, uint8_t family, unsigned int kni_index, uint32_t seq) { struct nlmsghdr *reply; struct ifaddrmsg *ifam; reply = mnl_nlmsg_put_header(mnl_nlmsg_batch_current(batch)); reply->nlmsg_type = RTM_NEWADDR; reply->nlmsg_flags = NLM_F_MULTI; reply->nlmsg_seq = seq; reply->nlmsg_pid = cps_conf->nl_pid; ifam = mnl_nlmsg_put_extra_header(reply, sizeof(*ifam)); ifam->ifa_family = family; ifam->ifa_flags = IFA_F_PERMANENT; ifam->ifa_scope = RT_SCOPE_UNIVERSE; ifam->ifa_index = kni_index; /* * The exact meaning of IFA_LOCAL and IFA_ADDRESS depend * on the address family being used and the device type. * For broadcast devices (like the interfaces we use), * for IPv4 we specify both and they are used interchangeably. * For IPv6, only IFA_ADDRESS needs to be set. 
*/ if (family == AF_INET) { mnl_attr_put_u32(reply, IFA_LOCAL, iface->ip4_addr.s_addr); mnl_attr_put_u32(reply, IFA_ADDRESS, iface->ip4_addr.s_addr); ifam->ifa_prefixlen = iface->ip4_addr_plen; } else if (likely(family == AF_INET6)) { mnl_attr_put(reply, IFA_ADDRESS, sizeof(iface->ip6_addr), &iface->ip6_addr); ifam->ifa_prefixlen = iface->ip6_addr_plen; } else { rte_panic("Invalid address family (%hhu) in request while being processed by CPS block in %s\n", family, __func__); } } static int rd_getaddr_iface(struct cps_config *cps_conf, struct mnl_nlmsg_batch *batch, struct gatekeeper_if *iface, uint8_t family, unsigned int kni_index, uint32_t seq, uint32_t pid) { int ret = 0; if ((family == AF_INET || family == AF_UNSPEC) && ipv4_if_configured(iface)) { rd_fill_getaddr_reply(cps_conf, batch, iface, AF_INET, kni_index, seq); if (!mnl_nlmsg_batch_next(batch)) { /* Send whatever was in the batch, if anything. */ ret = rd_send_batch(cps_conf, batch, "IPv4", seq, pid, false); if (ret < 0) return ret; } } if ((family == AF_INET6 || family == AF_UNSPEC) && ipv6_if_configured(iface)) { rd_fill_getaddr_reply(cps_conf, batch, iface, AF_INET6, kni_index, seq); if (!mnl_nlmsg_batch_next(batch)) { /* Send whatever was in the batch, if anything. 
*/ ret = rd_send_batch(cps_conf, batch, "IPv6", seq, pid, false); if (ret < 0) return ret; } } return ret; } static int rd_getaddr(const struct nlmsghdr *req, struct cps_config *cps_conf, int *err) { char buf[2 * MNL_SOCKET_BUFFER_SIZE]; struct mnl_nlmsg_batch *batch; struct net_config *net_conf = cps_conf->net; int family; const char *family_str; if (mnl_nlmsg_get_payload_len(req) < sizeof(struct rtgenmsg)) { G_LOG(ERR, "Not enough room in CPS GETADDR message from routing daemon in %s\n", __func__); *err = -EINVAL; goto out; } family = ((struct rtgenmsg *)mnl_nlmsg_get_payload(req))->rtgen_family; switch (family) { case AF_INET: family_str = "IPv4"; break; case AF_INET6: family_str = "IPv6"; break; case AF_UNSPEC: family_str = "IPV4/IPv6"; break; default: G_LOG(ERR, "Unsupported address family type (%d) in %s\n", family, __func__); *err = -EAFNOSUPPORT; goto out; } batch = mnl_nlmsg_batch_start(buf, MNL_SOCKET_BUFFER_SIZE); if (batch == NULL) { G_LOG(ERR, "Failed to allocate a batch for a GETADDR reply\n"); *err = -ENOMEM; goto out; } *err = rd_getaddr_iface(cps_conf, batch, &net_conf->front, family, kni_get_ifindex(&cps_conf->front_kni), req->nlmsg_seq, req->nlmsg_pid); if (*err < 0) goto free_batch; if (net_conf->back_iface_enabled) { *err = rd_getaddr_iface(cps_conf, batch, &net_conf->back, family, kni_get_ifindex(&cps_conf->back_kni), req->nlmsg_seq, req->nlmsg_pid); if (*err < 0) goto free_batch; } *err = rd_send_batch(cps_conf, batch, family_str, req->nlmsg_seq, req->nlmsg_pid, true); free_batch: mnl_nlmsg_batch_stop(batch); out: return MNL_CB_OK; } static int rd_cb(const struct nlmsghdr *req, void *arg) { struct cps_config *cps_conf = arg; int ret = MNL_CB_OK; int err; /* Only requests should be received here. 
*/ if (!(req->nlmsg_flags & NLM_F_REQUEST)) { err = -EINVAL; goto out; } switch (req->nlmsg_type) { case RTM_NEWROUTE: /* FALLTHROUGH */ case RTM_DELROUTE: ret = rd_modroute(req, cps_conf, &err); break; case RTM_GETROUTE: ret = rd_getroute(req, cps_conf, &err); break; case RTM_GETLINK: ret = rd_getlink(req, cps_conf, &err); break; case RTM_GETADDR: ret = rd_getaddr(req, cps_conf, &err); break; default: G_LOG(NOTICE, "Unrecognized netlink message type: %u\n", req->nlmsg_type); err = -EOPNOTSUPP; break; } out: if ((req->nlmsg_flags & NLM_F_ACK) || err) rd_send_err(req, cps_conf, err); return ret; } /* * Receive a netlink message with the ability to pass flags to recvmsg(). * This function is an adaptation of mnl_socket_recvfrom() from * path_to_url#n263, which does * not allow flags. */ static ssize_t mnl_socket_recvfrom_flags(const struct mnl_socket *nl, void *buf, size_t bufsiz, int flags) { ssize_t ret; struct sockaddr_nl addr; struct iovec iov = { .iov_base = buf, .iov_len = bufsiz, }; struct msghdr msg = { .msg_name = &addr, .msg_namelen = sizeof(struct sockaddr_nl), .msg_iov = &iov, .msg_iovlen = 1, .msg_control = NULL, .msg_controllen = 0, .msg_flags = 0, }; ret = recvmsg(mnl_socket_get_fd(nl), &msg, flags); if (ret == -1) return ret; if (msg.msg_flags & MSG_TRUNC) { errno = ENOSPC; return -1; } if (msg.msg_namelen != sizeof(struct sockaddr_nl)) { errno = EINVAL; return -1; } return ret; } void rd_process_events(struct cps_config *cps_conf) { coro_transfer(&cps_conf->coro_root, &cps_conf->coro_rd); } static void __rd_process_events(struct cps_config *cps_conf) { unsigned int update_pkts = cps_conf->max_rt_update_pkts; do { char buf[MNL_SOCKET_BUFFER_SIZE]; int ret = mnl_socket_recvfrom_flags(cps_conf->rd_nl, buf, sizeof(buf), MSG_DONTWAIT); if (ret == -1) { if (errno != EAGAIN && errno != EWOULDBLOCK) G_LOG(ERR, "%s: recv: %s\n", __func__, strerror(errno)); break; } ret = mnl_cb_run(buf, ret, 0, 0, rd_cb, cps_conf); if (ret != MNL_CB_OK) break; update_pkts--; 
} while (update_pkts > 0); } static void cps_co_rd_main(void *arg) { struct cps_config *cps_conf = arg; while (true) { __rd_process_events(cps_conf); rd_yield(cps_conf); } rte_panic("%s() terminated\n", __func__); } int rd_alloc_coro(struct cps_config *cps_conf) { const unsigned int stack_size_byte = 1024 * 1024; /* 1MB */ const unsigned int stack_size_ptr = stack_size_byte / sizeof(void *); if (unlikely(coro_stack_alloc(&cps_conf->coro_rd_stack, stack_size_ptr) != 1)) { G_LOG(ERR, "Failed to allocate stack for RD coroutine\n"); return -1; } coro_create(&cps_conf->coro_root, NULL, NULL, NULL, 0); coro_create(&cps_conf->coro_rd, cps_co_rd_main, cps_conf, cps_conf->coro_rd_stack.sptr, cps_conf->coro_rd_stack.ssze); return 0; } void rd_free_coro(struct cps_config *cps_conf) { coro_destroy(&cps_conf->coro_rd); coro_destroy(&cps_conf->coro_root); coro_stack_free(&cps_conf->coro_rd_stack); } ```
/content/code_sandbox/cps/rd.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
11,636
```c
/*
 * Gatekeeper - DDoS protection system.
 *
 * This program is free software: you can redistribute it and/or modify
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *
 * along with this program. If not, see <path_to_url
 */

/* This BPF program mimics the state GK_DECLINED of a flow entry. */

#include <stdint.h>

#include <rte_common.h>

#include "gatekeeper_flow_bpf.h"

/* Flow-entry initialization: a declined flow keeps no state. */
SEC("init")
uint64_t declined_init(struct gk_bpf_init_ctx *ctx)
{
	RTE_SET_USED(ctx);
	return GK_BPF_INIT_RET_OK;
}

/* Per-packet handler: unconditionally decline every packet of the flow. */
SEC("pkt")
uint64_t declined_pkt(struct gk_bpf_pkt_ctx *ctx)
{
	RTE_SET_USED(ctx);
	return GK_BPF_PKT_RET_DECLINE;
}
```
/content/code_sandbox/bpf/declined.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
209
```c
/*
 * Gatekeeper - DDoS protection system.
 *
 * This program is free software: you can redistribute it and/or modify
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *
 * along with this program. If not, see <path_to_url
 */

/*
 * This BPF program limits traffic with two rate limits:
 * primary and secondary limits or channels.
 * All traffic is subject to the primary limit.
 * Traffic that fits the primary limit, but is not desirable
 * (e.g. fragmented packets) is subject to the second limit.
 */

#include <arpa/inet.h>

#include "grantedv2.h"

/* Initialize the per-flow rate-limit state (see grantedv2.h). */
SEC("init")
uint64_t grantedv2_init(struct gk_bpf_init_ctx *ctx)
{
	return grantedv2_init_inline(ctx);
}

/*
 * Per-packet handler: charge every packet to the primary budget;
 * additionally charge fragmented and non-UDP/TCP packets to the
 * secondary budget before forwarding.
 */
SEC("pkt")
uint64_t grantedv2_pkt(struct gk_bpf_pkt_ctx *ctx)
{
	struct grantedv2_state *state =
		(struct grantedv2_state *)pkt_ctx_to_cookie(ctx);
	uint32_t pkt_len = pkt_ctx_to_pkt(ctx)->pkt_len;
	uint64_t ret = grantedv2_pkt_begin(ctx, state, pkt_len);

	if (ret != GK_BPF_PKT_RET_FORWARD)
		return ret;

	if (ctx->fragmented || (ctx->l4_proto != IPPROTO_UDP &&
			ctx->l4_proto != IPPROTO_TCP)) {
		/* Secondary budget. */
		ret = grantedv2_pkt_test_2nd_limit(state, pkt_len);
		if (ret != GK_BPF_PKT_RET_FORWARD)
			return ret;
	}

	return grantedv2_pkt_end(ctx, state);
}
```
/content/code_sandbox/bpf/grantedv2.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
378
```objective-c
/*
 * Gatekeeper - DDoS protection system.
 *
 * This program is free software: you can redistribute it and/or modify
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *
 * along with this program. If not, see <path_to_url
 */

#ifndef _GRANTEDV2_H_
#define _GRANTEDV2_H_

#include <stdint.h>
#include <stdbool.h>

#include <rte_common.h>
#include <rte_mbuf_core.h>
#include <rte_branch_prediction.h>

#include "gatekeeper_flow_bpf.h"

/*
 * Parameters handed to grantedv2_init_inline() inside the flow cookie.
 * The init handler overwrites the cookie in place with
 * struct grantedv2_state.
 */
struct grantedv2_params {
	/*
	 * Primary rate limit: kibibyte/second.
	 * This limit can never be exceeded.
	 */
	uint32_t tx1_rate_kib_sec;
	/*
	 * Secondary rate limit: kibibyte/second.
	 * This limit only applies to some part of the traffic.
	 *
	 * The traffic subject to the secondary rate limit is traffic that
	 * is allowed, but at a lower limit.
	 *
	 * When tx2_rate_kib_sec >= tx1_rate_kib_sec, it has no effect.
	 */
	uint32_t tx2_rate_kib_sec;
	/*
	 * The first value of send_next_renewal_at at
	 * flow entry comes from next_renewal_ms.
	 */
	uint32_t next_renewal_ms;
	/*
	 * How many milliseconds (unit) GK must wait
	 * before sending the next capability renewal
	 * request.
	 */
	uint32_t renewal_step_ms;
	/*
	 * If true, do not encapsulate granted packets and send them directly
	 * to their destinations whenever it is possible.
	 * When a capability is expiring, its granted packets must go
	 * through Grantor servers.
	 */
	bool direct_if_possible;
} __attribute__ ((packed));

/* Per-flow state kept in the flow cookie after initialization. */
struct grantedv2_state {
	/* When @budget_byte is reset. */
	uint64_t budget_renew_at;
	/*
	 * When @budget1_byte is reset,
	 * add @tx1_rate_kib_cycle * 1024 bytes to it.
	 */
	uint32_t tx1_rate_kib_cycle;
	/*
	 * When @budget2_byte is reset,
	 * reset it to @tx2_rate_kib_cycle * 1024 bytes.
	 */
	uint32_t tx2_rate_kib_cycle;
	/* How many bytes @src can still send in current cycle.
	 */
	int64_t budget1_byte;
	/*
	 * How many bytes @src can still send in current cycle in
	 * the secondary channel.
	 */
	int64_t budget2_byte;
	/*
	 * When GK should send the next renewal to
	 * the corresponding grantor.
	 */
	uint64_t send_next_renewal_at;
	/*
	 * How many cycles (unit) GK must wait before
	 * sending the next capability renewal request.
	 */
	uint64_t renewal_step_cycle;
	/*
	 * If true, do not encapsulate granted packets and send them directly
	 * to their destinations whenever it is possible.
	 */
	bool direct_if_possible;
};

/*
 * Convert the parameters in the flow cookie into running state:
 * rates become per-cycle budgets and the millisecond intervals become
 * TSC-cycle deadlines relative to @ctx->now.
 */
static inline uint64_t
grantedv2_init_inline(struct gk_bpf_init_ctx *ctx)
{
	struct gk_bpf_cookie *cookie = init_ctx_to_cookie(ctx);
	struct grantedv2_params params = *(struct grantedv2_params *)cookie;
	struct grantedv2_state *state = (struct grantedv2_state *)cookie;

	/* Both views of the cookie must fit inside it. */
	RTE_BUILD_BUG_ON(sizeof(params) > sizeof(*cookie));
	RTE_BUILD_BUG_ON(sizeof(*state) > sizeof(*cookie));

	state->budget_renew_at = ctx->now + cycles_per_sec;
	state->tx1_rate_kib_cycle = params.tx1_rate_kib_sec;
	state->tx2_rate_kib_cycle = params.tx2_rate_kib_sec;
	state->budget1_byte = (int64_t)params.tx1_rate_kib_sec * 1024;
	state->budget2_byte = (int64_t)params.tx2_rate_kib_sec * 1024;
	state->send_next_renewal_at = ctx->now +
		params.next_renewal_ms * cycles_per_ms;
	state->renewal_step_cycle = params.renewal_step_ms * cycles_per_ms;
	state->direct_if_possible = params.direct_if_possible;

	return GK_BPF_INIT_RET_OK;
}

/*
 * Start of per-packet processing: renew the budgets when the current
 * one-second cycle has elapsed, then charge @pkt_len to the primary
 * budget. Returns GK_BPF_PKT_RET_DECLINE when the primary budget is
 * exhausted, GK_BPF_PKT_RET_FORWARD otherwise.
 */
static inline uint64_t
grantedv2_pkt_begin(const struct gk_bpf_pkt_ctx *ctx,
	struct grantedv2_state *state, uint32_t pkt_len)
{
	if (unlikely(ctx->now >= state->budget_renew_at)) {
		int64_t max_budget1 = (int64_t)state->tx1_rate_kib_cycle * 1024;
		int64_t cycles = ctx->now - state->budget_renew_at;
		int64_t epochs = cycles / cycles_per_sec;

		/* Align the next renewal to the one-second grid. */
		state->budget_renew_at = ctx->now + cycles_per_sec -
			(cycles % cycles_per_sec);

		/* Credit every missed epoch, clamped at one second's worth. */
		state->budget1_byte += max_budget1 * (epochs + 1);
		if (state->budget1_byte > max_budget1)
			state->budget1_byte = max_budget1;

		state->budget2_byte =
			(int64_t)state->tx2_rate_kib_cycle * 1024;
	}

	/* Primary budget. */
	state->budget1_byte -= pkt_len;
	if (state->budget1_byte < 0)
		return GK_BPF_PKT_RET_DECLINE;

	return GK_BPF_PKT_RET_FORWARD;
}

/* Charge @pkt_len to the secondary budget; decline when it runs out. */
static inline uint64_t
grantedv2_pkt_test_2nd_limit(struct grantedv2_state *state, uint32_t pkt_len)
{
	state->budget2_byte -= pkt_len;
	if (state->budget2_byte < 0)
		return GK_BPF_PKT_RET_DECLINE;

	return GK_BPF_PKT_RET_FORWARD;
}

/*
 * End of per-packet processing: raise the packet priority when a
 * capability renewal is due, then prepare the packet for transmission.
 */
static inline uint64_t
grantedv2_pkt_end(struct gk_bpf_pkt_ctx *ctx, struct grantedv2_state *state)
{
	uint8_t priority = PRIORITY_GRANTED;

	if (unlikely(ctx->now >= state->send_next_renewal_at)) {
		state->send_next_renewal_at = ctx->now +
			state->renewal_step_cycle;
		priority = PRIORITY_RENEW_CAP;
	}

	if (unlikely(gk_bpf_prep_for_tx(ctx, priority,
			state->direct_if_possible) < 0))
		return GK_BPF_PKT_RET_ERROR;

	return GK_BPF_PKT_RET_FORWARD;
}

#endif /* _GRANTEDV2_H_ */
```
/content/code_sandbox/bpf/grantedv2.h
objective-c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
1,409
```c
/*
 * Gatekeeper - DDoS protection system.
 *
 * This program is free software: you can redistribute it and/or modify
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *
 * along with this program. If not, see <path_to_url
 */

/*
 * This BPF program is intended as an example for a simple web server
 * that runs the services HTTP, HTTPS, SSH, and FTP.
 *
 * This BPF program builds upon the BPF program grantedv2, so
 * there are primary and secondary limits. The secondary limit is applied to
 * ICMPv4, ICMPv6, fragmented, and SYN packets.
 */

#include <net/ethernet.h>
#include <netinet/tcp.h>

#include "grantedv2.h"
#include "libicmp.h"

/* Initialize the per-flow rate-limit state (see grantedv2.h). */
SEC("init")
uint64_t web_init(struct gk_bpf_init_ctx *ctx)
{
	return grantedv2_init_inline(ctx);
}

/*
 * Per-packet handler: allow only ICMP/ICMPv6/TCP, with ICMP,
 * fragments, and SYNs charged to the secondary budget, and TCP ports
 * restricted to the web/SSH/FTP services below.
 */
SEC("pkt")
uint64_t web_pkt(struct gk_bpf_pkt_ctx *ctx)
{
	struct grantedv2_state *state =
		(struct grantedv2_state *)pkt_ctx_to_cookie(ctx);
	struct rte_mbuf *pkt = pkt_ctx_to_pkt(ctx);
	uint32_t pkt_len = pkt->pkt_len;
	struct tcphdr *tcp_hdr;
	uint64_t ret = grantedv2_pkt_begin(ctx, state, pkt_len);

	if (ret != GK_BPF_PKT_RET_FORWARD) {
		/* Primary budget exceeded. */
		return ret;
	}

	/* Allowed L4 protocols. */
	switch (ctx->l4_proto) {
	case IPPROTO_ICMP:
		ret = check_icmp(ctx, pkt);
		if (ret != GK_BPF_PKT_RET_FORWARD)
			return ret;
		goto secondary_budget;

	case IPPROTO_ICMPV6:
		ret = check_icmp6(ctx, pkt);
		if (ret != GK_BPF_PKT_RET_FORWARD)
			return ret;
		goto secondary_budget;

	case IPPROTO_TCP:
		break;

	default:
		return GK_BPF_PKT_RET_DECLINE;
	}

	/*
	 * Only TCP packets from here on.
	 */

	if (ctx->fragmented)
		goto secondary_budget;

	if (unlikely(pkt->l4_len < sizeof(*tcp_hdr))) {
		/* Malformed TCP header.
		 */
		return GK_BPF_PKT_RET_DECLINE;
	}
	tcp_hdr = rte_pktmbuf_mtod_offset(pkt, struct tcphdr *,
		pkt->l2_len + pkt->l3_len);

	/*
	 * For information on active and passive modes of FTP,
	 * refer to the following page:
	 * path_to_url
	 */

	/* Listening sockets. */
	switch (ntohs(tcp_hdr->th_dport)) {
	/*
	 * ATTENTION
	 * These ports must match the one configured in the FTP
	 * daemon. See the following page for an example:
	 * path_to_url
	 */
	case 51000 ... 51999: /* FTP data (passive mode) */
	case 21: /* FTP command */
	case 80: /* HTTP */
	case 443: /* HTTPS */
	case 22: /* SSH */
		if (tcp_hdr->syn) {
			if (tcp_hdr->ack) {
				/* Amplification attack. */
				return GK_BPF_PKT_RET_DECLINE;
			}
			/* Contain SYN floods. */
			goto secondary_budget;
		}
		break;

	case 20: /* FTP data (active mode) */
		/*
		 * Accept connections of the active mode of FTP originated
		 * from our web server.
		 */
		if (tcp_hdr->syn && !tcp_hdr->ack) {
			/* All listening ports were already tested. */
			return GK_BPF_PKT_RET_DECLINE;
		}
		break;

	default:
		/* Accept connections originated from our web server. */
		if (tcp_hdr->syn && !tcp_hdr->ack) {
			/* All listening ports were already tested. */
			return GK_BPF_PKT_RET_DECLINE;
		}
		/* Authorized external services. */
		switch (ntohs(tcp_hdr->th_sport)) {
		case 80: /* HTTP */
		case 443: /* HTTPS */
			break;
		default:
			return GK_BPF_PKT_RET_DECLINE;
		}
		break;
	}

	goto forward;

secondary_budget:
	ret = grantedv2_pkt_test_2nd_limit(state, pkt_len);
	if (ret != GK_BPF_PKT_RET_FORWARD)
		return ret;
forward:
	return grantedv2_pkt_end(ctx, state);
}
```
/content/code_sandbox/bpf/web.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
1,009
```c
/*
 * Gatekeeper - DDoS protection system.
 *
 * This program is free software: you can redistribute it and/or modify
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *
 * along with this program. If not, see <path_to_url
 */

/*
 * This BPF program mimics the state GK_GRANTED of a flow entry.
 * This program example is useful to use as a starting point to write
 * more complex programs that need to limit the bandwidth of a flow.
 */

#include <stdint.h>
#include <stdbool.h>

#include <rte_common.h>
#include <rte_mbuf_core.h>
#include <rte_branch_prediction.h>

#include "gatekeeper_flow_bpf.h"

/*
 * Parameters handed to granted_init() inside the flow cookie.
 * The init handler overwrites the cookie in place with
 * struct granted_state.
 */
struct granted_params {
	/* Rate limit: kibibyte/second. */
	uint32_t tx_rate_kib_sec;
	/*
	 * The first value of send_next_renewal_at at
	 * flow entry comes from next_renewal_ms.
	 */
	uint32_t next_renewal_ms;
	/*
	 * How many milliseconds (unit) GK must wait
	 * before sending the next capability renewal
	 * request.
	 */
	uint32_t renewal_step_ms;
} __attribute__ ((packed));

/* Per-flow state kept in the flow cookie after initialization. */
struct granted_state {
	/* When @budget_byte is reset. */
	uint64_t budget_renew_at;
	/*
	 * When @budget_byte is reset, reset it to
	 * @tx_rate_kib_cycle * 1024 bytes.
	 */
	uint32_t tx_rate_kib_cycle;
	/* How many bytes @src can still send in current cycle. */
	uint64_t budget_byte;
	/*
	 * When GK should send the next renewal to
	 * the corresponding grantor.
	 */
	uint64_t send_next_renewal_at;
	/*
	 * How many cycles (unit) GK must wait before
	 * sending the next capability renewal request.
	 */
	uint64_t renewal_step_cycle;
};

/*
 * Convert the parameters in the flow cookie into running state:
 * the rate becomes a per-second byte budget and the millisecond
 * intervals become TSC-cycle deadlines relative to @ctx->now.
 */
SEC("init")
uint64_t granted_init(struct gk_bpf_init_ctx *ctx)
{
	struct gk_bpf_cookie *cookie = init_ctx_to_cookie(ctx);
	struct granted_params params = *(struct granted_params *)cookie;
	struct granted_state *state = (struct granted_state *)cookie;

	/* Both views of the cookie must fit inside it. */
	RTE_BUILD_BUG_ON(sizeof(params) > sizeof(*cookie));
	RTE_BUILD_BUG_ON(sizeof(*state) > sizeof(*cookie));

	state->budget_renew_at = ctx->now + cycles_per_sec;
	state->tx_rate_kib_cycle = params.tx_rate_kib_sec;
	state->budget_byte = (uint64_t)params.tx_rate_kib_sec * 1024;
	state->send_next_renewal_at = ctx->now +
		params.next_renewal_ms * cycles_per_ms;
	state->renewal_step_cycle = params.renewal_step_ms * cycles_per_ms;

	return GK_BPF_INIT_RET_OK;
}

/*
 * Per-packet handler: enforce the byte budget for the current
 * one-second cycle, raise the packet priority when a capability
 * renewal is due, and forward the packet.
 */
SEC("pkt")
uint64_t granted_pkt(struct gk_bpf_pkt_ctx *ctx)
{
	struct granted_state *state =
		(struct granted_state *)pkt_ctx_to_cookie(ctx);
	uint32_t pkt_len;
	uint8_t priority = PRIORITY_GRANTED;

	if (unlikely(ctx->now >= state->budget_renew_at)) {
		state->budget_renew_at = ctx->now + cycles_per_sec;
		state->budget_byte = (uint64_t)state->tx_rate_kib_cycle * 1024;
	}

	pkt_len = pkt_ctx_to_pkt(ctx)->pkt_len;
	/* Check before subtracting: @budget_byte is unsigned. */
	if (pkt_len > state->budget_byte)
		return GK_BPF_PKT_RET_DECLINE;
	state->budget_byte -= pkt_len;

	if (unlikely(ctx->now >= state->send_next_renewal_at)) {
		state->send_next_renewal_at = ctx->now +
			state->renewal_step_cycle;
		priority = PRIORITY_RENEW_CAP;
	}

	if (unlikely(gk_bpf_prep_for_tx(ctx, priority, false) < 0))
		return GK_BPF_PKT_RET_ERROR;

	return GK_BPF_PKT_RET_FORWARD;
}
```
/content/code_sandbox/bpf/granted.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
879
```c
/*
 * Gatekeeper - DDoS protection system.
 *
 * This program is free software: you can redistribute it and/or modify
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *
 * along with this program. If not, see <path_to_url
 */

#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>
#include <net/ethernet.h>
#include <netinet/tcp.h>

#include <rte_common.h>
#include <rte_mbuf_core.h>
#include <rte_branch_prediction.h>

#include "gatekeeper_flow_bpf.h"
#include "libicmp.h"

#define TCPSRV_MAX_NUM_PORTS (12)

/* One shared array holding both listening and remote ports. */
struct tcpsrv_ports {
	uint16_t p[TCPSRV_MAX_NUM_PORTS];
};

/*
 * Parameters handed to tcpsrv_init() inside the flow cookie.
 * The init handler overwrites the cookie in place with
 * struct tcpsrv_state.
 */
struct tcpsrv_params {
	/*
	 * Primary rate limit: kibibyte/second.
	 * This limit can never be exceeded.
	 */
	uint32_t tx1_rate_kib_sec;
	/*
	 * The first value of send_next_renewal_at at
	 * flow entry comes from next_renewal_ms.
	 */
	uint32_t next_renewal_ms;
	/*
	 * How many milliseconds (unit) GK must wait
	 * before sending the next capability renewal request.
	 */
	uint32_t renewal_step_ms:24;
	/* Number of listening ports. */
	uint8_t listening_port_count:4;
	/* Number of remote ports. */
	uint8_t remote_port_count:4;
	/*
	 * The listening ports start at index zero and
	 * go toward index (@listening_port_count - 1).
	 * Whereas the remote ports start at index (TCPSRV_MAX_NUM_PORTS - 1)
	 * and go toward index (TCPSRV_MAX_NUM_PORTS - @remote_port_count).
	 * Each set of ports, namely listening and remote ports, must be
	 * 1. sorted in ascending order according to
	 *    how they are laid on the array;
	 * 2. unique.
	 */
	struct tcpsrv_ports ports;
} __attribute__ ((packed));

/* Per-flow state kept in the flow cookie after initialization. */
struct tcpsrv_state {
	/* When @budget_byte is reset. */
	uint64_t budget_renew_at;
	/*
	 * When @budget1_byte is reset,
	 * add @tx1_rate_kib_sec * 1024 bytes to it.
	 */
	uint32_t tx1_rate_kib_sec;
	/*
	 * How many milliseconds (unit) GK must wait
	 * before sending the next capability renewal request.
	 */
	uint32_t renewal_step_ms:24;
	/* Number of listening ports. */
	uint8_t listening_port_count:4;
	/* Number of remote ports. */
	uint8_t remote_port_count:4;
	/* How many bytes @src can still send in current cycle. */
	int64_t budget1_byte;
	/*
	 * How many bytes @src can still send in current cycle in
	 * the secondary channel.
	 */
	int64_t budget2_byte;
	/*
	 * When GK should send the next renewal to
	 * the corresponding grantor.
	 */
	uint64_t send_next_renewal_at;
	/*
	 * The listening ports start at index zero and
	 * go toward index (@listening_port_count - 1).
	 * Whereas the remote ports start at index (TCPSRV_MAX_NUM_PORTS - 1)
	 * and go toward index (TCPSRV_MAX_NUM_PORTS - @remote_port_count).
	 * Each set of ports, namely listening and remote ports, must be
	 * 1. sorted in ascending order according to
	 *    how they are laid on the array;
	 * 2. unique.
	 */
	struct tcpsrv_ports ports;
};

/* Full primary budget for one one-second cycle. */
static inline int64_t
reset_budget1(const struct tcpsrv_state *state)
{
	return (int64_t)state->tx1_rate_kib_sec * 1024; /* 1024 B/KiB */
}

/* The secondary budget is a fixed fraction of the primary budget. */
static inline void
reset_budget2(struct tcpsrv_state *state)
{
	state->budget2_byte = reset_budget1(state) * 5 / 100; /* 5% */
}

/*
 * Convert the parameters in the flow cookie into running state.
 * Fails when the listening and remote port sets would overflow
 * the shared port array.
 */
SEC("init")
uint64_t tcpsrv_init(struct gk_bpf_init_ctx *ctx)
{
	struct gk_bpf_cookie *cookie = init_ctx_to_cookie(ctx);
	struct tcpsrv_params params = *(struct tcpsrv_params *)cookie;
	struct tcpsrv_state *state = (struct tcpsrv_state *)cookie;

	/* Both views of the cookie must fit inside it. */
	RTE_BUILD_BUG_ON(sizeof(params) > sizeof(*cookie));
	RTE_BUILD_BUG_ON(sizeof(*state) > sizeof(*cookie));

	/* Are the number of ports correct?
	 */
	if (unlikely((int)params.listening_port_count +
			params.remote_port_count > TCPSRV_MAX_NUM_PORTS))
		return GK_BPF_INIT_RET_ERROR;

	state->budget_renew_at = ctx->now + cycles_per_sec;
	state->tx1_rate_kib_sec = params.tx1_rate_kib_sec;
	state->renewal_step_ms = params.renewal_step_ms;
	state->listening_port_count = params.listening_port_count;
	state->remote_port_count = params.remote_port_count;
	state->budget1_byte = reset_budget1(state);
	reset_budget2(state);
	state->send_next_renewal_at = ctx->now +
		params.next_renewal_ms * cycles_per_ms;
	state->ports = params.ports;

	return GK_BPF_INIT_RET_OK;
}

/*
 * Start of per-packet processing: renew the budgets when the current
 * one-second cycle has elapsed, then charge @pkt_len to the primary
 * budget. Returns GK_BPF_PKT_RET_DECLINE when the primary budget is
 * exhausted, GK_BPF_PKT_RET_FORWARD otherwise.
 */
static inline uint64_t
tcpsrv_pkt_begin(const struct gk_bpf_pkt_ctx *ctx,
	struct tcpsrv_state *state, uint32_t pkt_len)
{
	if (unlikely(ctx->now >= state->budget_renew_at)) {
		int64_t max_budget1 = reset_budget1(state);
		int64_t cycles = ctx->now - state->budget_renew_at;
		int64_t epochs = cycles / cycles_per_sec;

		/* Align the next renewal to the one-second grid. */
		state->budget_renew_at = ctx->now + cycles_per_sec -
			(cycles % cycles_per_sec);

		/* Credit every missed epoch, clamped at one second's worth. */
		state->budget1_byte += max_budget1 * (epochs + 1);
		if (state->budget1_byte > max_budget1)
			state->budget1_byte = max_budget1;

		reset_budget2(state);
	}

	/* Primary budget.
	 */
	state->budget1_byte -= pkt_len;
	if (state->budget1_byte < 0)
		return GK_BPF_PKT_RET_DECLINE;

	return GK_BPF_PKT_RET_FORWARD;
}

/* Charge @pkt_len to the secondary budget; decline when it runs out. */
static inline uint64_t
tcpsrv_pkt_test_2nd_limit(struct tcpsrv_state *state, uint32_t pkt_len)
{
	state->budget2_byte -= pkt_len;
	if (state->budget2_byte < 0)
		return GK_BPF_PKT_RET_DECLINE;

	return GK_BPF_PKT_RET_FORWARD;
}

/*
 * End of per-packet processing: raise the packet priority when a
 * capability renewal is due, then prepare the packet for transmission.
 */
static inline uint64_t
tcpsrv_pkt_end(struct gk_bpf_pkt_ctx *ctx, struct tcpsrv_state *state)
{
	uint8_t priority = PRIORITY_GRANTED;

	if (unlikely(ctx->now >= state->send_next_renewal_at)) {
		state->send_next_renewal_at = ctx->now +
			state->renewal_step_ms * cycles_per_ms;
		priority = PRIORITY_RENEW_CAP;
	}

	if (unlikely(gk_bpf_prep_for_tx(ctx, priority, true) < 0))
		return GK_BPF_PKT_RET_ERROR;

	return GK_BPF_PKT_RET_FORWARD;
}

/*
 * The TEST_COUNT/FORWARD/BACK macros below build a loop-free linear
 * search over a sorted port list: switch (count) jumps to the case
 * matching the number of ports left, each TEST_COUNT stops (breaks)
 * once the current entry reaches @port, and fallthrough plus
 * FORWARD/BACK advances to the next entry. Loop-free code keeps the
 * program acceptable to the BPF verifier.
 */
#define TEST_COUNT(count)		\
	case count:			\
		if (*ports >= port)	\
			break

#define FORWARD ports++

/*
 * Test whether @port is among the @count ports starting at @ports and
 * walking forward; entries must be ascending in walk order.
 */
static inline bool
is_port_listed_forward(const uint16_t *ports, uint8_t count, uint16_t port)
{
	RTE_BUILD_BUG_ON(TCPSRV_MAX_NUM_PORTS != 12);

	switch (count) {
	TEST_COUNT(12);
	FORWARD;
	TEST_COUNT(11);
	FORWARD;
	TEST_COUNT(10);
	FORWARD;
	TEST_COUNT(9);
	FORWARD;
	TEST_COUNT(8);
	FORWARD;
	TEST_COUNT(7);
	FORWARD;
	TEST_COUNT(6);
	FORWARD;
	TEST_COUNT(5);
	FORWARD;
	TEST_COUNT(4);
	FORWARD;
	TEST_COUNT(3);
	FORWARD;
	TEST_COUNT(2);
	FORWARD;
	TEST_COUNT(1);
	default:
		return false;
	}

	return *ports == port;
}

/* Is @port_be (network byte order) one of the listening ports? */
static inline bool
is_listening_port(struct tcpsrv_state *state, uint16_t port_be)
{
	return is_port_listed_forward(&state->ports.p[0],
		state->listening_port_count, ntohs(port_be));
}

#define BACK ports--

/*
 * Test whether @port is among the @count ports starting at @ports and
 * walking backward; entries must be ascending in walk order
 * (i.e. descending by array index).
 */
static inline bool
is_port_listed_back(const uint16_t *ports, uint8_t count, uint16_t port)
{
	RTE_BUILD_BUG_ON(TCPSRV_MAX_NUM_PORTS != 12);

	switch (count) {
	TEST_COUNT(12);
	BACK;
	TEST_COUNT(11);
	BACK;
	TEST_COUNT(10);
	BACK;
	TEST_COUNT(9);
	BACK;
	TEST_COUNT(8);
	BACK;
	TEST_COUNT(7);
	BACK;
	TEST_COUNT(6);
	BACK;
	TEST_COUNT(5);
	BACK;
	TEST_COUNT(4);
	BACK;
	TEST_COUNT(3);
	BACK;
	TEST_COUNT(2);
	BACK;
	TEST_COUNT(1);
	default:
		return false;
	}

	return *ports == port;
}

/* Is @port_be (network byte order) one of the authorized remote ports? */
static inline bool
is_remote_port(struct tcpsrv_state *state, uint16_t port_be)
{
	return is_port_listed_back(&state->ports.p[TCPSRV_MAX_NUM_PORTS - 1],
		state->remote_port_count, ntohs(port_be));
}

/*
 * Per-packet handler: allow only ICMP/ICMPv6/TCP, with ICMP, fragments,
 * and SYNs to listening ports charged to the secondary budget, and all
 * other TCP traffic restricted to the configured listening/remote ports.
 */
SEC("pkt")
uint64_t tcpsrv_pkt(struct gk_bpf_pkt_ctx *ctx)
{
	struct tcpsrv_state *state =
		(struct tcpsrv_state *)pkt_ctx_to_cookie(ctx);
	struct rte_mbuf *pkt = pkt_ctx_to_pkt(ctx);
	uint32_t pkt_len = pkt->pkt_len;
	struct tcphdr *tcp_hdr;
	uint64_t ret = tcpsrv_pkt_begin(ctx, state, pkt_len);

	if (ret != GK_BPF_PKT_RET_FORWARD) {
		/* Primary budget exceeded. */
		return ret;
	}

	/* Allowed L4 protocols. */
	switch (ctx->l4_proto) {
	case IPPROTO_ICMP:
		ret = check_icmp(ctx, pkt);
		if (ret != GK_BPF_PKT_RET_FORWARD)
			return ret;
		goto secondary_budget;

	case IPPROTO_ICMPV6:
		ret = check_icmp6(ctx, pkt);
		if (ret != GK_BPF_PKT_RET_FORWARD)
			return ret;
		goto secondary_budget;

	case IPPROTO_TCP:
		break;

	default:
		return GK_BPF_PKT_RET_DECLINE;
	}

	/*
	 * Only TCP packets from here on.
	 */

	if (ctx->fragmented)
		goto secondary_budget;

	if (unlikely(pkt->l4_len < sizeof(*tcp_hdr))) {
		/* Malformed TCP header. */
		return GK_BPF_PKT_RET_DECLINE;
	}
	tcp_hdr = rte_pktmbuf_mtod_offset(pkt, struct tcphdr *,
		pkt->l2_len + pkt->l3_len);

	if (is_listening_port(state, tcp_hdr->th_dport)) {
		if (tcp_hdr->syn) {
			if (tcp_hdr->ack) {
				/* Amplification attack. */
				return GK_BPF_PKT_RET_DECLINE;
			}
			/* Contain SYN floods. */
			goto secondary_budget;
		}
	} else {
		/* Accept connections originated from the destination. */
		if (tcp_hdr->syn && !tcp_hdr->ack) {
			/* All listening ports were already tested. */
			return GK_BPF_PKT_RET_DECLINE;
		}
		/* Authorized external services. */
		if (!is_remote_port(state, tcp_hdr->th_sport))
			return GK_BPF_PKT_RET_DECLINE;
	}

	goto forward;

secondary_budget:
	ret = tcpsrv_pkt_test_2nd_limit(state, pkt_len);
	if (ret != GK_BPF_PKT_RET_FORWARD)
		return ret;
forward:
	return tcpsrv_pkt_end(ctx, state);
}
```
/content/code_sandbox/bpf/tcp-services.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
2,569
```objective-c
/*
 * Gatekeeper - DDoS protection system.
 *
 * This program is free software: you can redistribute it and/or modify
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *
 * along with this program. If not, see <path_to_url
 */

#ifndef _LIBICMP_H_
#define _LIBICMP_H_

#include <net/ethernet.h>
#include <netinet/ip_icmp.h>
#include <netinet/icmp6.h>

#include <rte_mbuf_core.h>

#include "gatekeeper_flow_bpf.h"

/*
 * Validate an ICMPv4 packet: it must sit on top of IPv4, must not be
 * fragmented, must carry a full ICMP header, and must be one of the
 * whitelisted message types below. Returns GK_BPF_PKT_RET_FORWARD on
 * success and GK_BPF_PKT_RET_DECLINE otherwise.
 */
static inline uint64_t
check_icmp(struct gk_bpf_pkt_ctx *ctx, struct rte_mbuf *pkt)
{
	struct icmphdr *icmp_hdr;

	if (unlikely(ctx->l3_proto != ETHERTYPE_IP)) {
		/* ICMP must be on top of IPv4. */
		return GK_BPF_PKT_RET_DECLINE;
	}
	if (ctx->fragmented)
		return GK_BPF_PKT_RET_DECLINE;
	if (unlikely(pkt->l4_len < sizeof(*icmp_hdr))) {
		/* Malformed ICMP header. */
		return GK_BPF_PKT_RET_DECLINE;
	}
	icmp_hdr = rte_pktmbuf_mtod_offset(pkt, struct icmphdr *,
		pkt->l2_len + pkt->l3_len);

	switch (icmp_hdr->type) {
	case ICMP_ECHOREPLY:
	case ICMP_DEST_UNREACH:
	case ICMP_SOURCE_QUENCH:
	case ICMP_ECHO:
	case ICMP_TIME_EXCEEDED:
		break;
	default:
		return GK_BPF_PKT_RET_DECLINE;
	}

	return GK_BPF_PKT_RET_FORWARD;
}

/*
 * Validate an ICMPv6 packet: it must sit on top of IPv6, must not be
 * fragmented, must carry a full ICMPv6 header, and must be one of the
 * whitelisted message types below. Returns GK_BPF_PKT_RET_FORWARD on
 * success and GK_BPF_PKT_RET_DECLINE otherwise.
 */
static inline uint64_t
check_icmp6(struct gk_bpf_pkt_ctx *ctx, struct rte_mbuf *pkt)
{
	struct icmp6_hdr *icmp6_hdr;

	if (unlikely(ctx->l3_proto != ETHERTYPE_IPV6)) {
		/* ICMPv6 must be on top of IPv6. */
		return GK_BPF_PKT_RET_DECLINE;
	}
	if (ctx->fragmented)
		return GK_BPF_PKT_RET_DECLINE;
	if (unlikely(pkt->l4_len < sizeof(*icmp6_hdr))) {
		/* Malformed ICMPv6 header.
		 */
		return GK_BPF_PKT_RET_DECLINE;
	}
	icmp6_hdr = rte_pktmbuf_mtod_offset(pkt, struct icmp6_hdr *,
		pkt->l2_len + pkt->l3_len);

	switch (icmp6_hdr->icmp6_type) {
	case ICMP6_DST_UNREACH:
	case ICMP6_PACKET_TOO_BIG:
	case ICMP6_TIME_EXCEEDED:
	case ICMP6_PARAM_PROB:
	case ICMP6_ECHO_REQUEST:
	case ICMP6_ECHO_REPLY:
		break;
	default:
		return GK_BPF_PKT_RET_DECLINE;
	}

	return GK_BPF_PKT_RET_FORWARD;
}

#endif /* _LIBICMP_H_ */
```
/content/code_sandbox/bpf/libicmp.h
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
636
```c /* * Gatekeeper - DDoS protection system. * * This program is free software: you can redistribute it and/or modify * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * along with this program. If not, see <path_to_url */ /* For gettid(). */ #define _GNU_SOURCE #include <stdbool.h> #include <unistd.h> #include <rte_ip.h> #include <rte_udp.h> #include <rte_log.h> #include <rte_lcore.h> #include <rte_malloc.h> #include <rte_ethdev.h> #include <rte_atomic.h> #include "gatekeeper_ggu.h" #include "gatekeeper_gk.h" #include "gatekeeper_main.h" #include "gatekeeper_config.h" #include "gatekeeper_launch.h" #include "gatekeeper_l2.h" #include "gatekeeper_varip.h" #include "gatekeeper_log_ratelimit.h" static struct ggu_config *ggu_conf; static void process_single_policy(struct ggu_policy *policy, void *arg) { const struct ggu_config *ggu_conf = arg; uint32_t flow_hash_val = rss_flow_hash(&ggu_conf->net->front, &policy->flow); struct gk_cmd_entry *entry; /* * Obtain mailbox of that GK block, * and send the policy decision to the GK block. 
*/ struct mailbox *mb = get_responsible_gk_mailbox(flow_hash_val, ggu_conf->gk); if (mb == NULL) return; entry = mb_alloc_entry(mb); if (entry == NULL) return; entry->op = GK_ADD_POLICY_DECISION; entry->u.ggu.flow_hash_val = flow_hash_val; entry->u.ggu.policy.state = policy->state; rte_memcpy(&entry->u.ggu.policy.flow, &policy->flow, sizeof(entry->u.ggu.policy.flow)); switch (policy->state) { case GK_GRANTED: entry->u.ggu.policy.params.granted = policy->params.granted; break; case GK_DECLINED: entry->u.ggu.policy.params.declined = policy->params.declined; break; case GK_BPF: if (gk_init_bpf_cookie(ggu_conf->gk, policy->params.bpf.program_index, &policy->params.bpf.cookie) < 0) goto error; /* * After calling gk_init_bpf_cookie(), * the whole cookie may be used. */ policy->params.bpf.cookie_len = sizeof(policy->params.bpf.cookie); entry->u.ggu.policy.params.bpf = policy->params.bpf; break; default: G_LOG(ERR, "%s(): unknown policy state %hhu\n", __func__, policy->state); goto error; } mb_send_entry(mb, entry); return; error: mb_free_entry(mb, entry); } void ggu_policy_iterator(struct ggu_decision *ggu_decision, unsigned int decision_list_len, ggu_policy_fn policy_fn, void *policy_arg) { while (decision_list_len >= sizeof(*ggu_decision)) { struct ggu_policy policy; uint8_t decision_type = ggu_decision->type; size_t decision_len = sizeof(*ggu_decision); size_t params_offset; if (ggu_decision->res1 != 0 || ggu_decision->res2 != 0) { G_LOG(NOTICE, "%s(): reserved fields of GGU decisions should be 0 but are %hhu and %hu\n", __func__, ggu_decision->res1, rte_be_to_cpu_16(ggu_decision->res2)); return; } /* Verify decision length and read in flow information. 
*/ switch (decision_type) { case GGU_DEC_IPV4_DECLINED: decision_len += sizeof(policy.flow.f.v4) + sizeof(policy.params.declined); if (decision_list_len < decision_len) { G_LOG(WARNING, "%s(): malformed IPv4 declined decision\n", __func__); return; } policy.state = GK_DECLINED; policy.flow.proto = RTE_ETHER_TYPE_IPV4; rte_memcpy(&policy.flow.f.v4, ggu_decision->ip_flow, sizeof(policy.flow.f.v4)); params_offset = sizeof(policy.flow.f.v4); break; case GGU_DEC_IPV6_DECLINED: decision_len += sizeof(policy.flow.f.v6) + sizeof(policy.params.declined); if (decision_list_len < decision_len) { G_LOG(WARNING, "%s(): malformed IPv6 declined decision\n", __func__); return; } policy.state = GK_DECLINED; policy.flow.proto = RTE_ETHER_TYPE_IPV6; rte_memcpy(&policy.flow.f.v6, ggu_decision->ip_flow, sizeof(policy.flow.f.v6)); params_offset = sizeof(policy.flow.f.v6); break; case GGU_DEC_IPV4_GRANTED: decision_len += sizeof(policy.flow.f.v4) + sizeof(policy.params.granted); if (decision_list_len < decision_len) { G_LOG(WARNING, "%s(): malformed IPv4 granted decision\n", __func__); return; } policy.state = GK_GRANTED; policy.flow.proto = RTE_ETHER_TYPE_IPV4; rte_memcpy(&policy.flow.f.v4, ggu_decision->ip_flow, sizeof(policy.flow.f.v4)); params_offset = sizeof(policy.flow.f.v4); break; case GGU_DEC_IPV6_GRANTED: decision_len += sizeof(policy.flow.f.v6) + sizeof(policy.params.granted); if (decision_list_len < decision_len) { G_LOG(WARNING, "%s(): malformed IPv6 granted decision\n", __func__); return; } policy.state = GK_GRANTED; policy.flow.proto = RTE_ETHER_TYPE_IPV6; rte_memcpy(&policy.flow.f.v6, ggu_decision->ip_flow, sizeof(policy.flow.f.v6)); params_offset = sizeof(policy.flow.f.v6); break; case GGU_DEC_IPV4_BPF: decision_len += sizeof(policy.flow.f.v4) + sizeof(struct ggu_bpf_wire); if (decision_list_len < decision_len) { G_LOG(WARNING, "%s(): malformed IPv4 BPF decision\n", __func__); return; } policy.state = GK_BPF; policy.flow.proto = RTE_ETHER_TYPE_IPV4; 
rte_memcpy(&policy.flow.f.v4, ggu_decision->ip_flow, sizeof(policy.flow.f.v4)); params_offset = sizeof(policy.flow.f.v4); break; case GGU_DEC_IPV6_BPF: decision_len += sizeof(policy.flow.f.v6) + sizeof(struct ggu_bpf_wire); if (decision_list_len < decision_len) { G_LOG(WARNING, "%s(): malformed IPv6 BPF decision\n", __func__); return; } policy.state = GK_BPF; policy.flow.proto = RTE_ETHER_TYPE_IPV6; rte_memcpy(&policy.flow.f.v6, ggu_decision->ip_flow, sizeof(policy.flow.f.v6)); params_offset = sizeof(policy.flow.f.v6); break; default: G_LOG(WARNING, "%s(): unexpected decision type: %hu\n", __func__, decision_type); return; } /* Read in decision parameters. */ switch (decision_type) { case GGU_DEC_IPV4_GRANTED: /* FALLTHROUGH */ case GGU_DEC_IPV6_GRANTED: { struct ggu_granted *granted_be = (struct ggu_granted *) (ggu_decision->ip_flow + params_offset); policy.params.granted.tx_rate_kib_sec = rte_be_to_cpu_32(granted_be->tx_rate_kib_sec); policy.params.granted.cap_expire_sec = rte_be_to_cpu_32(granted_be->cap_expire_sec); policy.params.granted.next_renewal_ms = rte_be_to_cpu_32(granted_be->next_renewal_ms); policy.params.granted.renewal_step_ms = rte_be_to_cpu_32(granted_be->renewal_step_ms); break; } case GGU_DEC_IPV4_DECLINED: /* FALLTHROUGH */ case GGU_DEC_IPV6_DECLINED: { struct ggu_declined *declined_be = (struct ggu_declined *) (ggu_decision->ip_flow + params_offset); policy.params.declined.expire_sec = rte_be_to_cpu_32(declined_be->expire_sec); break; } case GGU_DEC_IPV4_BPF: /* FALLTHROUGH */ case GGU_DEC_IPV6_BPF: { struct ggu_bpf_wire *bpf_wire_be = (struct ggu_bpf_wire *) (ggu_decision->ip_flow + params_offset); unsigned int cookie_len; if (bpf_wire_be->reserved != 0) { G_LOG(WARNING, "%s(): malformed BPF decision, reserved=%u\n", __func__, bpf_wire_be->reserved); return; } cookie_len = 4 * bpf_wire_be->cookie_len_4by; if (cookie_len > sizeof(struct gk_bpf_cookie)) { G_LOG(WARNING, "%s(): malformed BPF decision, cookie_len=%u\n", __func__, cookie_len); 
return; } decision_len += cookie_len; if (decision_list_len < decision_len) { G_LOG(WARNING, "%s(): malformed BPF decision (too short)\n", __func__); return; } policy.params.bpf.expire_sec = rte_be_to_cpu_32(bpf_wire_be->expire_sec); policy.params.bpf.program_index = bpf_wire_be->program_index; policy.params.bpf.reserved = 0; policy.params.bpf.cookie_len = cookie_len; /* * Byte order is responsibility of the init function * of the GK BPF program. */ rte_memcpy(&policy.params.bpf.cookie, bpf_wire_be->cookie, cookie_len); memset(((uint8_t *)&policy.params.bpf.cookie) + cookie_len, 0, sizeof(policy.params.bpf.cookie) - cookie_len); break; } default: rte_panic("%s(): found an unknown decision type after previously verifying it: %hhu\n", __func__, decision_type); } policy_fn(&policy, policy_arg); ggu_decision = (struct ggu_decision *) (((uint8_t *)ggu_decision) + decision_len); decision_list_len -= decision_len; } if (decision_list_len != 0) { G_LOG(WARNING, "%s(): notification packet had partial decision list\n", __func__); } } static void process_single_packet(struct rte_mbuf *pkt, struct ggu_config *ggu_conf) { uint16_t ether_type; struct rte_ether_hdr *eth_hdr; void *l3_hdr; struct rte_udp_hdr *udphdr; uint16_t pkt_udp_checksum, cal_udp_checksum; struct ggu_common_hdr *gguhdr; struct ggu_decision *ggu_decision; uint16_t real_payload_len; uint16_t expected_payload_len; uint16_t decision_list_len; struct gatekeeper_if *back = &ggu_conf->net->back; uint16_t minimum_size; size_t l2_len; int l3_len; eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *); ether_type = rte_be_to_cpu_16(pkt_in_skip_l2(pkt, eth_hdr, &l3_hdr)); l2_len = pkt_in_l2_hdr_len(pkt); minimum_size = l2_len; switch (ether_type) { case RTE_ETHER_TYPE_IPV4: { struct rte_ipv4_hdr *ip4hdr; minimum_size += sizeof(struct rte_ipv4_hdr) + sizeof(struct rte_udp_hdr) + sizeof(struct ggu_common_hdr); if (pkt->data_len < minimum_size) { G_LOG(NOTICE, "%s(): the IPv4 packet's actual size is %hu, which doesn't 
have the minimum expected size %hu\n", __func__, pkt->data_len, minimum_size); goto free_packet; } ip4hdr = l3_hdr; if (ip4hdr->next_proto_id != IPPROTO_UDP) { G_LOG(ERR, "%s(): received non-UDP packets, IPv4 filter bug\n", __func__); goto free_packet; } if (ip4hdr->dst_addr != back->ip4_addr.s_addr) { G_LOG(ERR, "%s(): received packets not destined to the Gatekeeper server, IPv4 filter bug\n", __func__); goto free_packet; } if (rte_ipv4_frag_pkt_is_fragmented(ip4hdr)) { G_LOG(WARNING, "%s(): received IPv4 fragmented packets destined to the Gatekeeper server\n", __func__); goto free_packet; } l3_len = ipv4_hdr_len(ip4hdr); /* * Base IPv4 header length was already accounted for, * so add in any extra bytes from extension header(s). */ minimum_size += l3_len - sizeof(*ip4hdr); if (pkt->data_len < minimum_size) { G_LOG(NOTICE, "%s(): the IPv4 packet's actual size is %hu, which doesn't have the minimum expected size %hu\n", __func__, pkt->data_len, minimum_size); goto free_packet; } /* * The ntuple filter/ACL supports IPv4 variable headers. * The following code parses IPv4 variable headers. */ udphdr = (struct rte_udp_hdr *)ipv4_skip_exthdr(ip4hdr); break; } case RTE_ETHER_TYPE_IPV6: { struct rte_ipv6_hdr *ip6hdr; uint8_t nexthdr; minimum_size += sizeof(struct rte_ipv6_hdr) + sizeof(struct rte_udp_hdr) + sizeof(struct ggu_common_hdr); if (pkt->data_len < minimum_size) { G_LOG(NOTICE, "%s(): the IPv6 packet's actual size is %hu, which doesn't have the minimum expected size %hu\n", __func__, pkt->data_len, minimum_size); goto free_packet; } /* * The ntuple filter/ACL supports IPv6 variable headers. * The following code parses IPv6 variable headers. 
*/ ip6hdr = l3_hdr; if (rte_ipv6_frag_get_ipv6_fragment_header(ip6hdr) != NULL) { G_LOG(WARNING, "%s(): received IPv6 fragmented packets destined to the Gatekeeper server\n", __func__); goto free_packet; } l3_len = ipv6_skip_exthdr(ip6hdr, pkt->data_len - l2_len, &nexthdr); if (l3_len < 0) { G_LOG(ERR, "%s(): failed to parse the IPv6 packet's extension headers\n", __func__); goto free_packet; } if (nexthdr != IPPROTO_UDP) { G_LOG(ERR, "%s(): received non-UDP packets, IPv6 filter bug\n", __func__); goto free_packet; } /* * Base IPv6 header length was already accounted for, * so add in any extra bytes from extension header(s). */ minimum_size += l3_len - sizeof(*ip6hdr); if (pkt->data_len < minimum_size) { G_LOG(NOTICE, "%s(): the IPv6 packet's actual size is %hu, which doesn't have the minimum expected size %hu\n", __func__, pkt->data_len, minimum_size); goto free_packet; } udphdr = (struct rte_udp_hdr *)((uint8_t *)ip6hdr + l3_len); break; } default: G_LOG(NOTICE, "%s(): unknown network layer protocol %hu\n", __func__, ether_type); goto free_packet; break; } if (udphdr->src_port != ggu_conf->ggu_src_port || udphdr->dst_port != ggu_conf->ggu_dst_port) { G_LOG(ERR, "%s(): unknown UDP src port %hu, dst port %hu, filter bug\n", __func__, rte_be_to_cpu_16(udphdr->src_port), rte_be_to_cpu_16(udphdr->dst_port)); goto free_packet; } real_payload_len = pkt->data_len - l2_len - l3_len; expected_payload_len = rte_be_to_cpu_16(udphdr->dgram_len); if (real_payload_len != expected_payload_len) { G_LOG(NOTICE, "%s(): the size (%hu) of the payload available in the UDP header doesn't match the expected size (%hu)\n", __func__, real_payload_len, expected_payload_len); goto free_packet; } pkt_udp_checksum = udphdr->dgram_cksum; udphdr->dgram_cksum = 0; if (ether_type == RTE_ETHER_TYPE_IPV4) { cal_udp_checksum = rte_ipv4_udptcp_cksum(l3_hdr, udphdr); if (pkt_udp_checksum != cal_udp_checksum) { G_LOG(ERR, "%s(): the IPv4 packet's UDP checksum (%hu) doesn't match the calculated checksum 
(%hu)\n", __func__, pkt_udp_checksum, cal_udp_checksum); goto free_packet; } } else { cal_udp_checksum = rte_ipv6_udptcp_cksum(l3_hdr, udphdr); if (pkt_udp_checksum != cal_udp_checksum) { G_LOG(ERR, "%s(): the IPv6 packet's UDP checksum (%hu) doesn't match the calculated checksum (%hu)\n", __func__, pkt_udp_checksum, cal_udp_checksum); goto free_packet; } } gguhdr = (struct ggu_common_hdr *)&udphdr[1]; if (gguhdr->version != GGU_PD_VER) { G_LOG(NOTICE, "%s(): unknown policy decision format %hhu\n", __func__, gguhdr->version); goto free_packet; } if (gguhdr->res1 != 0 || gguhdr->res2 != 0) { G_LOG(NOTICE, "%s(): reserved fields of GGU header should be 0 but are %hhu and %hu\n", __func__, gguhdr->res1, rte_be_to_cpu_16(gguhdr->res2)); goto free_packet; } /* @minimum_size is length of all headers, including GGU. */ decision_list_len = pkt->data_len - minimum_size; ggu_decision = gguhdr->decisions; /* Loop over each policy decision in the packet. */ ggu_policy_iterator(ggu_decision, decision_list_len, process_single_policy, ggu_conf); free_packet: rte_pktmbuf_free(pkt); } /* Information needed to submit GGU packets to the GGU block. */ struct ggu_request { /* Number of packets stored in @pkts. */ unsigned int num_pkts; /* GT-GK Unit packets. 
*/ struct rte_mbuf *pkts[0]; }; static int submit_ggu(struct rte_mbuf **pkts, unsigned int num_pkts, __attribute__((unused)) struct gatekeeper_if *iface) { struct ggu_request *req = mb_alloc_entry(&ggu_conf->mailbox); int ret; RTE_VERIFY(num_pkts <= ggu_conf->mailbox_max_pkt_burst); if (req == NULL) { G_LOG(ERR, "%s: allocation of mailbox message failed\n", __func__); ret = -ENOMEM; goto free_pkts; } req->num_pkts = num_pkts; rte_memcpy(req->pkts, pkts, sizeof(*req->pkts) * num_pkts); ret = mb_send_entry(&ggu_conf->mailbox, req); if (ret < 0) { G_LOG(ERR, "%s: failed to enqueue message to mailbox\n", __func__); goto free_pkts; } return 0; free_pkts: rte_pktmbuf_free_bulk(pkts, num_pkts); return ret; } static void process_back_nic(struct ggu_config *ggu_conf, uint16_t port_in, uint16_t rx_queue, uint16_t max_pkt_burst) { struct rte_mbuf *bufs[max_pkt_burst]; uint16_t num_rx = rte_eth_rx_burst(port_in, rx_queue, bufs, max_pkt_burst); unsigned int i; if (unlikely(num_rx == 0)) return; for (i = 0; i < num_rx; i++) process_single_packet(bufs[i], ggu_conf); } static void process_mb(struct ggu_config *ggu_conf) { unsigned int mailbox_burst_size = ggu_conf->mailbox_burst_size; struct ggu_request *reqs[mailbox_burst_size]; unsigned int num_reqs = mb_dequeue_burst(&ggu_conf->mailbox, (void **)reqs, mailbox_burst_size); unsigned int i; if (unlikely(num_reqs == 0)) return; for (i = 0; i < num_reqs; i++) { unsigned int j; for (j = 0; j < reqs[i]->num_pkts; j++) process_single_packet(reqs[i]->pkts[j], ggu_conf); } mb_free_entry_bulk(&ggu_conf->mailbox, (void * const *)reqs, num_reqs); } static int ggu_proc(void *arg) { struct ggu_config *ggu_conf = arg; uint16_t port_in = ggu_conf->net->back.id; uint16_t rx_queue = ggu_conf->rx_queue_back; uint16_t max_pkt_burst = ggu_conf->max_pkt_burst; G_LOG(NOTICE, "The GT-GK unit is running at tid = %u\n", gettid()); if (needed_caps(0, NULL) < 0) { G_LOG(ERR, "Could not set needed capabilities\n"); exiting = true; } /* * Load sets of GT-GK 
packets from the back NIC * or from the GGU mailbox. */ while (likely(!exiting)) { if (ggu_conf->rx_method_back & RX_METHOD_NIC) { process_back_nic(ggu_conf, port_in, rx_queue, max_pkt_burst); } if (ggu_conf->rx_method_back & RX_METHOD_MB) process_mb(ggu_conf); } G_LOG(NOTICE, "The GT-GK unit is exiting\n"); return cleanup_ggu(ggu_conf); } static int ggu_stage1(void *arg) { struct ggu_config *ggu_conf = arg; int ret; /* * GGU should only get its own RX queue if RSS is enabled, * even if ntuple filter is not enabled. * * If RSS is disabled, then the network configuration can * tell that it should ignore all other blocks' requests * for queues and just allocate one RX queue. * * If RSS is enabled, then GGU has already informed the * network configuration that it will be using a queue. * The network configuration will crash if GGU doesn't * configure that queue, so it still should, even if * ntuple filter is not supported and GGU will not use it. */ if (ggu_conf->net->back.rss) { unsigned int num_mbuf = calculate_mempool_config_para("ggu", ggu_conf->net, ggu_conf->total_pkt_burst); ggu_conf->mp = create_pktmbuf_pool("ggu", ggu_conf->lcore_id, num_mbuf); if (ggu_conf->mp == NULL) return -1; ret = get_queue_id(&ggu_conf->net->back, QUEUE_TYPE_RX, ggu_conf->lcore_id, ggu_conf->mp); if (ret < 0) { G_LOG(ERR, "Cannot assign an RX queue for the back interface for lcore %u\n", ggu_conf->lcore_id); return ret; } ggu_conf->rx_queue_back = ret; } return 0; } static int ggu_stage2(void *arg) { struct ggu_config *ggu_conf = arg; int ret; /* * Setup the filters that assign the GT-GK packets * to its queue for both IPv4 and IPv6 addresses. * Packets using the GGU protocol don't have variable * length headers, and therefore we don't need a match * function when calling ipv{4,6}_pkt_filter_add(). */ if (ipv4_if_configured(&ggu_conf->net->back)) { /* * Note that the IP address, ports, and masks * are all in big endian ordering as required. 
*/ ret = ipv4_pkt_filter_add(&ggu_conf->net->back, ggu_conf->net->back.ip4_addr.s_addr, ggu_conf->ggu_src_port, UINT16_MAX, ggu_conf->ggu_dst_port, UINT16_MAX, IPPROTO_UDP, ggu_conf->rx_queue_back, submit_ggu, NULL, &ggu_conf->rx_method_back); if (ret < 0) { G_LOG(ERR, "Could not configure IPv4 filter for GGU packets\n"); return ret; } } if (ipv6_if_configured(&ggu_conf->net->back)) { /* * Note that the IP address, ports, and masks * are all in big endian ordering as required. */ ret = ipv6_pkt_filter_add(&ggu_conf->net->back, (rte_be32_t *)&ggu_conf->net->back.ip6_addr.s6_addr, ggu_conf->ggu_src_port, UINT16_MAX, ggu_conf->ggu_dst_port, UINT16_MAX, IPPROTO_UDP, ggu_conf->rx_queue_back, submit_ggu, NULL, &ggu_conf->rx_method_back); if (ret < 0) { G_LOG(ERR, "Could not configure IPv6 filter for GGU packets\n"); return ret; } } return 0; } int run_ggu(struct net_config *net_conf, struct gk_config *gk_conf, struct ggu_config *ggu_conf) { int ret; uint16_t back_inc; if (ggu_conf == NULL || net_conf == NULL || gk_conf == NULL) { ret = -1; goto out; } if (!net_conf->back_iface_enabled) { G_LOG(ERR, "Back interface is required\n"); ret = -1; goto out; } log_ratelimit_state_init(ggu_conf->lcore_id, ggu_conf->log_ratelimit_interval_ms, ggu_conf->log_ratelimit_burst, ggu_conf->log_level, "GGU"); back_inc = ggu_conf->max_pkt_burst; net_conf->back.total_pkt_burst += back_inc; ggu_conf->total_pkt_burst = back_inc; ret = net_launch_at_stage1(net_conf, 0, 0, 1, 0, ggu_stage1, ggu_conf); if (ret < 0) goto burst; ret = launch_at_stage2(ggu_stage2, ggu_conf); if (ret < 0) goto stage1; ret = launch_at_stage3("ggu", ggu_proc, ggu_conf, ggu_conf->lcore_id); if (ret < 0) goto stage2; ggu_conf->net = net_conf; gk_conf_hold(gk_conf); ggu_conf->gk = gk_conf; /* * Convert port numbers in CPU order to network order * to avoid recomputation for each packet. 
*/ ggu_conf->ggu_src_port = rte_cpu_to_be_16(ggu_conf->ggu_src_port); ggu_conf->ggu_dst_port = rte_cpu_to_be_16(ggu_conf->ggu_dst_port); /* * When mailbox is used for processing packets submitted by GK, * it needs to make sure the packet burst size in the mailbox * should be at least equal to the packet burst size in GK. */ ggu_conf->mailbox_max_pkt_burst = gk_conf->back_max_pkt_burst; ret = init_mailbox("ggu_mb", ggu_conf->mailbox_max_entries_exp, sizeof(struct ggu_request) + ggu_conf->mailbox_max_pkt_burst * sizeof(struct rte_mbuf *), ggu_conf->mailbox_mem_cache_size, ggu_conf->lcore_id, &ggu_conf->mailbox); if (ret < 0) goto put_gk; goto out; put_gk: ggu_conf->gk = NULL; gk_conf_put(gk_conf); /* stage3: */ pop_n_at_stage3(1); stage2: pop_n_at_stage2(1); stage1: pop_n_at_stage1(1); burst: net_conf->back.total_pkt_burst -= back_inc; out: return ret; } /* * There should be only one ggu_config instance. * Return an error if trying to allocate the second instance. */ struct ggu_config * alloc_ggu_conf(unsigned int lcore) { static rte_atomic16_t num_ggu_conf_alloc = RTE_ATOMIC16_INIT(0); if (rte_atomic16_test_and_set(&num_ggu_conf_alloc) == 1) { ggu_conf = rte_calloc_socket("ggu_config", 1, sizeof(struct ggu_config), 0, rte_lcore_to_socket_id(lcore)); if (ggu_conf == NULL) { rte_atomic16_clear(&num_ggu_conf_alloc); G_LOG(ERR, "Failed to allocate the first instance of struct ggu_config\n"); return NULL; } ggu_conf->lcore_id = lcore; return ggu_conf; } else { G_LOG(ERR, "Trying to allocate the second instance of struct ggu_config\n"); return NULL; } } int cleanup_ggu(struct ggu_config *ggu_conf) { destroy_mempool(ggu_conf->mp); destroy_mailbox(&ggu_conf->mailbox); ggu_conf->net = NULL; gk_conf_put(ggu_conf->gk); ggu_conf->gk = NULL; rte_free(ggu_conf); return 0; } ```
/content/code_sandbox/ggu/main.c
c
2016-07-06T18:53:45
2024-08-15T00:24:19
gatekeeper
AltraMayor/gatekeeper
1,280
6,883
```rust // Automatically detect tsan in a way that's compatible with both stable (which // doesn't support sanitizers) and nightly (which does). Works because build // scripts gets `cfg` info, even if the cfg is unstable. fn main() { println!("cargo:rerun-if-changed=build.rs"); println!("cargo:rustc-check-cfg=cfg(tsan_enabled)"); let santizer_list = std::env::var("CARGO_CFG_SANITIZE").unwrap_or_default(); if santizer_list.contains("thread") { println!("cargo:rustc-cfg=tsan_enabled"); } } ```
/content/code_sandbox/core/build.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
133
```toml status = [ "build_tier_one", "build_other_platforms", ] ```
/content/code_sandbox/bors.toml
toml
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
20
```rust use parking_lot::RwLock; struct Lock(RwLock<i32>); #[test] fn issue_392() { let lock = Lock(RwLock::new(0)); let mut rl = lock.0.upgradable_read(); rl.with_upgraded(|_| { println!("lock upgrade"); }); rl.with_upgraded(|_| { println!("lock upgrade"); }); } ```
/content/code_sandbox/tests/issue_392.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
85
```rust use parking_lot::RwLock; use std::thread; struct Bar(RwLock<()>); impl Drop for Bar { fn drop(&mut self) { let _n = self.0.write(); } } thread_local! { static B: Bar = Bar(RwLock::new(())); } #[test] fn main() { thread::spawn(|| { B.with(|_| ()); let a = RwLock::new(()); let _a = a.read(); }) .join() .unwrap(); } ```
/content/code_sandbox/tests/issue_203.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
114
```rust // // path_to_url or the MIT license <LICENSE-MIT or // path_to_url at your option. This file may not be // copied, modified, or distributed except according to those terms. // Option::unchecked_unwrap pub trait UncheckedOptionExt<T> { unsafe fn unchecked_unwrap(self) -> T; } impl<T> UncheckedOptionExt<T> for Option<T> { #[inline] unsafe fn unchecked_unwrap(self) -> T { match self { Some(x) => x, None => unreachable(), } } } // hint::unreachable_unchecked() in release mode #[inline] unsafe fn unreachable() -> ! { if cfg!(debug_assertions) { unreachable!(); } else { core::hint::unreachable_unchecked() } } ```
/content/code_sandbox/core/src/util.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
173
```rust // // path_to_url or the MIT license <LICENSE-MIT or // path_to_url at your option. This file may not be // copied, modified, or distributed except according to those terms. use crate::thread_parker; use core::hint::spin_loop; // Wastes some CPU time for the given number of iterations, // using a hint to indicate to the CPU that we are spinning. #[inline] fn cpu_relax(iterations: u32) { for _ in 0..iterations { spin_loop() } } /// A counter used to perform exponential backoff in spin loops. #[derive(Default)] pub struct SpinWait { counter: u32, } impl SpinWait { /// Creates a new `SpinWait`. #[inline] pub fn new() -> Self { Self::default() } /// Resets a `SpinWait` to its initial state. #[inline] pub fn reset(&mut self) { self.counter = 0; } /// Spins until the sleep threshold has been reached. /// /// This function returns whether the sleep threshold has been reached, at /// which point further spinning has diminishing returns and the thread /// should be parked instead. /// /// The spin strategy will initially use a CPU-bound loop but will fall back /// to yielding the CPU to the OS after a few iterations. #[inline] pub fn spin(&mut self) -> bool { if self.counter >= 10 { return false; } self.counter += 1; if self.counter <= 3 { cpu_relax(1 << self.counter); } else { thread_parker::thread_yield(); } true } /// Spins without yielding the thread to the OS. /// /// Instead, the backoff is simply capped at a maximum value. This can be /// used to improve throughput in `compare_exchange` loops that have high /// contention. #[inline] pub fn spin_no_yield(&mut self) { self.counter += 1; if self.counter > 10 { self.counter = 10; } cpu_relax(1 << self.counter); } } ```
/content/code_sandbox/core/src/spinwait.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
471
```rust // // path_to_url or the MIT license <LICENSE-MIT or // path_to_url at your option. This file may not be // copied, modified, or distributed except according to those terms. //! This library exposes a low-level API for creating your own efficient //! synchronization primitives. //! //! # The parking lot //! //! To keep synchronization primitives small, all thread queuing and suspending //! functionality is offloaded to the *parking lot*. The idea behind this is based //! on the Webkit [`WTF::ParkingLot`](path_to_url //! class, which essentially consists of a hash table mapping of lock addresses //! to queues of parked (sleeping) threads. The Webkit parking lot was itself //! inspired by Linux [futexes](path_to_url //! but it is more powerful since it allows invoking callbacks while holding a //! queue lock. //! //! There are two main operations that can be performed on the parking lot: //! //! - *Parking* refers to suspending the thread while simultaneously enqueuing it //! on a queue keyed by some address. //! - *Unparking* refers to dequeuing a thread from a queue keyed by some address //! and resuming it. //! //! See the documentation of the individual functions for more details. //! //! # Building custom synchronization primitives //! //! Building custom synchronization primitives is very simple since the parking //! lot takes care of all the hard parts for you. A simple example for a //! custom primitive would be to integrate a `Mutex` inside another data type. //! Since a mutex only requires 2 bits, it can share space with other data. //! For example, one could create an `ArcMutex` type that combines the atomic //! reference count and the two mutex bits in the same atomic word. 
#![warn(missing_docs)] #![warn(rust_2018_idioms)] #![cfg_attr( all(target_env = "sgx", target_vendor = "fortanix"), feature(sgx_platform) )] #![cfg_attr( all( feature = "nightly", target_family = "wasm", target_feature = "atomics" ), feature(stdarch_wasm_atomic_wait) )] mod parking_lot; mod spinwait; mod thread_parker; mod util; mod word_lock; pub use self::parking_lot::deadlock; pub use self::parking_lot::{park, unpark_all, unpark_filter, unpark_one, unpark_requeue}; pub use self::parking_lot::{ FilterOp, ParkResult, ParkToken, RequeueOp, UnparkResult, UnparkToken, }; pub use self::parking_lot::{DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN}; pub use self::spinwait::SpinWait; ```
/content/code_sandbox/core/src/lib.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
579
```rust // // path_to_url or the MIT license <LICENSE-MIT or // path_to_url at your option. This file may not be // copied, modified, or distributed except according to those terms. use crate::spinwait::SpinWait; use crate::thread_parker::{ThreadParker, ThreadParkerT, UnparkHandleT}; use core::{ cell::Cell, mem, ptr, sync::atomic::{fence, AtomicUsize, Ordering}, }; struct ThreadData { parker: ThreadParker, // Linked list of threads in the queue. The queue is split into two parts: // the processed part and the unprocessed part. When new nodes are added to // the list, they only have the next pointer set, and queue_tail is null. // // Nodes are processed with the queue lock held, which consists of setting // the prev pointer for each node and setting the queue_tail pointer on the // first processed node of the list. // // This setup allows nodes to be added to the queue without a lock, while // still allowing O(1) removal of nodes from the processed part of the list. // The only cost is the O(n) processing, but this only needs to be done // once for each node, and therefore isn't too expensive. queue_tail: Cell<*const ThreadData>, prev: Cell<*const ThreadData>, next: Cell<*const ThreadData>, } impl ThreadData { #[inline] fn new() -> ThreadData { assert!(mem::align_of::<ThreadData>() > !QUEUE_MASK); ThreadData { parker: ThreadParker::new(), queue_tail: Cell::new(ptr::null()), prev: Cell::new(ptr::null()), next: Cell::new(ptr::null()), } } } // Invokes the given closure with a reference to the current thread `ThreadData`. #[inline] fn with_thread_data<T>(f: impl FnOnce(&ThreadData) -> T) -> T { let mut thread_data_ptr = ptr::null(); // If ThreadData is expensive to construct, then we want to use a cached // version in thread-local storage if possible. 
if !ThreadParker::IS_CHEAP_TO_CONSTRUCT { thread_local!(static THREAD_DATA: ThreadData = ThreadData::new()); if let Ok(tls_thread_data) = THREAD_DATA.try_with(|x| x as *const ThreadData) { thread_data_ptr = tls_thread_data; } } // Otherwise just create a ThreadData on the stack let mut thread_data_storage = None; if thread_data_ptr.is_null() { thread_data_ptr = thread_data_storage.get_or_insert_with(ThreadData::new); } f(unsafe { &*thread_data_ptr }) } const LOCKED_BIT: usize = 1; const QUEUE_LOCKED_BIT: usize = 2; const QUEUE_MASK: usize = !3; // Word-sized lock that is used to implement the parking_lot API. Since this // can't use parking_lot, it instead manages its own queue of waiting threads. pub struct WordLock { state: AtomicUsize, } impl WordLock { /// Returns a new, unlocked, `WordLock`. pub const fn new() -> Self { WordLock { state: AtomicUsize::new(0), } } #[inline] pub fn lock(&self) { if self .state .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed) .is_ok() { return; } self.lock_slow(); } /// Must not be called on an already unlocked `WordLock`! 
#[inline]
pub unsafe fn unlock(&self) {
    // Fast path: drop LOCKED_BIT. `fetch_sub` returns the state *before*
    // the subtraction, so `state` still shows the queue fields.
    let state = self.state.fetch_sub(LOCKED_BIT, Ordering::Release);
    // Nothing to wake if the queue is empty, and if the queue lock is held
    // another thread is already taking care of waking someone up.
    if state.is_queue_locked() || state.queue_head().is_null() {
        return;
    }
    self.unlock_slow();
}

// Slow path for `lock`: spin briefly, then enqueue ourselves and park until
// an unlocker wakes us. The state word packs the lock bit, the queue-lock
// bit and the queue-head pointer (bit masks are defined earlier in this file).
#[cold]
fn lock_slow(&self) {
    let mut spinwait = SpinWait::new();
    let mut state = self.state.load(Ordering::Relaxed);
    loop {
        // Grab the lock if it isn't locked, even if there is a queue on it
        if !state.is_locked() {
            match self.state.compare_exchange_weak(
                state,
                state | LOCKED_BIT,
                Ordering::Acquire,
                Ordering::Relaxed,
            ) {
                Ok(_) => return,
                Err(x) => state = x,
            }
            continue;
        }

        // If there is no queue, try spinning a few times
        if state.queue_head().is_null() && spinwait.spin() {
            state = self.state.load(Ordering::Relaxed);
            continue;
        }

        // Get our thread data and prepare it for parking
        state = with_thread_data(|thread_data| {
            // The pthread implementation is still unsafe, so we need to surround `prepare_park`
            // with `unsafe {}`.
            #[allow(unused_unsafe)]
            unsafe {
                thread_data.parker.prepare_park();
            }

            // Add our thread to the front of the queue
            let queue_head = state.queue_head();
            if queue_head.is_null() {
                // First waiter: we are both head and tail of the queue.
                thread_data.queue_tail.set(thread_data);
                thread_data.prev.set(ptr::null());
            } else {
                // Push onto the front; our queue_tail is left null so the
                // next unlocker knows prev pointers need filling in.
                thread_data.queue_tail.set(ptr::null());
                thread_data.prev.set(ptr::null());
                thread_data.next.set(queue_head);
            }
            if let Err(x) = self.state.compare_exchange_weak(
                state,
                state.with_queue_head(thread_data),
                Ordering::AcqRel,
                Ordering::Relaxed,
            ) {
                return x;
            }

            // Sleep until we are woken up by an unlock
            // Ignoring unused unsafe, since it's only a few platforms where this is unsafe.
            #[allow(unused_unsafe)]
            unsafe {
                thread_data.parker.park();
            }

            // Loop back and try locking again
            spinwait.reset();
            self.state.load(Ordering::Relaxed)
        });
    }
}

// Slow path for `unlock`: acquire the queue lock, repair the queue's prev
// pointers, pop the tail thread and wake it.
#[cold]
fn unlock_slow(&self) {
    let mut state = self.state.load(Ordering::Relaxed);
    loop {
        // We just unlocked the WordLock. Just check if there is a thread
        // to wake up. If the queue is locked then another thread is already
        // taking care of waking up a thread.
        if state.is_queue_locked() || state.queue_head().is_null() {
            return;
        }

        // Try to grab the queue lock
        match self.state.compare_exchange_weak(
            state,
            state | QUEUE_LOCKED_BIT,
            Ordering::Acquire,
            Ordering::Relaxed,
        ) {
            Ok(_) => break,
            Err(x) => state = x,
        }
    }

    // Now we have the queue lock and the queue is non-empty
    'outer: loop {
        // First, we need to fill in the prev pointers for any newly added
        // threads. We do this until we reach a node that we previously
        // processed, which has a non-null queue_tail pointer.
        let queue_head = state.queue_head();
        let mut queue_tail;
        let mut current = queue_head;
        loop {
            queue_tail = unsafe { (*current).queue_tail.get() };
            if !queue_tail.is_null() {
                break;
            }
            unsafe {
                let next = (*current).next.get();
                (*next).prev.set(current);
                current = next;
            }
        }

        // Set queue_tail on the queue head to indicate that the whole list
        // has prev pointers set correctly.
        unsafe {
            (*queue_head).queue_tail.set(queue_tail);
        }

        // If the WordLock is locked, then there is no point waking up a
        // thread now. Instead we let the next unlocker take care of waking
        // up a thread.
        if state.is_locked() {
            match self.state.compare_exchange_weak(
                state,
                state & !QUEUE_LOCKED_BIT,
                Ordering::Release,
                Ordering::Relaxed,
            ) {
                Ok(_) => return,
                Err(x) => state = x,
            }

            // Need an acquire fence before reading the new queue
            fence_acquire(&self.state);
            continue;
        }

        // Remove the last thread from the queue and unlock the queue
        let new_tail = unsafe { (*queue_tail).prev.get() };
        if new_tail.is_null() {
            // Queue is about to become empty: clear the queue head and the
            // queue-lock bit in one CAS (only LOCKED_BIT may remain set).
            loop {
                match self.state.compare_exchange_weak(
                    state,
                    state & LOCKED_BIT,
                    Ordering::Release,
                    Ordering::Relaxed,
                ) {
                    Ok(_) => break,
                    Err(x) => state = x,
                }

                // If the compare_exchange failed because a new thread was
                // added to the queue then we need to re-scan the queue to
                // find the previous element.
                if state.queue_head().is_null() {
                    continue;
                } else {
                    // Need an acquire fence before reading the new queue
                    fence_acquire(&self.state);
                    continue 'outer;
                }
            }
        } else {
            unsafe {
                (*queue_head).queue_tail.set(new_tail);
            }
            self.state.fetch_and(!QUEUE_LOCKED_BIT, Ordering::Release);
        }

        // Finally, wake up the thread we removed from the queue. Note that
        // we don't need to worry about any races here since the thread is
        // guaranteed to be sleeping right now and we are the only one who
        // can wake it up.
        unsafe {
            (*queue_tail).parker.unpark_lock().unpark();
        }
        break;
    }
}
}

// Thread-Sanitizer only has partial fence support, so when running under it, we
// try and avoid false positives by using a discarded acquire load instead.
#[inline]
fn fence_acquire(a: &AtomicUsize) {
    if cfg!(tsan_enabled) {
        let _ = a.load(Ordering::Acquire);
    } else {
        fence(Ordering::Acquire);
    }
}

// Accessors for the fields packed into the lock's state word.
trait LockState {
    fn is_locked(self) -> bool;
    fn is_queue_locked(self) -> bool;
    fn queue_head(self) -> *const ThreadData;
    fn with_queue_head(self, thread_data: *const ThreadData) -> Self;
}

impl LockState for usize {
    #[inline]
    fn is_locked(self) -> bool {
        self & LOCKED_BIT != 0
    }

    #[inline]
    fn is_queue_locked(self) -> bool {
        self & QUEUE_LOCKED_BIT != 0
    }

    #[inline]
    fn queue_head(self) -> *const ThreadData {
        (self & QUEUE_MASK) as *const ThreadData
    }

    #[inline]
    fn with_queue_head(self, thread_data: *const ThreadData) -> Self {
        (self & !QUEUE_MASK) | thread_data as *const _ as usize
    }
}
```
/content/code_sandbox/core/src/word_lock.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
2,341
```rust
//
// path_to_url or the MIT license <LICENSE-MIT or
// path_to_url at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use core::sync::atomic::{AtomicBool, Ordering};
use std::time::Instant;
use std::{
    io,
    os::fortanix_sgx::{
        thread::current as current_tcs,
        usercalls::{
            self,
            raw::{Tcs, EV_UNPARK, WAIT_INDEFINITE},
        },
    },
    thread,
};

// Helper type for putting a thread to sleep until some other thread wakes it up
pub struct ThreadParker {
    // Set by prepare_park, cleared by unpark_lock; park spins on this flag.
    parked: AtomicBool,
    // TCS of the owning thread, captured at construction; unpark events are
    // delivered to this TCS.
    tcs: Tcs,
}

impl super::ThreadParkerT for ThreadParker {
    type UnparkHandle = UnparkHandle;

    const IS_CHEAP_TO_CONSTRUCT: bool = true;

    #[inline]
    fn new() -> ThreadParker {
        ThreadParker {
            parked: AtomicBool::new(false),
            tcs: current_tcs(),
        }
    }

    #[inline]
    unsafe fn prepare_park(&self) {
        self.parked.store(true, Ordering::Relaxed);
    }

    #[inline]
    unsafe fn timed_out(&self) -> bool {
        self.parked.load(Ordering::Relaxed)
    }

    #[inline]
    unsafe fn park(&self) {
        // Wait for an EV_UNPARK usercall event; re-check the flag each wakeup
        // since wait can complete for other reasons.
        while self.parked.load(Ordering::Acquire) {
            let result = usercalls::wait(EV_UNPARK, WAIT_INDEFINITE);
            debug_assert_eq!(result.expect("wait returned error") & EV_UNPARK, EV_UNPARK);
        }
    }

    #[inline]
    unsafe fn park_until(&self, _timeout: Instant) -> bool {
        // FIXME: path_to_url
        panic!("timeout not supported in SGX");
    }

    #[inline]
    unsafe fn unpark_lock(&self) -> UnparkHandle {
        // We don't need to lock anything, just clear the state
        self.parked.store(false, Ordering::Release);
        UnparkHandle(self.tcs)
    }
}

// Handle carrying the target thread's TCS so the wake-up can be sent after
// the queue lock is released.
pub struct UnparkHandle(Tcs);

impl super::UnparkHandleT for UnparkHandle {
    #[inline]
    unsafe fn unpark(self) {
        let result = usercalls::send(EV_UNPARK, Some(self.0));
        if cfg!(debug_assertions) {
            if let Err(error) = result {
                // `InvalidInput` may be returned if the thread we send to has
                // already been unparked and exited.
                if error.kind() != io::ErrorKind::InvalidInput {
                    panic!("send returned an unexpected error: {:?}", error);
                }
            }
        }
    }
}

#[inline]
pub fn thread_yield() {
    thread::yield_now();
}
```
/content/code_sandbox/core/src/thread_parker/sgx.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
599
```rust // // path_to_url or the MIT license <LICENSE-MIT or // path_to_url at your option. This file may not be // copied, modified, or distributed except according to those terms. //! The wasm platform can't park when atomic support is not available. //! So this ThreadParker just panics on any attempt to park. use std::thread; use std::time::Instant; pub struct ThreadParker(()); impl super::ThreadParkerT for ThreadParker { type UnparkHandle = UnparkHandle; const IS_CHEAP_TO_CONSTRUCT: bool = true; fn new() -> ThreadParker { ThreadParker(()) } unsafe fn prepare_park(&self) { panic!("Parking not supported on this platform"); } unsafe fn timed_out(&self) -> bool { panic!("Parking not supported on this platform"); } unsafe fn park(&self) { panic!("Parking not supported on this platform"); } unsafe fn park_until(&self, _timeout: Instant) -> bool { panic!("Parking not supported on this platform"); } unsafe fn unpark_lock(&self) -> UnparkHandle { panic!("Parking not supported on this platform"); } } pub struct UnparkHandle(()); impl super::UnparkHandleT for UnparkHandle { unsafe fn unpark(self) {} } pub fn thread_yield() { thread::yield_now(); } ```
/content/code_sandbox/core/src/thread_parker/wasm.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
304
```rust
//
// path_to_url or the MIT license <LICENSE-MIT or
// path_to_url at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use core::{
    ptr,
    sync::atomic::{AtomicI32, Ordering},
};
use std::thread;
use std::time::Instant;
use syscall::{
    call::futex,
    data::TimeSpec,
    error::{Error, EAGAIN, EFAULT, EINTR, ETIMEDOUT},
    flag::{FUTEX_WAIT, FUTEX_WAKE},
};

// Futex word values: the parked thread waits while the word is PARKED.
const UNPARKED: i32 = 0;
const PARKED: i32 = 1;

// Helper type for putting a thread to sleep until some other thread wakes it up
pub struct ThreadParker {
    futex: AtomicI32,
}

impl super::ThreadParkerT for ThreadParker {
    type UnparkHandle = UnparkHandle;

    const IS_CHEAP_TO_CONSTRUCT: bool = true;

    #[inline]
    fn new() -> ThreadParker {
        ThreadParker {
            futex: AtomicI32::new(UNPARKED),
        }
    }

    #[inline]
    unsafe fn prepare_park(&self) {
        self.futex.store(PARKED, Ordering::Relaxed);
    }

    #[inline]
    unsafe fn timed_out(&self) -> bool {
        self.futex.load(Ordering::Relaxed) != UNPARKED
    }

    #[inline]
    unsafe fn park(&self) {
        // Re-check after every wakeup: FUTEX_WAIT can return spuriously
        // (EINTR/EAGAIN), so only the word's value decides when to stop.
        while self.futex.load(Ordering::Acquire) != UNPARKED {
            self.futex_wait(None);
        }
    }

    #[inline]
    unsafe fn park_until(&self, timeout: Instant) -> bool {
        while self.futex.load(Ordering::Acquire) != UNPARKED {
            let now = Instant::now();
            if timeout <= now {
                return false;
            }
            let diff = timeout - now;
            if diff.as_secs() > i64::max_value() as u64 {
                // Timeout overflowed, just sleep indefinitely
                self.park();
                return true;
            }
            let ts = TimeSpec {
                tv_sec: diff.as_secs() as i64,
                tv_nsec: diff.subsec_nanos() as i32,
            };
            self.futex_wait(Some(ts));
        }
        true
    }

    #[inline]
    unsafe fn unpark_lock(&self) -> UnparkHandle {
        // We don't need to lock anything, just clear the state
        self.futex.store(UNPARKED, Ordering::Release);
        UnparkHandle { futex: self.ptr() }
    }
}

impl ThreadParker {
    // Blocks via FUTEX_WAIT while the word still equals PARKED; `ts` is an
    // optional relative timeout.
    #[inline]
    fn futex_wait(&self, ts: Option<TimeSpec>) {
        let ts_ptr = ts
            .as_ref()
            .map(|ts_ref| ts_ref as *const _)
            .unwrap_or(ptr::null());
        let r = unsafe {
            futex(
                self.ptr(),
                FUTEX_WAIT,
                PARKED,
                ts_ptr as usize,
                ptr::null_mut(),
            )
        };
        match r {
            Ok(r) => debug_assert_eq!(r, 0),
            Err(Error { errno }) => {
                // EINTR: signal; EAGAIN: word already changed; ETIMEDOUT:
                // relative timeout elapsed.
                debug_assert!(errno == EINTR || errno == EAGAIN || errno == ETIMEDOUT);
            }
        }
    }

    #[inline]
    fn ptr(&self) -> *mut i32 {
        &self.futex as *const AtomicI32 as *mut i32
    }
}

// Raw pointer to the futex word; valid to pass to FUTEX_WAKE even if the
// owning thread has already exited (the kernel then reports EFAULT).
pub struct UnparkHandle {
    futex: *mut i32,
}

impl super::UnparkHandleT for UnparkHandle {
    #[inline]
    unsafe fn unpark(self) {
        // The thread data may have been freed at this point, but it doesn't
        // matter since the syscall will just return EFAULT in that case.
        let r = futex(self.futex, FUTEX_WAKE, PARKED, 0, ptr::null_mut());
        match r {
            Ok(num_woken) => debug_assert!(num_woken == 0 || num_woken == 1),
            Err(Error { errno }) => debug_assert_eq!(errno, EFAULT),
        }
    }
}

#[inline]
pub fn thread_yield() {
    thread::yield_now();
}
```
/content/code_sandbox/core/src/thread_parker/redox.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
921
```rust
use cfg_if::cfg_if;
use std::time::Instant;

/// Trait for the platform thread parker implementation.
///
/// All unsafe methods are unsafe because the Unix thread parker is based on
/// pthread mutexes and condvars. Those primitives must not be moved and used
/// from any other memory address than the one they were located at when they
/// were initialized. As such, it's UB to call any unsafe method on
/// `ThreadParkerT` if the implementing instance has moved since the last
/// call to any of the unsafe methods.
pub trait ThreadParkerT {
    /// Platform-specific handle returned by `unpark_lock`.
    type UnparkHandle: UnparkHandleT;

    /// Whether constructing a parker is cheap enough to do eagerly.
    const IS_CHEAP_TO_CONSTRUCT: bool;

    fn new() -> Self;

    /// Prepares the parker. This should be called before adding it to the queue.
    unsafe fn prepare_park(&self);

    /// Checks if the park timed out. This should be called while holding the
    /// queue lock after `park_until` has returned false.
    unsafe fn timed_out(&self) -> bool;

    /// Parks the thread until it is unparked. This should be called after it has
    /// been added to the queue, after unlocking the queue.
    unsafe fn park(&self);

    /// Parks the thread until it is unparked or the timeout is reached. This
    /// should be called after it has been added to the queue, after unlocking
    /// the queue. Returns true if we were unparked and false if we timed out.
    unsafe fn park_until(&self, timeout: Instant) -> bool;

    /// Locks the parker to prevent the target thread from exiting. This is
    /// necessary to ensure that thread-local `ThreadData` objects remain valid.
    /// This should be called while holding the queue lock.
    unsafe fn unpark_lock(&self) -> Self::UnparkHandle;
}

/// Handle for a thread that is about to be unparked. We need to mark the thread
/// as unparked while holding the queue lock, but we delay the actual unparking
/// until after the queue lock is released.
pub trait UnparkHandleT {
    /// Wakes up the parked thread. This should be called after the queue lock is
    /// released to avoid blocking the queue for too long.
    ///
    /// This method is unsafe for the same reason as the unsafe methods in
    /// `ThreadParkerT`.
    unsafe fn unpark(self);
}

// Select the platform backend; the first matching branch wins, with the
// portable spin-based implementation as the final fallback.
cfg_if! {
    if #[cfg(any(target_os = "linux", target_os = "android"))] {
        #[path = "linux.rs"]
        mod imp;
    } else if #[cfg(unix)] {
        #[path = "unix.rs"]
        mod imp;
    } else if #[cfg(windows)] {
        #[path = "windows/mod.rs"]
        mod imp;
    } else if #[cfg(target_os = "redox")] {
        #[path = "redox.rs"]
        mod imp;
    } else if #[cfg(all(target_env = "sgx", target_vendor = "fortanix"))] {
        #[path = "sgx.rs"]
        mod imp;
    } else if #[cfg(all(
        feature = "nightly",
        target_family = "wasm",
        target_feature = "atomics"
    ))] {
        #[path = "wasm_atomic.rs"]
        mod imp;
    } else if #[cfg(target_family = "wasm")] {
        #[path = "wasm.rs"]
        mod imp;
    } else {
        #[path = "generic.rs"]
        mod imp;
    }
}

pub use self::imp::{thread_yield, ThreadParker};
```
/content/code_sandbox/core/src/thread_parker/mod.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
766
```rust
//
// path_to_url or the MIT license <LICENSE-MIT or
// path_to_url at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use core::{
    ptr,
    sync::atomic::{AtomicI32, Ordering},
};
use libc;
use std::thread;
use std::time::Instant;

// x32 Linux uses a non-standard type for tv_nsec in timespec.
// See path_to_url
#[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))]
#[allow(non_camel_case_types)]
type tv_nsec_t = i64;
#[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))]
#[allow(non_camel_case_types)]
type tv_nsec_t = libc::c_long;

// Reads the thread-local errno value via the platform-specific accessor.
fn errno() -> libc::c_int {
    #[cfg(target_os = "linux")]
    unsafe {
        *libc::__errno_location()
    }
    #[cfg(target_os = "android")]
    unsafe {
        *libc::__errno()
    }
}

// Helper type for putting a thread to sleep until some other thread wakes it up
pub struct ThreadParker {
    // Futex word: 1 while parked, 0 once unparked.
    futex: AtomicI32,
}

impl super::ThreadParkerT for ThreadParker {
    type UnparkHandle = UnparkHandle;

    const IS_CHEAP_TO_CONSTRUCT: bool = true;

    #[inline]
    fn new() -> ThreadParker {
        ThreadParker {
            futex: AtomicI32::new(0),
        }
    }

    #[inline]
    unsafe fn prepare_park(&self) {
        self.futex.store(1, Ordering::Relaxed);
    }

    #[inline]
    unsafe fn timed_out(&self) -> bool {
        self.futex.load(Ordering::Relaxed) != 0
    }

    #[inline]
    unsafe fn park(&self) {
        // Re-check after every wakeup: FUTEX_WAIT may return on EINTR/EAGAIN
        // without the word having been cleared.
        while self.futex.load(Ordering::Acquire) != 0 {
            self.futex_wait(None);
        }
    }

    #[inline]
    unsafe fn park_until(&self, timeout: Instant) -> bool {
        while self.futex.load(Ordering::Acquire) != 0 {
            let now = Instant::now();
            if timeout <= now {
                return false;
            }
            let diff = timeout - now;
            if diff.as_secs() as libc::time_t as u64 != diff.as_secs() {
                // Timeout overflowed, just sleep indefinitely
                self.park();
                return true;
            }
            // SAFETY: libc::timespec is zero initializable.
            let mut ts: libc::timespec = std::mem::zeroed();
            ts.tv_sec = diff.as_secs() as libc::time_t;
            ts.tv_nsec = diff.subsec_nanos() as tv_nsec_t;
            self.futex_wait(Some(ts));
        }
        true
    }

    // Locks the parker to prevent the target thread from exiting. This is
    // necessary to ensure that thread-local ThreadData objects remain valid.
    // This should be called while holding the queue lock.
    #[inline]
    unsafe fn unpark_lock(&self) -> UnparkHandle {
        // We don't need to lock anything, just clear the state
        self.futex.store(0, Ordering::Release);
        UnparkHandle { futex: &self.futex }
    }
}

impl ThreadParker {
    // Blocks in FUTEX_WAIT while the futex word still equals 1; `ts` is an
    // optional relative timeout.
    #[inline]
    fn futex_wait(&self, ts: Option<libc::timespec>) {
        let ts_ptr = ts
            .as_ref()
            .map(|ts_ref| ts_ref as *const _)
            .unwrap_or(ptr::null());
        let r = unsafe {
            libc::syscall(
                libc::SYS_futex,
                &self.futex,
                libc::FUTEX_WAIT | libc::FUTEX_PRIVATE_FLAG,
                1,
                ts_ptr,
            )
        };
        debug_assert!(r == 0 || r == -1);
        if r == -1 {
            debug_assert!(
                errno() == libc::EINTR
                    || errno() == libc::EAGAIN
                    || (ts.is_some() && errno() == libc::ETIMEDOUT)
            );
        }
    }
}

// Raw pointer to the futex word; safe to pass to FUTEX_WAKE even after the
// owning thread has exited (the kernel reports EFAULT in that case).
pub struct UnparkHandle {
    futex: *const AtomicI32,
}

impl super::UnparkHandleT for UnparkHandle {
    #[inline]
    unsafe fn unpark(self) {
        // The thread data may have been freed at this point, but it doesn't
        // matter since the syscall will just return EFAULT in that case.
        let r = libc::syscall(
            libc::SYS_futex,
            self.futex,
            libc::FUTEX_WAKE | libc::FUTEX_PRIVATE_FLAG,
            1,
        );
        debug_assert!(r == 0 || r == 1 || r == -1);
        if r == -1 {
            debug_assert_eq!(errno(), libc::EFAULT);
        }
    }
}

#[inline]
pub fn thread_yield() {
    thread::yield_now();
}
```
/content/code_sandbox/core/src/thread_parker/linux.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
1,067
```rust // // path_to_url or the MIT license <LICENSE-MIT or // path_to_url at your option. This file may not be // copied, modified, or distributed except according to those terms. use core::{ arch::wasm32, sync::atomic::{AtomicI32, Ordering}, }; use std::time::{Duration, Instant}; use std::{convert::TryFrom, thread}; // Helper type for putting a thread to sleep until some other thread wakes it up pub struct ThreadParker { parked: AtomicI32, } const UNPARKED: i32 = 0; const PARKED: i32 = 1; impl super::ThreadParkerT for ThreadParker { type UnparkHandle = UnparkHandle; const IS_CHEAP_TO_CONSTRUCT: bool = true; #[inline] fn new() -> ThreadParker { ThreadParker { parked: AtomicI32::new(UNPARKED), } } #[inline] unsafe fn prepare_park(&self) { self.parked.store(PARKED, Ordering::Relaxed); } #[inline] unsafe fn timed_out(&self) -> bool { self.parked.load(Ordering::Relaxed) == PARKED } #[inline] unsafe fn park(&self) { while self.parked.load(Ordering::Acquire) == PARKED { let r = wasm32::memory_atomic_wait32(self.ptr(), PARKED, -1); // we should have either woken up (0) or got a not-equal due to a // race (1). 
We should never time out (2) debug_assert!(r == 0 || r == 1); } } #[inline] unsafe fn park_until(&self, timeout: Instant) -> bool { while self.parked.load(Ordering::Acquire) == PARKED { if let Some(left) = timeout.checked_duration_since(Instant::now()) { let nanos_left = i64::try_from(left.as_nanos()).unwrap_or(i64::max_value()); let r = wasm32::memory_atomic_wait32(self.ptr(), PARKED, nanos_left); debug_assert!(r == 0 || r == 1 || r == 2); } else { return false; } } true } #[inline] unsafe fn unpark_lock(&self) -> UnparkHandle { // We don't need to lock anything, just clear the state self.parked.store(UNPARKED, Ordering::Release); UnparkHandle(self.ptr()) } } impl ThreadParker { #[inline] fn ptr(&self) -> *mut i32 { &self.parked as *const AtomicI32 as *mut i32 } } pub struct UnparkHandle(*mut i32); impl super::UnparkHandleT for UnparkHandle { #[inline] unsafe fn unpark(self) { let num_notified = wasm32::memory_atomic_notify(self.0 as *mut i32, 1); debug_assert!(num_notified == 0 || num_notified == 1); } } #[inline] pub fn thread_yield() { thread::yield_now(); } ```
/content/code_sandbox/core/src/thread_parker/wasm_atomic.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
700
```rust
//
// path_to_url or the MIT license <LICENSE-MIT or
// path_to_url at your option. This file may not be
// copied, modified, or distributed except according to those terms.

#[cfg(target_vendor = "apple")]
use core::ptr;
use core::{
    cell::{Cell, UnsafeCell},
    mem::MaybeUninit,
};
use libc;
use std::time::Instant;
use std::{thread, time::Duration};

// x32 Linux uses a non-standard type for tv_nsec in timespec.
// See path_to_url
#[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))]
#[allow(non_camel_case_types)]
type tv_nsec_t = i64;
#[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))]
#[allow(non_camel_case_types)]
type tv_nsec_t = libc::c_long;

// Helper type for putting a thread to sleep until some other thread wakes it up
pub struct ThreadParker {
    // Protected by `mutex`; true while the thread should stay parked.
    should_park: Cell<bool>,
    mutex: UnsafeCell<libc::pthread_mutex_t>,
    condvar: UnsafeCell<libc::pthread_cond_t>,
    // Whether `init` has run (condvar clock configured); done lazily in
    // prepare_park because construction must stay cheap.
    initialized: Cell<bool>,
}

impl super::ThreadParkerT for ThreadParker {
    type UnparkHandle = UnparkHandle;

    const IS_CHEAP_TO_CONSTRUCT: bool = false;

    #[inline]
    fn new() -> ThreadParker {
        ThreadParker {
            should_park: Cell::new(false),
            mutex: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER),
            condvar: UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER),
            initialized: Cell::new(false),
        }
    }

    #[inline]
    unsafe fn prepare_park(&self) {
        self.should_park.set(true);
        if !self.initialized.get() {
            self.init();
            self.initialized.set(true);
        }
    }

    #[inline]
    unsafe fn timed_out(&self) -> bool {
        // We need to grab the mutex here because another thread may be
        // concurrently executing UnparkHandle::unpark, which is done without
        // holding the queue lock.
        let r = libc::pthread_mutex_lock(self.mutex.get());
        debug_assert_eq!(r, 0);
        let should_park = self.should_park.get();
        let r = libc::pthread_mutex_unlock(self.mutex.get());
        debug_assert_eq!(r, 0);
        should_park
    }

    #[inline]
    unsafe fn park(&self) {
        let r = libc::pthread_mutex_lock(self.mutex.get());
        debug_assert_eq!(r, 0);
        // Standard condvar loop: guards against spurious wakeups.
        while self.should_park.get() {
            let r = libc::pthread_cond_wait(self.condvar.get(), self.mutex.get());
            debug_assert_eq!(r, 0);
        }
        let r = libc::pthread_mutex_unlock(self.mutex.get());
        debug_assert_eq!(r, 0);
    }

    #[inline]
    unsafe fn park_until(&self, timeout: Instant) -> bool {
        let r = libc::pthread_mutex_lock(self.mutex.get());
        debug_assert_eq!(r, 0);
        while self.should_park.get() {
            let now = Instant::now();
            if timeout <= now {
                let r = libc::pthread_mutex_unlock(self.mutex.get());
                debug_assert_eq!(r, 0);
                return false;
            }
            if let Some(ts) = timeout_to_timespec(timeout - now) {
                let r = libc::pthread_cond_timedwait(self.condvar.get(), self.mutex.get(), &ts);
                if ts.tv_sec < 0 {
                    // On some systems, negative timeouts will return EINVAL. In
                    // that case we won't sleep and will just busy loop instead,
                    // which is the best we can do.
                    debug_assert!(r == 0 || r == libc::ETIMEDOUT || r == libc::EINVAL);
                } else {
                    debug_assert!(r == 0 || r == libc::ETIMEDOUT);
                }
            } else {
                // Timeout calculation overflowed, just sleep indefinitely
                let r = libc::pthread_cond_wait(self.condvar.get(), self.mutex.get());
                debug_assert_eq!(r, 0);
            }
        }
        let r = libc::pthread_mutex_unlock(self.mutex.get());
        debug_assert_eq!(r, 0);
        true
    }

    #[inline]
    unsafe fn unpark_lock(&self) -> UnparkHandle {
        // Lock the parker's mutex; UnparkHandle::unpark releases it after
        // signalling, keeping the target alive until the signal is sent.
        let r = libc::pthread_mutex_lock(self.mutex.get());
        debug_assert_eq!(r, 0);

        UnparkHandle {
            thread_parker: self,
        }
    }
}

impl ThreadParker {
    /// No-op variant: on these platforms `pthread_condattr_setclock` is not
    /// used, so the condvar keeps its default clock (see `timespec_now`).
    #[cfg(any(target_vendor = "apple", target_os = "android", target_os = "espidf"))]
    #[inline]
    unsafe fn init(&self) {}

    /// Initializes the condvar to use CLOCK_MONOTONIC instead of CLOCK_REALTIME.
    #[cfg(not(any(target_vendor = "apple", target_os = "android", target_os = "espidf")))]
    #[inline]
    unsafe fn init(&self) {
        let mut attr = MaybeUninit::<libc::pthread_condattr_t>::uninit();
        let r = libc::pthread_condattr_init(attr.as_mut_ptr());
        debug_assert_eq!(r, 0);
        let r = libc::pthread_condattr_setclock(attr.as_mut_ptr(), libc::CLOCK_MONOTONIC);
        debug_assert_eq!(r, 0);
        let r = libc::pthread_cond_init(self.condvar.get(), attr.as_ptr());
        debug_assert_eq!(r, 0);
        let r = libc::pthread_condattr_destroy(attr.as_mut_ptr());
        debug_assert_eq!(r, 0);
    }
}

impl Drop for ThreadParker {
    #[inline]
    fn drop(&mut self) {
        // On DragonFly pthread_mutex_destroy() returns EINVAL if called on a
        // mutex that was just initialized with libc::PTHREAD_MUTEX_INITIALIZER.
        // Once it is used (locked/unlocked) or pthread_mutex_init() is called,
        // this behaviour no longer occurs. The same applies to condvars.
        unsafe {
            let r = libc::pthread_mutex_destroy(self.mutex.get());
            debug_assert!(r == 0 || r == libc::EINVAL);
            let r = libc::pthread_cond_destroy(self.condvar.get());
            debug_assert!(r == 0 || r == libc::EINVAL);
        }
    }
}

// Holds the parker's mutex locked until `unpark` runs.
pub struct UnparkHandle {
    thread_parker: *const ThreadParker,
}

impl super::UnparkHandleT for UnparkHandle {
    #[inline]
    unsafe fn unpark(self) {
        (*self.thread_parker).should_park.set(false);

        // We notify while holding the lock here to avoid races with the target
        // thread. In particular, the thread could exit after we unlock the
        // mutex, which would make the condvar access invalid memory.
        let r = libc::pthread_cond_signal((*self.thread_parker).condvar.get());
        debug_assert_eq!(r, 0);
        let r = libc::pthread_mutex_unlock((*self.thread_parker).mutex.get());
        debug_assert_eq!(r, 0);
    }
}

// Returns the current time on the clock used by pthread_cond_t as a timespec.
#[cfg(target_vendor = "apple")]
#[inline]
fn timespec_now() -> libc::timespec {
    let mut now = MaybeUninit::<libc::timeval>::uninit();
    let r = unsafe { libc::gettimeofday(now.as_mut_ptr(), ptr::null_mut()) };
    debug_assert_eq!(r, 0);
    // SAFETY: We know `libc::gettimeofday` has initialized the value.
    let now = unsafe { now.assume_init() };
    libc::timespec {
        tv_sec: now.tv_sec,
        tv_nsec: now.tv_usec as tv_nsec_t * 1000,
    }
}
#[cfg(not(target_vendor = "apple"))]
#[inline]
fn timespec_now() -> libc::timespec {
    let mut now = MaybeUninit::<libc::timespec>::uninit();
    let clock = if cfg!(target_os = "android") {
        // Android doesn't support pthread_condattr_setclock, so we need to
        // specify the timeout in CLOCK_REALTIME.
        libc::CLOCK_REALTIME
    } else {
        libc::CLOCK_MONOTONIC
    };
    let r = unsafe { libc::clock_gettime(clock, now.as_mut_ptr()) };
    debug_assert_eq!(r, 0);
    // SAFETY: We know `libc::clock_gettime` has initialized the value.
    unsafe { now.assume_init() }
}

// Converts a relative timeout into an absolute timeout in the clock used by
// pthread_cond_t.
#[inline]
fn timeout_to_timespec(timeout: Duration) -> Option<libc::timespec> {
    // Handle overflows early on
    if timeout.as_secs() > libc::time_t::max_value() as u64 {
        return None;
    }

    let now = timespec_now();
    let mut nsec = now.tv_nsec + timeout.subsec_nanos() as tv_nsec_t;
    let mut sec = now.tv_sec.checked_add(timeout.as_secs() as libc::time_t);
    if nsec >= 1_000_000_000 {
        nsec -= 1_000_000_000;
        sec = sec.and_then(|sec| sec.checked_add(1));
    }
    sec.map(|sec| libc::timespec {
        tv_nsec: nsec,
        tv_sec: sec,
    })
}

#[inline]
pub fn thread_yield() {
    thread::yield_now();
}
```
/content/code_sandbox/core/src/thread_parker/unix.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
2,067
```rust // // path_to_url or the MIT license <LICENSE-MIT or // path_to_url at your option. This file may not be // copied, modified, or distributed except according to those terms. //! A simple spin lock based thread parker. Used on platforms without better //! parking facilities available. use core::hint::spin_loop; use core::sync::atomic::{AtomicBool, Ordering}; use std::thread; use std::time::Instant; // Helper type for putting a thread to sleep until some other thread wakes it up pub struct ThreadParker { parked: AtomicBool, } impl super::ThreadParkerT for ThreadParker { type UnparkHandle = UnparkHandle; const IS_CHEAP_TO_CONSTRUCT: bool = true; #[inline] fn new() -> ThreadParker { ThreadParker { parked: AtomicBool::new(false), } } #[inline] unsafe fn prepare_park(&self) { self.parked.store(true, Ordering::Relaxed); } #[inline] unsafe fn timed_out(&self) -> bool { self.parked.load(Ordering::Relaxed) != false } #[inline] unsafe fn park(&self) { while self.parked.load(Ordering::Acquire) != false { spin_loop(); } } #[inline] unsafe fn park_until(&self, timeout: Instant) -> bool { while self.parked.load(Ordering::Acquire) != false { if Instant::now() >= timeout { return false; } spin_loop(); } true } #[inline] unsafe fn unpark_lock(&self) -> UnparkHandle { // We don't need to lock anything, just clear the state self.parked.store(false, Ordering::Release); UnparkHandle(()) } } pub struct UnparkHandle(()); impl super::UnparkHandleT for UnparkHandle { #[inline] unsafe fn unpark(self) {} } #[inline] pub fn thread_yield() { thread::yield_now(); } ```
/content/code_sandbox/core/src/thread_parker/generic.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
447
```rust
//
// path_to_url or the MIT license <LICENSE-MIT or
// path_to_url at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use core::{
    mem,
    sync::atomic::{AtomicUsize, Ordering},
};
use std::{ffi, time::Instant};

use super::bindings::*;

// Backend built on WaitOnAddress/WakeByAddressSingle. The function pointers
// are resolved at runtime so the binary still loads on Windows versions that
// lack them; `create` returns None in that case.
#[allow(non_snake_case)]
pub struct WaitAddress {
    WaitOnAddress: WaitOnAddress,
    WakeByAddressSingle: WakeByAddressSingle,
}

impl WaitAddress {
    #[allow(non_snake_case)]
    pub fn create() -> Option<WaitAddress> {
        let synch_dll =
            unsafe { GetModuleHandleA(b"api-ms-win-core-synch-l1-2-0.dll\0".as_ptr()) };
        if synch_dll == 0 {
            return None;
        }

        let WaitOnAddress = unsafe { GetProcAddress(synch_dll, b"WaitOnAddress\0".as_ptr())? };
        let WakeByAddressSingle =
            unsafe { GetProcAddress(synch_dll, b"WakeByAddressSingle\0".as_ptr())? };

        Some(WaitAddress {
            WaitOnAddress: unsafe { mem::transmute(WaitOnAddress) },
            WakeByAddressSingle: unsafe { mem::transmute(WakeByAddressSingle) },
        })
    }

    #[inline]
    pub fn prepare_park(&'static self, key: &AtomicUsize) {
        key.store(1, Ordering::Relaxed);
    }

    #[inline]
    pub fn timed_out(&'static self, key: &AtomicUsize) -> bool {
        key.load(Ordering::Relaxed) != 0
    }

    #[inline]
    pub fn park(&'static self, key: &AtomicUsize) {
        // Re-check the key after every wakeup; WaitOnAddress may return even
        // though the value has not changed.
        while key.load(Ordering::Acquire) != 0 {
            let r = self.wait_on_address(key, INFINITE);
            debug_assert!(r == true.into());
        }
    }

    #[inline]
    pub fn park_until(&'static self, key: &AtomicUsize, timeout: Instant) -> bool {
        while key.load(Ordering::Acquire) != 0 {
            let now = Instant::now();
            if timeout <= now {
                return false;
            }
            let diff = timeout - now;
            // Convert the remaining time to milliseconds, rounding up; any
            // overflow along the way degrades to an infinite wait.
            let timeout = diff
                .as_secs()
                .checked_mul(1000)
                .and_then(|x| x.checked_add((diff.subsec_nanos() as u64 + 999999) / 1000000))
                .map(|ms| {
                    if ms > std::u32::MAX as u64 {
                        INFINITE
                    } else {
                        ms as u32
                    }
                })
                .unwrap_or(INFINITE);
            if self.wait_on_address(key, timeout) == false.into() {
                debug_assert_eq!(unsafe { GetLastError() }, ERROR_TIMEOUT);
            }
        }
        true
    }

    #[inline]
    pub fn unpark_lock(&'static self, key: &AtomicUsize) -> UnparkHandle {
        // We don't need to lock anything, just clear the state
        key.store(0, Ordering::Release);

        UnparkHandle {
            key: key,
            waitaddress: self,
        }
    }

    // Blocks while `*key` still compares equal to 1, for at most `timeout`
    // milliseconds (INFINITE for no limit).
    #[inline]
    fn wait_on_address(&'static self, key: &AtomicUsize, timeout: u32) -> BOOL {
        let cmp = 1usize;
        unsafe {
            (self.WaitOnAddress)(
                key as *const _ as *mut ffi::c_void,
                &cmp as *const _ as *mut ffi::c_void,
                mem::size_of::<usize>(),
                timeout,
            )
        }
    }
}

// Handle for a thread that is about to be unparked. We need to mark the thread
// as unparked while holding the queue lock, but we delay the actual unparking
// until after the queue lock is released.
pub struct UnparkHandle {
    key: *const AtomicUsize,
    waitaddress: &'static WaitAddress,
}

impl UnparkHandle {
    // Wakes up the parked thread. This should be called after the queue lock is
    // released to avoid blocking the queue for too long.
    #[inline]
    pub fn unpark(self) {
        unsafe { (self.waitaddress.WakeByAddressSingle)(self.key as *mut ffi::c_void) };
    }
}
```
/content/code_sandbox/core/src/thread_parker/windows/waitaddress.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
936
```rust // // path_to_url or the MIT license <LICENSE-MIT or // path_to_url at your option. This file may not be // copied, modified, or distributed except according to those terms. use core::{ ptr, sync::atomic::{AtomicPtr, AtomicUsize, Ordering}, }; use std::time::Instant; mod bindings; mod keyed_event; mod waitaddress; enum Backend { KeyedEvent(keyed_event::KeyedEvent), WaitAddress(waitaddress::WaitAddress), } static BACKEND: AtomicPtr<Backend> = AtomicPtr::new(ptr::null_mut()); impl Backend { #[inline] fn get() -> &'static Backend { // Fast path: use the existing object let backend_ptr = BACKEND.load(Ordering::Acquire); if !backend_ptr.is_null() { return unsafe { &*backend_ptr }; }; Backend::create() } #[cold] fn create() -> &'static Backend { // Try to create a new Backend let backend; if let Some(waitaddress) = waitaddress::WaitAddress::create() { backend = Backend::WaitAddress(waitaddress); } else if let Some(keyed_event) = keyed_event::KeyedEvent::create() { backend = Backend::KeyedEvent(keyed_event); } else { panic!( "parking_lot requires either NT Keyed Events (WinXP+) or \ WaitOnAddress/WakeByAddress (Win8+)" ); } // Try to set our new Backend as the global one let backend_ptr = Box::into_raw(Box::new(backend)); match BACKEND.compare_exchange( ptr::null_mut(), backend_ptr, Ordering::Release, Ordering::Relaxed, ) { Ok(_) => unsafe { &*backend_ptr }, Err(global_backend_ptr) => { unsafe { // We lost the race, free our object and return the global one let _ = Box::from_raw(backend_ptr); &*global_backend_ptr } } } } } // Helper type for putting a thread to sleep until some other thread wakes it up pub struct ThreadParker { key: AtomicUsize, backend: &'static Backend, } impl super::ThreadParkerT for ThreadParker { type UnparkHandle = UnparkHandle; const IS_CHEAP_TO_CONSTRUCT: bool = true; #[inline] fn new() -> ThreadParker { // Initialize the backend here to ensure we don't get any panics // later on, which could leave synchronization primitives in a broken // 
state. ThreadParker { key: AtomicUsize::new(0), backend: Backend::get(), } } // Prepares the parker. This should be called before adding it to the queue. #[inline] unsafe fn prepare_park(&self) { match *self.backend { Backend::KeyedEvent(ref x) => x.prepare_park(&self.key), Backend::WaitAddress(ref x) => x.prepare_park(&self.key), } } // Checks if the park timed out. This should be called while holding the // queue lock after park_until has returned false. #[inline] unsafe fn timed_out(&self) -> bool { match *self.backend { Backend::KeyedEvent(ref x) => x.timed_out(&self.key), Backend::WaitAddress(ref x) => x.timed_out(&self.key), } } // Parks the thread until it is unparked. This should be called after it has // been added to the queue, after unlocking the queue. #[inline] unsafe fn park(&self) { match *self.backend { Backend::KeyedEvent(ref x) => x.park(&self.key), Backend::WaitAddress(ref x) => x.park(&self.key), } } // Parks the thread until it is unparked or the timeout is reached. This // should be called after it has been added to the queue, after unlocking // the queue. Returns true if we were unparked and false if we timed out. #[inline] unsafe fn park_until(&self, timeout: Instant) -> bool { match *self.backend { Backend::KeyedEvent(ref x) => x.park_until(&self.key, timeout), Backend::WaitAddress(ref x) => x.park_until(&self.key, timeout), } } // Locks the parker to prevent the target thread from exiting. This is // necessary to ensure that thread-local ThreadData objects remain valid. // This should be called while holding the queue lock. #[inline] unsafe fn unpark_lock(&self) -> UnparkHandle { match *self.backend { Backend::KeyedEvent(ref x) => UnparkHandle::KeyedEvent(x.unpark_lock(&self.key)), Backend::WaitAddress(ref x) => UnparkHandle::WaitAddress(x.unpark_lock(&self.key)), } } } // Handle for a thread that is about to be unparked. 
We need to mark the thread // as unparked while holding the queue lock, but we delay the actual unparking // until after the queue lock is released. pub enum UnparkHandle { KeyedEvent(keyed_event::UnparkHandle), WaitAddress(waitaddress::UnparkHandle), } impl super::UnparkHandleT for UnparkHandle { // Wakes up the parked thread. This should be called after the queue lock is // released to avoid blocking the queue for too long. #[inline] unsafe fn unpark(self) { match self { UnparkHandle::KeyedEvent(x) => x.unpark(), UnparkHandle::WaitAddress(x) => x.unpark(), } } } // Yields the rest of the current timeslice to the OS #[inline] pub fn thread_yield() { unsafe { // We don't use SwitchToThread here because it doesn't consider all // threads in the system and the thread we are waiting for may not get // selected. bindings::Sleep(0); } } ```
/content/code_sandbox/core/src/thread_parker/windows/mod.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
1,332
```rust // // path_to_url or the MIT license <LICENSE-MIT or // path_to_url at your option. This file may not be // copied, modified, or distributed except according to those terms. use crate::thread_parker::{ThreadParker, ThreadParkerT, UnparkHandleT}; use crate::util::UncheckedOptionExt; use crate::word_lock::WordLock; use core::{ cell::{Cell, UnsafeCell}, ptr, sync::atomic::{AtomicPtr, AtomicUsize, Ordering}, }; use smallvec::SmallVec; use std::time::{Duration, Instant}; // Don't use Instant on wasm32-unknown-unknown, it just panics. cfg_if::cfg_if! { if #[cfg(all( target_family = "wasm", target_os = "unknown", target_vendor = "unknown" ))] { #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] struct TimeoutInstant; impl TimeoutInstant { fn now() -> TimeoutInstant { TimeoutInstant } } impl core::ops::Add<Duration> for TimeoutInstant { type Output = Self; fn add(self, _rhs: Duration) -> Self::Output { TimeoutInstant } } } else { use std::time::Instant as TimeoutInstant; } } static NUM_THREADS: AtomicUsize = AtomicUsize::new(0); /// Holds the pointer to the currently active `HashTable`. /// /// # Safety /// /// Except for the initial value of null, it must always point to a valid `HashTable` instance. /// Any `HashTable` this global static has ever pointed to must never be freed. static HASHTABLE: AtomicPtr<HashTable> = AtomicPtr::new(ptr::null_mut()); // Even with 3x more buckets than threads, the memory overhead per thread is // still only a few hundred bytes per thread. const LOAD_FACTOR: usize = 3; struct HashTable { // Hash buckets for the table entries: Box<[Bucket]>, // Number of bits used for the hash function hash_bits: u32, // Previous table. This is only kept to keep leak detectors happy. 
_prev: *const HashTable, } impl HashTable { #[inline] fn new(num_threads: usize, prev: *const HashTable) -> Box<HashTable> { let new_size = (num_threads * LOAD_FACTOR).next_power_of_two(); let hash_bits = 0usize.leading_zeros() - new_size.leading_zeros() - 1; let now = TimeoutInstant::now(); let mut entries = Vec::with_capacity(new_size); for i in 0..new_size { // We must ensure the seed is not zero entries.push(Bucket::new(now, i as u32 + 1)); } Box::new(HashTable { entries: entries.into_boxed_slice(), hash_bits, _prev: prev, }) } } #[repr(align(64))] struct Bucket { // Lock protecting the queue mutex: WordLock, // Linked list of threads waiting on this bucket queue_head: Cell<*const ThreadData>, queue_tail: Cell<*const ThreadData>, // Next time at which point be_fair should be set fair_timeout: UnsafeCell<FairTimeout>, } impl Bucket { #[inline] pub fn new(timeout: TimeoutInstant, seed: u32) -> Self { Self { mutex: WordLock::new(), queue_head: Cell::new(ptr::null()), queue_tail: Cell::new(ptr::null()), fair_timeout: UnsafeCell::new(FairTimeout::new(timeout, seed)), } } } struct FairTimeout { // Next time at which point be_fair should be set timeout: TimeoutInstant, // the PRNG state for calculating the next timeout seed: u32, } impl FairTimeout { #[inline] fn new(timeout: TimeoutInstant, seed: u32) -> FairTimeout { FairTimeout { timeout, seed } } // Determine whether we should force a fair unlock, and update the timeout #[inline] fn should_timeout(&mut self) -> bool { let now = TimeoutInstant::now(); if now > self.timeout { // Time between 0 and 1ms. let nanos = self.gen_u32() % 1_000_000; self.timeout = now + Duration::new(0, nanos); true } else { false } } // Pseudorandom number generator from the "Xorshift RNGs" paper by George Marsaglia. fn gen_u32(&mut self) -> u32 { self.seed ^= self.seed << 13; self.seed ^= self.seed >> 17; self.seed ^= self.seed << 5; self.seed } } struct ThreadData { parker: ThreadParker, // Key that this thread is sleeping on. 
This may change if the thread is // requeued to a different key. key: AtomicUsize, // Linked list of parked threads in a bucket next_in_queue: Cell<*const ThreadData>, // UnparkToken passed to this thread when it is unparked unpark_token: Cell<UnparkToken>, // ParkToken value set by the thread when it was parked park_token: Cell<ParkToken>, // Is the thread parked with a timeout? parked_with_timeout: Cell<bool>, // Extra data for deadlock detection #[cfg(feature = "deadlock_detection")] deadlock_data: deadlock::DeadlockData, } impl ThreadData { fn new() -> ThreadData { // Keep track of the total number of live ThreadData objects and resize // the hash table accordingly. let num_threads = NUM_THREADS.fetch_add(1, Ordering::Relaxed) + 1; grow_hashtable(num_threads); ThreadData { parker: ThreadParker::new(), key: AtomicUsize::new(0), next_in_queue: Cell::new(ptr::null()), unpark_token: Cell::new(DEFAULT_UNPARK_TOKEN), park_token: Cell::new(DEFAULT_PARK_TOKEN), parked_with_timeout: Cell::new(false), #[cfg(feature = "deadlock_detection")] deadlock_data: deadlock::DeadlockData::new(), } } } // Invokes the given closure with a reference to the current thread `ThreadData`. #[inline(always)] fn with_thread_data<T>(f: impl FnOnce(&ThreadData) -> T) -> T { // Unlike word_lock::ThreadData, parking_lot::ThreadData is always expensive // to construct. Try to use a thread-local version if possible. Otherwise just // create a ThreadData on the stack let mut thread_data_storage = None; thread_local!(static THREAD_DATA: ThreadData = ThreadData::new()); let thread_data_ptr = THREAD_DATA .try_with(|x| x as *const ThreadData) .unwrap_or_else(|_| thread_data_storage.get_or_insert_with(ThreadData::new)); f(unsafe { &*thread_data_ptr }) } impl Drop for ThreadData { fn drop(&mut self) { NUM_THREADS.fetch_sub(1, Ordering::Relaxed); } } /// Returns a reference to the latest hash table, creating one if it doesn't exist yet. /// The reference is valid forever. 
However, the `HashTable` it references might become stale /// at any point. Meaning it still exists, but it is not the instance in active use. #[inline] fn get_hashtable() -> &'static HashTable { let table = HASHTABLE.load(Ordering::Acquire); // If there is no table, create one if table.is_null() { create_hashtable() } else { // SAFETY: when not null, `HASHTABLE` always points to a `HashTable` that is never freed. unsafe { &*table } } } /// Returns a reference to the latest hash table, creating one if it doesn't exist yet. /// The reference is valid forever. However, the `HashTable` it references might become stale /// at any point. Meaning it still exists, but it is not the instance in active use. #[cold] fn create_hashtable() -> &'static HashTable { let new_table = Box::into_raw(HashTable::new(LOAD_FACTOR, ptr::null())); // If this fails then it means some other thread created the hash table first. let table = match HASHTABLE.compare_exchange( ptr::null_mut(), new_table, Ordering::AcqRel, Ordering::Acquire, ) { Ok(_) => new_table, Err(old_table) => { // Free the table we created // SAFETY: `new_table` is created from `Box::into_raw` above and only freed here. unsafe { let _ = Box::from_raw(new_table); } old_table } }; // SAFETY: The `HashTable` behind `table` is never freed. It is either the table pointer we // created here, or it is one loaded from `HASHTABLE`. unsafe { &*table } } // Grow the hash table so that it is big enough for the given number of threads. // This isn't performance-critical since it is only done when a ThreadData is // created, which only happens once per thread. fn grow_hashtable(num_threads: usize) { // Lock all buckets in the existing table and get a reference to it let old_table = loop { let table = get_hashtable(); // Check if we need to resize the existing table if table.entries.len() >= LOAD_FACTOR * num_threads { return; } // Lock all buckets in the old table for bucket in &table.entries[..] 
{ bucket.mutex.lock(); } // Now check if our table is still the latest one. Another thread could // have grown the hash table between us reading HASHTABLE and locking // the buckets. if HASHTABLE.load(Ordering::Relaxed) == table as *const _ as *mut _ { break table; } // Unlock buckets and try again for bucket in &table.entries[..] { // SAFETY: We hold the lock here, as required unsafe { bucket.mutex.unlock() }; } }; // Create the new table let mut new_table = HashTable::new(num_threads, old_table); // Move the entries from the old table to the new one for bucket in &old_table.entries[..] { // SAFETY: The park, unpark* and check_wait_graph_fast functions create only correct linked // lists. All `ThreadData` instances in these lists will remain valid as long as they are // present in the lists, meaning as long as their threads are parked. unsafe { rehash_bucket_into(bucket, &mut new_table) }; } // Publish the new table. No races are possible at this point because // any other thread trying to grow the hash table is blocked on the bucket // locks in the old table. HASHTABLE.store(Box::into_raw(new_table), Ordering::Release); // Unlock all buckets in the old table for bucket in &old_table.entries[..] { // SAFETY: We hold the lock here, as required unsafe { bucket.mutex.unlock() }; } } /// Iterate through all `ThreadData` objects in the bucket and insert them into the given table /// in the bucket their key correspond to for this table. /// /// # Safety /// /// The given `bucket` must have a correctly constructed linked list under `queue_head`, containing /// `ThreadData` instances that must stay valid at least as long as the given `table` is in use. /// /// The given `table` must only contain buckets with correctly constructed linked lists. 
unsafe fn rehash_bucket_into(bucket: &'static Bucket, table: &mut HashTable) { let mut current: *const ThreadData = bucket.queue_head.get(); while !current.is_null() { let next = (*current).next_in_queue.get(); let hash = hash((*current).key.load(Ordering::Relaxed), table.hash_bits); if table.entries[hash].queue_tail.get().is_null() { table.entries[hash].queue_head.set(current); } else { (*table.entries[hash].queue_tail.get()) .next_in_queue .set(current); } table.entries[hash].queue_tail.set(current); (*current).next_in_queue.set(ptr::null()); current = next; } } // Hash function for addresses #[cfg(target_pointer_width = "32")] #[inline] fn hash(key: usize, bits: u32) -> usize { key.wrapping_mul(0x9E3779B9) >> (32 - bits) } #[cfg(target_pointer_width = "64")] #[inline] fn hash(key: usize, bits: u32) -> usize { key.wrapping_mul(0x9E3779B97F4A7C15) >> (64 - bits) } /// Locks the bucket for the given key and returns a reference to it. /// The returned bucket must be unlocked again in order to not cause deadlocks. #[inline] fn lock_bucket(key: usize) -> &'static Bucket { loop { let hashtable = get_hashtable(); let hash = hash(key, hashtable.hash_bits); let bucket = &hashtable.entries[hash]; // Lock the bucket bucket.mutex.lock(); // If no other thread has rehashed the table before we grabbed the lock // then we are good to go! The lock we grabbed prevents any rehashes. if HASHTABLE.load(Ordering::Relaxed) == hashtable as *const _ as *mut _ { return bucket; } // Unlock the bucket and try again // SAFETY: We hold the lock here, as required unsafe { bucket.mutex.unlock() }; } } /// Locks the bucket for the given key and returns a reference to it. But checks that the key /// hasn't been changed in the meantime due to a requeue. /// The returned bucket must be unlocked again in order to not cause deadlocks. 
#[inline] fn lock_bucket_checked(key: &AtomicUsize) -> (usize, &'static Bucket) { loop { let hashtable = get_hashtable(); let current_key = key.load(Ordering::Relaxed); let hash = hash(current_key, hashtable.hash_bits); let bucket = &hashtable.entries[hash]; // Lock the bucket bucket.mutex.lock(); // Check that both the hash table and key are correct while the bucket // is locked. Note that the key can't change once we locked the proper // bucket for it, so we just keep trying until we have the correct key. if HASHTABLE.load(Ordering::Relaxed) == hashtable as *const _ as *mut _ && key.load(Ordering::Relaxed) == current_key { return (current_key, bucket); } // Unlock the bucket and try again // SAFETY: We hold the lock here, as required unsafe { bucket.mutex.unlock() }; } } /// Locks the two buckets for the given pair of keys and returns references to them. /// The returned buckets must be unlocked again in order to not cause deadlocks. /// /// If both keys hash to the same value, both returned references will be to the same bucket. Be /// careful to only unlock it once in this case, always use `unlock_bucket_pair`. #[inline] fn lock_bucket_pair(key1: usize, key2: usize) -> (&'static Bucket, &'static Bucket) { loop { let hashtable = get_hashtable(); let hash1 = hash(key1, hashtable.hash_bits); let hash2 = hash(key2, hashtable.hash_bits); // Get the bucket at the lowest hash/index first let bucket1 = if hash1 <= hash2 { &hashtable.entries[hash1] } else { &hashtable.entries[hash2] }; // Lock the first bucket bucket1.mutex.lock(); // If no other thread has rehashed the table before we grabbed the lock // then we are good to go! The lock we grabbed prevents any rehashes. 
if HASHTABLE.load(Ordering::Relaxed) == hashtable as *const _ as *mut _ { // Now lock the second bucket and return the two buckets if hash1 == hash2 { return (bucket1, bucket1); } else if hash1 < hash2 { let bucket2 = &hashtable.entries[hash2]; bucket2.mutex.lock(); return (bucket1, bucket2); } else { let bucket2 = &hashtable.entries[hash1]; bucket2.mutex.lock(); return (bucket2, bucket1); } } // Unlock the bucket and try again // SAFETY: We hold the lock here, as required unsafe { bucket1.mutex.unlock() }; } } /// Unlock a pair of buckets /// /// # Safety /// /// Both buckets must be locked #[inline] unsafe fn unlock_bucket_pair(bucket1: &Bucket, bucket2: &Bucket) { bucket1.mutex.unlock(); if !ptr::eq(bucket1, bucket2) { bucket2.mutex.unlock(); } } /// Result of a park operation. #[derive(Copy, Clone, Eq, PartialEq, Debug)] pub enum ParkResult { /// We were unparked by another thread with the given token. Unparked(UnparkToken), /// The validation callback returned false. Invalid, /// The timeout expired. TimedOut, } impl ParkResult { /// Returns true if we were unparked by another thread. #[inline] pub fn is_unparked(self) -> bool { if let ParkResult::Unparked(_) = self { true } else { false } } } /// Result of an unpark operation. #[derive(Copy, Clone, Default, Eq, PartialEq, Debug)] pub struct UnparkResult { /// The number of threads that were unparked. pub unparked_threads: usize, /// The number of threads that were requeued. pub requeued_threads: usize, /// Whether there are any threads remaining in the queue. This only returns /// true if a thread was unparked. pub have_more_threads: bool, /// This is set to true on average once every 0.5ms for any given key. It /// should be used to switch to a fair unlocking mechanism for a particular /// unlock. pub be_fair: bool, /// Private field so new fields can be added without breakage. _sealed: (), } /// Operation that `unpark_requeue` should perform. 
#[derive(Copy, Clone, Eq, PartialEq, Debug)] pub enum RequeueOp { /// Abort the operation without doing anything. Abort, /// Unpark one thread and requeue the rest onto the target queue. UnparkOneRequeueRest, /// Requeue all threads onto the target queue. RequeueAll, /// Unpark one thread and leave the rest parked. No requeuing is done. UnparkOne, /// Requeue one thread and leave the rest parked on the original queue. RequeueOne, } /// Operation that `unpark_filter` should perform for each thread. #[derive(Copy, Clone, Eq, PartialEq, Debug)] pub enum FilterOp { /// Unpark the thread and continue scanning the list of parked threads. Unpark, /// Don't unpark the thread and continue scanning the list of parked threads. Skip, /// Don't unpark the thread and stop scanning the list of parked threads. Stop, } /// A value which is passed from an unparker to a parked thread. #[derive(Copy, Clone, Eq, PartialEq, Debug)] pub struct UnparkToken(pub usize); /// A value associated with a parked thread which can be used by `unpark_filter`. #[derive(Copy, Clone, Eq, PartialEq, Debug)] pub struct ParkToken(pub usize); /// A default unpark token to use. pub const DEFAULT_UNPARK_TOKEN: UnparkToken = UnparkToken(0); /// A default park token to use. pub const DEFAULT_PARK_TOKEN: ParkToken = ParkToken(0); /// Parks the current thread in the queue associated with the given key. /// /// The `validate` function is called while the queue is locked and can abort /// the operation by returning false. If `validate` returns true then the /// current thread is appended to the queue and the queue is unlocked. /// /// The `before_sleep` function is called after the queue is unlocked but before /// the thread is put to sleep. The thread will then sleep until it is unparked /// or the given timeout is reached. /// /// The `timed_out` function is also called while the queue is locked, but only /// if the timeout was reached. 
It is passed the key of the queue it was in when /// it timed out, which may be different from the original key if /// `unpark_requeue` was called. It is also passed a bool which indicates /// whether it was the last thread in the queue. /// /// # Safety /// /// You should only call this function with an address that you control, since /// you could otherwise interfere with the operation of other synchronization /// primitives. /// /// The `validate` and `timed_out` functions are called while the queue is /// locked and must not panic or call into any function in `parking_lot`. /// /// The `before_sleep` function is called outside the queue lock and is allowed /// to call `unpark_one`, `unpark_all`, `unpark_requeue` or `unpark_filter`, but /// it is not allowed to call `park` or panic. #[inline] pub unsafe fn park( key: usize, validate: impl FnOnce() -> bool, before_sleep: impl FnOnce(), timed_out: impl FnOnce(usize, bool), park_token: ParkToken, timeout: Option<Instant>, ) -> ParkResult { // Grab our thread data, this also ensures that the hash table exists with_thread_data(|thread_data| { // Lock the bucket for the given key let bucket = lock_bucket(key); // If the validation function fails, just return if !validate() { // SAFETY: We hold the lock here, as required bucket.mutex.unlock(); return ParkResult::Invalid; } // Append our thread data to the queue and unlock the bucket thread_data.parked_with_timeout.set(timeout.is_some()); thread_data.next_in_queue.set(ptr::null()); thread_data.key.store(key, Ordering::Relaxed); thread_data.park_token.set(park_token); thread_data.parker.prepare_park(); if !bucket.queue_head.get().is_null() { (*bucket.queue_tail.get()).next_in_queue.set(thread_data); } else { bucket.queue_head.set(thread_data); } bucket.queue_tail.set(thread_data); // SAFETY: We hold the lock here, as required bucket.mutex.unlock(); // Invoke the pre-sleep callback before_sleep(); // Park our thread and determine whether we were woken up by an unpark // 
or by our timeout. Note that this isn't precise: we can still be // unparked since we are still in the queue. let unparked = match timeout { Some(timeout) => thread_data.parker.park_until(timeout), None => { thread_data.parker.park(); // call deadlock detection on_unpark hook deadlock::on_unpark(thread_data); true } }; // If we were unparked, return now if unparked { return ParkResult::Unparked(thread_data.unpark_token.get()); } // Lock our bucket again. Note that the hashtable may have been rehashed in // the meantime. Our key may also have changed if we were requeued. let (key, bucket) = lock_bucket_checked(&thread_data.key); // Now we need to check again if we were unparked or timed out. Unlike the // last check this is precise because we hold the bucket lock. if !thread_data.parker.timed_out() { // SAFETY: We hold the lock here, as required bucket.mutex.unlock(); return ParkResult::Unparked(thread_data.unpark_token.get()); } // We timed out, so we now need to remove our thread from the queue let mut link = &bucket.queue_head; let mut current = bucket.queue_head.get(); let mut previous = ptr::null(); let mut was_last_thread = true; while !current.is_null() { if current == thread_data { let next = (*current).next_in_queue.get(); link.set(next); if bucket.queue_tail.get() == current { bucket.queue_tail.set(previous); } else { // Scan the rest of the queue to see if there are any other // entries with the given key. let mut scan = next; while !scan.is_null() { if (*scan).key.load(Ordering::Relaxed) == key { was_last_thread = false; break; } scan = (*scan).next_in_queue.get(); } } // Callback to indicate that we timed out, and whether we were the // last thread on the queue. 
timed_out(key, was_last_thread); break; } else { if (*current).key.load(Ordering::Relaxed) == key { was_last_thread = false; } link = &(*current).next_in_queue; previous = current; current = link.get(); } } // There should be no way for our thread to have been removed from the queue // if we timed out. debug_assert!(!current.is_null()); // Unlock the bucket, we are done // SAFETY: We hold the lock here, as required bucket.mutex.unlock(); ParkResult::TimedOut }) } /// Unparks one thread from the queue associated with the given key. /// /// The `callback` function is called while the queue is locked and before the /// target thread is woken up. The `UnparkResult` argument to the function /// indicates whether a thread was found in the queue and whether this was the /// last thread in the queue. This value is also returned by `unpark_one`. /// /// The `callback` function should return an `UnparkToken` value which will be /// passed to the thread that is unparked. If no thread is unparked then the /// returned value is ignored. /// /// # Safety /// /// You should only call this function with an address that you control, since /// you could otherwise interfere with the operation of other synchronization /// primitives. /// /// The `callback` function is called while the queue is locked and must not /// panic or call into any function in `parking_lot`. /// /// The `parking_lot` functions are not re-entrant and calling this method /// from the context of an asynchronous signal handler may result in undefined /// behavior, including corruption of internal state and/or deadlocks. 
#[inline] pub unsafe fn unpark_one( key: usize, callback: impl FnOnce(UnparkResult) -> UnparkToken, ) -> UnparkResult { // Lock the bucket for the given key let bucket = lock_bucket(key); // Find a thread with a matching key and remove it from the queue let mut link = &bucket.queue_head; let mut current = bucket.queue_head.get(); let mut previous = ptr::null(); let mut result = UnparkResult::default(); while !current.is_null() { if (*current).key.load(Ordering::Relaxed) == key { // Remove the thread from the queue let next = (*current).next_in_queue.get(); link.set(next); if bucket.queue_tail.get() == current { bucket.queue_tail.set(previous); } else { // Scan the rest of the queue to see if there are any other // entries with the given key. let mut scan = next; while !scan.is_null() { if (*scan).key.load(Ordering::Relaxed) == key { result.have_more_threads = true; break; } scan = (*scan).next_in_queue.get(); } } // Invoke the callback before waking up the thread result.unparked_threads = 1; result.be_fair = (*bucket.fair_timeout.get()).should_timeout(); let token = callback(result); // Set the token for the target thread (*current).unpark_token.set(token); // This is a bit tricky: we first lock the ThreadParker to prevent // the thread from exiting and freeing its ThreadData if its wait // times out. Then we unlock the queue since we don't want to keep // the queue locked while we perform a system call. Finally we wake // up the parked thread. let handle = (*current).parker.unpark_lock(); // SAFETY: We hold the lock here, as required bucket.mutex.unlock(); handle.unpark(); return result; } else { link = &(*current).next_in_queue; previous = current; current = link.get(); } } // No threads with a matching key were found in the bucket callback(result); // SAFETY: We hold the lock here, as required bucket.mutex.unlock(); result } /// Unparks all threads in the queue associated with the given key. /// /// The given `UnparkToken` is passed to all unparked threads. 
/// /// This function returns the number of threads that were unparked. /// /// # Safety /// /// You should only call this function with an address that you control, since /// you could otherwise interfere with the operation of other synchronization /// primitives. /// /// The `parking_lot` functions are not re-entrant and calling this method /// from the context of an asynchronous signal handler may result in undefined /// behavior, including corruption of internal state and/or deadlocks. #[inline] pub unsafe fn unpark_all(key: usize, unpark_token: UnparkToken) -> usize { // Lock the bucket for the given key let bucket = lock_bucket(key); // Remove all threads with the given key in the bucket let mut link = &bucket.queue_head; let mut current = bucket.queue_head.get(); let mut previous = ptr::null(); let mut threads = SmallVec::<[_; 8]>::new(); while !current.is_null() { if (*current).key.load(Ordering::Relaxed) == key { // Remove the thread from the queue let next = (*current).next_in_queue.get(); link.set(next); if bucket.queue_tail.get() == current { bucket.queue_tail.set(previous); } // Set the token for the target thread (*current).unpark_token.set(unpark_token); // Don't wake up threads while holding the queue lock. See comment // in unpark_one. For now just record which threads we need to wake // up. threads.push((*current).parker.unpark_lock()); current = next; } else { link = &(*current).next_in_queue; previous = current; current = link.get(); } } // Unlock the bucket // SAFETY: We hold the lock here, as required bucket.mutex.unlock(); // Now that we are outside the lock, wake up all the threads that we removed // from the queue. let num_threads = threads.len(); for handle in threads.into_iter() { handle.unpark(); } num_threads } /// Removes all threads from the queue associated with `key_from`, optionally /// unparks the first one and requeues the rest onto the queue associated with /// `key_to`. 
/// /// The `validate` function is called while both queues are locked. Its return /// value will determine which operation is performed, or whether the operation /// should be aborted. See `RequeueOp` for details about the different possible /// return values. /// /// The `callback` function is also called while both queues are locked. It is /// passed the `RequeueOp` returned by `validate` and an `UnparkResult` /// indicating whether a thread was unparked and whether there are threads still /// parked in the new queue. This `UnparkResult` value is also returned by /// `unpark_requeue`. /// /// The `callback` function should return an `UnparkToken` value which will be /// passed to the thread that is unparked. If no thread is unparked then the /// returned value is ignored. /// /// # Safety /// /// You should only call this function with an address that you control, since /// you could otherwise interfere with the operation of other synchronization /// primitives. /// /// The `validate` and `callback` functions are called while the queue is locked /// and must not panic or call into any function in `parking_lot`. #[inline] pub unsafe fn unpark_requeue( key_from: usize, key_to: usize, validate: impl FnOnce() -> RequeueOp, callback: impl FnOnce(RequeueOp, UnparkResult) -> UnparkToken, ) -> UnparkResult { // Lock the two buckets for the given key let (bucket_from, bucket_to) = lock_bucket_pair(key_from, key_to); // If the validation function fails, just return let mut result = UnparkResult::default(); let op = validate(); if op == RequeueOp::Abort { // SAFETY: Both buckets are locked, as required. 
unlock_bucket_pair(bucket_from, bucket_to); return result; } // Remove all threads with the given key in the source bucket let mut link = &bucket_from.queue_head; let mut current = bucket_from.queue_head.get(); let mut previous = ptr::null(); let mut requeue_threads: *const ThreadData = ptr::null(); let mut requeue_threads_tail: *const ThreadData = ptr::null(); let mut wakeup_thread = None; while !current.is_null() { if (*current).key.load(Ordering::Relaxed) == key_from { // Remove the thread from the queue let next = (*current).next_in_queue.get(); link.set(next); if bucket_from.queue_tail.get() == current { bucket_from.queue_tail.set(previous); } // Prepare the first thread for wakeup and requeue the rest. if (op == RequeueOp::UnparkOneRequeueRest || op == RequeueOp::UnparkOne) && wakeup_thread.is_none() { wakeup_thread = Some(current); result.unparked_threads = 1; } else { if !requeue_threads.is_null() { (*requeue_threads_tail).next_in_queue.set(current); } else { requeue_threads = current; } requeue_threads_tail = current; (*current).key.store(key_to, Ordering::Relaxed); result.requeued_threads += 1; } if op == RequeueOp::UnparkOne || op == RequeueOp::RequeueOne { // Scan the rest of the queue to see if there are any other // entries with the given key. 
let mut scan = next; while !scan.is_null() { if (*scan).key.load(Ordering::Relaxed) == key_from { result.have_more_threads = true; break; } scan = (*scan).next_in_queue.get(); } break; } current = next; } else { link = &(*current).next_in_queue; previous = current; current = link.get(); } } // Add the requeued threads to the destination bucket if !requeue_threads.is_null() { (*requeue_threads_tail).next_in_queue.set(ptr::null()); if !bucket_to.queue_head.get().is_null() { (*bucket_to.queue_tail.get()) .next_in_queue .set(requeue_threads); } else { bucket_to.queue_head.set(requeue_threads); } bucket_to.queue_tail.set(requeue_threads_tail); } // Invoke the callback before waking up the thread if result.unparked_threads != 0 { result.be_fair = (*bucket_from.fair_timeout.get()).should_timeout(); } let token = callback(op, result); // See comment in unpark_one for why we mess with the locking if let Some(wakeup_thread) = wakeup_thread { (*wakeup_thread).unpark_token.set(token); let handle = (*wakeup_thread).parker.unpark_lock(); // SAFETY: Both buckets are locked, as required. unlock_bucket_pair(bucket_from, bucket_to); handle.unpark(); } else { // SAFETY: Both buckets are locked, as required. unlock_bucket_pair(bucket_from, bucket_to); } result } /// Unparks a number of threads from the front of the queue associated with /// `key` depending on the results of a filter function which inspects the /// `ParkToken` associated with each thread. /// /// The `filter` function is called for each thread in the queue or until /// `FilterOp::Stop` is returned. This function is passed the `ParkToken` /// associated with a particular thread, which is unparked if `FilterOp::Unpark` /// is returned. /// /// The `callback` function is also called while both queues are locked. It is /// passed an `UnparkResult` indicating the number of threads that were unparked /// and whether there are still parked threads in the queue. 
This `UnparkResult` /// value is also returned by `unpark_filter`. /// /// The `callback` function should return an `UnparkToken` value which will be /// passed to all threads that are unparked. If no thread is unparked then the /// returned value is ignored. /// /// # Safety /// /// You should only call this function with an address that you control, since /// you could otherwise interfere with the operation of other synchronization /// primitives. /// /// The `filter` and `callback` functions are called while the queue is locked /// and must not panic or call into any function in `parking_lot`. #[inline] pub unsafe fn unpark_filter( key: usize, mut filter: impl FnMut(ParkToken) -> FilterOp, callback: impl FnOnce(UnparkResult) -> UnparkToken, ) -> UnparkResult { // Lock the bucket for the given key let bucket = lock_bucket(key); // Go through the queue looking for threads with a matching key let mut link = &bucket.queue_head; let mut current = bucket.queue_head.get(); let mut previous = ptr::null(); let mut threads = SmallVec::<[_; 8]>::new(); let mut result = UnparkResult::default(); while !current.is_null() { if (*current).key.load(Ordering::Relaxed) == key { // Call the filter function with the thread's ParkToken let next = (*current).next_in_queue.get(); match filter((*current).park_token.get()) { FilterOp::Unpark => { // Remove the thread from the queue link.set(next); if bucket.queue_tail.get() == current { bucket.queue_tail.set(previous); } // Add the thread to our list of threads to unpark threads.push((current, None)); current = next; } FilterOp::Skip => { result.have_more_threads = true; link = &(*current).next_in_queue; previous = current; current = link.get(); } FilterOp::Stop => { result.have_more_threads = true; break; } } } else { link = &(*current).next_in_queue; previous = current; current = link.get(); } } // Invoke the callback before waking up the threads result.unparked_threads = threads.len(); if result.unparked_threads != 0 { result.be_fair 
= (*bucket.fair_timeout.get()).should_timeout(); } let token = callback(result); // Pass the token to all threads that are going to be unparked and prepare // them for unparking. for t in threads.iter_mut() { (*t.0).unpark_token.set(token); t.1 = Some((*t.0).parker.unpark_lock()); } // SAFETY: We hold the lock here, as required bucket.mutex.unlock(); // Now that we are outside the lock, wake up all the threads that we removed // from the queue. for (_, handle) in threads.into_iter() { handle.unchecked_unwrap().unpark(); } result } /// \[Experimental\] Deadlock detection /// /// Enabled via the `deadlock_detection` feature flag. pub mod deadlock { #[cfg(feature = "deadlock_detection")] use super::deadlock_impl; #[cfg(feature = "deadlock_detection")] pub(super) use super::deadlock_impl::DeadlockData; /// Acquire a resource identified by key in the deadlock detector /// Noop if `deadlock_detection` feature isn't enabled. /// /// # Safety /// /// Call after the resource is acquired #[inline] pub unsafe fn acquire_resource(_key: usize) { #[cfg(feature = "deadlock_detection")] deadlock_impl::acquire_resource(_key); } /// Release a resource identified by key in the deadlock detector. /// Noop if `deadlock_detection` feature isn't enabled. /// /// # Panics /// /// Panics if the resource was already released or wasn't acquired in this thread. /// /// # Safety /// /// Call before the resource is released #[inline] pub unsafe fn release_resource(_key: usize) { #[cfg(feature = "deadlock_detection")] deadlock_impl::release_resource(_key); } /// Returns all deadlocks detected *since* the last call. /// Each cycle consist of a vector of `DeadlockedThread`. 
#[cfg(feature = "deadlock_detection")] #[inline] pub fn check_deadlock() -> Vec<Vec<deadlock_impl::DeadlockedThread>> { deadlock_impl::check_deadlock() } #[inline] pub(super) unsafe fn on_unpark(_td: &super::ThreadData) { #[cfg(feature = "deadlock_detection")] deadlock_impl::on_unpark(_td); } } #[cfg(feature = "deadlock_detection")] mod deadlock_impl { use super::{get_hashtable, lock_bucket, with_thread_data, ThreadData, NUM_THREADS}; use crate::thread_parker::{ThreadParkerT, UnparkHandleT}; use crate::word_lock::WordLock; use backtrace::Backtrace; use petgraph; use petgraph::graphmap::DiGraphMap; use std::cell::{Cell, UnsafeCell}; use std::collections::HashSet; use std::sync::atomic::Ordering; use std::sync::mpsc; use thread_id; /// Representation of a deadlocked thread pub struct DeadlockedThread { thread_id: usize, backtrace: Backtrace, } impl DeadlockedThread { /// The system thread id pub fn thread_id(&self) -> usize { self.thread_id } /// The thread backtrace pub fn backtrace(&self) -> &Backtrace { &self.backtrace } } pub struct DeadlockData { // Currently owned resources (keys) resources: UnsafeCell<Vec<usize>>, // Set when there's a pending callstack request deadlocked: Cell<bool>, // Sender used to report the backtrace backtrace_sender: UnsafeCell<Option<mpsc::Sender<DeadlockedThread>>>, // System thread id thread_id: usize, } impl DeadlockData { pub fn new() -> Self { DeadlockData { resources: UnsafeCell::new(Vec::new()), deadlocked: Cell::new(false), backtrace_sender: UnsafeCell::new(None), thread_id: thread_id::get(), } } } pub(super) unsafe fn on_unpark(td: &ThreadData) { if td.deadlock_data.deadlocked.get() { let sender = (*td.deadlock_data.backtrace_sender.get()).take().unwrap(); sender .send(DeadlockedThread { thread_id: td.deadlock_data.thread_id, backtrace: Backtrace::new(), }) .unwrap(); // make sure to close this sender drop(sender); // park until the end of the time td.parker.prepare_park(); td.parker.park(); unreachable!("unparked deadlocked 
thread!"); } } pub unsafe fn acquire_resource(key: usize) { with_thread_data(|thread_data| { (*thread_data.deadlock_data.resources.get()).push(key); }); } pub unsafe fn release_resource(key: usize) { with_thread_data(|thread_data| { let resources = &mut (*thread_data.deadlock_data.resources.get()); // There is only one situation where we can fail to find the // resource: we are currently running TLS destructors and our // ThreadData has already been freed. There isn't much we can do // about it at this point, so just ignore it. if let Some(p) = resources.iter().rposition(|x| *x == key) { resources.swap_remove(p); } }); } pub fn check_deadlock() -> Vec<Vec<DeadlockedThread>> { unsafe { // fast pass if check_wait_graph_fast() { // double check check_wait_graph_slow() } else { Vec::new() } } } // Simple algorithm that builds a wait graph f the threads and the resources, // then checks for the presence of cycles (deadlocks). // This variant isn't precise as it doesn't lock the entire table before checking unsafe fn check_wait_graph_fast() -> bool { let table = get_hashtable(); let thread_count = NUM_THREADS.load(Ordering::Relaxed); let mut graph = DiGraphMap::<usize, ()>::with_capacity(thread_count * 2, thread_count * 2); for b in &(*table).entries[..] 
{ b.mutex.lock(); let mut current = b.queue_head.get(); while !current.is_null() { if !(*current).parked_with_timeout.get() && !(*current).deadlock_data.deadlocked.get() { // .resources are waiting for their owner for &resource in &(*(*current).deadlock_data.resources.get()) { graph.add_edge(resource, current as usize, ()); } // owner waits for resource .key graph.add_edge(current as usize, (*current).key.load(Ordering::Relaxed), ()); } current = (*current).next_in_queue.get(); } // SAFETY: We hold the lock here, as required b.mutex.unlock(); } petgraph::algo::is_cyclic_directed(&graph) } #[derive(Hash, PartialEq, Eq, PartialOrd, Ord, Copy, Clone)] enum WaitGraphNode { Thread(*const ThreadData), Resource(usize), } use self::WaitGraphNode::*; // Contrary to the _fast variant this locks the entries table before looking for cycles. // Returns all detected thread wait cycles. // Note that once a cycle is reported it's never reported again. unsafe fn check_wait_graph_slow() -> Vec<Vec<DeadlockedThread>> { static DEADLOCK_DETECTION_LOCK: WordLock = WordLock::new(); DEADLOCK_DETECTION_LOCK.lock(); let mut table = get_hashtable(); loop { // Lock all buckets in the old table for b in &table.entries[..] { b.mutex.lock(); } // Now check if our table is still the latest one. Another thread could // have grown the hash table between us getting and locking the hash table. let new_table = get_hashtable(); if new_table as *const _ == table as *const _ { break; } // Unlock buckets and try again for b in &table.entries[..] { // SAFETY: We hold the lock here, as required b.mutex.unlock(); } table = new_table; } let thread_count = NUM_THREADS.load(Ordering::Relaxed); let mut graph = DiGraphMap::<WaitGraphNode, ()>::with_capacity(thread_count * 2, thread_count * 2); for b in &table.entries[..] 
{ let mut current = b.queue_head.get(); while !current.is_null() { if !(*current).parked_with_timeout.get() && !(*current).deadlock_data.deadlocked.get() { // .resources are waiting for their owner for &resource in &(*(*current).deadlock_data.resources.get()) { graph.add_edge(Resource(resource), Thread(current), ()); } // owner waits for resource .key graph.add_edge( Thread(current), Resource((*current).key.load(Ordering::Relaxed)), (), ); } current = (*current).next_in_queue.get(); } } for b in &table.entries[..] { // SAFETY: We hold the lock here, as required b.mutex.unlock(); } // find cycles let cycles = graph_cycles(&graph); let mut results = Vec::with_capacity(cycles.len()); for cycle in cycles { let (sender, receiver) = mpsc::channel(); for td in cycle { let bucket = lock_bucket((*td).key.load(Ordering::Relaxed)); (*td).deadlock_data.deadlocked.set(true); *(*td).deadlock_data.backtrace_sender.get() = Some(sender.clone()); let handle = (*td).parker.unpark_lock(); // SAFETY: We hold the lock here, as required bucket.mutex.unlock(); // unpark the deadlocked thread! 
// on unpark it'll notice the deadlocked flag and report back handle.unpark(); } // make sure to drop our sender before collecting results drop(sender); results.push(receiver.iter().collect()); } DEADLOCK_DETECTION_LOCK.unlock(); results } // normalize a cycle to start with the "smallest" node fn normalize_cycle<T: Ord + Copy + Clone>(input: &[T]) -> Vec<T> { let min_pos = input .iter() .enumerate() .min_by_key(|&(_, &t)| t) .map(|(p, _)| p) .unwrap_or(0); input .iter() .cycle() .skip(min_pos) .take(input.len()) .cloned() .collect() } // returns all thread cycles in the wait graph fn graph_cycles(g: &DiGraphMap<WaitGraphNode, ()>) -> Vec<Vec<*const ThreadData>> { use petgraph::visit::depth_first_search; use petgraph::visit::DfsEvent; use petgraph::visit::NodeIndexable; let mut cycles = HashSet::new(); let mut path = Vec::with_capacity(g.node_bound()); // start from threads to get the correct threads cycle let threads = g .nodes() .filter(|n| if let &Thread(_) = n { true } else { false }); depth_first_search(g, threads, |e| match e { DfsEvent::Discover(Thread(n), _) => path.push(n), DfsEvent::Finish(Thread(_), _) => { path.pop(); } DfsEvent::BackEdge(_, Thread(n)) => { let from = path.iter().rposition(|&i| i == n).unwrap(); cycles.insert(normalize_cycle(&path[from..])); } _ => (), }); cycles.iter().cloned().collect() } } #[cfg(test)] mod tests { use super::{ThreadData, DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN}; use std::{ ptr, sync::{ atomic::{AtomicIsize, AtomicPtr, AtomicUsize, Ordering}, Arc, }, thread, time::Duration, }; /// Calls a closure for every `ThreadData` currently parked on a given key fn for_each(key: usize, mut f: impl FnMut(&ThreadData)) { let bucket = super::lock_bucket(key); let mut current: *const ThreadData = bucket.queue_head.get(); while !current.is_null() { let current_ref = unsafe { &*current }; if current_ref.key.load(Ordering::Relaxed) == key { f(current_ref); } current = current_ref.next_in_queue.get(); } // SAFETY: We hold the lock here, 
as required unsafe { bucket.mutex.unlock() }; } macro_rules! test { ( $( $name:ident( repeats: $repeats:expr, latches: $latches:expr, delay: $delay:expr, threads: $threads:expr, single_unparks: $single_unparks:expr); )* ) => { $(#[test] fn $name() { let delay = Duration::from_micros($delay); for _ in 0..$repeats { run_parking_test($latches, delay, $threads, $single_unparks); } })* }; } test! { unpark_all_one_fast( repeats: 1000, latches: 1, delay: 0, threads: 1, single_unparks: 0 ); unpark_all_hundred_fast( repeats: 100, latches: 1, delay: 0, threads: 100, single_unparks: 0 ); unpark_one_one_fast( repeats: 1000, latches: 1, delay: 0, threads: 1, single_unparks: 1 ); unpark_one_hundred_fast( repeats: 20, latches: 1, delay: 0, threads: 100, single_unparks: 100 ); unpark_one_fifty_then_fifty_all_fast( repeats: 50, latches: 1, delay: 0, threads: 100, single_unparks: 50 ); unpark_all_one( repeats: 100, latches: 1, delay: 10000, threads: 1, single_unparks: 0 ); unpark_all_hundred( repeats: 100, latches: 1, delay: 10000, threads: 100, single_unparks: 0 ); unpark_one_one( repeats: 10, latches: 1, delay: 10000, threads: 1, single_unparks: 1 ); unpark_one_fifty( repeats: 1, latches: 1, delay: 10000, threads: 50, single_unparks: 50 ); unpark_one_fifty_then_fifty_all( repeats: 2, latches: 1, delay: 10000, threads: 100, single_unparks: 50 ); hundred_unpark_all_one_fast( repeats: 100, latches: 100, delay: 0, threads: 1, single_unparks: 0 ); hundred_unpark_all_one( repeats: 1, latches: 100, delay: 10000, threads: 1, single_unparks: 0 ); } fn run_parking_test( num_latches: usize, delay: Duration, num_threads: usize, num_single_unparks: usize, ) { let mut tests = Vec::with_capacity(num_latches); for _ in 0..num_latches { let test = Arc::new(SingleLatchTest::new(num_threads)); let mut threads = Vec::with_capacity(num_threads); for _ in 0..num_threads { let test = test.clone(); threads.push(thread::spawn(move || test.run())); } tests.push((test, threads)); } for unpark_index in 
0..num_single_unparks { thread::sleep(delay); for (test, _) in &tests { test.unpark_one(unpark_index); } } for (test, threads) in tests { test.finish(num_single_unparks); for thread in threads { thread.join().expect("Test thread panic"); } } } struct SingleLatchTest { semaphore: AtomicIsize, num_awake: AtomicUsize, /// Holds the pointer to the last *unprocessed* woken up thread. last_awoken: AtomicPtr<ThreadData>, /// Total number of threads participating in this test. num_threads: usize, } impl SingleLatchTest { pub fn new(num_threads: usize) -> Self { Self { // This implements a fair (FIFO) semaphore, and it starts out unavailable. semaphore: AtomicIsize::new(0), num_awake: AtomicUsize::new(0), last_awoken: AtomicPtr::new(ptr::null_mut()), num_threads, } } pub fn run(&self) { // Get one slot from the semaphore self.down(); // Report back to the test verification code that this thread woke up let this_thread_ptr = super::with_thread_data(|t| t as *const _ as *mut _); self.last_awoken.store(this_thread_ptr, Ordering::SeqCst); self.num_awake.fetch_add(1, Ordering::SeqCst); } pub fn unpark_one(&self, single_unpark_index: usize) { // last_awoken should be null at all times except between self.up() and at the bottom // of this method where it's reset to null again assert!(self.last_awoken.load(Ordering::SeqCst).is_null()); let mut queue: Vec<*mut ThreadData> = Vec::with_capacity(self.num_threads); for_each(self.semaphore_addr(), |thread_data| { queue.push(thread_data as *const _ as *mut _); }); assert!(queue.len() <= self.num_threads - single_unpark_index); let num_awake_before_up = self.num_awake.load(Ordering::SeqCst); self.up(); // Wait for a parked thread to wake up and update num_awake + last_awoken. 
while self.num_awake.load(Ordering::SeqCst) != num_awake_before_up + 1 { thread::yield_now(); } // At this point the other thread should have set last_awoken inside the run() method let last_awoken = self.last_awoken.load(Ordering::SeqCst); assert!(!last_awoken.is_null()); if !queue.is_empty() && queue[0] != last_awoken { panic!( "Woke up wrong thread:\n\tqueue: {:?}\n\tlast awoken: {:?}", queue, last_awoken ); } self.last_awoken.store(ptr::null_mut(), Ordering::SeqCst); } pub fn finish(&self, num_single_unparks: usize) { // The amount of threads not unparked via unpark_one let mut num_threads_left = self.num_threads.checked_sub(num_single_unparks).unwrap(); // Wake remaining threads up with unpark_all. Has to be in a loop, because there might // still be threads that has not yet parked. while num_threads_left > 0 { let mut num_waiting_on_address = 0; for_each(self.semaphore_addr(), |_thread_data| { num_waiting_on_address += 1; }); assert!(num_waiting_on_address <= num_threads_left); let num_awake_before_unpark = self.num_awake.load(Ordering::SeqCst); let num_unparked = unsafe { super::unpark_all(self.semaphore_addr(), DEFAULT_UNPARK_TOKEN) }; assert!(num_unparked >= num_waiting_on_address); assert!(num_unparked <= num_threads_left); // Wait for all unparked threads to wake up and update num_awake + last_awoken. 
while self.num_awake.load(Ordering::SeqCst) != num_awake_before_unpark + num_unparked { thread::yield_now() } num_threads_left = num_threads_left.checked_sub(num_unparked).unwrap(); } // By now, all threads should have been woken up assert_eq!(self.num_awake.load(Ordering::SeqCst), self.num_threads); // Make sure no thread is parked on our semaphore address let mut num_waiting_on_address = 0; for_each(self.semaphore_addr(), |_thread_data| { num_waiting_on_address += 1; }); assert_eq!(num_waiting_on_address, 0); } pub fn down(&self) { let old_semaphore_value = self.semaphore.fetch_sub(1, Ordering::SeqCst); if old_semaphore_value > 0 { // We acquired the semaphore. Done. return; } // We need to wait. let validate = || true; let before_sleep = || {}; let timed_out = |_, _| {}; unsafe { super::park( self.semaphore_addr(), validate, before_sleep, timed_out, DEFAULT_PARK_TOKEN, None, ); } } pub fn up(&self) { let old_semaphore_value = self.semaphore.fetch_add(1, Ordering::SeqCst); // Check if anyone was waiting on the semaphore. If they were, then pass ownership to them. if old_semaphore_value < 0 { // We need to continue until we have actually unparked someone. It might be that // the thread we want to pass ownership to has decremented the semaphore counter, // but not yet parked. loop { match unsafe { super::unpark_one(self.semaphore_addr(), |_| DEFAULT_UNPARK_TOKEN) .unparked_threads } { 1 => break, 0 => (), i => panic!("Should not wake up {} threads", i), } } } } fn semaphore_addr(&self) -> usize { &self.semaphore as *const _ as usize } } } ```
/content/code_sandbox/core/src/parking_lot.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
13,685
```rust
//
// path_to_url or the MIT license <LICENSE-MIT or
// path_to_url at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use core::{
    ffi,
    mem::{self, MaybeUninit},
    ptr,
};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::Instant;

// Values stored in the per-thread `AtomicUsize` park state (`key`).
const STATE_UNPARKED: usize = 0;
const STATE_PARKED: usize = 1;
const STATE_TIMED_OUT: usize = 2;

use super::bindings::*;

// Thread parker built on NT keyed events. The NtReleaseKeyedEvent /
// NtWaitForKeyedEvent entry points are resolved from ntdll.dll at runtime in
// `create` (they are not available through the regular import libraries) and
// the function pointers are cached here.
#[allow(non_snake_case)]
pub struct KeyedEvent {
    // Keyed-event object handle produced by NtCreateKeyedEvent in `create`.
    handle: HANDLE,
    NtReleaseKeyedEvent: extern "system" fn(
        EventHandle: HANDLE,
        Key: *mut ffi::c_void,
        Alertable: BOOLEAN,
        Timeout: *mut i64,
    ) -> NTSTATUS,
    NtWaitForKeyedEvent: extern "system" fn(
        EventHandle: HANDLE,
        Key: *mut ffi::c_void,
        Alertable: BOOLEAN,
        Timeout: *mut i64,
    ) -> NTSTATUS,
}

impl KeyedEvent {
    // Blocks on `key` until a matching `release`, or until the optional NT
    // timeout (negative = relative, in 100ns units; see `park_until`) expires.
    #[inline]
    unsafe fn wait_for(&self, key: *mut ffi::c_void, timeout: *mut i64) -> NTSTATUS {
        (self.NtWaitForKeyedEvent)(self.handle, key, false.into(), timeout)
    }

    // Wakes one thread blocked in `wait_for` on the same `key`.
    #[inline]
    unsafe fn release(&self, key: *mut ffi::c_void) -> NTSTATUS {
        (self.NtReleaseKeyedEvent)(self.handle, key, false.into(), ptr::null_mut())
    }

    // Builds the parker: returns None if ntdll.dll or any of the three keyed
    // event entry points cannot be resolved, or if NtCreateKeyedEvent fails.
    #[allow(non_snake_case)]
    pub fn create() -> Option<KeyedEvent> {
        let ntdll = unsafe { GetModuleHandleA(b"ntdll.dll\0".as_ptr()) };
        if ntdll == 0 {
            return None;
        }

        // Resolve the keyed-event entry points dynamically; `?` bails out on
        // a null FARPROC.
        let NtCreateKeyedEvent =
            unsafe { GetProcAddress(ntdll, b"NtCreateKeyedEvent\0".as_ptr())? };
        let NtReleaseKeyedEvent =
            unsafe { GetProcAddress(ntdll, b"NtReleaseKeyedEvent\0".as_ptr())? };
        let NtWaitForKeyedEvent =
            unsafe { GetProcAddress(ntdll, b"NtWaitForKeyedEvent\0".as_ptr())? };

        // Reinterpret the raw FARPROC as the actual NtCreateKeyedEvent
        // signature before calling it.
        let NtCreateKeyedEvent: extern "system" fn(
            KeyedEventHandle: *mut HANDLE,
            DesiredAccess: u32,
            ObjectAttributes: *mut ffi::c_void,
            Flags: u32,
        ) -> NTSTATUS = unsafe { mem::transmute(NtCreateKeyedEvent) };
        let mut handle = MaybeUninit::uninit();
        let status = NtCreateKeyedEvent(
            handle.as_mut_ptr(),
            GENERIC_READ | GENERIC_WRITE,
            ptr::null_mut(),
            0,
        );
        if status != STATUS_SUCCESS {
            return None;
        }

        Some(KeyedEvent {
            // `handle` is only read after NtCreateKeyedEvent reported success,
            // so it is initialized here.
            handle: unsafe { handle.assume_init() },
            NtReleaseKeyedEvent: unsafe { mem::transmute(NtReleaseKeyedEvent) },
            NtWaitForKeyedEvent: unsafe { mem::transmute(NtWaitForKeyedEvent) },
        })
    }

    // Marks the thread as parked; must run before `park`/`park_until`.
    #[inline]
    pub fn prepare_park(&'static self, key: &AtomicUsize) {
        key.store(STATE_PARKED, Ordering::Relaxed);
    }

    // Reports whether the previous park on `key` ended in a timeout.
    #[inline]
    pub fn timed_out(&'static self, key: &AtomicUsize) -> bool {
        key.load(Ordering::Relaxed) == STATE_TIMED_OUT
    }

    // Parks the current thread indefinitely until `unpark_lock`/`unpark`
    // releases it.
    #[inline]
    pub unsafe fn park(&'static self, key: &AtomicUsize) {
        let status = self.wait_for(key as *const _ as *mut ffi::c_void, ptr::null_mut());
        debug_assert_eq!(status, STATUS_SUCCESS);
    }

    // Parks the current thread until unparked or the absolute deadline
    // `timeout` passes. Returns true if unparked, false on timeout.
    #[inline]
    pub unsafe fn park_until(&'static self, key: &AtomicUsize, timeout: Instant) -> bool {
        let now = Instant::now();
        if timeout <= now {
            // If another thread unparked us, we need to call
            // NtWaitForKeyedEvent otherwise that thread will stay stuck at
            // NtReleaseKeyedEvent.
            if key.swap(STATE_TIMED_OUT, Ordering::Relaxed) == STATE_UNPARKED {
                self.park(key);
                return true;
            }
            return false;
        }

        // NT uses a timeout in units of 100ns. We use a negative value to
        // indicate a relative timeout based on a monotonic clock.
        let diff = timeout - now;
        // The `+ 99` rounds the sub-second remainder up to a whole 100ns
        // unit so the wait is never shorter than requested.
        let value = (diff.as_secs() as i64)
            .checked_mul(-10000000)
            .and_then(|x| x.checked_sub((diff.subsec_nanos() as i64 + 99) / 100));

        let mut nt_timeout = match value {
            Some(x) => x,
            None => {
                // Timeout overflowed, just sleep indefinitely
                self.park(key);
                return true;
            }
        };

        let status = self.wait_for(key as *const _ as *mut ffi::c_void, &mut nt_timeout);
        if status == STATUS_SUCCESS {
            return true;
        }
        debug_assert_eq!(status, STATUS_TIMEOUT);

        // If another thread unparked us, we need to call NtWaitForKeyedEvent
        // otherwise that thread will stay stuck at NtReleaseKeyedEvent.
        if key.swap(STATE_TIMED_OUT, Ordering::Relaxed) == STATE_UNPARKED {
            self.park(key);
            return true;
        }
        false
    }

    // Claims the wakeup of the thread parked on `key` (if any) while the
    // caller still holds the queue lock; the returned handle performs the
    // actual (possibly blocking) release after the lock is dropped.
    #[inline]
    pub unsafe fn unpark_lock(&'static self, key: &AtomicUsize) -> UnparkHandle {
        // If the state was STATE_PARKED then we need to wake up the thread
        if key.swap(STATE_UNPARKED, Ordering::Relaxed) == STATE_PARKED {
            UnparkHandle {
                key: key,
                keyed_event: self,
            }
        } else {
            // No thread to wake: a null key makes `unpark` a no-op.
            UnparkHandle {
                key: ptr::null(),
                keyed_event: self,
            }
        }
    }
}

impl Drop for KeyedEvent {
    #[inline]
    fn drop(&mut self) {
        unsafe {
            let ok = CloseHandle(self.handle);
            debug_assert_eq!(ok, true.into());
        }
    }
}

// Handle for a thread that is about to be unparked. We need to mark the thread
// as unparked while holding the queue lock, but we delay the actual unparking
// until after the queue lock is released.
pub struct UnparkHandle {
    key: *const AtomicUsize,
    keyed_event: &'static KeyedEvent,
}

impl UnparkHandle {
    // Wakes up the parked thread. This should be called after the queue lock is
    // released to avoid blocking the queue for too long.
    #[inline]
    pub unsafe fn unpark(self) {
        if !self.key.is_null() {
            let status = self.keyed_event.release(self.key as *mut ffi::c_void);
            debug_assert_eq!(status, STATUS_SUCCESS);
        }
    }
}
```
/content/code_sandbox/core/src/thread_parker/windows/keyed_event.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
1,544
```rust
//! Manual bindings to the win32 API to avoid dependencies on windows-sys or winapi
//! as these bindings will **never** change and `parking_lot_core` is a foundational
//! dependency for the Rust ecosystem, so the dependencies used by it have an
//! outsize effect

// Wait-timeout sentinel (u32::MAX): wait forever.
pub const INFINITE: u32 = 4294967295;
// Win32 `ERROR_TIMEOUT` (1460): a wait operation timed out.
pub const ERROR_TIMEOUT: u32 = 1460;
// Generic access-mask bits (0x80000000 / 0x40000000).
pub const GENERIC_READ: u32 = 2147483648;
pub const GENERIC_WRITE: u32 = 1073741824;
// NTSTATUS results returned by the NT keyed-event calls.
pub const STATUS_SUCCESS: i32 = 0;
pub const STATUS_TIMEOUT: i32 = 258;

// Minimal Windows typedef aliases used by the thread-parker code.
pub type HANDLE = isize;
pub type HINSTANCE = isize;
pub type BOOL = i32;
pub type BOOLEAN = u8;
pub type NTSTATUS = i32;
// GetProcAddress result: None when the symbol is not found.
pub type FARPROC = Option<unsafe extern "system" fn() -> isize>;

// Function-pointer types matching the WaitOnAddress / WakeByAddressSingle
// API signatures.
pub type WaitOnAddress = unsafe extern "system" fn(
    Address: *const std::ffi::c_void,
    CompareAddress: *const std::ffi::c_void,
    AddressSize: usize,
    dwMilliseconds: u32,
) -> BOOL;
pub type WakeByAddressSingle = unsafe extern "system" fn(Address: *const std::ffi::c_void);

// Statically-linked kernel32 entry points.
windows_targets::link!("kernel32.dll" "system" fn GetLastError() -> u32);
windows_targets::link!("kernel32.dll" "system" fn CloseHandle(hObject: HANDLE) -> BOOL);
windows_targets::link!("kernel32.dll" "system" fn GetModuleHandleA(lpModuleName: *const u8) -> HINSTANCE);
windows_targets::link!("kernel32.dll" "system" fn GetProcAddress(hModule: HINSTANCE, lpProcName: *const u8) -> FARPROC);
windows_targets::link!("kernel32.dll" "system" fn Sleep(dwMilliseconds: u32) -> ());
```
/content/code_sandbox/core/src/thread_parker/windows/bindings.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
394
```rust // // path_to_url or the MIT license <LICENSE-MIT or // path_to_url at your option. This file may not be // copied, modified, or distributed except according to those terms. use std::{env, process}; #[derive(Copy, Clone)] pub struct ArgRange { current: usize, limit: usize, step: usize, } impl ArgRange { pub fn is_single(&self) -> bool { self.current.saturating_add(self.step) > self.limit } } impl Iterator for ArgRange { type Item = usize; fn next(&mut self) -> Option<usize> { if self.current <= self.limit { let result = self.current; self.current = self.current.saturating_add(self.step); Some(result) } else { None } } } fn print_usage(names: &[&str], error_msg: Option<String>) -> ! { if let Some(error) = error_msg { println!("{}", error); } println!("Usage: {} {}", env::args().next().unwrap(), names.join(" ")); println!( "Each argument can be a single value or a range in the form start:end or \ start:end:step" ); process::exit(1); } fn parse_num(names: &[&str], name: &str, value: &str) -> usize { value.parse().unwrap_or_else(|_| { print_usage( names, Some(format!("Invalid value for {}: {}", name, value)), ) }) } fn parse_one(names: &[&str], name: &str, value: &str) -> ArgRange { let components = value.split(':').collect::<Vec<_>>(); match components.len() { 1 => { let val = parse_num(names, name, components[0]); ArgRange { current: val, limit: val, step: 1, } } 2 => { let start = parse_num(names, name, components[0]); let end = parse_num(names, name, components[1]); if start > end { print_usage( names, Some(format!("Invalid range for {}: {}", name, value)), ); } ArgRange { current: start, limit: end, step: 1, } } 3 => { let start = parse_num(names, name, components[0]); let end = parse_num(names, name, components[1]); let step = parse_num(names, name, components[2]); if start > end { print_usage( names, Some(format!("Invalid range for {}: {}", name, value)), ); } ArgRange { current: start, limit: end, step: step, } } _ => print_usage( names, 
Some(format!("Invalid value for {}: {}", name, value)), ), } } pub fn parse(names: &[&str]) -> Vec<ArgRange> { let args = env::args().skip(1).collect::<Vec<_>>(); if args.is_empty() { print_usage(names, None); } if args.len() != names.len() { print_usage( names, Some(format!( "Invalid number of arguments (expected {}, got {})", names.len(), args.len() )), ); } let mut result = vec![]; for (name, value) in names.iter().zip(args) { result.push(parse_one(names, name, &value)); } result } ```
/content/code_sandbox/benchmark/src/args.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
763
```rust // // path_to_url or the MIT license <LICENSE-MIT or // path_to_url at your option. This file may not be // copied, modified, or distributed except according to those terms. mod args; use crate::args::ArgRange; #[cfg(any(windows, unix))] use std::cell::UnsafeCell; use std::{ sync::{ atomic::{AtomicBool, Ordering}, Arc, Barrier, }, thread, time::Duration, }; trait RwLock<T> { fn new(v: T) -> Self; fn read<F, R>(&self, f: F) -> R where F: FnOnce(&T) -> R; fn write<F, R>(&self, f: F) -> R where F: FnOnce(&mut T) -> R; fn name() -> &'static str; } impl<T> RwLock<T> for std::sync::RwLock<T> { fn new(v: T) -> Self { Self::new(v) } fn read<F, R>(&self, f: F) -> R where F: FnOnce(&T) -> R, { f(&*self.read().unwrap()) } fn write<F, R>(&self, f: F) -> R where F: FnOnce(&mut T) -> R, { f(&mut *self.write().unwrap()) } fn name() -> &'static str { "std::sync::RwLock" } } impl<T> RwLock<T> for parking_lot::RwLock<T> { fn new(v: T) -> Self { Self::new(v) } fn read<F, R>(&self, f: F) -> R where F: FnOnce(&T) -> R, { f(&*self.read()) } fn write<F, R>(&self, f: F) -> R where F: FnOnce(&mut T) -> R, { f(&mut *self.write()) } fn name() -> &'static str { "parking_lot::RwLock" } } impl<T: Copy> RwLock<T> for seqlock::SeqLock<T> { fn new(v: T) -> Self { Self::new(v) } fn read<F, R>(&self, f: F) -> R where F: FnOnce(&T) -> R, { f(&self.read()) } fn write<F, R>(&self, f: F) -> R where F: FnOnce(&mut T) -> R, { f(&mut *self.lock_write()) } fn name() -> &'static str { "seqlock::SeqLock" } } #[cfg(not(windows))] type SrwLock<T> = std::sync::RwLock<T>; #[cfg(windows)] use winapi::um::synchapi; #[cfg(windows)] struct SrwLock<T>(UnsafeCell<T>, UnsafeCell<synchapi::SRWLOCK>); #[cfg(windows)] unsafe impl<T> Sync for SrwLock<T> {} #[cfg(windows)] unsafe impl<T: Send> Send for SrwLock<T> {} #[cfg(windows)] impl<T> RwLock<T> for SrwLock<T> { fn new(v: T) -> Self { let mut h: synchapi::SRWLOCK = synchapi::SRWLOCK { Ptr: std::ptr::null_mut() }; unsafe { synchapi::InitializeSRWLock(&mut h); } 
SrwLock( UnsafeCell::new(v), UnsafeCell::new(h), ) } fn read<F, R>(&self, f: F) -> R where F: FnOnce(&T) -> R, { unsafe { synchapi::AcquireSRWLockShared(self.1.get()); let res = f(&*self.0.get()); synchapi::ReleaseSRWLockShared(self.1.get()); res } } fn write<F, R>(&self, f: F) -> R where F: FnOnce(&mut T) -> R, { unsafe { synchapi::AcquireSRWLockExclusive(self.1.get()); let res = f(&mut *self.0.get()); synchapi::ReleaseSRWLockExclusive(self.1.get()); res } } fn name() -> &'static str { "winapi_srwlock" } } #[cfg(not(unix))] type PthreadRwLock<T> = std::sync::RwLock<T>; #[cfg(unix)] struct PthreadRwLock<T>(UnsafeCell<T>, UnsafeCell<libc::pthread_rwlock_t>); #[cfg(unix)] unsafe impl<T> Sync for PthreadRwLock<T> {} #[cfg(unix)] impl<T> RwLock<T> for PthreadRwLock<T> { fn new(v: T) -> Self { PthreadRwLock( UnsafeCell::new(v), UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER), ) } fn read<F, R>(&self, f: F) -> R where F: FnOnce(&T) -> R, { unsafe { libc::pthread_rwlock_wrlock(self.1.get()); let res = f(&*self.0.get()); libc::pthread_rwlock_unlock(self.1.get()); res } } fn write<F, R>(&self, f: F) -> R where F: FnOnce(&mut T) -> R, { unsafe { libc::pthread_rwlock_wrlock(self.1.get()); let res = f(&mut *self.0.get()); libc::pthread_rwlock_unlock(self.1.get()); res } } fn name() -> &'static str { "pthread_rwlock_t" } } #[cfg(unix)] impl<T> Drop for PthreadRwLock<T> { fn drop(&mut self) { unsafe { libc::pthread_rwlock_destroy(self.1.get()); } } } fn run_benchmark<M: RwLock<f64> + Send + Sync + 'static>( num_writer_threads: usize, num_reader_threads: usize, work_per_critical_section: usize, work_between_critical_sections: usize, seconds_per_test: usize, ) -> (Vec<usize>, Vec<usize>) { let lock = Arc::new(([0u8; 300], M::new(0.0), [0u8; 300])); let keep_going = Arc::new(AtomicBool::new(true)); let barrier = Arc::new(Barrier::new(num_reader_threads + num_writer_threads)); let mut writers = vec![]; let mut readers = vec![]; for _ in 0..num_writer_threads { let barrier = 
barrier.clone(); let lock = lock.clone(); let keep_going = keep_going.clone(); writers.push(thread::spawn(move || { let mut local_value = 0.0; let mut value = 0.0; let mut iterations = 0usize; barrier.wait(); while keep_going.load(Ordering::Relaxed) { lock.1.write(|shared_value| { for _ in 0..work_per_critical_section { *shared_value += value; *shared_value *= 1.01; value = *shared_value; } }); for _ in 0..work_between_critical_sections { local_value += value; local_value *= 1.01; value = local_value; } iterations += 1; } (iterations, value) })); } for _ in 0..num_reader_threads { let barrier = barrier.clone(); let lock = lock.clone(); let keep_going = keep_going.clone(); readers.push(thread::spawn(move || { let mut local_value = 0.0; let mut value = 0.0; let mut iterations = 0usize; barrier.wait(); while keep_going.load(Ordering::Relaxed) { lock.1.read(|shared_value| { for _ in 0..work_per_critical_section { local_value += value; local_value *= *shared_value; value = local_value; } }); for _ in 0..work_between_critical_sections { local_value += value; local_value *= 1.01; value = local_value; } iterations += 1; } (iterations, value) })); } thread::sleep(Duration::new(seconds_per_test as u64, 0)); keep_going.store(false, Ordering::Relaxed); let run_writers = writers .into_iter() .map(|x| x.join().unwrap().0) .collect::<Vec<usize>>(); let run_readers = readers .into_iter() .map(|x| x.join().unwrap().0) .collect::<Vec<usize>>(); (run_writers, run_readers) } fn run_benchmark_iterations<M: RwLock<f64> + Send + Sync + 'static>( num_writer_threads: usize, num_reader_threads: usize, work_per_critical_section: usize, work_between_critical_sections: usize, seconds_per_test: usize, test_iterations: usize, ) { let mut writers = vec![]; let mut readers = vec![]; for _ in 0..test_iterations { let (run_writers, run_readers) = run_benchmark::<M>( num_writer_threads, num_reader_threads, work_per_critical_section, work_between_critical_sections, seconds_per_test, ); 
writers.extend_from_slice(&run_writers); readers.extend_from_slice(&run_readers); } let total_writers = writers.iter().fold(0f64, |a, b| a + *b as f64) / test_iterations as f64; let total_readers = readers.iter().fold(0f64, |a, b| a + *b as f64) / test_iterations as f64; println!( "{:20} - [write] {:10.3} kHz [read] {:10.3} kHz", M::name(), total_writers as f64 / seconds_per_test as f64 / 1000.0, total_readers as f64 / seconds_per_test as f64 / 1000.0 ); } fn run_all( args: &[ArgRange], first: &mut bool, num_writer_threads: usize, num_reader_threads: usize, work_per_critical_section: usize, work_between_critical_sections: usize, seconds_per_test: usize, test_iterations: usize, ) { if num_writer_threads == 0 && num_reader_threads == 0 { return; } if *first || !args[0].is_single() || !args[1].is_single() { println!( "- Running with {} writer threads and {} reader threads", num_writer_threads, num_reader_threads ); } if *first || !args[2].is_single() || !args[3].is_single() { println!( "- {} iterations inside lock, {} iterations outside lock", work_per_critical_section, work_between_critical_sections ); } if *first || !args[4].is_single() { println!("- {} seconds per test", seconds_per_test); } *first = false; run_benchmark_iterations::<parking_lot::RwLock<f64>>( num_writer_threads, num_reader_threads, work_per_critical_section, work_between_critical_sections, seconds_per_test, test_iterations, ); run_benchmark_iterations::<seqlock::SeqLock<f64>>( num_writer_threads, num_reader_threads, work_per_critical_section, work_between_critical_sections, seconds_per_test, test_iterations, ); if cfg!(windows) { run_benchmark_iterations::<std::sync::RwLock<f64>>( num_writer_threads, num_reader_threads, work_per_critical_section, work_between_critical_sections, seconds_per_test, test_iterations, ); } if cfg!(unix) { run_benchmark_iterations::<PthreadRwLock<f64>>( num_writer_threads, num_reader_threads, work_per_critical_section, work_between_critical_sections, seconds_per_test, 
test_iterations, ); } } fn main() { let args = args::parse(&[ "numWriterThreads", "numReaderThreads", "workPerCriticalSection", "workBetweenCriticalSections", "secondsPerTest", "testIterations", ]); let mut first = true; for num_writer_threads in args[0] { for num_reader_threads in args[1] { for work_per_critical_section in args[2] { for work_between_critical_sections in args[3] { for seconds_per_test in args[4] { for test_iterations in args[5] { run_all( &args, &mut first, num_writer_threads, num_reader_threads, work_per_critical_section, work_between_critical_sections, seconds_per_test, test_iterations, ); } } } } } } } ```
/content/code_sandbox/benchmark/src/rwlock.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
2,923
```rust
//
// path_to_url or the MIT license <LICENSE-MIT or
// path_to_url at your option. This file may not be
// copied, modified, or distributed except according to those terms.

// Benchmark comparing several mutex implementations under configurable
// contention (thread count, work inside and outside the critical section).

mod args;
use crate::args::ArgRange;

#[cfg(any(windows, unix))]
use std::cell::UnsafeCell;
use std::{
    sync::{
        atomic::{AtomicBool, Ordering},
        Arc, Barrier,
    },
    thread,
    time::Duration,
};

// Common interface so every mutex implementation can be driven by the same
// benchmark loop.
trait Mutex<T> {
    fn new(v: T) -> Self;
    // Runs `f` with exclusive access to the protected value.
    fn lock<F, R>(&self, f: F) -> R
    where
        F: FnOnce(&mut T) -> R;
    // Name printed in the results table.
    fn name() -> &'static str;
}

impl<T> Mutex<T> for std::sync::Mutex<T> {
    fn new(v: T) -> Self {
        Self::new(v)
    }
    fn lock<F, R>(&self, f: F) -> R
    where
        F: FnOnce(&mut T) -> R,
    {
        f(&mut *self.lock().unwrap())
    }
    fn name() -> &'static str {
        "std::sync::Mutex"
    }
}

impl<T> Mutex<T> for parking_lot::Mutex<T> {
    fn new(v: T) -> Self {
        Self::new(v)
    }
    fn lock<F, R>(&self, f: F) -> R
    where
        F: FnOnce(&mut T) -> R,
    {
        f(&mut *self.lock())
    }
    fn name() -> &'static str {
        "parking_lot::Mutex"
    }
}

// On non-Windows targets alias to the std mutex so the benchmark list still
// compiles; the alias is only run under cfg!(windows).
#[cfg(not(windows))]
type SrwLock<T> = std::sync::Mutex<T>;

#[cfg(windows)]
use winapi::um::synchapi;
// Windows SRWLOCK used in exclusive mode only.
#[cfg(windows)]
struct SrwLock<T>(UnsafeCell<T>, UnsafeCell<synchapi::SRWLOCK>);
#[cfg(windows)]
unsafe impl<T> Sync for SrwLock<T> {}
#[cfg(windows)]
unsafe impl<T: Send> Send for SrwLock<T> {}
#[cfg(windows)]
impl<T> Mutex<T> for SrwLock<T> {
    fn new(v: T) -> Self {
        let mut h: synchapi::SRWLOCK = synchapi::SRWLOCK {
            Ptr: std::ptr::null_mut(),
        };
        unsafe {
            synchapi::InitializeSRWLock(&mut h);
        }
        SrwLock(
            UnsafeCell::new(v),
            UnsafeCell::new(h),
        )
    }
    fn lock<F, R>(&self, f: F) -> R
    where
        F: FnOnce(&mut T) -> R,
    {
        unsafe {
            synchapi::AcquireSRWLockExclusive(self.1.get());
            let res = f(&mut *self.0.get());
            synchapi::ReleaseSRWLockExclusive(self.1.get());
            res
        }
    }
    fn name() -> &'static str {
        "winapi_srwlock"
    }
}

#[cfg(not(unix))]
type PthreadMutex<T> = std::sync::Mutex<T>;

// Wrapper around the raw POSIX mutex.
#[cfg(unix)]
struct PthreadMutex<T>(UnsafeCell<T>, UnsafeCell<libc::pthread_mutex_t>);
#[cfg(unix)]
unsafe impl<T> Sync for PthreadMutex<T> {}
#[cfg(unix)]
impl<T> Mutex<T> for PthreadMutex<T> {
    fn new(v: T) -> Self {
        PthreadMutex(
            UnsafeCell::new(v),
            UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER),
        )
    }
    fn lock<F, R>(&self, f: F) -> R
    where
        F: FnOnce(&mut T) -> R,
    {
        unsafe {
            libc::pthread_mutex_lock(self.1.get());
            let res = f(&mut *self.0.get());
            libc::pthread_mutex_unlock(self.1.get());
            res
        }
    }
    fn name() -> &'static str {
        "pthread_mutex_t"
    }
}
#[cfg(unix)]
impl<T> Drop for PthreadMutex<T> {
    fn drop(&mut self) {
        unsafe {
            // Even statically initialized pthread mutexes need explicit
            // destruction.
            libc::pthread_mutex_destroy(self.1.get());
        }
    }
}

// Runs one timed benchmark: `num_threads` threads repeatedly take the lock,
// perform `work_per_critical_section` float operations while holding it and
// `work_between_critical_sections` operations outside it. Returns the
// iteration count of each thread.
fn run_benchmark<M: Mutex<f64> + Send + Sync + 'static>(
    num_threads: usize,
    work_per_critical_section: usize,
    work_between_critical_sections: usize,
    seconds_per_test: usize,
) -> Vec<usize> {
    // 300-byte padding on both sides keeps the lock away from neighboring
    // allocations (false-sharing mitigation).
    let lock = Arc::new(([0u8; 300], M::new(0.0), [0u8; 300]));
    let keep_going = Arc::new(AtomicBool::new(true));
    let barrier = Arc::new(Barrier::new(num_threads));
    let mut threads = vec![];
    for _ in 0..num_threads {
        let barrier = barrier.clone();
        let lock = lock.clone();
        let keep_going = keep_going.clone();
        threads.push(thread::spawn(move || {
            let mut local_value = 0.0;
            let mut value = 0.0;
            let mut iterations = 0usize;
            barrier.wait();
            while keep_going.load(Ordering::Relaxed) {
                lock.1.lock(|shared_value| {
                    for _ in 0..work_per_critical_section {
                        *shared_value += value;
                        *shared_value *= 1.01;
                        value = *shared_value;
                    }
                });
                for _ in 0..work_between_critical_sections {
                    local_value += value;
                    local_value *= 1.01;
                    value = local_value;
                }
                iterations += 1;
            }
            // `value` is returned so the arithmetic cannot be optimized away.
            (iterations, value)
        }));
    }

    thread::sleep(Duration::from_secs(seconds_per_test as u64));
    keep_going.store(false, Ordering::Relaxed);
    threads.into_iter().map(|x| x.join().unwrap().0).collect()
}

// Repeats `run_benchmark` `test_iterations` times and prints the average,
// median and standard deviation of the per-thread lock rate in kHz.
fn run_benchmark_iterations<M: Mutex<f64> + Send + Sync + 'static>(
    num_threads: usize,
    work_per_critical_section: usize,
    work_between_critical_sections: usize,
    seconds_per_test: usize,
    test_iterations: usize,
) {
    let mut data = vec![];
    for _ in 0..test_iterations {
        let run_data = run_benchmark::<M>(
            num_threads,
            work_per_critical_section,
            work_between_critical_sections,
            seconds_per_test,
        );
        data.extend_from_slice(&run_data);
    }

    let average = data.iter().fold(0f64, |a, b| a + *b as f64) / data.len() as f64;
    let variance = data
        .iter()
        .fold(0f64, |a, b| a + ((*b as f64 - average).powi(2)))
        / data.len() as f64;
    // Sorted only to extract the median below.
    data.sort();

    // Scale factor converting iterations-per-test into kHz.
    let k_hz = 1.0 / seconds_per_test as f64 / 1000.0;
    println!(
        "{:20} | {:10.3} kHz | {:10.3} kHz | {:10.3} kHz",
        M::name(),
        average * k_hz,
        data[data.len() / 2] as f64 * k_hz,
        variance.sqrt() * k_hz
    );
}

// Runs the full benchmark suite for one parameter combination, printing the
// parameters the first time or whenever they vary across runs.
fn run_all(
    args: &[ArgRange],
    first: &mut bool,
    num_threads: usize,
    work_per_critical_section: usize,
    work_between_critical_sections: usize,
    seconds_per_test: usize,
    test_iterations: usize,
) {
    if num_threads == 0 {
        return;
    }
    if *first || !args[0].is_single() {
        println!("- Running with {} threads", num_threads);
    }
    if *first || !args[1].is_single() || !args[2].is_single() {
        println!(
            "- {} iterations inside lock, {} iterations outside lock",
            work_per_critical_section, work_between_critical_sections
        );
    }
    if *first || !args[3].is_single() {
        println!("- {} seconds per test", seconds_per_test);
    }
    *first = false;
    // Column headers for the results table printed by
    // run_benchmark_iterations.
    println!(
        "{:^20} | {:^14} | {:^14} | {:^14}",
        "name", "average", "median", "std.dev."
    );
    run_benchmark_iterations::<parking_lot::Mutex<f64>>(
        num_threads,
        work_per_critical_section,
        work_between_critical_sections,
        seconds_per_test,
        test_iterations,
    );
    run_benchmark_iterations::<std::sync::Mutex<f64>>(
        num_threads,
        work_per_critical_section,
        work_between_critical_sections,
        seconds_per_test,
        test_iterations,
    );
    if cfg!(windows) {
        run_benchmark_iterations::<SrwLock<f64>>(
            num_threads,
            work_per_critical_section,
            work_between_critical_sections,
            seconds_per_test,
            test_iterations,
        );
    }
    if cfg!(unix) {
        run_benchmark_iterations::<PthreadMutex<f64>>(
            num_threads,
            work_per_critical_section,
            work_between_critical_sections,
            seconds_per_test,
            test_iterations,
        );
    }
}

fn main() {
    let args = args::parse(&[
        "numThreads",
        "workPerCriticalSection",
        "workBetweenCriticalSections",
        "secondsPerTest",
        "testIterations",
    ]);
    let mut first = true;
    for num_threads in args[0] {
        for work_per_critical_section in args[1] {
            for work_between_critical_sections in args[2] {
                for seconds_per_test in args[3] {
                    for test_iterations in args[4] {
                        run_all(
                            &args,
                            &mut first,
                            num_threads,
                            work_per_critical_section,
                            work_between_critical_sections,
                            seconds_per_test,
                            test_iterations,
                        );
                    }
                }
            }
        }
    }
}
```
/content/code_sandbox/benchmark/src/mutex.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
2,126
```rust
//
// path_to_url or the MIT license <LICENSE-MIT or
// path_to_url at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use crate::raw_mutex::RawMutex;
use lock_api::RawMutexFair;

/// Raw fair mutex type backed by the parking lot.
// Every operation simply delegates to the wrapped `RawMutex`; the only
// behavioral difference is in `unlock` below.
pub struct RawFairMutex(RawMutex);

unsafe impl lock_api::RawMutex for RawFairMutex {
    const INIT: Self = RawFairMutex(<RawMutex as lock_api::RawMutex>::INIT);
    type GuardMarker = <RawMutex as lock_api::RawMutex>::GuardMarker;

    #[inline]
    fn lock(&self) {
        self.0.lock()
    }

    #[inline]
    fn try_lock(&self) -> bool {
        self.0.try_lock()
    }

    #[inline]
    unsafe fn unlock(&self) {
        // This is what makes the mutex fair: the ordinary unlock is routed
        // to the fair unlock path (RawMutexFair::unlock_fair below).
        self.unlock_fair()
    }

    #[inline]
    fn is_locked(&self) -> bool {
        self.0.is_locked()
    }
}

unsafe impl lock_api::RawMutexFair for RawFairMutex {
    #[inline]
    unsafe fn unlock_fair(&self) {
        self.0.unlock_fair()
    }

    #[inline]
    unsafe fn bump(&self) {
        self.0.bump()
    }
}

unsafe impl lock_api::RawMutexTimed for RawFairMutex {
    type Duration = <RawMutex as lock_api::RawMutexTimed>::Duration;
    type Instant = <RawMutex as lock_api::RawMutexTimed>::Instant;

    #[inline]
    fn try_lock_until(&self, timeout: Self::Instant) -> bool {
        self.0.try_lock_until(timeout)
    }

    #[inline]
    fn try_lock_for(&self, timeout: Self::Duration) -> bool {
        self.0.try_lock_for(timeout)
    }
}
```
/content/code_sandbox/src/raw_fair_mutex.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
391
```rust
//
// path_to_url or the MIT license <LICENSE-MIT or
// path_to_url at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use std::sync::atomic::AtomicUsize;

// Extension trait to add lock elision primitives to atomic types
pub trait AtomicElisionExt {
    type IntType;

    // Perform a compare_exchange and start a transaction
    fn elision_compare_exchange_acquire(
        &self,
        current: Self::IntType,
        new: Self::IntType,
    ) -> Result<Self::IntType, Self::IntType>;

    // Perform a fetch_sub and end a transaction
    fn elision_fetch_sub_release(&self, val: Self::IntType) -> Self::IntType;
}

// Indicates whether the target architecture supports lock elision
// (requires the `hardware-lock-elision` feature and an x86/x86_64 target).
#[inline]
pub fn have_elision() -> bool {
    cfg!(all(
        feature = "hardware-lock-elision",
        any(target_arch = "x86", target_arch = "x86_64"),
    ))
}

// This implementation is never actually called because it is guarded by
// have_elision().
#[cfg(not(all(
    feature = "hardware-lock-elision",
    any(target_arch = "x86", target_arch = "x86_64")
)))]
impl AtomicElisionExt for AtomicUsize {
    type IntType = usize;

    #[inline]
    fn elision_compare_exchange_acquire(&self, _: usize, _: usize) -> Result<usize, usize> {
        unreachable!();
    }

    #[inline]
    fn elision_fetch_sub_release(&self, _: usize) -> usize {
        unreachable!();
    }
}

#[cfg(all(
    feature = "hardware-lock-elision",
    any(target_arch = "x86", target_arch = "x86_64")
))]
impl AtomicElisionExt for AtomicUsize {
    type IntType = usize;

    #[inline]
    fn elision_compare_exchange_acquire(&self, current: usize, new: usize) -> Result<usize, usize> {
        unsafe {
            use core::arch::asm;
            let prev: usize;
            // `xacquire lock cmpxchg`: compare-and-exchange with the
            // transaction-start (elision) prefix. The expected value goes in
            // eax/rax; cmpxchg leaves the previous memory value there, which
            // we capture in `prev`.
            #[cfg(target_pointer_width = "32")]
            asm!(
                "xacquire",
                "lock",
                "cmpxchg [{:e}], {:e}",
                in(reg) self,
                in(reg) new,
                inout("eax") current => prev,
            );
            #[cfg(target_pointer_width = "64")]
            asm!(
                "xacquire",
                "lock",
                "cmpxchg [{}], {}",
                in(reg) self,
                in(reg) new,
                inout("rax") current => prev,
            );
            // Mirror compare_exchange semantics: Ok on success, Err with the
            // observed value on failure.
            if prev == current {
                Ok(prev)
            } else {
                Err(prev)
            }
        }
    }

    #[inline]
    fn elision_fetch_sub_release(&self, val: usize) -> usize {
        unsafe {
            use core::arch::asm;
            let prev: usize;
            // `xrelease lock xadd` with the wrapping negation of `val`
            // implements fetch_sub while ending the elided transaction;
            // xadd leaves the previous memory value in the register operand.
            #[cfg(target_pointer_width = "32")]
            asm!(
                "xrelease",
                "lock",
                "xadd [{:e}], {:e}",
                in(reg) self,
                inout(reg) val.wrapping_neg() => prev,
            );
            #[cfg(target_pointer_width = "64")]
            asm!(
                "xrelease",
                "lock",
                "xadd [{}], {}",
                in(reg) self,
                inout(reg) val.wrapping_neg() => prev,
            );
            prev
        }
    }
}
```
/content/code_sandbox/src/elision.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
735
```rust
//! \[Experimental\] Deadlock detection
//!
//! This feature is optional and can be enabled via the `deadlock_detection` feature flag.
//!
//! # Example
//!
//! ```
//! #[cfg(feature = "deadlock_detection")]
//! { // only for #[cfg]
//! use std::thread;
//! use std::time::Duration;
//! use parking_lot::deadlock;
//!
//! // Create a background thread which checks for deadlocks every 10s
//! thread::spawn(move || {
//!     loop {
//!         thread::sleep(Duration::from_secs(10));
//!         let deadlocks = deadlock::check_deadlock();
//!         if deadlocks.is_empty() {
//!             continue;
//!         }
//!
//!         println!("{} deadlocks detected", deadlocks.len());
//!         for (i, threads) in deadlocks.iter().enumerate() {
//!             println!("Deadlock #{}", i);
//!             for t in threads {
//!                 println!("Thread Id {:#?}", t.thread_id());
//!                 println!("{:#?}", t.backtrace());
//!             }
//!         }
//!     }
//! });
//! } // only for #[cfg]
//! ```

#[cfg(feature = "deadlock_detection")]
pub use parking_lot_core::deadlock::check_deadlock;
pub(crate) use parking_lot_core::deadlock::{acquire_resource, release_resource};

#[cfg(test)]
#[cfg(feature = "deadlock_detection")]
mod tests {
    use crate::{Mutex, ReentrantMutex, RwLock};
    use std::sync::{Arc, Barrier};
    use std::thread::{self, sleep};
    use std::time::Duration;

    // We need to serialize these tests since deadlock detection uses global state
    static DEADLOCK_DETECTION_LOCK: Mutex<()> = crate::const_mutex(());

    // True if the detector currently reports at least one deadlock. Note
    // that parking_lot_core's check_deadlock() drains its findings, so a
    // second call right after a positive one reports nothing — the tests
    // below rely on that.
    fn check_deadlock() -> bool {
        use parking_lot_core::deadlock::check_deadlock;
        !check_deadlock().is_empty()
    }

    // Builds a three-way lock cycle: t1 holds m1 and wants m2, t2 holds m2
    // and wants m3, t3 holds m3 and wants m1. The barrier guarantees all
    // three hold their first lock before any requests its second.
    #[test]
    fn test_mutex_deadlock() {
        let _guard = DEADLOCK_DETECTION_LOCK.lock();
        let m1: Arc<Mutex<()>> = Default::default();
        let m2: Arc<Mutex<()>> = Default::default();
        let m3: Arc<Mutex<()>> = Default::default();
        let b = Arc::new(Barrier::new(4));

        let m1_ = m1.clone();
        let m2_ = m2.clone();
        let m3_ = m3.clone();

        let b1 = b.clone();
        let b2 = b.clone();
        let b3 = b.clone();

        assert!(!check_deadlock());

        let _t1 = thread::spawn(move || {
            let _g = m1.lock();
            b1.wait();
            let _ = m2_.lock();
        });

        let _t2 = thread::spawn(move || {
            let _g = m2.lock();
            b2.wait();
            let _ = m3_.lock();
        });

        let _t3 = thread::spawn(move || {
            let _g = m3.lock();
            b3.wait();
            let _ = m1_.lock();
        });

        assert!(!check_deadlock());

        b.wait();
        // Give the threads time to block on their second lock.
        sleep(Duration::from_millis(50));
        assert!(check_deadlock());

        // The previous positive check drained the report.
        assert!(!check_deadlock());
    }

    // A single thread locking the same (non-reentrant) mutex twice is a
    // self-deadlock the detector must report.
    #[test]
    fn test_mutex_deadlock_reentrant() {
        let _guard = DEADLOCK_DETECTION_LOCK.lock();
        let m1: Arc<Mutex<()>> = Default::default();

        assert!(!check_deadlock());

        let _t1 = thread::spawn(move || {
            let _g = m1.lock();
            let _ = m1.lock();
        });

        sleep(Duration::from_millis(50));
        assert!(check_deadlock());

        assert!(!check_deadlock());
    }

    // Same three-way cycle as test_mutex_deadlock, but each thread also
    // recursively locks its own mutex first, which ReentrantMutex permits.
    #[test]
    fn test_remutex_deadlock() {
        let _guard = DEADLOCK_DETECTION_LOCK.lock();
        let m1: Arc<ReentrantMutex<()>> = Default::default();
        let m2: Arc<ReentrantMutex<()>> = Default::default();
        let m3: Arc<ReentrantMutex<()>> = Default::default();
        let b = Arc::new(Barrier::new(4));

        let m1_ = m1.clone();
        let m2_ = m2.clone();
        let m3_ = m3.clone();

        let b1 = b.clone();
        let b2 = b.clone();
        let b3 = b.clone();

        assert!(!check_deadlock());

        let _t1 = thread::spawn(move || {
            let _g = m1.lock();
            let _g = m1.lock();
            b1.wait();
            let _ = m2_.lock();
        });

        let _t2 = thread::spawn(move || {
            let _g = m2.lock();
            let _g = m2.lock();
            b2.wait();
            let _ = m3_.lock();
        });

        let _t3 = thread::spawn(move || {
            let _g = m3.lock();
            let _g = m3.lock();
            b3.wait();
            let _ = m1_.lock();
        });

        assert!(!check_deadlock());

        b.wait();
        sleep(Duration::from_millis(50));
        assert!(check_deadlock());

        assert!(!check_deadlock());
    }

    // Three-way cycle of read locks each waiting for a write lock held up
    // by the next reader.
    #[test]
    fn test_rwlock_deadlock() {
        let _guard = DEADLOCK_DETECTION_LOCK.lock();
        let m1: Arc<RwLock<()>> = Default::default();
        let m2: Arc<RwLock<()>> = Default::default();
        let m3: Arc<RwLock<()>> = Default::default();
        let b = Arc::new(Barrier::new(4));

        let m1_ = m1.clone();
        let m2_ = m2.clone();
        let m3_ = m3.clone();

        let b1 = b.clone();
        let b2 = b.clone();
        let b3 = b.clone();

        assert!(!check_deadlock());

        let _t1 = thread::spawn(move || {
            let _g = m1.read();
            b1.wait();
            let _g = m2_.write();
        });

        let _t2 = thread::spawn(move || {
            let _g = m2.read();
            b2.wait();
            let _g = m3_.write();
        });

        let _t3 = thread::spawn(move || {
            let _g = m3.read();
            b3.wait();
            let _ = m1_.write();
        });

        assert!(!check_deadlock());

        b.wait();
        sleep(Duration::from_millis(50));
        assert!(check_deadlock());

        assert!(!check_deadlock());
    }

    // NOTE(review): gated behind a cfg flag that is never set, i.e. this
    // test is intentionally disabled — read-then-write on the same RwLock
    // from one thread is apparently not detectable here; confirm before
    // enabling.
    #[cfg(rwlock_deadlock_detection_not_supported)]
    #[test]
    fn test_rwlock_deadlock_reentrant() {
        let _guard = DEADLOCK_DETECTION_LOCK.lock();
        let m1: Arc<RwLock<()>> = Default::default();

        assert!(!check_deadlock());

        let _t1 = thread::spawn(move || {
            let _g = m1.read();
            let _ = m1.write();
        });

        sleep(Duration::from_millis(50));
        assert!(check_deadlock());

        assert!(!check_deadlock());
    }
}
```
/content/code_sandbox/src/deadlock.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
1,564
```rust // // path_to_url or the MIT license <LICENSE-MIT or // path_to_url at your option. This file may not be // copied, modified, or distributed except according to those terms. use std::time::{Duration, Instant}; // Option::unchecked_unwrap pub trait UncheckedOptionExt<T> { unsafe fn unchecked_unwrap(self) -> T; } impl<T> UncheckedOptionExt<T> for Option<T> { #[inline] unsafe fn unchecked_unwrap(self) -> T { match self { Some(x) => x, None => unreachable(), } } } // hint::unreachable_unchecked() in release mode #[inline] unsafe fn unreachable() -> ! { if cfg!(debug_assertions) { unreachable!(); } else { core::hint::unreachable_unchecked() } } #[inline] pub fn to_deadline(timeout: Duration) -> Option<Instant> { Instant::now().checked_add(timeout) } ```
/content/code_sandbox/src/util.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
210
```rust
//
// path_to_url or the MIT license <LICENSE-MIT or
// path_to_url at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use crate::raw_fair_mutex::RawFairMutex;

/// A mutual exclusive primitive that is always fair, useful for protecting shared data
///
/// This mutex will block threads waiting for the lock to become available. The
/// mutex can be statically initialized or created by the `new`
/// constructor. Each mutex has a type parameter which represents the data that
/// it is protecting. The data can only be accessed through the RAII guards
/// returned from `lock` and `try_lock`, which guarantees that the data is only
/// ever accessed when the mutex is locked.
///
/// The regular mutex provided by `parking_lot` uses eventual fairness
/// (after some time it will default to the fair algorithm), but eventual
/// fairness does not provide the same guarantees an always fair method would.
/// Fair mutexes are generally slower, but sometimes needed.
///
/// In a fair mutex the waiters form a queue, and the lock is always granted to
/// the next requester in the queue, in first-in first-out order. This ensures
/// that one thread cannot starve others by quickly re-acquiring the lock after
/// releasing it.
///
/// A fair mutex may not be interesting if threads have different priorities (this is known as
/// priority inversion).
///
/// # Differences from the standard library `Mutex`
///
/// - No poisoning, the lock is released normally on panic.
/// - Only requires 1 byte of space, whereas the standard library boxes the
///   `FairMutex` due to platform limitations.
/// - Can be statically constructed.
/// - Does not require any drop glue when dropped.
/// - Inline fast path for the uncontended case.
/// - Efficient handling of micro-contention using adaptive spinning.
/// - Allows raw locking & unlocking without a guard.
///
/// # Examples
///
/// ```
/// use parking_lot::FairMutex;
/// use std::sync::{Arc, mpsc::channel};
/// use std::thread;
///
/// const N: usize = 10;
///
/// // Spawn a few threads to increment a shared variable (non-atomically), and
/// // let the main thread know once all increments are done.
/// //
/// // Here we're using an Arc to share memory among threads, and the data inside
/// // the Arc is protected with a mutex.
/// let data = Arc::new(FairMutex::new(0));
///
/// let (tx, rx) = channel();
/// for _ in 0..10 {
///     let (data, tx) = (Arc::clone(&data), tx.clone());
///     thread::spawn(move || {
///         // The shared state can only be accessed once the lock is held.
///         // Our non-atomic increment is safe because we're the only thread
///         // which can access the shared state when the lock is held.
///         let mut data = data.lock();
///         *data += 1;
///         if *data == N {
///             tx.send(()).unwrap();
///         }
///         // the lock is unlocked here when `data` goes out of scope.
///     });
/// }
///
/// rx.recv().unwrap();
/// ```
pub type FairMutex<T> = lock_api::Mutex<RawFairMutex, T>;

/// Creates a new fair mutex in an unlocked state ready for use.
///
/// This allows creating a fair mutex in a constant context on stable Rust.
pub const fn const_fair_mutex<T>(val: T) -> FairMutex<T> {
    FairMutex::const_new(<RawFairMutex as lock_api::RawMutex>::INIT, val)
}

/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
/// dropped (falls out of scope), the lock will be unlocked.
///
/// The data protected by the mutex can be accessed through this guard via its
/// `Deref` and `DerefMut` implementations.
pub type FairMutexGuard<'a, T> = lock_api::MutexGuard<'a, RawFairMutex, T>;

/// An RAII mutex guard returned by `FairMutexGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedFairMutexGuard` and `FairMutexGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
pub type MappedFairMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, RawFairMutex, T>;

#[cfg(test)]
mod tests {
    use crate::FairMutex;
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use std::thread;

    #[cfg(feature = "serde")]
    use bincode::{deserialize, serialize};

    // Non-Copy payload used to check move semantics of into_inner/get_mut.
    #[derive(Eq, PartialEq, Debug)]
    struct NonCopy(i32);

    #[test]
    fn smoke() {
        let m = FairMutex::new(());
        drop(m.lock());
        drop(m.lock());
    }

    // Hammer the lock from several threads; the final count proves no
    // increment was lost.
    #[test]
    fn lots_and_lots() {
        const J: u32 = 1000;
        const K: u32 = 3;

        let m = Arc::new(FairMutex::new(0));

        fn inc(m: &FairMutex<u32>) {
            for _ in 0..J {
                *m.lock() += 1;
            }
        }

        let (tx, rx) = channel();
        for _ in 0..K {
            let tx2 = tx.clone();
            let m2 = m.clone();
            thread::spawn(move || {
                inc(&m2);
                tx2.send(()).unwrap();
            });
            let tx2 = tx.clone();
            let m2 = m.clone();
            thread::spawn(move || {
                inc(&m2);
                tx2.send(()).unwrap();
            });
        }

        drop(tx);
        for _ in 0..2 * K {
            rx.recv().unwrap();
        }
        assert_eq!(*m.lock(), J * K * 2);
    }

    #[test]
    fn try_lock() {
        let m = FairMutex::new(());
        *m.try_lock().unwrap() = ();
    }

    #[test]
    fn test_into_inner() {
        let m = FairMutex::new(NonCopy(10));
        assert_eq!(m.into_inner(), NonCopy(10));
    }

    // into_inner must move the value out without dropping it early.
    #[test]
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
        impl Drop for Foo {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }
        let num_drops = Arc::new(AtomicUsize::new(0));
        let m = FairMutex::new(Foo(num_drops.clone()));
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        {
            let _inner = m.into_inner();
            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        }
        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
    }

    #[test]
    fn test_get_mut() {
        let mut m = FairMutex::new(NonCopy(10));
        *m.get_mut() = NonCopy(20);
        assert_eq!(m.into_inner(), NonCopy(20));
    }

    #[test]
    fn test_mutex_arc_nested() {
        // Tests nested mutexes and access
        // to underlying data.
        let arc = Arc::new(FairMutex::new(1));
        let arc2 = Arc::new(FairMutex::new(arc));
        let (tx, rx) = channel();
        let _t = thread::spawn(move || {
            let lock = arc2.lock();
            let lock2 = lock.lock();
            assert_eq!(*lock2, 1);
            tx.send(()).unwrap();
        });
        rx.recv().unwrap();
    }

    // A panic must release the lock (no poisoning) so the main thread can
    // still observe the increment done during unwinding.
    #[test]
    fn test_mutex_arc_access_in_unwind() {
        let arc = Arc::new(FairMutex::new(1));
        let arc2 = arc.clone();
        let _ = thread::spawn(move || {
            struct Unwinder {
                i: Arc<FairMutex<i32>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    *self.i.lock() += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        })
        .join();
        let lock = arc.lock();
        assert_eq!(*lock, 2);
    }

    // The mutex works with unsized (slice) payloads behind a reference.
    #[test]
    fn test_mutex_unsized() {
        let mutex: &FairMutex<[i32]> = &FairMutex::new([1, 2, 3]);
        {
            let b = &mut *mutex.lock();
            b[0] = 4;
            b[2] = 5;
        }
        let comp: &[i32] = &[4, 2, 5];
        assert_eq!(&*mutex.lock(), comp);
    }

    #[test]
    fn test_mutexguard_sync() {
        fn sync<T: Sync>(_: T) {}

        let mutex = FairMutex::new(());
        sync(mutex.lock());
    }

    // Debug output shows the data when unlocked and "<locked>" when held.
    #[test]
    fn test_mutex_debug() {
        let mutex = FairMutex::new(vec![0u8, 10]);

        assert_eq!(format!("{:?}", mutex), "Mutex { data: [0, 10] }");
        let _lock = mutex.lock();
        assert_eq!(format!("{:?}", mutex), "Mutex { data: <locked> }");
    }

    #[cfg(feature = "serde")]
    #[test]
    fn test_serde() {
        let contents: Vec<u8> = vec![0, 1, 2];
        let mutex = FairMutex::new(contents.clone());

        let serialized = serialize(&mutex).unwrap();
        let deserialized: FairMutex<Vec<u8>> = deserialize(&serialized).unwrap();

        assert_eq!(*(mutex.lock()), *(deserialized.lock()));
        assert_eq!(contents, *(deserialized.lock()));
    }
}
```
/content/code_sandbox/src/fair_mutex.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
2,193
```rust // // path_to_url or the MIT license <LICENSE-MIT or // path_to_url at your option. This file may not be // copied, modified, or distributed except according to those terms. use crate::util::UncheckedOptionExt; use core::{ fmt, mem, sync::atomic::{fence, AtomicU8, Ordering}, }; use parking_lot_core::{self, SpinWait, DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN}; const DONE_BIT: u8 = 1; const POISON_BIT: u8 = 2; const LOCKED_BIT: u8 = 4; const PARKED_BIT: u8 = 8; /// Current state of a `Once`. #[derive(Copy, Clone, Eq, PartialEq, Debug)] pub enum OnceState { /// A closure has not been executed yet New, /// A closure was executed but panicked. Poisoned, /// A thread is currently executing a closure. InProgress, /// A closure has completed successfully. Done, } impl OnceState { /// Returns whether the associated `Once` has been poisoned. /// /// Once an initialization routine for a `Once` has panicked it will forever /// indicate to future forced initialization routines that it is poisoned. #[inline] pub fn poisoned(self) -> bool { matches!(self, OnceState::Poisoned) } /// Returns whether the associated `Once` has successfully executed a /// closure. #[inline] pub fn done(self) -> bool { matches!(self, OnceState::Done) } } /// A synchronization primitive which can be used to run a one-time /// initialization. Useful for one-time initialization for globals, FFI or /// related functionality. /// /// # Differences from the standard library `Once` /// /// - Only requires 1 byte of space, instead of 1 word. /// - Not required to be `'static`. /// - Relaxed memory barriers in the fast path, which can significantly improve /// performance on some architectures. /// - Efficient handling of micro-contention using adaptive spinning. 
/// /// # Examples /// /// ``` /// use parking_lot::Once; /// /// static START: Once = Once::new(); /// /// START.call_once(|| { /// // run initialization here /// }); /// ``` pub struct Once(AtomicU8); impl Once { /// Creates a new `Once` value. #[inline] pub const fn new() -> Once { Once(AtomicU8::new(0)) } /// Returns the current state of this `Once`. #[inline] pub fn state(&self) -> OnceState { let state = self.0.load(Ordering::Acquire); if state & DONE_BIT != 0 { OnceState::Done } else if state & LOCKED_BIT != 0 { OnceState::InProgress } else if state & POISON_BIT != 0 { OnceState::Poisoned } else { OnceState::New } } /// Performs an initialization routine once and only once. The given closure /// will be executed if this is the first time `call_once` has been called, /// and otherwise the routine will *not* be invoked. /// /// This method will block the calling thread if another initialization /// routine is currently running. /// /// When this function returns, it is guaranteed that some initialization /// has run and completed (it may not be the closure specified). It is also /// guaranteed that any memory writes performed by the executed closure can /// be reliably observed by other threads at this point (there is a /// happens-before relation between the closure and code executing after the /// return). /// /// # Examples /// /// ``` /// use parking_lot::Once; /// /// static mut VAL: usize = 0; /// static INIT: Once = Once::new(); /// /// // Accessing a `static mut` is unsafe much of the time, but if we do so /// // in a synchronized fashion (e.g. write once or read all) then we're /// // good to go! /// // /// // This function will only call `expensive_computation` once, and will /// // otherwise always return the value returned from the first invocation. /// fn get_cached_val() -> usize { /// unsafe { /// INIT.call_once(|| { /// VAL = expensive_computation(); /// }); /// VAL /// } /// } /// /// fn expensive_computation() -> usize { /// // ... 
/// # 2 /// } /// ``` /// /// # Panics /// /// The closure `f` will only be executed once if this is called /// concurrently amongst many threads. If that closure panics, however, then /// it will *poison* this `Once` instance, causing all future invocations of /// `call_once` to also panic. #[inline] pub fn call_once<F>(&self, f: F) where F: FnOnce(), { if self.0.load(Ordering::Acquire) == DONE_BIT { return; } let mut f = Some(f); self.call_once_slow(false, &mut |_| unsafe { f.take().unchecked_unwrap()() }); } /// Performs the same function as `call_once` except ignores poisoning. /// /// If this `Once` has been poisoned (some initialization panicked) then /// this function will continue to attempt to call initialization functions /// until one of them doesn't panic. /// /// The closure `f` is yielded a structure which can be used to query the /// state of this `Once` (whether initialization has previously panicked or /// not). #[inline] pub fn call_once_force<F>(&self, f: F) where F: FnOnce(OnceState), { if self.0.load(Ordering::Acquire) == DONE_BIT { return; } let mut f = Some(f); self.call_once_slow(true, &mut |state| unsafe { f.take().unchecked_unwrap()(state) }); } // This is a non-generic function to reduce the monomorphization cost of // using `call_once` (this isn't exactly a trivial or small implementation). // // Additionally, this is tagged with `#[cold]` as it should indeed be cold // and it helps let LLVM know that calls to this function should be off the // fast path. Essentially, this should help generate more straight line code // in LLVM. // // Finally, this takes an `FnMut` instead of a `FnOnce` because there's // currently no way to take an `FnOnce` and call it via virtual dispatch // without some allocation overhead. 
#[cold] fn call_once_slow(&self, ignore_poison: bool, f: &mut dyn FnMut(OnceState)) { let mut spinwait = SpinWait::new(); let mut state = self.0.load(Ordering::Relaxed); loop { // If another thread called the closure, we're done if state & DONE_BIT != 0 { // An acquire fence is needed here since we didn't load the // state with Ordering::Acquire. fence(Ordering::Acquire); return; } // If the state has been poisoned and we aren't forcing, then panic if state & POISON_BIT != 0 && !ignore_poison { // Need the fence here as well for the same reason fence(Ordering::Acquire); panic!("Once instance has previously been poisoned"); } // Grab the lock if it isn't locked, even if there is a queue on it. // We also clear the poison bit since we are going to try running // the closure again. if state & LOCKED_BIT == 0 { match self.0.compare_exchange_weak( state, (state | LOCKED_BIT) & !POISON_BIT, Ordering::Acquire, Ordering::Relaxed, ) { Ok(_) => break, Err(x) => state = x, } continue; } // If there is no queue, try spinning a few times if state & PARKED_BIT == 0 && spinwait.spin() { state = self.0.load(Ordering::Relaxed); continue; } // Set the parked bit if state & PARKED_BIT == 0 { if let Err(x) = self.0.compare_exchange_weak( state, state | PARKED_BIT, Ordering::Relaxed, Ordering::Relaxed, ) { state = x; continue; } } // Park our thread until we are woken up by the thread that owns the // lock. let addr = self as *const _ as usize; let validate = || self.0.load(Ordering::Relaxed) == LOCKED_BIT | PARKED_BIT; let before_sleep = || {}; let timed_out = |_, _| unreachable!(); unsafe { parking_lot_core::park( addr, validate, before_sleep, timed_out, DEFAULT_PARK_TOKEN, None, ); } // Loop back and check if the done bit was set spinwait.reset(); state = self.0.load(Ordering::Relaxed); } struct PanicGuard<'a>(&'a Once); impl<'a> Drop for PanicGuard<'a> { fn drop(&mut self) { // Mark the state as poisoned, unlock it and unpark all threads. 
let once = self.0; let state = once.0.swap(POISON_BIT, Ordering::Release); if state & PARKED_BIT != 0 { let addr = once as *const _ as usize; unsafe { parking_lot_core::unpark_all(addr, DEFAULT_UNPARK_TOKEN); } } } } // At this point we have the lock, so run the closure. Make sure we // properly clean up if the closure panicks. let guard = PanicGuard(self); let once_state = if state & POISON_BIT != 0 { OnceState::Poisoned } else { OnceState::New }; f(once_state); mem::forget(guard); // Now unlock the state, set the done bit and unpark all threads let state = self.0.swap(DONE_BIT, Ordering::Release); if state & PARKED_BIT != 0 { let addr = self as *const _ as usize; unsafe { parking_lot_core::unpark_all(addr, DEFAULT_UNPARK_TOKEN); } } } } impl Default for Once { #[inline] fn default() -> Once { Once::new() } } impl fmt::Debug for Once { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Once") .field("state", &self.state()) .finish() } } #[cfg(test)] mod tests { use crate::Once; use std::panic; use std::sync::mpsc::channel; use std::thread; #[test] fn smoke_once() { static O: Once = Once::new(); let mut a = 0; O.call_once(|| a += 1); assert_eq!(a, 1); O.call_once(|| a += 1); assert_eq!(a, 1); } #[test] fn stampede_once() { static O: Once = Once::new(); static mut RUN: bool = false; let (tx, rx) = channel(); for _ in 0..10 { let tx = tx.clone(); thread::spawn(move || { for _ in 0..4 { thread::yield_now() } unsafe { O.call_once(|| { assert!(!RUN); RUN = true; }); assert!(RUN); } tx.send(()).unwrap(); }); } unsafe { O.call_once(|| { assert!(!RUN); RUN = true; }); assert!(RUN); } for _ in 0..10 { rx.recv().unwrap(); } } #[test] fn poison_bad() { static O: Once = Once::new(); // poison the once let t = panic::catch_unwind(|| { O.call_once(|| panic!()); }); assert!(t.is_err()); // poisoning propagates let t = panic::catch_unwind(|| { O.call_once(|| {}); }); assert!(t.is_err()); // we can subvert poisoning, however let mut called = false; 
O.call_once_force(|p| { called = true; assert!(p.poisoned()) }); assert!(called); // once any success happens, we stop propagating the poison O.call_once(|| {}); } #[test] fn wait_for_force_to_finish() { static O: Once = Once::new(); // poison the once let t = panic::catch_unwind(|| { O.call_once(|| panic!()); }); assert!(t.is_err()); // make sure someone's waiting inside the once via a force let (tx1, rx1) = channel(); let (tx2, rx2) = channel(); let t1 = thread::spawn(move || { O.call_once_force(|p| { assert!(p.poisoned()); tx1.send(()).unwrap(); rx2.recv().unwrap(); }); }); rx1.recv().unwrap(); // put another waiter on the once let t2 = thread::spawn(|| { let mut called = false; O.call_once(|| { called = true; }); assert!(!called); }); tx2.send(()).unwrap(); assert!(t1.join().is_ok()); assert!(t2.join().is_ok()); } #[test] fn test_once_debug() { static O: Once = Once::new(); assert_eq!(format!("{:?}", O), "Once { state: New }"); } } ```
/content/code_sandbox/src/once.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
3,119
```rust // // path_to_url or the MIT license <LICENSE-MIT or // path_to_url at your option. This file may not be // copied, modified, or distributed except according to those terms. use crate::raw_rwlock::RawRwLock; /// A reader-writer lock /// /// This type of lock allows a number of readers or at most one writer at any /// point in time. The write portion of this lock typically allows modification /// of the underlying data (exclusive access) and the read portion of this lock /// typically allows for read-only access (shared access). /// /// This lock uses a task-fair locking policy which avoids both reader and /// writer starvation. This means that readers trying to acquire the lock will /// block even if the lock is unlocked when there are writers waiting to acquire /// the lock. Because of this, attempts to recursively acquire a read lock /// within a single thread may result in a deadlock. /// /// The type parameter `T` represents the data that this lock protects. It is /// required that `T` satisfies `Send` to be shared across threads and `Sync` to /// allow concurrent access through readers. The RAII guards returned from the /// locking methods implement `Deref` (and `DerefMut` for the `write` methods) /// to allow access to the contained of the lock. /// /// # Fairness /// /// A typical unfair lock can often end up in a situation where a single thread /// quickly acquires and releases the same lock in succession, which can starve /// other threads waiting to acquire the rwlock. While this improves throughput /// because it doesn't force a context switch when a thread tries to re-acquire /// a rwlock it has just released, this can starve other threads. /// /// This rwlock uses [eventual fairness](path_to_url /// to ensure that the lock will be fair on average without sacrificing /// throughput. This is done by forcing a fair unlock on average every 0.5ms, /// which will force the lock to go to the next thread waiting for the rwlock. 
/// /// Additionally, any critical section longer than 1ms will always use a fair /// unlock, which has a negligible impact on throughput considering the length /// of the critical section. /// /// You can also force a fair unlock by calling `RwLockReadGuard::unlock_fair` /// or `RwLockWriteGuard::unlock_fair` when unlocking a mutex instead of simply /// dropping the guard. /// /// # Differences from the standard library `RwLock` /// /// - Supports atomically downgrading a write lock into a read lock. /// - Task-fair locking policy instead of an unspecified platform default. /// - No poisoning, the lock is released normally on panic. /// - Only requires 1 word of space, whereas the standard library boxes the /// `RwLock` due to platform limitations. /// - Can be statically constructed. /// - Does not require any drop glue when dropped. /// - Inline fast path for the uncontended case. /// - Efficient handling of micro-contention using adaptive spinning. /// - Allows raw locking & unlocking without a guard. /// - Supports eventual fairness so that the rwlock is fair on average. /// - Optionally allows making the rwlock fair by calling /// `RwLockReadGuard::unlock_fair` and `RwLockWriteGuard::unlock_fair`. /// /// # Examples /// /// ``` /// use parking_lot::RwLock; /// /// let lock = RwLock::new(5); /// /// // many reader locks can be held at once /// { /// let r1 = lock.read(); /// let r2 = lock.read(); /// assert_eq!(*r1, 5); /// assert_eq!(*r2, 5); /// } // read locks are dropped at this point /// /// // only one write lock may be held, however /// { /// let mut w = lock.write(); /// *w += 1; /// assert_eq!(*w, 6); /// } // write lock is dropped here /// ``` pub type RwLock<T> = lock_api::RwLock<RawRwLock, T>; /// Creates a new instance of an `RwLock<T>` which is unlocked. /// /// This allows creating a `RwLock<T>` in a constant context on stable Rust. 
pub const fn const_rwlock<T>(val: T) -> RwLock<T> { RwLock::const_new(<RawRwLock as lock_api::RawRwLock>::INIT, val) } /// RAII structure used to release the shared read access of a lock when /// dropped. pub type RwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, RawRwLock, T>; /// RAII structure used to release the exclusive write access of a lock when /// dropped. pub type RwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, RawRwLock, T>; /// An RAII read lock guard returned by `RwLockReadGuard::map`, which can point to a /// subfield of the protected data. /// /// The main difference between `MappedRwLockReadGuard` and `RwLockReadGuard` is that the /// former doesn't support temporarily unlocking and re-locking, since that /// could introduce soundness issues if the locked object is modified by another /// thread. pub type MappedRwLockReadGuard<'a, T> = lock_api::MappedRwLockReadGuard<'a, RawRwLock, T>; /// An RAII write lock guard returned by `RwLockWriteGuard::map`, which can point to a /// subfield of the protected data. /// /// The main difference between `MappedRwLockWriteGuard` and `RwLockWriteGuard` is that the /// former doesn't support temporarily unlocking and re-locking, since that /// could introduce soundness issues if the locked object is modified by another /// thread. pub type MappedRwLockWriteGuard<'a, T> = lock_api::MappedRwLockWriteGuard<'a, RawRwLock, T>; /// RAII structure used to release the upgradable read access of a lock when /// dropped. 
pub type RwLockUpgradableReadGuard<'a, T> = lock_api::RwLockUpgradableReadGuard<'a, RawRwLock, T>; #[cfg(test)] mod tests { use crate::{RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard}; use rand::Rng; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::mpsc::channel; use std::sync::Arc; use std::thread; use std::time::Duration; #[cfg(feature = "serde")] use bincode::{deserialize, serialize}; #[derive(Eq, PartialEq, Debug)] struct NonCopy(i32); #[test] fn smoke() { let l = RwLock::new(()); drop(l.read()); drop(l.write()); drop(l.upgradable_read()); drop((l.read(), l.read())); drop((l.read(), l.upgradable_read())); drop(l.write()); } #[test] fn frob() { const N: u32 = 10; const M: u32 = 1000; let r = Arc::new(RwLock::new(())); let (tx, rx) = channel::<()>(); for _ in 0..N { let tx = tx.clone(); let r = r.clone(); thread::spawn(move || { let mut rng = rand::thread_rng(); for _ in 0..M { if rng.gen_bool(1.0 / N as f64) { drop(r.write()); } else { drop(r.read()); } } drop(tx); }); } drop(tx); let _ = rx.recv(); } #[test] fn test_rw_arc_no_poison_wr() { let arc = Arc::new(RwLock::new(1)); let arc2 = arc.clone(); let _: Result<(), _> = thread::spawn(move || { let _lock = arc2.write(); panic!(); }) .join(); let lock = arc.read(); assert_eq!(*lock, 1); } #[test] fn test_rw_arc_no_poison_ww() { let arc = Arc::new(RwLock::new(1)); let arc2 = arc.clone(); let _: Result<(), _> = thread::spawn(move || { let _lock = arc2.write(); panic!(); }) .join(); let lock = arc.write(); assert_eq!(*lock, 1); } #[test] fn test_rw_arc_no_poison_rr() { let arc = Arc::new(RwLock::new(1)); let arc2 = arc.clone(); let _: Result<(), _> = thread::spawn(move || { let _lock = arc2.read(); panic!(); }) .join(); let lock = arc.read(); assert_eq!(*lock, 1); } #[test] fn test_rw_arc_no_poison_rw() { let arc = Arc::new(RwLock::new(1)); let arc2 = arc.clone(); let _: Result<(), _> = thread::spawn(move || { let _lock = arc2.read(); panic!() }) .join(); let lock = arc.write(); assert_eq!(*lock, 
1); } #[test] fn test_ruw_arc() { let arc = Arc::new(RwLock::new(0)); let arc2 = arc.clone(); let (tx, rx) = channel(); thread::spawn(move || { for _ in 0..10 { let mut lock = arc2.write(); let tmp = *lock; *lock = -1; thread::yield_now(); *lock = tmp + 1; } tx.send(()).unwrap(); }); let mut children = Vec::new(); // Upgradable readers try to catch the writer in the act and also // try to touch the value for _ in 0..5 { let arc3 = arc.clone(); children.push(thread::spawn(move || { let lock = arc3.upgradable_read(); let tmp = *lock; assert!(tmp >= 0); thread::yield_now(); let mut lock = RwLockUpgradableReadGuard::upgrade(lock); assert_eq!(tmp, *lock); *lock = -1; thread::yield_now(); *lock = tmp + 1; })); } // Readers try to catch the writers in the act for _ in 0..5 { let arc4 = arc.clone(); children.push(thread::spawn(move || { let lock = arc4.read(); assert!(*lock >= 0); })); } // Wait for children to pass their asserts for r in children { assert!(r.join().is_ok()); } // Wait for writer to finish rx.recv().unwrap(); let lock = arc.read(); assert_eq!(*lock, 15); } #[test] fn test_rw_arc() { let arc = Arc::new(RwLock::new(0)); let arc2 = arc.clone(); let (tx, rx) = channel(); thread::spawn(move || { let mut lock = arc2.write(); for _ in 0..10 { let tmp = *lock; *lock = -1; thread::yield_now(); *lock = tmp + 1; } tx.send(()).unwrap(); }); // Readers try to catch the writer in the act let mut children = Vec::new(); for _ in 0..5 { let arc3 = arc.clone(); children.push(thread::spawn(move || { let lock = arc3.read(); assert!(*lock >= 0); })); } // Wait for children to pass their asserts for r in children { assert!(r.join().is_ok()); } // Wait for writer to finish rx.recv().unwrap(); let lock = arc.read(); assert_eq!(*lock, 10); } #[test] fn test_rw_arc_access_in_unwind() { let arc = Arc::new(RwLock::new(1)); let arc2 = arc.clone(); let _ = thread::spawn(move || { struct Unwinder { i: Arc<RwLock<isize>>, } impl Drop for Unwinder { fn drop(&mut self) { let mut lock = 
self.i.write(); *lock += 1; } } let _u = Unwinder { i: arc2 }; panic!(); }) .join(); let lock = arc.read(); assert_eq!(*lock, 2); } #[test] fn test_rwlock_unsized() { let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]); { let b = &mut *rw.write(); b[0] = 4; b[2] = 5; } let comp: &[i32] = &[4, 2, 5]; assert_eq!(&*rw.read(), comp); } #[test] fn test_rwlock_try_read() { let lock = RwLock::new(0isize); { let read_guard = lock.read(); let read_result = lock.try_read(); assert!( read_result.is_some(), "try_read should succeed while read_guard is in scope" ); drop(read_guard); } { let upgrade_guard = lock.upgradable_read(); let read_result = lock.try_read(); assert!( read_result.is_some(), "try_read should succeed while upgrade_guard is in scope" ); drop(upgrade_guard); } { let write_guard = lock.write(); let read_result = lock.try_read(); assert!( read_result.is_none(), "try_read should fail while write_guard is in scope" ); drop(write_guard); } } #[test] fn test_rwlock_try_write() { let lock = RwLock::new(0isize); { let read_guard = lock.read(); let write_result = lock.try_write(); assert!( write_result.is_none(), "try_write should fail while read_guard is in scope" ); assert!(lock.is_locked()); assert!(!lock.is_locked_exclusive()); drop(read_guard); } { let upgrade_guard = lock.upgradable_read(); let write_result = lock.try_write(); assert!( write_result.is_none(), "try_write should fail while upgrade_guard is in scope" ); assert!(lock.is_locked()); assert!(!lock.is_locked_exclusive()); drop(upgrade_guard); } { let write_guard = lock.write(); let write_result = lock.try_write(); assert!( write_result.is_none(), "try_write should fail while write_guard is in scope" ); assert!(lock.is_locked()); assert!(lock.is_locked_exclusive()); drop(write_guard); } } #[test] fn test_rwlock_try_upgrade() { let lock = RwLock::new(0isize); { let read_guard = lock.read(); let upgrade_result = lock.try_upgradable_read(); assert!( upgrade_result.is_some(), "try_upgradable_read should succeed 
while read_guard is in scope" ); drop(read_guard); } { let upgrade_guard = lock.upgradable_read(); let upgrade_result = lock.try_upgradable_read(); assert!( upgrade_result.is_none(), "try_upgradable_read should fail while upgrade_guard is in scope" ); drop(upgrade_guard); } { let write_guard = lock.write(); let upgrade_result = lock.try_upgradable_read(); assert!( upgrade_result.is_none(), "try_upgradable should fail while write_guard is in scope" ); drop(write_guard); } } #[test] fn test_into_inner() { let m = RwLock::new(NonCopy(10)); assert_eq!(m.into_inner(), NonCopy(10)); } #[test] fn test_into_inner_drop() { struct Foo(Arc<AtomicUsize>); impl Drop for Foo { fn drop(&mut self) { self.0.fetch_add(1, Ordering::SeqCst); } } let num_drops = Arc::new(AtomicUsize::new(0)); let m = RwLock::new(Foo(num_drops.clone())); assert_eq!(num_drops.load(Ordering::SeqCst), 0); { let _inner = m.into_inner(); assert_eq!(num_drops.load(Ordering::SeqCst), 0); } assert_eq!(num_drops.load(Ordering::SeqCst), 1); } #[test] fn test_get_mut() { let mut m = RwLock::new(NonCopy(10)); *m.get_mut() = NonCopy(20); assert_eq!(m.into_inner(), NonCopy(20)); } #[test] fn test_rwlockguard_sync() { fn sync<T: Sync>(_: T) {} let rwlock = RwLock::new(()); sync(rwlock.read()); sync(rwlock.write()); } #[test] fn test_rwlock_downgrade() { let x = Arc::new(RwLock::new(0)); let mut handles = Vec::new(); for _ in 0..8 { let x = x.clone(); handles.push(thread::spawn(move || { for _ in 0..100 { let mut writer = x.write(); *writer += 1; let cur_val = *writer; let reader = RwLockWriteGuard::downgrade(writer); assert_eq!(cur_val, *reader); } })); } for handle in handles { handle.join().unwrap() } assert_eq!(*x.read(), 800); } #[test] fn test_rwlock_recursive() { let arc = Arc::new(RwLock::new(1)); let arc2 = arc.clone(); let lock1 = arc.read(); let t = thread::spawn(move || { let _lock = arc2.write(); }); if cfg!(not(all(target_env = "sgx", target_vendor = "fortanix"))) { 
thread::sleep(Duration::from_millis(100)); } else { // FIXME: path_to_url for _ in 0..100 { thread::yield_now(); } } // A normal read would block here since there is a pending writer let lock2 = arc.read_recursive(); // Unblock the thread and join it. drop(lock1); drop(lock2); t.join().unwrap(); } #[test] fn test_rwlock_debug() { let x = RwLock::new(vec![0u8, 10]); assert_eq!(format!("{:?}", x), "RwLock { data: [0, 10] }"); let _lock = x.write(); assert_eq!(format!("{:?}", x), "RwLock { data: <locked> }"); } #[test] fn test_clone() { let rwlock = RwLock::new(Arc::new(1)); let a = rwlock.read_recursive(); let b = a.clone(); assert_eq!(Arc::strong_count(&b), 2); } #[cfg(feature = "serde")] #[test] fn test_serde() { let contents: Vec<u8> = vec![0, 1, 2]; let mutex = RwLock::new(contents.clone()); let serialized = serialize(&mutex).unwrap(); let deserialized: RwLock<Vec<u8>> = deserialize(&serialized).unwrap(); assert_eq!(*(mutex.read()), *(deserialized.read())); assert_eq!(contents, *(deserialized.read())); } #[test] fn test_issue_203() { struct Bar(RwLock<()>); impl Drop for Bar { fn drop(&mut self) { let _n = self.0.write(); } } thread_local! { static B: Bar = Bar(RwLock::new(())); } thread::spawn(|| { B.with(|_| ()); let a = RwLock::new(()); let _a = a.read(); }) .join() .unwrap(); } #[test] fn test_rw_write_is_locked() { let lock = RwLock::new(0isize); { let _read_guard = lock.read(); assert!(lock.is_locked()); assert!(!lock.is_locked_exclusive()); } { let _write_guard = lock.write(); assert!(lock.is_locked()); assert!(lock.is_locked_exclusive()); } } #[test] #[cfg(feature = "arc_lock")] fn test_issue_430() { let lock = std::sync::Arc::new(RwLock::new(0)); let mut rl = lock.upgradable_read_arc(); rl.with_upgraded(|_| { println!("lock upgrade"); }); rl.with_upgraded(|_| { println!("lock upgrade"); }); drop(lock); } } ```
/content/code_sandbox/src/rwlock.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
4,689
```rust // // path_to_url or the MIT license <LICENSE-MIT or // path_to_url at your option. This file may not be // copied, modified, or distributed except according to those terms. use crate::{deadlock, util}; use core::{ sync::atomic::{AtomicU8, Ordering}, time::Duration, }; use lock_api::RawMutex as RawMutex_; use parking_lot_core::{self, ParkResult, SpinWait, UnparkResult, UnparkToken, DEFAULT_PARK_TOKEN}; use std::time::Instant; // UnparkToken used to indicate that that the target thread should attempt to // lock the mutex again as soon as it is unparked. pub(crate) const TOKEN_NORMAL: UnparkToken = UnparkToken(0); // UnparkToken used to indicate that the mutex is being handed off to the target // thread directly without unlocking it. pub(crate) const TOKEN_HANDOFF: UnparkToken = UnparkToken(1); /// This bit is set in the `state` of a `RawMutex` when that mutex is locked by some thread. const LOCKED_BIT: u8 = 0b01; /// This bit is set in the `state` of a `RawMutex` just before parking a thread. A thread is being /// parked if it wants to lock the mutex, but it is currently being held by some other thread. const PARKED_BIT: u8 = 0b10; /// Raw mutex type backed by the parking lot. pub struct RawMutex { /// This atomic integer holds the current state of the mutex instance. Only the two lowest bits /// are used. See `LOCKED_BIT` and `PARKED_BIT` for the bitmask for these bits. /// /// # State table: /// /// PARKED_BIT | LOCKED_BIT | Description /// 0 | 0 | The mutex is not locked, nor is anyone waiting for it. /// -----------+------------+your_sha256_hash-- /// 0 | 1 | The mutex is locked by exactly one thread. No other thread is /// | | waiting for it. /// -----------+------------+your_sha256_hash-- /// 1 | 0 | The mutex is not locked. One or more thread is parked or about to /// | | park. At least one of the parked threads are just about to be /// | | unparked, or a thread heading for parking might abort the park. 
/// -----------+------------+your_sha256_hash-- /// 1 | 1 | The mutex is locked by exactly one thread. One or more thread is /// | | parked or about to park, waiting for the lock to become available. /// | | In this state, PARKED_BIT is only ever cleared when a bucket lock /// | | is held (i.e. in a parking_lot_core callback). This ensures that /// | | we never end up in a situation where there are parked threads but /// | | PARKED_BIT is not set (which would result in those threads /// | | potentially never getting woken up). state: AtomicU8, } unsafe impl lock_api::RawMutex for RawMutex { const INIT: RawMutex = RawMutex { state: AtomicU8::new(0), }; type GuardMarker = crate::GuardMarker; #[inline] fn lock(&self) { if self .state .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed) .is_err() { self.lock_slow(None); } unsafe { deadlock::acquire_resource(self as *const _ as usize) }; } #[inline] fn try_lock(&self) -> bool { let mut state = self.state.load(Ordering::Relaxed); loop { if state & LOCKED_BIT != 0 { return false; } match self.state.compare_exchange_weak( state, state | LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed, ) { Ok(_) => { unsafe { deadlock::acquire_resource(self as *const _ as usize) }; return true; } Err(x) => state = x, } } } #[inline] unsafe fn unlock(&self) { deadlock::release_resource(self as *const _ as usize); if self .state .compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed) .is_ok() { return; } self.unlock_slow(false); } #[inline] fn is_locked(&self) -> bool { let state = self.state.load(Ordering::Relaxed); state & LOCKED_BIT != 0 } } unsafe impl lock_api::RawMutexFair for RawMutex { #[inline] unsafe fn unlock_fair(&self) { deadlock::release_resource(self as *const _ as usize); if self .state .compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed) .is_ok() { return; } self.unlock_slow(true); } #[inline] unsafe fn bump(&self) { if self.state.load(Ordering::Relaxed) & PARKED_BIT != 
0 { self.bump_slow(); } } } unsafe impl lock_api::RawMutexTimed for RawMutex { type Duration = Duration; type Instant = Instant; #[inline] fn try_lock_until(&self, timeout: Instant) -> bool { let result = if self .state .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed) .is_ok() { true } else { self.lock_slow(Some(timeout)) }; if result { unsafe { deadlock::acquire_resource(self as *const _ as usize) }; } result } #[inline] fn try_lock_for(&self, timeout: Duration) -> bool { let result = if self .state .compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed) .is_ok() { true } else { self.lock_slow(util::to_deadline(timeout)) }; if result { unsafe { deadlock::acquire_resource(self as *const _ as usize) }; } result } } impl RawMutex { // Used by Condvar when requeuing threads to us, must be called while // holding the queue lock. #[inline] pub(crate) fn mark_parked_if_locked(&self) -> bool { let mut state = self.state.load(Ordering::Relaxed); loop { if state & LOCKED_BIT == 0 { return false; } match self.state.compare_exchange_weak( state, state | PARKED_BIT, Ordering::Relaxed, Ordering::Relaxed, ) { Ok(_) => return true, Err(x) => state = x, } } } // Used by Condvar when requeuing threads to us, must be called while // holding the queue lock. 
#[inline] pub(crate) fn mark_parked(&self) { self.state.fetch_or(PARKED_BIT, Ordering::Relaxed); } #[cold] fn lock_slow(&self, timeout: Option<Instant>) -> bool { let mut spinwait = SpinWait::new(); let mut state = self.state.load(Ordering::Relaxed); loop { // Grab the lock if it isn't locked, even if there is a queue on it if state & LOCKED_BIT == 0 { match self.state.compare_exchange_weak( state, state | LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed, ) { Ok(_) => return true, Err(x) => state = x, } continue; } // If there is no queue, try spinning a few times if state & PARKED_BIT == 0 && spinwait.spin() { state = self.state.load(Ordering::Relaxed); continue; } // Set the parked bit if state & PARKED_BIT == 0 { if let Err(x) = self.state.compare_exchange_weak( state, state | PARKED_BIT, Ordering::Relaxed, Ordering::Relaxed, ) { state = x; continue; } } // Park our thread until we are woken up by an unlock let addr = self as *const _ as usize; let validate = || self.state.load(Ordering::Relaxed) == LOCKED_BIT | PARKED_BIT; let before_sleep = || {}; let timed_out = |_, was_last_thread| { // Clear the parked bit if we were the last parked thread if was_last_thread { self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed); } }; // SAFETY: // * `addr` is an address we control. // * `validate`/`timed_out` does not panic or call into any function of `parking_lot`. // * `before_sleep` does not call `park`, nor does it panic. match unsafe { parking_lot_core::park( addr, validate, before_sleep, timed_out, DEFAULT_PARK_TOKEN, timeout, ) } { // The thread that unparked us passed the lock on to us // directly without unlocking it. 
ParkResult::Unparked(TOKEN_HANDOFF) => return true, // We were unparked normally, try acquiring the lock again ParkResult::Unparked(_) => (), // The validation function failed, try locking again ParkResult::Invalid => (), // Timeout expired ParkResult::TimedOut => return false, } // Loop back and try locking again spinwait.reset(); state = self.state.load(Ordering::Relaxed); } } #[cold] fn unlock_slow(&self, force_fair: bool) { // Unpark one thread and leave the parked bit set if there might // still be parked threads on this address. let addr = self as *const _ as usize; let callback = |result: UnparkResult| { // If we are using a fair unlock then we should keep the // mutex locked and hand it off to the unparked thread. if result.unparked_threads != 0 && (force_fair || result.be_fair) { // Clear the parked bit if there are no more parked // threads. if !result.have_more_threads { self.state.store(LOCKED_BIT, Ordering::Relaxed); } return TOKEN_HANDOFF; } // Clear the locked bit, and the parked bit as well if there // are no more parked threads. if result.have_more_threads { self.state.store(PARKED_BIT, Ordering::Release); } else { self.state.store(0, Ordering::Release); } TOKEN_NORMAL }; // SAFETY: // * `addr` is an address we control. // * `callback` does not panic or call into any function of `parking_lot`. unsafe { parking_lot_core::unpark_one(addr, callback); } } #[cold] fn bump_slow(&self) { unsafe { deadlock::release_resource(self as *const _ as usize) }; self.unlock_slow(true); self.lock(); } } ```
/content/code_sandbox/src/raw_mutex.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
2,547
```rust // // path_to_url or the MIT license <LICENSE-MIT or // path_to_url at your option. This file may not be // copied, modified, or distributed except according to those terms. //! This library provides implementations of `Mutex`, `RwLock`, `Condvar` and //! `Once` that are smaller, faster and more flexible than those in the Rust //! standard library. It also provides a `ReentrantMutex` type. #![warn(missing_docs)] #![warn(rust_2018_idioms)] mod condvar; mod elision; mod fair_mutex; mod mutex; mod once; mod raw_fair_mutex; mod raw_mutex; mod raw_rwlock; mod remutex; mod rwlock; mod util; #[cfg(feature = "deadlock_detection")] pub mod deadlock; #[cfg(not(feature = "deadlock_detection"))] mod deadlock; // If deadlock detection is enabled, we cannot allow lock guards to be sent to // other threads. #[cfg(all(feature = "send_guard", feature = "deadlock_detection"))] compile_error!("the `send_guard` and `deadlock_detection` features cannot be used together"); #[cfg(feature = "send_guard")] type GuardMarker = lock_api::GuardSend; #[cfg(not(feature = "send_guard"))] type GuardMarker = lock_api::GuardNoSend; pub use self::condvar::{Condvar, WaitTimeoutResult}; pub use self::fair_mutex::{const_fair_mutex, FairMutex, FairMutexGuard, MappedFairMutexGuard}; pub use self::mutex::{const_mutex, MappedMutexGuard, Mutex, MutexGuard}; pub use self::once::{Once, OnceState}; pub use self::raw_fair_mutex::RawFairMutex; pub use self::raw_mutex::RawMutex; pub use self::raw_rwlock::RawRwLock; pub use self::remutex::{ const_reentrant_mutex, MappedReentrantMutexGuard, RawThreadId, ReentrantMutex, ReentrantMutexGuard, }; pub use self::rwlock::{ const_rwlock, MappedRwLockReadGuard, MappedRwLockWriteGuard, RwLock, RwLockReadGuard, RwLockUpgradableReadGuard, RwLockWriteGuard, }; pub use ::lock_api; #[cfg(feature = "arc_lock")] pub use self::lock_api::{ArcMutexGuard, ArcReentrantMutexGuard, ArcRwLockReadGuard, ArcRwLockUpgradableReadGuard, ArcRwLockWriteGuard}; ```
/content/code_sandbox/src/lib.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
520
```rust
//
// path_to_url or the MIT license <LICENSE-MIT or
// path_to_url at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use crate::mutex::MutexGuard;
use crate::raw_mutex::{RawMutex, TOKEN_HANDOFF, TOKEN_NORMAL};
use crate::{deadlock, util};
use core::{
    fmt, ptr,
    sync::atomic::{AtomicPtr, Ordering},
};
use lock_api::RawMutex as RawMutex_;
use parking_lot_core::{self, ParkResult, RequeueOp, UnparkResult, DEFAULT_PARK_TOKEN};
use std::ops::DerefMut;
use std::time::{Duration, Instant};

/// A type indicating whether a timed wait on a condition variable returned
/// due to a time out or not.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct WaitTimeoutResult(bool);

impl WaitTimeoutResult {
    /// Returns whether the wait was known to have timed out.
    #[inline]
    pub fn timed_out(self) -> bool {
        self.0
    }
}

/// A Condition Variable
///
/// Condition variables represent the ability to block a thread such that it
/// consumes no CPU time while waiting for an event to occur. Condition
/// variables are typically associated with a boolean predicate (a condition)
/// and a mutex. The predicate is always verified inside of the mutex before
/// determining that thread must block.
///
/// Note that this module places one additional restriction over the system
/// condition variables: each condvar can be used with only one mutex at a
/// time. Any attempt to use multiple mutexes on the same condition variable
/// simultaneously will result in a runtime panic. However it is possible to
/// switch to a different mutex if there are no threads currently waiting on
/// the condition variable.
///
/// # Differences from the standard library `Condvar`
///
/// - No spurious wakeups: A wait will only return a non-timeout result if it
///   was woken up by `notify_one` or `notify_all`.
/// - `Condvar::notify_all` will only wake up a single thread, the rest are
///   requeued to wait for the `Mutex` to be unlocked by the thread that was
///   woken up.
/// - Only requires 1 word of space, whereas the standard library boxes the
///   `Condvar` due to platform limitations.
/// - Can be statically constructed.
/// - Does not require any drop glue when dropped.
/// - Inline fast path for the uncontended case.
///
/// # Examples
///
/// ```
/// use parking_lot::{Mutex, Condvar};
/// use std::sync::Arc;
/// use std::thread;
///
/// let pair = Arc::new((Mutex::new(false), Condvar::new()));
/// let pair2 = pair.clone();
///
/// // Inside of our lock, spawn a new thread, and then wait for it to start
/// thread::spawn(move|| {
///     let &(ref lock, ref cvar) = &*pair2;
///     let mut started = lock.lock();
///     *started = true;
///     cvar.notify_one();
/// });
///
/// // wait for the thread to start up
/// let &(ref lock, ref cvar) = &*pair;
/// let mut started = lock.lock();
/// if !*started {
///     cvar.wait(&mut started);
/// }
/// // Note that we used an if instead of a while loop above. This is only
/// // possible because parking_lot's Condvar will never spuriously wake up.
/// // This means that wait() will only return after notify_one or notify_all is
/// // called.
/// ```
pub struct Condvar {
    // Pointer to the RawMutex of the Mutex that threads are currently waiting
    // on, or null when no threads are waiting. This is what enforces the
    // one-mutex-at-a-time restriction documented above.
    state: AtomicPtr<RawMutex>,
}

impl Condvar {
    /// Creates a new condition variable which is ready to be waited on and
    /// notified.
    #[inline]
    pub const fn new() -> Condvar {
        Condvar {
            state: AtomicPtr::new(ptr::null_mut()),
        }
    }

    /// Wakes up one blocked thread on this condvar.
    ///
    /// Returns whether a thread was woken up.
    ///
    /// If there is a blocked thread on this condition variable, then it will
    /// be woken up from its call to `wait` or `wait_timeout`. Calls to
    /// `notify_one` are not buffered in any way.
    ///
    /// To wake up all threads, see `notify_all()`.
    ///
    /// # Examples
    ///
    /// ```
    /// use parking_lot::Condvar;
    ///
    /// let condvar = Condvar::new();
    ///
    /// // do something with condvar, share it with other threads
    ///
    /// if !condvar.notify_one() {
    ///     println!("Nobody was listening for this.");
    /// }
    /// ```
    #[inline]
    pub fn notify_one(&self) -> bool {
        // Nothing to do if there are no waiting threads
        let state = self.state.load(Ordering::Relaxed);
        if state.is_null() {
            return false;
        }

        self.notify_one_slow(state)
    }

    // Out-of-line slow path for `notify_one`, taken only when at least one
    // thread appears to be waiting.
    #[cold]
    fn notify_one_slow(&self, mutex: *mut RawMutex) -> bool {
        // Unpark one thread and requeue the rest onto the mutex
        let from = self as *const _ as usize;
        let to = mutex as usize;
        let validate = || {
            // Make sure that our atomic state still points to the same
            // mutex. If not then it means that all threads on the current
            // mutex were woken up and a new waiting thread switched to a
            // different mutex. In that case we can get away with doing
            // nothing.
            if self.state.load(Ordering::Relaxed) != mutex {
                return RequeueOp::Abort;
            }

            // Unpark one thread if the mutex is unlocked, otherwise just
            // requeue everything to the mutex. This is safe to do here
            // since unlocking the mutex when the parked bit is set requires
            // locking the queue. There is the possibility of a race if the
            // mutex gets locked after we check, but that doesn't matter in
            // this case.
            if unsafe { (*mutex).mark_parked_if_locked() } {
                RequeueOp::RequeueOne
            } else {
                RequeueOp::UnparkOne
            }
        };
        let callback = |_op, result: UnparkResult| {
            // Clear our state if there are no more waiting threads
            if !result.have_more_threads {
                self.state.store(ptr::null_mut(), Ordering::Relaxed);
            }
            TOKEN_NORMAL
        };
        let res = unsafe { parking_lot_core::unpark_requeue(from, to, validate, callback) };

        res.unparked_threads + res.requeued_threads != 0
    }

    /// Wakes up all blocked threads on this condvar.
    ///
    /// Returns the number of threads woken up.
    ///
    /// This method will ensure that any current waiters on the condition
    /// variable are awoken. Calls to `notify_all()` are not buffered in any
    /// way.
    ///
    /// To wake up only one thread, see `notify_one()`.
    #[inline]
    pub fn notify_all(&self) -> usize {
        // Nothing to do if there are no waiting threads
        let state = self.state.load(Ordering::Relaxed);
        if state.is_null() {
            return 0;
        }

        self.notify_all_slow(state)
    }

    // Out-of-line slow path for `notify_all`, taken only when at least one
    // thread appears to be waiting.
    #[cold]
    fn notify_all_slow(&self, mutex: *mut RawMutex) -> usize {
        // Unpark one thread and requeue the rest onto the mutex
        let from = self as *const _ as usize;
        let to = mutex as usize;
        let validate = || {
            // Make sure that our atomic state still points to the same
            // mutex. If not then it means that all threads on the current
            // mutex were woken up and a new waiting thread switched to a
            // different mutex. In that case we can get away with doing
            // nothing.
            if self.state.load(Ordering::Relaxed) != mutex {
                return RequeueOp::Abort;
            }

            // Clear our state since we are going to unpark or requeue all
            // threads.
            self.state.store(ptr::null_mut(), Ordering::Relaxed);

            // Unpark one thread if the mutex is unlocked, otherwise just
            // requeue everything to the mutex. This is safe to do here
            // since unlocking the mutex when the parked bit is set requires
            // locking the queue. There is the possibility of a race if the
            // mutex gets locked after we check, but that doesn't matter in
            // this case.
            if unsafe { (*mutex).mark_parked_if_locked() } {
                RequeueOp::RequeueAll
            } else {
                RequeueOp::UnparkOneRequeueRest
            }
        };
        let callback = |op, result: UnparkResult| {
            // If we requeued threads to the mutex, mark it as having
            // parked threads. The RequeueAll case is already handled above.
            if op == RequeueOp::UnparkOneRequeueRest && result.requeued_threads != 0 {
                unsafe { (*mutex).mark_parked() };
            }
            TOKEN_NORMAL
        };
        let res = unsafe { parking_lot_core::unpark_requeue(from, to, validate, callback) };

        res.unparked_threads + res.requeued_threads
    }

    /// Blocks the current thread until this condition variable receives a
    /// notification.
    ///
    /// This function will atomically unlock the mutex specified (represented by
    /// `mutex_guard`) and block the current thread. This means that any calls
    /// to `notify_*()` which happen logically after the mutex is unlocked are
    /// candidates to wake this thread up. When this function call returns, the
    /// lock specified will have been re-acquired.
    ///
    /// # Panics
    ///
    /// This function will panic if another thread is waiting on the `Condvar`
    /// with a different `Mutex` object.
    #[inline]
    pub fn wait<T: ?Sized>(&self, mutex_guard: &mut MutexGuard<'_, T>) {
        self.wait_until_internal(unsafe { MutexGuard::mutex(mutex_guard).raw() }, None);
    }

    /// Waits on this condition variable for a notification, timing out after
    /// the specified time instant.
    ///
    /// The semantics of this function are equivalent to `wait()` except that
    /// the thread will be blocked roughly until `timeout` is reached. This
    /// method should not be used for precise timing due to anomalies such as
    /// preemption or platform differences that may not cause the maximum
    /// amount of time waited to be precisely `timeout`.
    ///
    /// Note that the best effort is made to ensure that the time waited is
    /// measured with a monotonic clock, and not affected by the changes made to
    /// the system time.
    ///
    /// The returned `WaitTimeoutResult` value indicates if the timeout is
    /// known to have elapsed.
    ///
    /// Like `wait`, the lock specified will be re-acquired when this function
    /// returns, regardless of whether the timeout elapsed or not.
    ///
    /// # Panics
    ///
    /// This function will panic if another thread is waiting on the `Condvar`
    /// with a different `Mutex` object.
    #[inline]
    pub fn wait_until<T: ?Sized>(
        &self,
        mutex_guard: &mut MutexGuard<'_, T>,
        timeout: Instant,
    ) -> WaitTimeoutResult {
        self.wait_until_internal(
            unsafe { MutexGuard::mutex(mutex_guard).raw() },
            Some(timeout),
        )
    }

    // This is a non-generic function to reduce the monomorphization cost of
    // using `wait_until`.
    fn wait_until_internal(&self, mutex: &RawMutex, timeout: Option<Instant>) -> WaitTimeoutResult {
        let result;
        let mut bad_mutex = false;
        let mut requeued = false;
        {
            let addr = self as *const _ as usize;
            let lock_addr = mutex as *const _ as *mut _;
            let validate = || {
                // Ensure we don't use two different mutexes with the same
                // Condvar at the same time. This is done while locked to
                // avoid races with notify_one
                let state = self.state.load(Ordering::Relaxed);
                if state.is_null() {
                    self.state.store(lock_addr, Ordering::Relaxed);
                } else if state != lock_addr {
                    bad_mutex = true;
                    return false;
                }
                true
            };
            let before_sleep = || {
                // Unlock the mutex before sleeping...
                unsafe { mutex.unlock() };
            };
            let timed_out = |k, was_last_thread| {
                // If we were requeued to a mutex, then we did not time out.
                // We'll just park ourselves on the mutex again when we try
                // to lock it later.
                requeued = k != addr;

                // If we were the last thread on the queue then we need to
                // clear our state. This is normally done by the
                // notify_{one,all} functions when not timing out.
                if !requeued && was_last_thread {
                    self.state.store(ptr::null_mut(), Ordering::Relaxed);
                }
            };
            result = unsafe {
                parking_lot_core::park(
                    addr,
                    validate,
                    before_sleep,
                    timed_out,
                    DEFAULT_PARK_TOKEN,
                    timeout,
                )
            };
        }

        // Panic if we tried to use multiple mutexes with a Condvar. Note
        // that at this point the MutexGuard is still locked. It will be
        // unlocked by the unwinding logic.
        if bad_mutex {
            panic!("attempted to use a condition variable with more than one mutex");
        }

        // ... and re-lock it once we are done sleeping
        if result == ParkResult::Unparked(TOKEN_HANDOFF) {
            unsafe { deadlock::acquire_resource(mutex as *const _ as usize) };
        } else {
            mutex.lock();
        }

        WaitTimeoutResult(!(result.is_unparked() || requeued))
    }

    /// Waits on this condition variable for a notification, timing out after a
    /// specified duration.
    ///
    /// The semantics of this function are equivalent to `wait()` except that
    /// the thread will be blocked for roughly no longer than `timeout`. This
    /// method should not be used for precise timing due to anomalies such as
    /// preemption or platform differences that may not cause the maximum
    /// amount of time waited to be precisely `timeout`.
    ///
    /// Note that the best effort is made to ensure that the time waited is
    /// measured with a monotonic clock, and not affected by the changes made to
    /// the system time.
    ///
    /// The returned `WaitTimeoutResult` value indicates if the timeout is
    /// known to have elapsed.
    ///
    /// Like `wait`, the lock specified will be re-acquired when this function
    /// returns, regardless of whether the timeout elapsed or not.
    #[inline]
    pub fn wait_for<T: ?Sized>(
        &self,
        mutex_guard: &mut MutexGuard<'_, T>,
        timeout: Duration,
    ) -> WaitTimeoutResult {
        let deadline = util::to_deadline(timeout);
        self.wait_until_internal(unsafe { MutexGuard::mutex(mutex_guard).raw() }, deadline)
    }

    // Shared implementation of the `wait_while*` family: loops waiting until
    // `condition` returns false or the deadline (if any) passes.
    #[inline]
    fn wait_while_until_internal<T, F>(
        &self,
        mutex_guard: &mut MutexGuard<'_, T>,
        mut condition: F,
        timeout: Option<Instant>,
    ) -> WaitTimeoutResult
    where
        T: ?Sized,
        F: FnMut(&mut T) -> bool,
    {
        let mut result = WaitTimeoutResult(false);

        while !result.timed_out() && condition(mutex_guard.deref_mut()) {
            result =
                self.wait_until_internal(unsafe { MutexGuard::mutex(mutex_guard).raw() }, timeout);
        }

        result
    }
    /// Blocks the current thread until this condition variable receives a
    /// notification. If the provided condition evaluates to `false`, then the
    /// thread is no longer blocked and the operation is completed. If the
    /// condition evaluates to `true`, then the thread is blocked again and
    /// waits for another notification before repeating this process.
    ///
    /// This function will atomically unlock the mutex specified (represented by
    /// `mutex_guard`) and block the current thread. This means that any calls
    /// to `notify_*()` which happen logically after the mutex is unlocked are
    /// candidates to wake this thread up. When this function call returns, the
    /// lock specified will have been re-acquired.
    ///
    /// # Panics
    ///
    /// This function will panic if another thread is waiting on the `Condvar`
    /// with a different `Mutex` object.
    #[inline]
    pub fn wait_while<T, F>(&self, mutex_guard: &mut MutexGuard<'_, T>, condition: F)
    where
        T: ?Sized,
        F: FnMut(&mut T) -> bool,
    {
        self.wait_while_until_internal(mutex_guard, condition, None);
    }

    /// Waits on this condition variable for a notification, timing out after
    /// the specified time instant. If the provided condition evaluates to
    /// `false`, then the thread is no longer blocked and the operation is
    /// completed. If the condition evaluates to `true`, then the thread is
    /// blocked again and waits for another notification before repeating
    /// this process.
    ///
    /// The semantics of this function are equivalent to `wait()` except that
    /// the thread will be blocked roughly until `timeout` is reached. This
    /// method should not be used for precise timing due to anomalies such as
    /// preemption or platform differences that may not cause the maximum
    /// amount of time waited to be precisely `timeout`.
    ///
    /// Note that the best effort is made to ensure that the time waited is
    /// measured with a monotonic clock, and not affected by the changes made to
    /// the system time.
    ///
    /// The returned `WaitTimeoutResult` value indicates if the timeout is
    /// known to have elapsed.
    ///
    /// Like `wait`, the lock specified will be re-acquired when this function
    /// returns, regardless of whether the timeout elapsed or not.
    ///
    /// # Panics
    ///
    /// This function will panic if another thread is waiting on the `Condvar`
    /// with a different `Mutex` object.
    #[inline]
    pub fn wait_while_until<T, F>(
        &self,
        mutex_guard: &mut MutexGuard<'_, T>,
        condition: F,
        timeout: Instant,
    ) -> WaitTimeoutResult
    where
        T: ?Sized,
        F: FnMut(&mut T) -> bool,
    {
        self.wait_while_until_internal(mutex_guard, condition, Some(timeout))
    }

    /// Waits on this condition variable for a notification, timing out after a
    /// specified duration. If the provided condition evaluates to `false`,
    /// then the thread is no longer blocked and the operation is completed.
    /// If the condition evaluates to `true`, then the thread is blocked again
    /// and waits for another notification before repeating this process.
    ///
    /// The semantics of this function are equivalent to `wait()` except that
    /// the thread will be blocked for roughly no longer than `timeout`. This
    /// method should not be used for precise timing due to anomalies such as
    /// preemption or platform differences that may not cause the maximum
    /// amount of time waited to be precisely `timeout`.
    ///
    /// Note that the best effort is made to ensure that the time waited is
    /// measured with a monotonic clock, and not affected by the changes made to
    /// the system time.
    ///
    /// The returned `WaitTimeoutResult` value indicates if the timeout is
    /// known to have elapsed.
    ///
    /// Like `wait`, the lock specified will be re-acquired when this function
    /// returns, regardless of whether the timeout elapsed or not.
    #[inline]
    pub fn wait_while_for<T: ?Sized, F>(
        &self,
        mutex_guard: &mut MutexGuard<'_, T>,
        condition: F,
        timeout: Duration,
    ) -> WaitTimeoutResult
    where
        F: FnMut(&mut T) -> bool,
    {
        let deadline = util::to_deadline(timeout);
        self.wait_while_until_internal(mutex_guard, condition, deadline)
    }
}

impl Default for Condvar {
    #[inline]
    fn default() -> Condvar {
        Condvar::new()
    }
}

impl fmt::Debug for Condvar {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.pad("Condvar { .. }")
    }
}

#[cfg(test)]
mod tests {
    use crate::{Condvar, Mutex, MutexGuard};
    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use std::thread;
    use std::thread::sleep;
    use std::thread::JoinHandle;
    use std::time::Duration;
    use std::time::Instant;

    #[test]
    fn smoke() {
        let c = Condvar::new();
        c.notify_one();
        c.notify_all();
    }

    #[test]
    fn notify_one() {
        let m = Arc::new(Mutex::new(()));
        let m2 = m.clone();
        let c = Arc::new(Condvar::new());
        let c2 = c.clone();

        let mut g = m.lock();
        let _t = thread::spawn(move || {
            let _g = m2.lock();
            c2.notify_one();
        });
        c.wait(&mut g);
    }

    #[test]
    fn notify_all() {
        const N: usize = 10;

        let data = Arc::new((Mutex::new(0), Condvar::new()));
        let (tx, rx) = channel();
        for _ in 0..N {
            let data = data.clone();
            let tx = tx.clone();
            thread::spawn(move || {
                let (lock, cond) = &*data;
                let mut cnt = lock.lock();
                *cnt += 1;
                if *cnt == N {
                    tx.send(()).unwrap();
                }
                while *cnt != 0 {
                    cond.wait(&mut cnt);
                }
                tx.send(()).unwrap();
            });
        }
        drop(tx);

        let (lock, cond) = &*data;
        rx.recv().unwrap();
        let mut cnt = lock.lock();
        *cnt = 0;
        cond.notify_all();
        drop(cnt);

        for _ in 0..N {
            rx.recv().unwrap();
        }
    }

    #[test]
    fn notify_one_return_true() {
        let m = Arc::new(Mutex::new(()));
        let m2 = m.clone();
        let c = Arc::new(Condvar::new());
        let c2 = c.clone();

        let mut g = m.lock();
        let _t = thread::spawn(move || {
            let _g = m2.lock();
            assert!(c2.notify_one());
        });
        c.wait(&mut g);
    }

    #[test]
    fn notify_one_return_false() {
        let m = Arc::new(Mutex::new(()));
        let c = Arc::new(Condvar::new());

        let _t = thread::spawn(move || {
            let _g = m.lock();
            assert!(!c.notify_one());
        });
    }

    #[test]
    fn notify_all_return() {
        const N: usize = 10;

        let data = Arc::new((Mutex::new(0), Condvar::new()));
        let (tx, rx) = channel();
        for _ in 0..N {
            let data = data.clone();
            let tx = tx.clone();
            thread::spawn(move || {
                let (lock, cond) = &*data;
                let mut cnt = lock.lock();
                *cnt += 1;
                if *cnt == N {
                    tx.send(()).unwrap();
                }
                while *cnt != 0 {
                    cond.wait(&mut cnt);
                }
                tx.send(()).unwrap();
            });
        }
        drop(tx);

        let (lock, cond) = &*data;
        rx.recv().unwrap();
        let mut cnt = lock.lock();
        *cnt = 0;
        assert_eq!(cond.notify_all(), N);
        drop(cnt);

        for _ in 0..N {
            rx.recv().unwrap();
        }

        assert_eq!(cond.notify_all(), 0);
    }

    #[test]
    fn wait_for() {
        let m = Arc::new(Mutex::new(()));
        let m2 = m.clone();
        let c = Arc::new(Condvar::new());
        let c2 = c.clone();

        let mut g = m.lock();
        // This first wait is expected to time out: no notifier is running yet.
        let no_timeout = c.wait_for(&mut g, Duration::from_millis(1));
        assert!(no_timeout.timed_out());

        let _t = thread::spawn(move || {
            let _g = m2.lock();
            c2.notify_one();
        });
        let timeout_res = c.wait_for(&mut g, Duration::from_secs(u64::max_value()));
        assert!(!timeout_res.timed_out());

        drop(g);
    }

    #[test]
    fn wait_until() {
        let m = Arc::new(Mutex::new(()));
        let m2 = m.clone();
        let c = Arc::new(Condvar::new());
        let c2 = c.clone();

        let mut g = m.lock();
        // This first wait is expected to time out: no notifier is running yet.
        let no_timeout = c.wait_until(&mut g, Instant::now() + Duration::from_millis(1));
        assert!(no_timeout.timed_out());
        let _t = thread::spawn(move || {
            let _g = m2.lock();
            c2.notify_one();
        });
        let timeout_res = c.wait_until(
            &mut g,
            Instant::now() + Duration::from_millis(u32::max_value() as u64),
        );
        assert!(!timeout_res.timed_out());
        drop(g);
    }

    // Spawns a thread that notifies `cv` once per epoch, spinning until the
    // main test thread has advanced the shared counter to the current epoch.
    fn spawn_wait_while_notifier(
        mutex: Arc<Mutex<u32>>,
        cv: Arc<Condvar>,
        num_iters: u32,
        timeout: Option<Instant>,
    ) -> JoinHandle<()> {
        thread::spawn(move || {
            for epoch in 1..=num_iters {
                // spin to wait for main test thread to block
                // before notifying it to wake back up and check
                // its condition.
                let mut sleep_backoff = Duration::from_millis(1);
                let _mutex_guard = loop {
                    let mutex_guard = mutex.lock();

                    if let Some(timeout) = timeout {
                        if Instant::now() >= timeout {
                            return;
                        }
                    }

                    if *mutex_guard == epoch {
                        break mutex_guard;
                    }

                    drop(mutex_guard);

                    // give main test thread a good chance to
                    // acquire the lock before this thread does.
                    sleep(sleep_backoff);
                    sleep_backoff *= 2;
                };

                cv.notify_one();
            }
        })
    }

    #[test]
    fn wait_while_until_internal_does_not_wait_if_initially_false() {
        let mutex = Arc::new(Mutex::new(0));
        let cv = Arc::new(Condvar::new());

        let condition = |counter: &mut u32| {
            *counter += 1;
            false
        };

        let mut mutex_guard = mutex.lock();
        let timeout_result = cv.wait_while_until_internal(&mut mutex_guard, condition, None);

        assert!(!timeout_result.timed_out());
        assert!(*mutex_guard == 1);
    }

    #[test]
    fn wait_while_until_internal_times_out_before_false() {
        let mutex = Arc::new(Mutex::new(0));
        let cv = Arc::new(Condvar::new());

        let num_iters = 3;
        let condition = |counter: &mut u32| {
            *counter += 1;
            true
        };

        let mut mutex_guard = mutex.lock();
        let timeout = Some(Instant::now() + Duration::from_millis(500));
        let handle = spawn_wait_while_notifier(mutex.clone(), cv.clone(), num_iters, timeout);

        let timeout_result = cv.wait_while_until_internal(&mut mutex_guard, condition, timeout);

        assert!(timeout_result.timed_out());
        assert!(*mutex_guard == num_iters + 1);

        // prevent deadlock with notifier
        drop(mutex_guard);
        handle.join().unwrap();
    }

    #[test]
    fn wait_while_until_internal() {
        let mutex = Arc::new(Mutex::new(0));
        let cv = Arc::new(Condvar::new());

        let num_iters = 4;

        let condition = |counter: &mut u32| {
            *counter += 1;
            *counter <= num_iters
        };

        let mut mutex_guard = mutex.lock();
        let handle = spawn_wait_while_notifier(mutex.clone(), cv.clone(), num_iters, None);

        let timeout_result = cv.wait_while_until_internal(&mut mutex_guard, condition, None);

        assert!(!timeout_result.timed_out());
        assert!(*mutex_guard == num_iters + 1);

        let timeout_result = cv.wait_while_until_internal(&mut mutex_guard, condition, None);
        handle.join().unwrap();

        assert!(!timeout_result.timed_out());
        assert!(*mutex_guard == num_iters + 2);
    }

    #[test]
    #[should_panic]
    fn two_mutexes() {
        let m = Arc::new(Mutex::new(()));
        let m2 = m.clone();
        let m3 = Arc::new(Mutex::new(()));
        let c = Arc::new(Condvar::new());
        let c2 = c.clone();

        // Make sure we don't leave the child thread dangling
        struct PanicGuard<'a>(&'a Condvar);
        impl<'a> Drop for PanicGuard<'a> {
            fn drop(&mut self) {
                self.0.notify_one();
            }
        }

        let (tx, rx) = channel();
        let g = m.lock();
        let _t = thread::spawn(move || {
            let mut g = m2.lock();
            tx.send(()).unwrap();
            c2.wait(&mut g);
        });
        drop(g);
        rx.recv().unwrap();
        let _g = m.lock();
        let _guard = PanicGuard(&c);
        c.wait(&mut m3.lock());
    }

    #[test]
    fn two_mutexes_disjoint() {
        let m = Arc::new(Mutex::new(()));
        let m2 = m.clone();
        let m3 = Arc::new(Mutex::new(()));
        let c = Arc::new(Condvar::new());
        let c2 = c.clone();

        let mut g = m.lock();
        let _t = thread::spawn(move || {
            let _g = m2.lock();
            c2.notify_one();
        });
        c.wait(&mut g);
        drop(g);

        let _ = c.wait_for(&mut m3.lock(), Duration::from_millis(1));
    }

    #[test]
    fn test_debug_condvar() {
        let c = Condvar::new();
        assert_eq!(format!("{:?}", c), "Condvar { .. }");
    }

    #[test]
    fn test_condvar_requeue() {
        let m = Arc::new(Mutex::new(()));
        let m2 = m.clone();
        let c = Arc::new(Condvar::new());
        let c2 = c.clone();

        let t = thread::spawn(move || {
            let mut g = m2.lock();
            c2.wait(&mut g);
        });

        let mut g = m.lock();
        while !c.notify_one() {
            // Wait for the thread to get into wait()
            MutexGuard::bump(&mut g);
            // Yield, so the other thread gets a chance to do something.
            // (At least Miri needs this, because it doesn't preempt threads.)
            thread::yield_now();
        }
        // The thread should have been requeued to the mutex, which we wake up now.
        drop(g);
        t.join().unwrap();
    }

    #[test]
    fn test_issue_129() {
        let locks = Arc::new((Mutex::new(()), Condvar::new()));

        let (tx, rx) = channel();
        for _ in 0..4 {
            let locks = locks.clone();
            let tx = tx.clone();
            thread::spawn(move || {
                let mut guard = locks.0.lock();
                locks.1.wait(&mut guard);
                locks.1.wait_for(&mut guard, Duration::from_millis(1));
                locks.1.notify_one();
                tx.send(()).unwrap();
            });
        }

        thread::sleep(Duration::from_millis(100));
        locks.1.notify_one();

        for _ in 0..4 {
            assert_eq!(rx.recv_timeout(Duration::from_millis(500)), Ok(()));
        }
    }
}

/// This module contains an integration test that is heavily inspired by WebKit's
/// own integration tests for its own Condvar.
#[cfg(test)]
mod webkit_queue_test {
    use crate::{Condvar, Mutex, MutexGuard};
    use std::{collections::VecDeque, sync::Arc, thread, time::Duration};

    #[derive(Clone, Copy)]
    enum Timeout {
        Bounded(Duration),
        Forever,
    }

    #[derive(Clone, Copy)]
    enum NotifyStyle {
        One,
        All,
    }

    // A bounded FIFO shared between producers and consumers.
    // `should_continue` is cleared to tell consumers to shut down.
    struct Queue {
        items: VecDeque<usize>,
        should_continue: bool,
    }

    impl Queue {
        fn new() -> Self {
            Self {
                items: VecDeque::new(),
                should_continue: true,
            }
        }
    }

    // Blocks on `condition` until `predicate` holds, using either a bounded or
    // unbounded wait depending on `timeout`.
    fn wait<T: ?Sized>(
        condition: &Condvar,
        lock: &mut MutexGuard<'_, T>,
        predicate: impl Fn(&mut MutexGuard<'_, T>) -> bool,
        timeout: &Timeout,
    ) {
        while !predicate(lock) {
            match timeout {
                Timeout::Forever => condition.wait(lock),
                Timeout::Bounded(bound) => {
                    condition.wait_for(lock, *bound);
                }
            }
        }
    }

    // Issues a notification in the configured style; `should_notify` gates the
    // notify_all case.
    fn notify(style: NotifyStyle, condition: &Condvar, should_notify: bool) {
        match style {
            NotifyStyle::One => {
                condition.notify_one();
            }
            NotifyStyle::All => {
                if should_notify {
                    condition.notify_all();
                }
            }
        }
    }

    fn run_queue_test(
        num_producers: usize,
        num_consumers: usize,
        max_queue_size: usize,
        messages_per_producer: usize,
        notify_style: NotifyStyle,
        timeout: Timeout,
        delay: Duration,
    ) {
        let input_queue = Arc::new(Mutex::new(Queue::new()));
        let empty_condition = Arc::new(Condvar::new());
        let full_condition = Arc::new(Condvar::new());
        let output_vec = Arc::new(Mutex::new(vec![]));
        let consumers = (0..num_consumers)
            .map(|_| {
                consumer_thread(
                    input_queue.clone(),
                    empty_condition.clone(),
                    full_condition.clone(),
                    timeout,
                    notify_style,
                    output_vec.clone(),
                    max_queue_size,
                )
            })
            .collect::<Vec<_>>();
        let producers = (0..num_producers)
            .map(|_| {
                producer_thread(
                    messages_per_producer,
                    input_queue.clone(),
                    empty_condition.clone(),
                    full_condition.clone(),
                    timeout,
                    notify_style,
                    max_queue_size,
                )
            })
            .collect::<Vec<_>>();

        thread::sleep(delay);

        for producer in producers.into_iter() {
            producer.join().expect("Producer thread panicked");
        }

        {
            let mut input_queue = input_queue.lock();
            input_queue.should_continue = false;
        }
        empty_condition.notify_all();

        for consumer in consumers.into_iter() {
            consumer.join().expect("Consumer thread panicked");
        }

        let mut output_vec = output_vec.lock();
        assert_eq!(output_vec.len(), num_producers * messages_per_producer);
        output_vec.sort();
        for msg_idx in 0..messages_per_producer {
            for producer_idx in 0..num_producers {
                assert_eq!(msg_idx, output_vec[msg_idx * num_producers + producer_idx]);
            }
        }
    }

    fn consumer_thread(
        input_queue: Arc<Mutex<Queue>>,
        empty_condition: Arc<Condvar>,
        full_condition: Arc<Condvar>,
        timeout: Timeout,
        notify_style: NotifyStyle,
        output_queue: Arc<Mutex<Vec<usize>>>,
        max_queue_size: usize,
    ) -> thread::JoinHandle<()> {
        thread::spawn(move || loop {
            let (should_notify, result) = {
                let mut queue = input_queue.lock();
                wait(
                    &empty_condition,
                    &mut queue,
                    |state| -> bool { !state.items.is_empty() || !state.should_continue },
                    &timeout,
                );
                if queue.items.is_empty() && !queue.should_continue {
                    return;
                }
                let should_notify = queue.items.len() == max_queue_size;
                let result = queue.items.pop_front();
                std::mem::drop(queue);
                (should_notify, result)
            };
            notify(notify_style, &full_condition, should_notify);
            if let Some(result) = result {
                output_queue.lock().push(result);
            }
        })
    }

    fn producer_thread(
        num_messages: usize,
        queue: Arc<Mutex<Queue>>,
        empty_condition: Arc<Condvar>,
        full_condition: Arc<Condvar>,
        timeout: Timeout,
        notify_style: NotifyStyle,
        max_queue_size: usize,
    ) -> thread::JoinHandle<()> {
        thread::spawn(move || {
            for message in 0..num_messages {
                let should_notify = {
                    let mut queue = queue.lock();
                    wait(
                        &full_condition,
                        &mut queue,
                        |state| state.items.len() < max_queue_size,
                        &timeout,
                    );
                    let should_notify = queue.items.is_empty();
                    queue.items.push_back(message);
                    std::mem::drop(queue);
                    should_notify
                };
                notify(notify_style, &empty_condition, should_notify);
            }
        })
    }

    macro_rules! run_queue_tests {
        ( $( $name:ident(
            num_producers: $num_producers:expr,
            num_consumers: $num_consumers:expr,
            max_queue_size: $max_queue_size:expr,
            messages_per_producer: $messages_per_producer:expr,
            notification_style: $notification_style:expr,
            timeout: $timeout:expr,
            delay_seconds: $delay_seconds:expr);
        )* ) => {
            $(#[test]
            fn $name() {
                let delay = Duration::from_secs($delay_seconds);
                run_queue_test(
                    $num_producers,
                    $num_consumers,
                    $max_queue_size,
                    $messages_per_producer,
                    $notification_style,
                    $timeout,
                    delay,
                    );
            })*
        };
    }

    run_queue_tests! {
        sanity_check_queue(
            num_producers: 1,
            num_consumers: 1,
            max_queue_size: 1,
            messages_per_producer: 100_000,
            notification_style: NotifyStyle::All,
            timeout: Timeout::Bounded(Duration::from_secs(1)),
            delay_seconds: 0
        );
        sanity_check_queue_timeout(
            num_producers: 1,
            num_consumers: 1,
            max_queue_size: 1,
            messages_per_producer: 100_000,
            notification_style: NotifyStyle::All,
            timeout: Timeout::Forever,
            delay_seconds: 0
        );
        new_test_without_timeout_5(
            num_producers: 1,
            num_consumers: 5,
            max_queue_size: 1,
            messages_per_producer: 100_000,
            notification_style: NotifyStyle::All,
            timeout: Timeout::Forever,
            delay_seconds: 0
        );
        one_producer_one_consumer_one_slot(
            num_producers: 1,
            num_consumers: 1,
            max_queue_size: 1,
            messages_per_producer: 100_000,
            notification_style: NotifyStyle::All,
            timeout: Timeout::Forever,
            delay_seconds: 0
        );
        one_producer_one_consumer_one_slot_timeout(
            num_producers: 1,
            num_consumers: 1,
            max_queue_size: 1,
            messages_per_producer: 100_000,
            notification_style: NotifyStyle::All,
            timeout: Timeout::Forever,
            delay_seconds: 1
        );
        one_producer_one_consumer_hundred_slots(
            num_producers: 1,
            num_consumers: 1,
            max_queue_size: 100,
            messages_per_producer: 1_000_000,
            notification_style: NotifyStyle::All,
            timeout: Timeout::Forever,
            delay_seconds: 0
        );
        ten_producers_one_consumer_one_slot(
            num_producers: 10,
            num_consumers: 1,
            max_queue_size: 1,
            messages_per_producer: 10000,
            notification_style: NotifyStyle::All,
            timeout: Timeout::Forever,
            delay_seconds: 0
        );
        ten_producers_one_consumer_hundred_slots_notify_all(
            num_producers: 10,
            num_consumers: 1,
            max_queue_size: 100,
            messages_per_producer: 10000,
            notification_style: NotifyStyle::All,
            timeout: Timeout::Forever,
            delay_seconds: 0
        );
        ten_producers_one_consumer_hundred_slots_notify_one(
            num_producers: 10,
            num_consumers: 1,
            max_queue_size: 100,
            messages_per_producer: 10000,
            notification_style: NotifyStyle::One,
            timeout: Timeout::Forever,
            delay_seconds: 0
        );
        one_producer_ten_consumers_one_slot(
            num_producers: 1,
            num_consumers: 10,
            max_queue_size: 1,
            messages_per_producer: 10000,
            notification_style: NotifyStyle::All,
            timeout: Timeout::Forever,
            delay_seconds: 0
        );
        one_producer_ten_consumers_hundred_slots_notify_all(
            num_producers: 1,
            num_consumers: 10,
            max_queue_size: 100,
            messages_per_producer: 100_000,
            notification_style: NotifyStyle::All,
            timeout: Timeout::Forever,
            delay_seconds: 0
        );
        one_producer_ten_consumers_hundred_slots_notify_one(
            num_producers: 1,
            num_consumers: 10,
            max_queue_size: 100,
            messages_per_producer: 100_000,
            notification_style: NotifyStyle::One,
            timeout: Timeout::Forever,
            delay_seconds: 0
        );
        ten_producers_ten_consumers_one_slot(
            num_producers: 10,
            num_consumers: 10,
            max_queue_size: 1,
            messages_per_producer: 50000,
            notification_style: NotifyStyle::All,
            timeout: Timeout::Forever,
            delay_seconds: 0
        );
        ten_producers_ten_consumers_hundred_slots_notify_all(
            num_producers: 10,
            num_consumers: 10,
            max_queue_size: 100,
            messages_per_producer: 50000,
            notification_style: NotifyStyle::All,
            timeout: Timeout::Forever,
            delay_seconds: 0
        );
        ten_producers_ten_consumers_hundred_slots_notify_one(
            num_producers: 10,
            num_consumers: 10,
            max_queue_size: 100,
            messages_per_producer: 50000,
            notification_style: NotifyStyle::One,
            timeout: Timeout::Forever,
            delay_seconds: 0
        );
    }
}
```
/content/code_sandbox/src/condvar.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
9,539
```rust // // path_to_url or the MIT license <LICENSE-MIT or // path_to_url at your option. This file may not be // copied, modified, or distributed except according to those terms. use crate::raw_mutex::RawMutex; /// A mutual exclusion primitive useful for protecting shared data /// /// This mutex will block threads waiting for the lock to become available. The /// mutex can be statically initialized or created by the `new` /// constructor. Each mutex has a type parameter which represents the data that /// it is protecting. The data can only be accessed through the RAII guards /// returned from `lock` and `try_lock`, which guarantees that the data is only /// ever accessed when the mutex is locked. /// /// # Fairness /// /// A typical unfair lock can often end up in a situation where a single thread /// quickly acquires and releases the same mutex in succession, which can starve /// other threads waiting to acquire the mutex. While this improves throughput /// because it doesn't force a context switch when a thread tries to re-acquire /// a mutex it has just released, this can starve other threads. /// /// This mutex uses [eventual fairness](path_to_url /// to ensure that the lock will be fair on average without sacrificing /// throughput. This is done by forcing a fair unlock on average every 0.5ms, /// which will force the lock to go to the next thread waiting for the mutex. /// /// Additionally, any critical section longer than 1ms will always use a fair /// unlock, which has a negligible impact on throughput considering the length /// of the critical section. /// /// You can also force a fair unlock by calling `MutexGuard::unlock_fair` when /// unlocking a mutex instead of simply dropping the `MutexGuard`. /// /// # Differences from the standard library `Mutex` /// /// - No poisoning, the lock is released normally on panic. /// - Only requires 1 byte of space, whereas the standard library boxes the /// `Mutex` due to platform limitations. 
/// - Can be statically constructed. /// - Does not require any drop glue when dropped. /// - Inline fast path for the uncontended case. /// - Efficient handling of micro-contention using adaptive spinning. /// - Allows raw locking & unlocking without a guard. /// - Supports eventual fairness so that the mutex is fair on average. /// - Optionally allows making the mutex fair by calling `MutexGuard::unlock_fair`. /// /// # Examples /// /// ``` /// use parking_lot::Mutex; /// use std::sync::{Arc, mpsc::channel}; /// use std::thread; /// /// const N: usize = 10; /// /// // Spawn a few threads to increment a shared variable (non-atomically), and /// // let the main thread know once all increments are done. /// // /// // Here we're using an Arc to share memory among threads, and the data inside /// // the Arc is protected with a mutex. /// let data = Arc::new(Mutex::new(0)); /// /// let (tx, rx) = channel(); /// for _ in 0..10 { /// let (data, tx) = (Arc::clone(&data), tx.clone()); /// thread::spawn(move || { /// // The shared state can only be accessed once the lock is held. /// // Our non-atomic increment is safe because we're the only thread /// // which can access the shared state when the lock is held. /// let mut data = data.lock(); /// *data += 1; /// if *data == N { /// tx.send(()).unwrap(); /// } /// // the lock is unlocked here when `data` goes out of scope. /// }); /// } /// /// rx.recv().unwrap(); /// ``` pub type Mutex<T> = lock_api::Mutex<RawMutex, T>; /// Creates a new mutex in an unlocked state ready for use. /// /// This allows creating a mutex in a constant context on stable Rust. pub const fn const_mutex<T>(val: T) -> Mutex<T> { Mutex::const_new(<RawMutex as lock_api::RawMutex>::INIT, val) } /// An RAII implementation of a "scoped lock" of a mutex. When this structure is /// dropped (falls out of scope), the lock will be unlocked. 
/// /// The data protected by the mutex can be accessed through this guard via its /// `Deref` and `DerefMut` implementations. pub type MutexGuard<'a, T> = lock_api::MutexGuard<'a, RawMutex, T>; /// An RAII mutex guard returned by `MutexGuard::map`, which can point to a /// subfield of the protected data. /// /// The main difference between `MappedMutexGuard` and `MutexGuard` is that the /// former doesn't support temporarily unlocking and re-locking, since that /// could introduce soundness issues if the locked object is modified by another /// thread. pub type MappedMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, RawMutex, T>; #[cfg(test)] mod tests { use crate::{Condvar, Mutex}; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::mpsc::channel; use std::sync::Arc; use std::thread; #[cfg(feature = "serde")] use bincode::{deserialize, serialize}; struct Packet<T>(Arc<(Mutex<T>, Condvar)>); #[derive(Eq, PartialEq, Debug)] struct NonCopy(i32); unsafe impl<T: Send> Send for Packet<T> {} unsafe impl<T> Sync for Packet<T> {} #[test] fn smoke() { let m = Mutex::new(()); drop(m.lock()); drop(m.lock()); } #[test] fn lots_and_lots() { const J: u32 = 1000; const K: u32 = 3; let m = Arc::new(Mutex::new(0)); fn inc(m: &Mutex<u32>) { for _ in 0..J { *m.lock() += 1; } } let (tx, rx) = channel(); for _ in 0..K { let tx2 = tx.clone(); let m2 = m.clone(); thread::spawn(move || { inc(&m2); tx2.send(()).unwrap(); }); let tx2 = tx.clone(); let m2 = m.clone(); thread::spawn(move || { inc(&m2); tx2.send(()).unwrap(); }); } drop(tx); for _ in 0..2 * K { rx.recv().unwrap(); } assert_eq!(*m.lock(), J * K * 2); } #[test] fn try_lock() { let m = Mutex::new(()); *m.try_lock().unwrap() = (); } #[test] fn test_into_inner() { let m = Mutex::new(NonCopy(10)); assert_eq!(m.into_inner(), NonCopy(10)); } #[test] fn test_into_inner_drop() { struct Foo(Arc<AtomicUsize>); impl Drop for Foo { fn drop(&mut self) { self.0.fetch_add(1, Ordering::SeqCst); } } let num_drops = 
Arc::new(AtomicUsize::new(0)); let m = Mutex::new(Foo(num_drops.clone())); assert_eq!(num_drops.load(Ordering::SeqCst), 0); { let _inner = m.into_inner(); assert_eq!(num_drops.load(Ordering::SeqCst), 0); } assert_eq!(num_drops.load(Ordering::SeqCst), 1); } #[test] fn test_get_mut() { let mut m = Mutex::new(NonCopy(10)); *m.get_mut() = NonCopy(20); assert_eq!(m.into_inner(), NonCopy(20)); } #[test] fn test_mutex_arc_condvar() { let packet = Packet(Arc::new((Mutex::new(false), Condvar::new()))); let packet2 = Packet(packet.0.clone()); let (tx, rx) = channel(); let _t = thread::spawn(move || { // wait until parent gets in rx.recv().unwrap(); let (lock, cvar) = &*packet2.0; let mut lock = lock.lock(); *lock = true; cvar.notify_one(); }); let (lock, cvar) = &*packet.0; let mut lock = lock.lock(); tx.send(()).unwrap(); assert!(!*lock); while !*lock { cvar.wait(&mut lock); } } #[test] fn test_mutex_arc_nested() { // Tests nested mutexes and access // to underlying data. let arc = Arc::new(Mutex::new(1)); let arc2 = Arc::new(Mutex::new(arc)); let (tx, rx) = channel(); let _t = thread::spawn(move || { let lock = arc2.lock(); let lock2 = lock.lock(); assert_eq!(*lock2, 1); tx.send(()).unwrap(); }); rx.recv().unwrap(); } #[test] fn test_mutex_arc_access_in_unwind() { let arc = Arc::new(Mutex::new(1)); let arc2 = arc.clone(); let _ = thread::spawn(move || { struct Unwinder { i: Arc<Mutex<i32>>, } impl Drop for Unwinder { fn drop(&mut self) { *self.i.lock() += 1; } } let _u = Unwinder { i: arc2 }; panic!(); }) .join(); let lock = arc.lock(); assert_eq!(*lock, 2); } #[test] fn test_mutex_unsized() { let mutex: &Mutex<[i32]> = &Mutex::new([1, 2, 3]); { let b = &mut *mutex.lock(); b[0] = 4; b[2] = 5; } let comp: &[i32] = &[4, 2, 5]; assert_eq!(&*mutex.lock(), comp); } #[test] fn test_mutexguard_sync() { fn sync<T: Sync>(_: T) {} let mutex = Mutex::new(()); sync(mutex.lock()); } #[test] fn test_mutex_debug() { let mutex = Mutex::new(vec![0u8, 10]); assert_eq!(format!("{:?}", 
mutex), "Mutex { data: [0, 10] }"); let _lock = mutex.lock(); assert_eq!(format!("{:?}", mutex), "Mutex { data: <locked> }"); } #[cfg(feature = "serde")] #[test] fn test_serde() { let contents: Vec<u8> = vec![0, 1, 2]; let mutex = Mutex::new(contents.clone()); let serialized = serialize(&mutex).unwrap(); let deserialized: Mutex<Vec<u8>> = deserialize(&serialized).unwrap(); assert_eq!(*(mutex.lock()), *(deserialized.lock())); assert_eq!(contents, *(deserialized.lock())); } } ```
/content/code_sandbox/src/mutex.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
2,476
```rust // // path_to_url or the MIT license <LICENSE-MIT or // path_to_url at your option. This file may not be // copied, modified, or distributed except according to those terms. use crate::raw_mutex::RawMutex; use core::num::NonZeroUsize; use lock_api::{self, GetThreadId}; /// Implementation of the `GetThreadId` trait for `lock_api::ReentrantMutex`. pub struct RawThreadId; unsafe impl GetThreadId for RawThreadId { const INIT: RawThreadId = RawThreadId; fn nonzero_thread_id(&self) -> NonZeroUsize { // The address of a thread-local variable is guaranteed to be unique to the // current thread, and is also guaranteed to be non-zero. The variable has to have a // non-zero size to guarantee it has a unique address for each thread. thread_local!(static KEY: u8 = 0); KEY.with(|x| { NonZeroUsize::new(x as *const _ as usize) .expect("thread-local variable address is null") }) } } /// A mutex which can be recursively locked by a single thread. /// /// This type is identical to `Mutex` except for the following points: /// /// - Locking multiple times from the same thread will work correctly instead of /// deadlocking. /// - `ReentrantMutexGuard` does not give mutable references to the locked data. /// Use a `RefCell` if you need this. /// /// See [`Mutex`](crate::Mutex) for more details about the underlying mutex /// primitive. pub type ReentrantMutex<T> = lock_api::ReentrantMutex<RawMutex, RawThreadId, T>; /// Creates a new reentrant mutex in an unlocked state ready for use. /// /// This allows creating a reentrant mutex in a constant context on stable Rust. pub const fn const_reentrant_mutex<T>(val: T) -> ReentrantMutex<T> { ReentrantMutex::const_new( <RawMutex as lock_api::RawMutex>::INIT, <RawThreadId as lock_api::GetThreadId>::INIT, val, ) } /// An RAII implementation of a "scoped lock" of a reentrant mutex. When this structure /// is dropped (falls out of scope), the lock will be unlocked. 
/// /// The data protected by the mutex can be accessed through this guard via its /// `Deref` implementation. pub type ReentrantMutexGuard<'a, T> = lock_api::ReentrantMutexGuard<'a, RawMutex, RawThreadId, T>; /// An RAII mutex guard returned by `ReentrantMutexGuard::map`, which can point to a /// subfield of the protected data. /// /// The main difference between `MappedReentrantMutexGuard` and `ReentrantMutexGuard` is that the /// former doesn't support temporarily unlocking and re-locking, since that /// could introduce soundness issues if the locked object is modified by another /// thread. pub type MappedReentrantMutexGuard<'a, T> = lock_api::MappedReentrantMutexGuard<'a, RawMutex, RawThreadId, T>; #[cfg(test)] mod tests { use crate::ReentrantMutex; use crate::ReentrantMutexGuard; use std::cell::RefCell; use std::sync::mpsc::channel; use std::sync::Arc; use std::thread; #[cfg(feature = "serde")] use bincode::{deserialize, serialize}; #[test] fn smoke() { let m = ReentrantMutex::new(2); { let a = m.lock(); { let b = m.lock(); { let c = m.lock(); assert_eq!(*c, 2); } assert_eq!(*b, 2); } assert_eq!(*a, 2); } } #[test] fn is_mutex() { let m = Arc::new(ReentrantMutex::new(RefCell::new(0))); let m2 = m.clone(); let lock = m.lock(); let child = thread::spawn(move || { let lock = m2.lock(); assert_eq!(*lock.borrow(), 4950); }); for i in 0..100 { let lock = m.lock(); *lock.borrow_mut() += i; } drop(lock); child.join().unwrap(); } #[test] fn trylock_works() { let m = Arc::new(ReentrantMutex::new(())); let m2 = m.clone(); let _lock = m.try_lock(); let _lock2 = m.try_lock(); thread::spawn(move || { let lock = m2.try_lock(); assert!(lock.is_none()); }) .join() .unwrap(); let _lock3 = m.try_lock(); } #[test] fn test_reentrant_mutex_debug() { let mutex = ReentrantMutex::new(vec![0u8, 10]); assert_eq!(format!("{:?}", mutex), "ReentrantMutex { data: [0, 10] }"); } #[test] fn test_reentrant_mutex_bump() { let mutex = Arc::new(ReentrantMutex::new(())); let mutex2 = 
mutex.clone(); let mut guard = mutex.lock(); let (tx, rx) = channel(); thread::spawn(move || { let _guard = mutex2.lock(); tx.send(()).unwrap(); }); // `bump()` repeatedly until the thread starts up and requests the lock while rx.try_recv().is_err() { ReentrantMutexGuard::bump(&mut guard); } } #[cfg(feature = "serde")] #[test] fn test_serde() { let contents: Vec<u8> = vec![0, 1, 2]; let mutex = ReentrantMutex::new(contents.clone()); let serialized = serialize(&mutex).unwrap(); let deserialized: ReentrantMutex<Vec<u8>> = deserialize(&serialized).unwrap(); assert_eq!(*(mutex.lock()), *(deserialized.lock())); assert_eq!(contents, *(deserialized.lock())); } } ```
/content/code_sandbox/src/remutex.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
1,324
```rust fn main() { let cfg = autocfg::new(); println!("cargo:rerun-if-changed=build.rs"); println!("cargo:rustc-check-cfg=cfg(has_const_fn_trait_bound)"); if cfg.probe_rustc_version(1, 61) { println!("cargo:rustc-cfg=has_const_fn_trait_bound"); } } ```
/content/code_sandbox/lock_api/build.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
81
```rust // // path_to_url or the MIT license <LICENSE-MIT or // path_to_url at your option. This file may not be // copied, modified, or distributed except according to those terms. use crate::elision::{have_elision, AtomicElisionExt}; use crate::raw_mutex::{TOKEN_HANDOFF, TOKEN_NORMAL}; use crate::util; use core::{ cell::Cell, sync::atomic::{AtomicUsize, Ordering}, }; use lock_api::{RawRwLock as RawRwLock_, RawRwLockUpgrade}; use parking_lot_core::{ self, deadlock, FilterOp, ParkResult, ParkToken, SpinWait, UnparkResult, UnparkToken, }; use std::time::{Duration, Instant}; // This reader-writer lock implementation is based on Boost's upgrade_mutex: // path_to_url#L432 // // This implementation uses 2 wait queues, one at key [addr] and one at key // [addr + 1]. The primary queue is used for all new waiting threads, and the // secondary queue is used by the thread which has acquired WRITER_BIT but is // waiting for the remaining readers to exit the lock. // // This implementation is fair between readers and writers since it uses the // order in which threads first started queuing to alternate between read phases // and write phases. In particular is it not vulnerable to write starvation // since readers will block if there is a pending writer. // There is at least one thread in the main queue. const PARKED_BIT: usize = 0b0001; // There is a parked thread holding WRITER_BIT. WRITER_BIT must be set. const WRITER_PARKED_BIT: usize = 0b0010; // A reader is holding an upgradable lock. The reader count must be non-zero and // WRITER_BIT must not be set. const UPGRADABLE_BIT: usize = 0b0100; // If the reader count is zero: a writer is currently holding an exclusive lock. // Otherwise: a writer is waiting for the remaining readers to exit the lock. const WRITER_BIT: usize = 0b1000; // Mask of bits used to count readers. const READERS_MASK: usize = !0b1111; // Base unit for counting readers. 
const ONE_READER: usize = 0b10000; // Token indicating what type of lock a queued thread is trying to acquire const TOKEN_SHARED: ParkToken = ParkToken(ONE_READER); const TOKEN_EXCLUSIVE: ParkToken = ParkToken(WRITER_BIT); const TOKEN_UPGRADABLE: ParkToken = ParkToken(ONE_READER | UPGRADABLE_BIT); /// Raw reader-writer lock type backed by the parking lot. pub struct RawRwLock { state: AtomicUsize, } unsafe impl lock_api::RawRwLock for RawRwLock { const INIT: RawRwLock = RawRwLock { state: AtomicUsize::new(0), }; type GuardMarker = crate::GuardMarker; #[inline] fn lock_exclusive(&self) { if self .state .compare_exchange_weak(0, WRITER_BIT, Ordering::Acquire, Ordering::Relaxed) .is_err() { let result = self.lock_exclusive_slow(None); debug_assert!(result); } self.deadlock_acquire(); } #[inline] fn try_lock_exclusive(&self) -> bool { if self .state .compare_exchange(0, WRITER_BIT, Ordering::Acquire, Ordering::Relaxed) .is_ok() { self.deadlock_acquire(); true } else { false } } #[inline] unsafe fn unlock_exclusive(&self) { self.deadlock_release(); if self .state .compare_exchange(WRITER_BIT, 0, Ordering::Release, Ordering::Relaxed) .is_ok() { return; } self.unlock_exclusive_slow(false); } #[inline] fn lock_shared(&self) { if !self.try_lock_shared_fast(false) { let result = self.lock_shared_slow(false, None); debug_assert!(result); } self.deadlock_acquire(); } #[inline] fn try_lock_shared(&self) -> bool { let result = if self.try_lock_shared_fast(false) { true } else { self.try_lock_shared_slow(false) }; if result { self.deadlock_acquire(); } result } #[inline] unsafe fn unlock_shared(&self) { self.deadlock_release(); let state = if have_elision() { self.state.elision_fetch_sub_release(ONE_READER) } else { self.state.fetch_sub(ONE_READER, Ordering::Release) }; if state & (READERS_MASK | WRITER_PARKED_BIT) == (ONE_READER | WRITER_PARKED_BIT) { self.unlock_shared_slow(); } } #[inline] fn is_locked(&self) -> bool { let state = self.state.load(Ordering::Relaxed); state & 
(WRITER_BIT | READERS_MASK) != 0 } #[inline] fn is_locked_exclusive(&self) -> bool { let state = self.state.load(Ordering::Relaxed); state & (WRITER_BIT) != 0 } } unsafe impl lock_api::RawRwLockFair for RawRwLock { #[inline] unsafe fn unlock_shared_fair(&self) { // Shared unlocking is always fair in this implementation. self.unlock_shared(); } #[inline] unsafe fn unlock_exclusive_fair(&self) { self.deadlock_release(); if self .state .compare_exchange(WRITER_BIT, 0, Ordering::Release, Ordering::Relaxed) .is_ok() { return; } self.unlock_exclusive_slow(true); } #[inline] unsafe fn bump_shared(&self) { if self.state.load(Ordering::Relaxed) & (READERS_MASK | WRITER_BIT) == ONE_READER | WRITER_BIT { self.bump_shared_slow(); } } #[inline] unsafe fn bump_exclusive(&self) { if self.state.load(Ordering::Relaxed) & PARKED_BIT != 0 { self.bump_exclusive_slow(); } } } unsafe impl lock_api::RawRwLockDowngrade for RawRwLock { #[inline] unsafe fn downgrade(&self) { let state = self .state .fetch_add(ONE_READER - WRITER_BIT, Ordering::Release); // Wake up parked shared and upgradable threads if there are any if state & PARKED_BIT != 0 { self.downgrade_slow(); } } } unsafe impl lock_api::RawRwLockTimed for RawRwLock { type Duration = Duration; type Instant = Instant; #[inline] fn try_lock_shared_for(&self, timeout: Self::Duration) -> bool { let result = if self.try_lock_shared_fast(false) { true } else { self.lock_shared_slow(false, util::to_deadline(timeout)) }; if result { self.deadlock_acquire(); } result } #[inline] fn try_lock_shared_until(&self, timeout: Self::Instant) -> bool { let result = if self.try_lock_shared_fast(false) { true } else { self.lock_shared_slow(false, Some(timeout)) }; if result { self.deadlock_acquire(); } result } #[inline] fn try_lock_exclusive_for(&self, timeout: Duration) -> bool { let result = if self .state .compare_exchange_weak(0, WRITER_BIT, Ordering::Acquire, Ordering::Relaxed) .is_ok() { true } else { 
self.lock_exclusive_slow(util::to_deadline(timeout)) }; if result { self.deadlock_acquire(); } result } #[inline] fn try_lock_exclusive_until(&self, timeout: Instant) -> bool { let result = if self .state .compare_exchange_weak(0, WRITER_BIT, Ordering::Acquire, Ordering::Relaxed) .is_ok() { true } else { self.lock_exclusive_slow(Some(timeout)) }; if result { self.deadlock_acquire(); } result } } unsafe impl lock_api::RawRwLockRecursive for RawRwLock { #[inline] fn lock_shared_recursive(&self) { if !self.try_lock_shared_fast(true) { let result = self.lock_shared_slow(true, None); debug_assert!(result); } self.deadlock_acquire(); } #[inline] fn try_lock_shared_recursive(&self) -> bool { let result = if self.try_lock_shared_fast(true) { true } else { self.try_lock_shared_slow(true) }; if result { self.deadlock_acquire(); } result } } unsafe impl lock_api::RawRwLockRecursiveTimed for RawRwLock { #[inline] fn try_lock_shared_recursive_for(&self, timeout: Self::Duration) -> bool { let result = if self.try_lock_shared_fast(true) { true } else { self.lock_shared_slow(true, util::to_deadline(timeout)) }; if result { self.deadlock_acquire(); } result } #[inline] fn try_lock_shared_recursive_until(&self, timeout: Self::Instant) -> bool { let result = if self.try_lock_shared_fast(true) { true } else { self.lock_shared_slow(true, Some(timeout)) }; if result { self.deadlock_acquire(); } result } } unsafe impl lock_api::RawRwLockUpgrade for RawRwLock { #[inline] fn lock_upgradable(&self) { if !self.try_lock_upgradable_fast() { let result = self.lock_upgradable_slow(None); debug_assert!(result); } self.deadlock_acquire(); } #[inline] fn try_lock_upgradable(&self) -> bool { let result = if self.try_lock_upgradable_fast() { true } else { self.try_lock_upgradable_slow() }; if result { self.deadlock_acquire(); } result } #[inline] unsafe fn unlock_upgradable(&self) { self.deadlock_release(); let state = self.state.load(Ordering::Relaxed); #[allow(clippy::collapsible_if)] if state & 
PARKED_BIT == 0 { if self .state .compare_exchange_weak( state, state - (ONE_READER | UPGRADABLE_BIT), Ordering::Release, Ordering::Relaxed, ) .is_ok() { return; } } self.unlock_upgradable_slow(false); } #[inline] unsafe fn upgrade(&self) { let state = self.state.fetch_sub( (ONE_READER | UPGRADABLE_BIT) - WRITER_BIT, Ordering::Acquire, ); if state & READERS_MASK != ONE_READER { let result = self.upgrade_slow(None); debug_assert!(result); } } #[inline] unsafe fn try_upgrade(&self) -> bool { if self .state .compare_exchange_weak( ONE_READER | UPGRADABLE_BIT, WRITER_BIT, Ordering::Acquire, Ordering::Relaxed, ) .is_ok() { true } else { self.try_upgrade_slow() } } } unsafe impl lock_api::RawRwLockUpgradeFair for RawRwLock { #[inline] unsafe fn unlock_upgradable_fair(&self) { self.deadlock_release(); let state = self.state.load(Ordering::Relaxed); #[allow(clippy::collapsible_if)] if state & PARKED_BIT == 0 { if self .state .compare_exchange_weak( state, state - (ONE_READER | UPGRADABLE_BIT), Ordering::Release, Ordering::Relaxed, ) .is_ok() { return; } } self.unlock_upgradable_slow(false); } #[inline] unsafe fn bump_upgradable(&self) { if self.state.load(Ordering::Relaxed) == ONE_READER | UPGRADABLE_BIT | PARKED_BIT { self.bump_upgradable_slow(); } } } unsafe impl lock_api::RawRwLockUpgradeDowngrade for RawRwLock { #[inline] unsafe fn downgrade_upgradable(&self) { let state = self.state.fetch_sub(UPGRADABLE_BIT, Ordering::Relaxed); // Wake up parked upgradable threads if there are any if state & PARKED_BIT != 0 { self.downgrade_slow(); } } #[inline] unsafe fn downgrade_to_upgradable(&self) { let state = self.state.fetch_add( (ONE_READER | UPGRADABLE_BIT) - WRITER_BIT, Ordering::Release, ); // Wake up parked shared threads if there are any if state & PARKED_BIT != 0 { self.downgrade_to_upgradable_slow(); } } } unsafe impl lock_api::RawRwLockUpgradeTimed for RawRwLock { #[inline] fn try_lock_upgradable_until(&self, timeout: Instant) -> bool { let result = if 
self.try_lock_upgradable_fast() { true } else { self.lock_upgradable_slow(Some(timeout)) }; if result { self.deadlock_acquire(); } result } #[inline] fn try_lock_upgradable_for(&self, timeout: Duration) -> bool { let result = if self.try_lock_upgradable_fast() { true } else { self.lock_upgradable_slow(util::to_deadline(timeout)) }; if result { self.deadlock_acquire(); } result } #[inline] unsafe fn try_upgrade_until(&self, timeout: Instant) -> bool { let state = self.state.fetch_sub( (ONE_READER | UPGRADABLE_BIT) - WRITER_BIT, Ordering::Relaxed, ); if state & READERS_MASK == ONE_READER { true } else { self.upgrade_slow(Some(timeout)) } } #[inline] unsafe fn try_upgrade_for(&self, timeout: Duration) -> bool { let state = self.state.fetch_sub( (ONE_READER | UPGRADABLE_BIT) - WRITER_BIT, Ordering::Relaxed, ); if state & READERS_MASK == ONE_READER { true } else { self.upgrade_slow(util::to_deadline(timeout)) } } } impl RawRwLock { #[inline(always)] fn try_lock_shared_fast(&self, recursive: bool) -> bool { let state = self.state.load(Ordering::Relaxed); // We can't allow grabbing a shared lock if there is a writer, even if // the writer is still waiting for the remaining readers to exit. if state & WRITER_BIT != 0 { // To allow recursive locks, we make an exception and allow readers // to skip ahead of a pending writer to avoid deadlocking, at the // cost of breaking the fairness guarantees. if !recursive || state & READERS_MASK == 0 { return false; } } // Use hardware lock elision to avoid cache conflicts when multiple // readers try to acquire the lock. We only do this if the lock is // completely empty since elision handles conflicts poorly. 
if have_elision() && state == 0 { self.state .elision_compare_exchange_acquire(0, ONE_READER) .is_ok() } else if let Some(new_state) = state.checked_add(ONE_READER) { self.state .compare_exchange_weak(state, new_state, Ordering::Acquire, Ordering::Relaxed) .is_ok() } else { false } } #[cold] fn try_lock_shared_slow(&self, recursive: bool) -> bool { let mut state = self.state.load(Ordering::Relaxed); loop { // This mirrors the condition in try_lock_shared_fast #[allow(clippy::collapsible_if)] if state & WRITER_BIT != 0 { if !recursive || state & READERS_MASK == 0 { return false; } } if have_elision() && state == 0 { match self.state.elision_compare_exchange_acquire(0, ONE_READER) { Ok(_) => return true, Err(x) => state = x, } } else { match self.state.compare_exchange_weak( state, state .checked_add(ONE_READER) .expect("RwLock reader count overflow"), Ordering::Acquire, Ordering::Relaxed, ) { Ok(_) => return true, Err(x) => state = x, } } } } #[inline(always)] fn try_lock_upgradable_fast(&self) -> bool { let state = self.state.load(Ordering::Relaxed); // We can't grab an upgradable lock if there is already a writer or // upgradable reader. 
if state & (WRITER_BIT | UPGRADABLE_BIT) != 0 { return false; } if let Some(new_state) = state.checked_add(ONE_READER | UPGRADABLE_BIT) { self.state .compare_exchange_weak(state, new_state, Ordering::Acquire, Ordering::Relaxed) .is_ok() } else { false } } #[cold] fn try_lock_upgradable_slow(&self) -> bool { let mut state = self.state.load(Ordering::Relaxed); loop { // This mirrors the condition in try_lock_upgradable_fast if state & (WRITER_BIT | UPGRADABLE_BIT) != 0 { return false; } match self.state.compare_exchange_weak( state, state .checked_add(ONE_READER | UPGRADABLE_BIT) .expect("RwLock reader count overflow"), Ordering::Acquire, Ordering::Relaxed, ) { Ok(_) => return true, Err(x) => state = x, } } } #[cold] fn lock_exclusive_slow(&self, timeout: Option<Instant>) -> bool { let try_lock = |state: &mut usize| { loop { if *state & (WRITER_BIT | UPGRADABLE_BIT) != 0 { return false; } // Grab WRITER_BIT if it isn't set, even if there are parked threads. match self.state.compare_exchange_weak( *state, *state | WRITER_BIT, Ordering::Acquire, Ordering::Relaxed, ) { Ok(_) => return true, Err(x) => *state = x, } } }; // Step 1: grab exclusive ownership of WRITER_BIT let timed_out = !self.lock_common( timeout, TOKEN_EXCLUSIVE, try_lock, WRITER_BIT | UPGRADABLE_BIT, ); if timed_out { return false; } // Step 2: wait for all remaining readers to exit the lock. self.wait_for_readers(timeout, 0) } #[cold] fn unlock_exclusive_slow(&self, force_fair: bool) { // There are threads to unpark. Try to unpark as many as we can. let callback = |mut new_state, result: UnparkResult| { // If we are using a fair unlock then we should keep the // rwlock locked and hand it off to the unparked threads. if result.unparked_threads != 0 && (force_fair || result.be_fair) { if result.have_more_threads { new_state |= PARKED_BIT; } self.state.store(new_state, Ordering::Release); TOKEN_HANDOFF } else { // Clear the parked bit if there are no more parked threads. 
if result.have_more_threads { self.state.store(PARKED_BIT, Ordering::Release); } else { self.state.store(0, Ordering::Release); } TOKEN_NORMAL } }; // SAFETY: `callback` does not panic or call into any function of `parking_lot`. unsafe { self.wake_parked_threads(0, callback); } } #[cold] fn lock_shared_slow(&self, recursive: bool, timeout: Option<Instant>) -> bool { let try_lock = |state: &mut usize| { let mut spinwait_shared = SpinWait::new(); loop { // Use hardware lock elision to avoid cache conflicts when multiple // readers try to acquire the lock. We only do this if the lock is // completely empty since elision handles conflicts poorly. if have_elision() && *state == 0 { match self.state.elision_compare_exchange_acquire(0, ONE_READER) { Ok(_) => return true, Err(x) => *state = x, } } // This is the same condition as try_lock_shared_fast #[allow(clippy::collapsible_if)] if *state & WRITER_BIT != 0 { if !recursive || *state & READERS_MASK == 0 { return false; } } if self .state .compare_exchange_weak( *state, state .checked_add(ONE_READER) .expect("RwLock reader count overflow"), Ordering::Acquire, Ordering::Relaxed, ) .is_ok() { return true; } // If there is high contention on the reader count then we want // to leave some time between attempts to acquire the lock to // let other threads make progress. spinwait_shared.spin_no_yield(); *state = self.state.load(Ordering::Relaxed); } }; self.lock_common(timeout, TOKEN_SHARED, try_lock, WRITER_BIT) } #[cold] fn unlock_shared_slow(&self) { // At this point WRITER_PARKED_BIT is set and READER_MASK is empty. We // just need to wake up a potentially sleeping pending writer. // Using the 2nd key at addr + 1 let addr = self as *const _ as usize + 1; let callback = |_result: UnparkResult| { // Clear the WRITER_PARKED_BIT here since there can only be one // parked writer thread. self.state.fetch_and(!WRITER_PARKED_BIT, Ordering::Relaxed); TOKEN_NORMAL }; // SAFETY: // * `addr` is an address we control. 
// * `callback` does not panic or call into any function of `parking_lot`. unsafe { parking_lot_core::unpark_one(addr, callback); } } #[cold] fn lock_upgradable_slow(&self, timeout: Option<Instant>) -> bool { let try_lock = |state: &mut usize| { let mut spinwait_shared = SpinWait::new(); loop { if *state & (WRITER_BIT | UPGRADABLE_BIT) != 0 { return false; } if self .state .compare_exchange_weak( *state, state .checked_add(ONE_READER | UPGRADABLE_BIT) .expect("RwLock reader count overflow"), Ordering::Acquire, Ordering::Relaxed, ) .is_ok() { return true; } // If there is high contention on the reader count then we want // to leave some time between attempts to acquire the lock to // let other threads make progress. spinwait_shared.spin_no_yield(); *state = self.state.load(Ordering::Relaxed); } }; self.lock_common( timeout, TOKEN_UPGRADABLE, try_lock, WRITER_BIT | UPGRADABLE_BIT, ) } #[cold] fn unlock_upgradable_slow(&self, force_fair: bool) { // Just release the lock if there are no parked threads. let mut state = self.state.load(Ordering::Relaxed); while state & PARKED_BIT == 0 { match self.state.compare_exchange_weak( state, state - (ONE_READER | UPGRADABLE_BIT), Ordering::Release, Ordering::Relaxed, ) { Ok(_) => return, Err(x) => state = x, } } // There are threads to unpark. Try to unpark as many as we can. let callback = |new_state, result: UnparkResult| { // If we are using a fair unlock then we should keep the // rwlock locked and hand it off to the unparked threads. let mut state = self.state.load(Ordering::Relaxed); if force_fair || result.be_fair { // Fall back to normal unpark on overflow. Panicking is // not allowed in parking_lot callbacks. 
while let Some(mut new_state) = (state - (ONE_READER | UPGRADABLE_BIT)).checked_add(new_state) { if result.have_more_threads { new_state |= PARKED_BIT; } else { new_state &= !PARKED_BIT; } match self.state.compare_exchange_weak( state, new_state, Ordering::Relaxed, Ordering::Relaxed, ) { Ok(_) => return TOKEN_HANDOFF, Err(x) => state = x, } } } // Otherwise just release the upgradable lock and update PARKED_BIT. loop { let mut new_state = state - (ONE_READER | UPGRADABLE_BIT); if result.have_more_threads { new_state |= PARKED_BIT; } else { new_state &= !PARKED_BIT; } match self.state.compare_exchange_weak( state, new_state, Ordering::Relaxed, Ordering::Relaxed, ) { Ok(_) => return TOKEN_NORMAL, Err(x) => state = x, } } }; // SAFETY: `callback` does not panic or call into any function of `parking_lot`. unsafe { self.wake_parked_threads(0, callback); } } #[cold] fn try_upgrade_slow(&self) -> bool { let mut state = self.state.load(Ordering::Relaxed); loop { if state & READERS_MASK != ONE_READER { return false; } match self.state.compare_exchange_weak( state, state - (ONE_READER | UPGRADABLE_BIT) + WRITER_BIT, Ordering::Relaxed, Ordering::Relaxed, ) { Ok(_) => return true, Err(x) => state = x, } } } #[cold] fn upgrade_slow(&self, timeout: Option<Instant>) -> bool { self.wait_for_readers(timeout, ONE_READER | UPGRADABLE_BIT) } #[cold] fn downgrade_slow(&self) { // We only reach this point if PARKED_BIT is set. let callback = |_, result: UnparkResult| { // Clear the parked bit if there no more parked threads if !result.have_more_threads { self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed); } TOKEN_NORMAL }; // SAFETY: `callback` does not panic or call into any function of `parking_lot`. unsafe { self.wake_parked_threads(ONE_READER, callback); } } #[cold] fn downgrade_to_upgradable_slow(&self) { // We only reach this point if PARKED_BIT is set. 
let callback = |_, result: UnparkResult| { // Clear the parked bit if there no more parked threads if !result.have_more_threads { self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed); } TOKEN_NORMAL }; // SAFETY: `callback` does not panic or call into any function of `parking_lot`. unsafe { self.wake_parked_threads(ONE_READER | UPGRADABLE_BIT, callback); } } #[cold] unsafe fn bump_shared_slow(&self) { self.unlock_shared(); self.lock_shared(); } #[cold] fn bump_exclusive_slow(&self) { self.deadlock_release(); self.unlock_exclusive_slow(true); self.lock_exclusive(); } #[cold] fn bump_upgradable_slow(&self) { self.deadlock_release(); self.unlock_upgradable_slow(true); self.lock_upgradable(); } /// Common code for waking up parked threads after releasing `WRITER_BIT` or /// `UPGRADABLE_BIT`. /// /// # Safety /// /// `callback` must uphold the requirements of the `callback` parameter to /// `parking_lot_core::unpark_filter`. Meaning no panics or calls into any function in /// `parking_lot`. #[inline] unsafe fn wake_parked_threads( &self, new_state: usize, callback: impl FnOnce(usize, UnparkResult) -> UnparkToken, ) { // We must wake up at least one upgrader or writer if there is one, // otherwise they may end up parked indefinitely since unlock_shared // does not call wake_parked_threads. let new_state = Cell::new(new_state); let addr = self as *const _ as usize; let filter = |ParkToken(token)| { let s = new_state.get(); // If we are waking up a writer, don't wake anything else. if s & WRITER_BIT != 0 { return FilterOp::Stop; } // Otherwise wake *all* readers and one upgrader/writer. if token & (UPGRADABLE_BIT | WRITER_BIT) != 0 && s & UPGRADABLE_BIT != 0 { // Skip writers and upgradable readers if we already have // a writer/upgradable reader. FilterOp::Skip } else { new_state.set(s + token); FilterOp::Unpark } }; let callback = |result| callback(new_state.get(), result); // SAFETY: // * `addr` is an address we control. 
// * `filter` does not panic or call into any function of `parking_lot`. // * `callback` safety responsibility is on caller parking_lot_core::unpark_filter(addr, filter, callback); } // Common code for waiting for readers to exit the lock after acquiring // WRITER_BIT. #[inline] fn wait_for_readers(&self, timeout: Option<Instant>, prev_value: usize) -> bool { // At this point WRITER_BIT is already set, we just need to wait for the // remaining readers to exit the lock. let mut spinwait = SpinWait::new(); let mut state = self.state.load(Ordering::Acquire); while state & READERS_MASK != 0 { // Spin a few times to wait for readers to exit if spinwait.spin() { state = self.state.load(Ordering::Acquire); continue; } // Set the parked bit if state & WRITER_PARKED_BIT == 0 { if let Err(x) = self.state.compare_exchange_weak( state, state | WRITER_PARKED_BIT, Ordering::Acquire, Ordering::Acquire, ) { state = x; continue; } } // Park our thread until we are woken up by an unlock // Using the 2nd key at addr + 1 let addr = self as *const _ as usize + 1; let validate = || { let state = self.state.load(Ordering::Relaxed); state & READERS_MASK != 0 && state & WRITER_PARKED_BIT != 0 }; let before_sleep = || {}; let timed_out = |_, _| {}; // SAFETY: // * `addr` is an address we control. // * `validate`/`timed_out` does not panic or call into any function of `parking_lot`. // * `before_sleep` does not call `park`, nor does it panic. let park_result = unsafe { parking_lot_core::park( addr, validate, before_sleep, timed_out, TOKEN_EXCLUSIVE, timeout, ) }; match park_result { // We still need to re-check the state if we are unparked // since a previous writer timing-out could have allowed // another reader to sneak in before we parked. ParkResult::Unparked(_) | ParkResult::Invalid => { state = self.state.load(Ordering::Acquire); continue; } // Timeout expired ParkResult::TimedOut => { // We need to release WRITER_BIT and revert back to // our previous value. 
We also wake up any threads that // might be waiting on WRITER_BIT. let state = self.state.fetch_add( prev_value.wrapping_sub(WRITER_BIT | WRITER_PARKED_BIT), Ordering::Relaxed, ); if state & PARKED_BIT != 0 { let callback = |_, result: UnparkResult| { // Clear the parked bit if there no more parked threads if !result.have_more_threads { self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed); } TOKEN_NORMAL }; // SAFETY: `callback` does not panic or call any function of `parking_lot`. unsafe { self.wake_parked_threads(ONE_READER | UPGRADABLE_BIT, callback); } } return false; } } } true } /// Common code for acquiring a lock #[inline] fn lock_common( &self, timeout: Option<Instant>, token: ParkToken, mut try_lock: impl FnMut(&mut usize) -> bool, validate_flags: usize, ) -> bool { let mut spinwait = SpinWait::new(); let mut state = self.state.load(Ordering::Relaxed); loop { // Attempt to grab the lock if try_lock(&mut state) { return true; } // If there are no parked threads, try spinning a few times. if state & (PARKED_BIT | WRITER_PARKED_BIT) == 0 && spinwait.spin() { state = self.state.load(Ordering::Relaxed); continue; } // Set the parked bit if state & PARKED_BIT == 0 { if let Err(x) = self.state.compare_exchange_weak( state, state | PARKED_BIT, Ordering::Relaxed, Ordering::Relaxed, ) { state = x; continue; } } // Park our thread until we are woken up by an unlock let addr = self as *const _ as usize; let validate = || { let state = self.state.load(Ordering::Relaxed); state & PARKED_BIT != 0 && (state & validate_flags != 0) }; let before_sleep = || {}; let timed_out = |_, was_last_thread| { // Clear the parked bit if we were the last parked thread if was_last_thread { self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed); } }; // SAFETY: // * `addr` is an address we control. // * `validate`/`timed_out` does not panic or call into any function of `parking_lot`. // * `before_sleep` does not call `park`, nor does it panic. 
let park_result = unsafe { parking_lot_core::park(addr, validate, before_sleep, timed_out, token, timeout) }; match park_result { // The thread that unparked us passed the lock on to us // directly without unlocking it. ParkResult::Unparked(TOKEN_HANDOFF) => return true, // We were unparked normally, try acquiring the lock again ParkResult::Unparked(_) => (), // The validation function failed, try locking again ParkResult::Invalid => (), // Timeout expired ParkResult::TimedOut => return false, } // Loop back and try locking again spinwait.reset(); state = self.state.load(Ordering::Relaxed); } } #[inline] fn deadlock_acquire(&self) { unsafe { deadlock::acquire_resource(self as *const _ as usize) }; unsafe { deadlock::acquire_resource(self as *const _ as usize + 1) }; } #[inline] fn deadlock_release(&self) { unsafe { deadlock::release_resource(self as *const _ as usize) }; unsafe { deadlock::release_resource(self as *const _ as usize + 1) }; } } ```
/content/code_sandbox/src/raw_rwlock.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
8,191
```rust // // path_to_url or the MIT license <LICENSE-MIT or // path_to_url at your option. This file may not be // copied, modified, or distributed except according to those terms. //! This library provides type-safe and fully-featured [`Mutex`] and [`RwLock`] //! types which wrap a simple raw mutex or rwlock type. This has several //! benefits: not only does it eliminate a large portion of the work in //! implementing custom lock types, it also allows users to write code which is //! generic with regards to different lock implementations. //! //! Basic usage of this crate is very straightforward: //! //! 1. Create a raw lock type. This should only contain the lock state, not any //! data protected by the lock. //! 2. Implement the `RawMutex` trait for your custom lock type. //! 3. Export your mutex as a type alias for `lock_api::Mutex`, and //! your mutex guard as a type alias for `lock_api::MutexGuard`. //! See the [example](#example) below for details. //! //! This process is similar for [`RwLock`]s, except that two guards need to be //! exported instead of one. (Or 3 guards if your type supports upgradable read //! locks, see [extension traits](#extension-traits) below for details) //! //! # Example //! //! ``` //! use lock_api::{RawMutex, Mutex, GuardSend}; //! use std::sync::atomic::{AtomicBool, Ordering}; //! //! // 1. Define our raw lock type //! pub struct RawSpinlock(AtomicBool); //! //! // 2. Implement RawMutex for this type //! unsafe impl RawMutex for RawSpinlock { //! const INIT: RawSpinlock = RawSpinlock(AtomicBool::new(false)); //! //! // A spinlock guard can be sent to another thread and unlocked there //! type GuardMarker = GuardSend; //! //! fn lock(&self) { //! // Note: This isn't the best way of implementing a spinlock, but it //! // suffices for the sake of this example. //! while !self.try_lock() {} //! } //! //! fn try_lock(&self) -> bool { //! self.0 //! .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed) //! .is_ok() //! 
} //! //! unsafe fn unlock(&self) { //! self.0.store(false, Ordering::Release); //! } //! } //! //! // 3. Export the wrappers. This are the types that your users will actually use. //! pub type Spinlock<T> = lock_api::Mutex<RawSpinlock, T>; //! pub type SpinlockGuard<'a, T> = lock_api::MutexGuard<'a, RawSpinlock, T>; //! ``` //! //! # Extension traits //! //! In addition to basic locking & unlocking functionality, you have the option //! of exposing additional functionality in your lock types by implementing //! additional traits for it. Examples of extension features include: //! //! - Fair unlocking (`RawMutexFair`, `RawRwLockFair`) //! - Lock timeouts (`RawMutexTimed`, `RawRwLockTimed`) //! - Downgradable write locks (`RawRwLockDowngradable`) //! - Recursive read locks (`RawRwLockRecursive`) //! - Upgradable read locks (`RawRwLockUpgrade`) //! //! The `Mutex` and `RwLock` wrappers will automatically expose this additional //! functionality if the raw lock type implements these extension traits. //! //! # Cargo features //! //! This crate supports three cargo features: //! //! - `owning_ref`: Allows your lock types to be used with the `owning_ref` crate. //! - `arc_lock`: Enables locking from an `Arc`. This enables types such as `ArcMutexGuard`. Note that this //! requires the `alloc` crate to be present. #![no_std] #![cfg_attr(docsrs, feature(doc_auto_cfg))] #![warn(missing_docs)] #![warn(rust_2018_idioms)] #[macro_use] extern crate scopeguard; #[cfg(feature = "arc_lock")] extern crate alloc; /// Marker type which indicates that the Guard type for a lock is `Send`. pub struct GuardSend(()); /// Marker type which indicates that the Guard type for a lock is not `Send`. #[allow(dead_code)] pub struct GuardNoSend(*mut ()); unsafe impl Sync for GuardNoSend {} mod mutex; pub use crate::mutex::*; #[cfg(feature = "atomic_usize")] mod remutex; #[cfg(feature = "atomic_usize")] pub use crate::remutex::*; mod rwlock; pub use crate::rwlock::*; ```
/content/code_sandbox/lock_api/src/lib.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
997
```rust // // path_to_url or the MIT license <LICENSE-MIT or // path_to_url at your option. This file may not be // copied, modified, or distributed except according to those terms. use core::cell::UnsafeCell; use core::fmt; use core::marker::PhantomData; use core::mem; use core::ops::{Deref, DerefMut}; #[cfg(feature = "arc_lock")] use alloc::sync::Arc; #[cfg(feature = "arc_lock")] use core::mem::ManuallyDrop; #[cfg(feature = "arc_lock")] use core::ptr; #[cfg(feature = "owning_ref")] use owning_ref::StableAddress; #[cfg(feature = "serde")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; /// Basic operations for a mutex. /// /// Types implementing this trait can be used by `Mutex` to form a safe and /// fully-functioning mutex type. /// /// # Safety /// /// Implementations of this trait must ensure that the mutex is actually /// exclusive: a lock can't be acquired while the mutex is already locked. pub unsafe trait RawMutex { /// Initial value for an unlocked mutex. // A non-constant const item is a legacy way to supply an initialized value to downstream // static items. Can hopefully be replaced with `const fn new() -> Self` at some point. #[allow(clippy::declare_interior_mutable_const)] const INIT: Self; /// Marker type which determines whether a lock guard should be `Send`. Use /// one of the `GuardSend` or `GuardNoSend` helper types here. type GuardMarker; /// Acquires this mutex, blocking the current thread until it is able to do so. fn lock(&self); /// Attempts to acquire this mutex without blocking. Returns `true` /// if the lock was successfully acquired and `false` otherwise. fn try_lock(&self) -> bool; /// Unlocks this mutex. /// /// # Safety /// /// This method may only be called if the mutex is held in the current context, i.e. it must /// be paired with a successful call to [`lock`], [`try_lock`], [`try_lock_for`] or [`try_lock_until`]. 
/// /// [`lock`]: RawMutex::lock /// [`try_lock`]: RawMutex::try_lock /// [`try_lock_for`]: RawMutexTimed::try_lock_for /// [`try_lock_until`]: RawMutexTimed::try_lock_until unsafe fn unlock(&self); /// Checks whether the mutex is currently locked. #[inline] fn is_locked(&self) -> bool { let acquired_lock = self.try_lock(); if acquired_lock { // Safety: The lock has been successfully acquired above. unsafe { self.unlock(); } } !acquired_lock } } /// Additional methods for mutexes which support fair unlocking. /// /// Fair unlocking means that a lock is handed directly over to the next waiting /// thread if there is one, without giving other threads the opportunity to /// "steal" the lock in the meantime. This is typically slower than unfair /// unlocking, but may be necessary in certain circumstances. pub unsafe trait RawMutexFair: RawMutex { /// Unlocks this mutex using a fair unlock protocol. /// /// # Safety /// /// This method may only be called if the mutex is held in the current context, see /// the documentation of [`unlock`](RawMutex::unlock). unsafe fn unlock_fair(&self); /// Temporarily yields the mutex to a waiting thread if there is one. /// /// This method is functionally equivalent to calling `unlock_fair` followed /// by `lock`, however it can be much more efficient in the case where there /// are no waiting threads. /// /// # Safety /// /// This method may only be called if the mutex is held in the current context, see /// the documentation of [`unlock`](RawMutex::unlock). unsafe fn bump(&self) { self.unlock_fair(); self.lock(); } } /// Additional methods for mutexes which support locking with timeouts. /// /// The `Duration` and `Instant` types are specified as associated types so that /// this trait is usable even in `no_std` environments. pub unsafe trait RawMutexTimed: RawMutex { /// Duration type used for `try_lock_for`. type Duration; /// Instant type used for `try_lock_until`. 
type Instant; /// Attempts to acquire this lock until a timeout is reached. fn try_lock_for(&self, timeout: Self::Duration) -> bool; /// Attempts to acquire this lock until a timeout is reached. fn try_lock_until(&self, timeout: Self::Instant) -> bool; } /// A mutual exclusion primitive useful for protecting shared data /// /// This mutex will block threads waiting for the lock to become available. The /// mutex can also be statically initialized or created via a `new` /// constructor. Each mutex has a type parameter which represents the data that /// it is protecting. The data can only be accessed through the RAII guards /// returned from `lock` and `try_lock`, which guarantees that the data is only /// ever accessed when the mutex is locked. pub struct Mutex<R, T: ?Sized> { raw: R, data: UnsafeCell<T>, } unsafe impl<R: RawMutex + Send, T: ?Sized + Send> Send for Mutex<R, T> {} unsafe impl<R: RawMutex + Sync, T: ?Sized + Send> Sync for Mutex<R, T> {} impl<R: RawMutex, T> Mutex<R, T> { /// Creates a new mutex in an unlocked state ready for use. #[cfg(has_const_fn_trait_bound)] #[inline] pub const fn new(val: T) -> Mutex<R, T> { Mutex { raw: R::INIT, data: UnsafeCell::new(val), } } /// Creates a new mutex in an unlocked state ready for use. #[cfg(not(has_const_fn_trait_bound))] #[inline] pub fn new(val: T) -> Mutex<R, T> { Mutex { raw: R::INIT, data: UnsafeCell::new(val), } } /// Consumes this mutex, returning the underlying data. #[inline] pub fn into_inner(self) -> T { self.data.into_inner() } } impl<R, T> Mutex<R, T> { /// Creates a new mutex based on a pre-existing raw mutex. #[inline] pub const fn from_raw(raw_mutex: R, val: T) -> Mutex<R, T> { Mutex { raw: raw_mutex, data: UnsafeCell::new(val), } } /// Creates a new mutex based on a pre-existing raw mutex. /// /// This allows creating a mutex in a constant context on stable Rust. /// /// This method is a legacy alias for [`from_raw`](Self::from_raw). 
#[inline] pub const fn const_new(raw_mutex: R, val: T) -> Mutex<R, T> { Self::from_raw(raw_mutex, val) } } impl<R: RawMutex, T: ?Sized> Mutex<R, T> { /// Creates a new `MutexGuard` without checking if the mutex is locked. /// /// # Safety /// /// This method must only be called if the thread logically holds the lock. /// /// Calling this function when a guard has already been produced is undefined behaviour unless /// the guard was forgotten with `mem::forget`. #[inline] pub unsafe fn make_guard_unchecked(&self) -> MutexGuard<'_, R, T> { MutexGuard { mutex: self, marker: PhantomData, } } /// Acquires a mutex, blocking the current thread until it is able to do so. /// /// This function will block the local thread until it is available to acquire /// the mutex. Upon returning, the thread is the only thread with the mutex /// held. An RAII guard is returned to allow scoped unlock of the lock. When /// the guard goes out of scope, the mutex will be unlocked. /// /// Attempts to lock a mutex in the thread which already holds the lock will /// result in a deadlock. #[inline] pub fn lock(&self) -> MutexGuard<'_, R, T> { self.raw.lock(); // SAFETY: The lock is held, as required. unsafe { self.make_guard_unchecked() } } /// Attempts to acquire this lock. /// /// If the lock could not be acquired at this time, then `None` is returned. /// Otherwise, an RAII guard is returned. The lock will be unlocked when the /// guard is dropped. /// /// This function does not block. #[inline] pub fn try_lock(&self) -> Option<MutexGuard<'_, R, T>> { if self.raw.try_lock() { // SAFETY: The lock is held, as required. Some(unsafe { self.make_guard_unchecked() }) } else { None } } /// Returns a mutable reference to the underlying data. /// /// Since this call borrows the `Mutex` mutably, no actual locking needs to /// take place---the mutable borrow statically guarantees no locks exist. 
#[inline] pub fn get_mut(&mut self) -> &mut T { unsafe { &mut *self.data.get() } } /// Checks whether the mutex is currently locked. #[inline] pub fn is_locked(&self) -> bool { self.raw.is_locked() } /// Forcibly unlocks the mutex. /// /// This is useful when combined with `mem::forget` to hold a lock without /// the need to maintain a `MutexGuard` object alive, for example when /// dealing with FFI. /// /// # Safety /// /// This method must only be called if the current thread logically owns a /// `MutexGuard` but that guard has been discarded using `mem::forget`. /// Behavior is undefined if a mutex is unlocked when not locked. #[inline] pub unsafe fn force_unlock(&self) { self.raw.unlock(); } /// Returns the underlying raw mutex object. /// /// Note that you will most likely need to import the `RawMutex` trait from /// `lock_api` to be able to call functions on the raw mutex. /// /// # Safety /// /// This method is unsafe because it allows unlocking a mutex while /// still holding a reference to a `MutexGuard`. #[inline] pub unsafe fn raw(&self) -> &R { &self.raw } /// Returns a raw pointer to the underlying data. /// /// This is useful when combined with `mem::forget` to hold a lock without /// the need to maintain a `MutexGuard` object alive, for example when /// dealing with FFI. /// /// # Safety /// /// You must ensure that there are no data races when dereferencing the /// returned pointer, for example if the current thread logically owns /// a `MutexGuard` but that guard has been discarded using `mem::forget`. #[inline] pub fn data_ptr(&self) -> *mut T { self.data.get() } /// Creates a new `ArcMutexGuard` without checking if the mutex is locked. /// /// # Safety /// /// This method must only be called if the thread logically holds the lock. /// /// Calling this function when a guard has already been produced is undefined behaviour unless /// the guard was forgotten with `mem::forget`. 
#[cfg(feature = "arc_lock")] #[inline] unsafe fn make_arc_guard_unchecked(self: &Arc<Self>) -> ArcMutexGuard<R, T> { ArcMutexGuard { mutex: self.clone(), marker: PhantomData, } } /// Acquires a lock through an `Arc`. /// /// This method is similar to the `lock` method; however, it requires the `Mutex` to be inside of an `Arc` /// and the resulting mutex guard has no lifetime requirements. #[cfg(feature = "arc_lock")] #[inline] pub fn lock_arc(self: &Arc<Self>) -> ArcMutexGuard<R, T> { self.raw.lock(); // SAFETY: the locking guarantee is upheld unsafe { self.make_arc_guard_unchecked() } } /// Attempts to acquire a lock through an `Arc`. /// /// This method is similar to the `try_lock` method; however, it requires the `Mutex` to be inside of an /// `Arc` and the resulting mutex guard has no lifetime requirements. #[cfg(feature = "arc_lock")] #[inline] pub fn try_lock_arc(self: &Arc<Self>) -> Option<ArcMutexGuard<R, T>> { if self.raw.try_lock() { // SAFETY: locking guarantee is upheld Some(unsafe { self.make_arc_guard_unchecked() }) } else { None } } } impl<R: RawMutexFair, T: ?Sized> Mutex<R, T> { /// Forcibly unlocks the mutex using a fair unlock protocol. /// /// This is useful when combined with `mem::forget` to hold a lock without /// the need to maintain a `MutexGuard` object alive, for example when /// dealing with FFI. /// /// # Safety /// /// This method must only be called if the current thread logically owns a /// `MutexGuard` but that guard has been discarded using `mem::forget`. /// Behavior is undefined if a mutex is unlocked when not locked. #[inline] pub unsafe fn force_unlock_fair(&self) { self.raw.unlock_fair(); } } impl<R: RawMutexTimed, T: ?Sized> Mutex<R, T> { /// Attempts to acquire this lock until a timeout is reached. /// /// If the lock could not be acquired before the timeout expired, then /// `None` is returned. Otherwise, an RAII guard is returned. The lock will /// be unlocked when the guard is dropped. 
#[inline] pub fn try_lock_for(&self, timeout: R::Duration) -> Option<MutexGuard<'_, R, T>> { if self.raw.try_lock_for(timeout) { // SAFETY: The lock is held, as required. Some(unsafe { self.make_guard_unchecked() }) } else { None } } /// Attempts to acquire this lock until a timeout is reached. /// /// If the lock could not be acquired before the timeout expired, then /// `None` is returned. Otherwise, an RAII guard is returned. The lock will /// be unlocked when the guard is dropped. #[inline] pub fn try_lock_until(&self, timeout: R::Instant) -> Option<MutexGuard<'_, R, T>> { if self.raw.try_lock_until(timeout) { // SAFETY: The lock is held, as required. Some(unsafe { self.make_guard_unchecked() }) } else { None } } /// Attempts to acquire this lock through an `Arc` until a timeout is reached. /// /// This method is similar to the `try_lock_for` method; however, it requires the `Mutex` to be inside of an /// `Arc` and the resulting mutex guard has no lifetime requirements. #[cfg(feature = "arc_lock")] #[inline] pub fn try_lock_arc_for(self: &Arc<Self>, timeout: R::Duration) -> Option<ArcMutexGuard<R, T>> { if self.raw.try_lock_for(timeout) { // SAFETY: locking guarantee is upheld Some(unsafe { self.make_arc_guard_unchecked() }) } else { None } } /// Attempts to acquire this lock through an `Arc` until a timeout is reached. /// /// This method is similar to the `try_lock_until` method; however, it requires the `Mutex` to be inside of /// an `Arc` and the resulting mutex guard has no lifetime requirements. 
#[cfg(feature = "arc_lock")] #[inline] pub fn try_lock_arc_until( self: &Arc<Self>, timeout: R::Instant, ) -> Option<ArcMutexGuard<R, T>> { if self.raw.try_lock_until(timeout) { // SAFETY: locking guarantee is upheld Some(unsafe { self.make_arc_guard_unchecked() }) } else { None } } } impl<R: RawMutex, T: ?Sized + Default> Default for Mutex<R, T> { #[inline] fn default() -> Mutex<R, T> { Mutex::new(Default::default()) } } impl<R: RawMutex, T> From<T> for Mutex<R, T> { #[inline] fn from(t: T) -> Mutex<R, T> { Mutex::new(t) } } impl<R: RawMutex, T: ?Sized + fmt::Debug> fmt::Debug for Mutex<R, T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self.try_lock() { Some(guard) => f.debug_struct("Mutex").field("data", &&*guard).finish(), None => { struct LockedPlaceholder; impl fmt::Debug for LockedPlaceholder { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str("<locked>") } } f.debug_struct("Mutex") .field("data", &LockedPlaceholder) .finish() } } } } // Copied and modified from serde #[cfg(feature = "serde")] impl<R, T> Serialize for Mutex<R, T> where R: RawMutex, T: Serialize + ?Sized, { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { self.lock().serialize(serializer) } } #[cfg(feature = "serde")] impl<'de, R, T> Deserialize<'de> for Mutex<R, T> where R: RawMutex, T: Deserialize<'de> + ?Sized, { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { Deserialize::deserialize(deserializer).map(Mutex::new) } } /// An RAII implementation of a "scoped lock" of a mutex. When this structure is /// dropped (falls out of scope), the lock will be unlocked. /// /// The data protected by the mutex can be accessed through this guard via its /// `Deref` and `DerefMut` implementations. 
#[clippy::has_significant_drop] #[must_use = "if unused the Mutex will immediately unlock"] pub struct MutexGuard<'a, R: RawMutex, T: ?Sized> { mutex: &'a Mutex<R, T>, marker: PhantomData<(&'a mut T, R::GuardMarker)>, } unsafe impl<'a, R: RawMutex + Sync + 'a, T: ?Sized + Sync + 'a> Sync for MutexGuard<'a, R, T> {} impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> MutexGuard<'a, R, T> { /// Returns a reference to the original `Mutex` object. pub fn mutex(s: &Self) -> &'a Mutex<R, T> { s.mutex } /// Makes a new `MappedMutexGuard` for a component of the locked data. /// /// This operation cannot fail as the `MutexGuard` passed /// in already locked the mutex. /// /// This is an associated function that needs to be /// used as `MutexGuard::map(...)`. A method would interfere with methods of /// the same name on the contents of the locked data. #[inline] pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedMutexGuard<'a, R, U> where F: FnOnce(&mut T) -> &mut U, { let raw = &s.mutex.raw; let data = f(unsafe { &mut *s.mutex.data.get() }); mem::forget(s); MappedMutexGuard { raw, data, marker: PhantomData, } } /// Attempts to make a new `MappedMutexGuard` for a component of the /// locked data. The original guard is returned if the closure returns `None`. /// /// This operation cannot fail as the `MutexGuard` passed /// in already locked the mutex. /// /// This is an associated function that needs to be /// used as `MutexGuard::try_map(...)`. A method would interfere with methods of /// the same name on the contents of the locked data. #[inline] pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedMutexGuard<'a, R, U>, Self> where F: FnOnce(&mut T) -> Option<&mut U>, { let raw = &s.mutex.raw; let data = match f(unsafe { &mut *s.mutex.data.get() }) { Some(data) => data, None => return Err(s), }; mem::forget(s); Ok(MappedMutexGuard { raw, data, marker: PhantomData, }) } /// Temporarily unlocks the mutex to execute the given function. 
/// /// This is safe because `&mut` guarantees that there exist no other /// references to the data protected by the mutex. #[inline] pub fn unlocked<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U, { // Safety: A MutexGuard always holds the lock. unsafe { s.mutex.raw.unlock(); } defer!(s.mutex.raw.lock()); f() } /// Leaks the mutex guard and returns a mutable reference to the data /// protected by the mutex. /// /// This will leave the `Mutex` in a locked state. #[inline] pub fn leak(s: Self) -> &'a mut T { let r = unsafe { &mut *s.mutex.data.get() }; mem::forget(s); r } } impl<'a, R: RawMutexFair + 'a, T: ?Sized + 'a> MutexGuard<'a, R, T> { /// Unlocks the mutex using a fair unlock protocol. /// /// By default, mutexes are unfair and allow the current thread to re-lock /// the mutex before another has the chance to acquire the lock, even if /// that thread has been blocked on the mutex for a long time. This is the /// default because it allows much higher throughput as it avoids forcing a /// context switch on every mutex unlock. This can result in one thread /// acquiring a mutex many more times than other threads. /// /// However in some cases it can be beneficial to ensure fairness by forcing /// the lock to pass on to a waiting thread if there is one. This is done by /// using this method instead of dropping the `MutexGuard` normally. #[inline] pub fn unlock_fair(s: Self) { // Safety: A MutexGuard always holds the lock. unsafe { s.mutex.raw.unlock_fair(); } mem::forget(s); } /// Temporarily unlocks the mutex to execute the given function. /// /// The mutex is unlocked using a fair unlock protocol. /// /// This is safe because `&mut` guarantees that there exist no other /// references to the data protected by the mutex. #[inline] pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U, { // Safety: A MutexGuard always holds the lock. 
unsafe { s.mutex.raw.unlock_fair(); } defer!(s.mutex.raw.lock()); f() } /// Temporarily yields the mutex to a waiting thread if there is one. /// /// This method is functionally equivalent to calling `unlock_fair` followed /// by `lock`, however it can be much more efficient in the case where there /// are no waiting threads. #[inline] pub fn bump(s: &mut Self) { // Safety: A MutexGuard always holds the lock. unsafe { s.mutex.raw.bump(); } } } impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Deref for MutexGuard<'a, R, T> { type Target = T; #[inline] fn deref(&self) -> &T { unsafe { &*self.mutex.data.get() } } } impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> DerefMut for MutexGuard<'a, R, T> { #[inline] fn deref_mut(&mut self) -> &mut T { unsafe { &mut *self.mutex.data.get() } } } impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Drop for MutexGuard<'a, R, T> { #[inline] fn drop(&mut self) { // Safety: A MutexGuard always holds the lock. unsafe { self.mutex.raw.unlock(); } } } impl<'a, R: RawMutex + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for MutexGuard<'a, R, T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Debug::fmt(&**self, f) } } impl<'a, R: RawMutex + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display for MutexGuard<'a, R, T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { (**self).fmt(f) } } #[cfg(feature = "owning_ref")] unsafe impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> StableAddress for MutexGuard<'a, R, T> {} /// An RAII mutex guard returned by the `Arc` locking operations on `Mutex`. /// /// This is similar to the `MutexGuard` struct, except instead of using a reference to unlock the `Mutex` it /// uses an `Arc<Mutex>`. This has several advantages, most notably that it has an `'static` lifetime. 
#[cfg(feature = "arc_lock")] #[clippy::has_significant_drop] #[must_use = "if unused the Mutex will immediately unlock"] pub struct ArcMutexGuard<R: RawMutex, T: ?Sized> { mutex: Arc<Mutex<R, T>>, marker: PhantomData<*const ()>, } #[cfg(feature = "arc_lock")] unsafe impl<R: RawMutex + Send + Sync, T: Send + ?Sized> Send for ArcMutexGuard<R, T> where R::GuardMarker: Send { } #[cfg(feature = "arc_lock")] unsafe impl<R: RawMutex + Sync, T: Sync + ?Sized> Sync for ArcMutexGuard<R, T> where R::GuardMarker: Sync { } #[cfg(feature = "arc_lock")] impl<R: RawMutex, T: ?Sized> ArcMutexGuard<R, T> { /// Returns a reference to the `Mutex` this is guarding, contained in its `Arc`. #[inline] pub fn mutex(s: &Self) -> &Arc<Mutex<R, T>> { &s.mutex } /// Unlocks the mutex and returns the `Arc` that was held by the [`ArcMutexGuard`]. #[inline] pub fn into_arc(s: Self) -> Arc<Mutex<R, T>> { // Safety: Skip our Drop impl and manually unlock the mutex. let arc = unsafe { ptr::read(&s.mutex) }; mem::forget(s); unsafe { arc.raw.unlock(); } arc } /// Temporarily unlocks the mutex to execute the given function. /// /// This is safe because `&mut` guarantees that there exist no other /// references to the data protected by the mutex. #[inline] pub fn unlocked<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U, { // Safety: A MutexGuard always holds the lock. unsafe { s.mutex.raw.unlock(); } defer!(s.mutex.raw.lock()); f() } } #[cfg(feature = "arc_lock")] impl<R: RawMutexFair, T: ?Sized> ArcMutexGuard<R, T> { /// Unlocks the mutex using a fair unlock protocol. /// /// This is functionally identical to the `unlock_fair` method on [`MutexGuard`]. #[inline] pub fn unlock_fair(s: Self) { // Safety: A MutexGuard always holds the lock. unsafe { s.mutex.raw.unlock_fair(); } // SAFETY: make sure the Arc gets it reference decremented let mut s = ManuallyDrop::new(s); unsafe { ptr::drop_in_place(&mut s.mutex) }; } /// Temporarily unlocks the mutex to execute the given function. 
/// /// This is functionally identical to the `unlocked_fair` method on [`MutexGuard`]. #[inline] pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U, { // Safety: A MutexGuard always holds the lock. unsafe { s.mutex.raw.unlock_fair(); } defer!(s.mutex.raw.lock()); f() } /// Temporarily yields the mutex to a waiting thread if there is one. /// /// This is functionally identical to the `bump` method on [`MutexGuard`]. #[inline] pub fn bump(s: &mut Self) { // Safety: A MutexGuard always holds the lock. unsafe { s.mutex.raw.bump(); } } } #[cfg(feature = "arc_lock")] impl<R: RawMutex, T: ?Sized> Deref for ArcMutexGuard<R, T> { type Target = T; #[inline] fn deref(&self) -> &T { unsafe { &*self.mutex.data.get() } } } #[cfg(feature = "arc_lock")] impl<R: RawMutex, T: ?Sized> DerefMut for ArcMutexGuard<R, T> { #[inline] fn deref_mut(&mut self) -> &mut T { unsafe { &mut *self.mutex.data.get() } } } #[cfg(feature = "arc_lock")] impl<R: RawMutex, T: ?Sized> Drop for ArcMutexGuard<R, T> { #[inline] fn drop(&mut self) { // Safety: A MutexGuard always holds the lock. unsafe { self.mutex.raw.unlock(); } } } /// An RAII mutex guard returned by `MutexGuard::map`, which can point to a /// subfield of the protected data. /// /// The main difference between `MappedMutexGuard` and `MutexGuard` is that the /// former doesn't support temporarily unlocking and re-locking, since that /// could introduce soundness issues if the locked object is modified by another /// thread. 
#[clippy::has_significant_drop] #[must_use = "if unused the Mutex will immediately unlock"] pub struct MappedMutexGuard<'a, R: RawMutex, T: ?Sized> { raw: &'a R, data: *mut T, marker: PhantomData<&'a mut T>, } unsafe impl<'a, R: RawMutex + Sync + 'a, T: ?Sized + Sync + 'a> Sync for MappedMutexGuard<'a, R, T> { } unsafe impl<'a, R: RawMutex + 'a, T: ?Sized + Send + 'a> Send for MappedMutexGuard<'a, R, T> where R::GuardMarker: Send { } impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> MappedMutexGuard<'a, R, T> { /// Makes a new `MappedMutexGuard` for a component of the locked data. /// /// This operation cannot fail as the `MappedMutexGuard` passed /// in already locked the mutex. /// /// This is an associated function that needs to be /// used as `MappedMutexGuard::map(...)`. A method would interfere with methods of /// the same name on the contents of the locked data. #[inline] pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedMutexGuard<'a, R, U> where F: FnOnce(&mut T) -> &mut U, { let raw = s.raw; let data = f(unsafe { &mut *s.data }); mem::forget(s); MappedMutexGuard { raw, data, marker: PhantomData, } } /// Attempts to make a new `MappedMutexGuard` for a component of the /// locked data. The original guard is returned if the closure returns `None`. /// /// This operation cannot fail as the `MappedMutexGuard` passed /// in already locked the mutex. /// /// This is an associated function that needs to be /// used as `MappedMutexGuard::try_map(...)`. A method would interfere with methods of /// the same name on the contents of the locked data. 
#[inline] pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedMutexGuard<'a, R, U>, Self> where F: FnOnce(&mut T) -> Option<&mut U>, { let raw = s.raw; let data = match f(unsafe { &mut *s.data }) { Some(data) => data, None => return Err(s), }; mem::forget(s); Ok(MappedMutexGuard { raw, data, marker: PhantomData, }) } } impl<'a, R: RawMutexFair + 'a, T: ?Sized + 'a> MappedMutexGuard<'a, R, T> { /// Unlocks the mutex using a fair unlock protocol. /// /// By default, mutexes are unfair and allow the current thread to re-lock /// the mutex before another has the chance to acquire the lock, even if /// that thread has been blocked on the mutex for a long time. This is the /// default because it allows much higher throughput as it avoids forcing a /// context switch on every mutex unlock. This can result in one thread /// acquiring a mutex many more times than other threads. /// /// However in some cases it can be beneficial to ensure fairness by forcing /// the lock to pass on to a waiting thread if there is one. This is done by /// using this method instead of dropping the `MutexGuard` normally. #[inline] pub fn unlock_fair(s: Self) { // Safety: A MutexGuard always holds the lock. unsafe { s.raw.unlock_fair(); } mem::forget(s); } } impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Deref for MappedMutexGuard<'a, R, T> { type Target = T; #[inline] fn deref(&self) -> &T { unsafe { &*self.data } } } impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> DerefMut for MappedMutexGuard<'a, R, T> { #[inline] fn deref_mut(&mut self) -> &mut T { unsafe { &mut *self.data } } } impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Drop for MappedMutexGuard<'a, R, T> { #[inline] fn drop(&mut self) { // Safety: A MappedMutexGuard always holds the lock. 
unsafe { self.raw.unlock(); } } } impl<'a, R: RawMutex + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for MappedMutexGuard<'a, R, T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Debug::fmt(&**self, f) } } impl<'a, R: RawMutex + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display for MappedMutexGuard<'a, R, T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { (**self).fmt(f) } } #[cfg(feature = "owning_ref")] unsafe impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> StableAddress for MappedMutexGuard<'a, R, T> {} ```
/content/code_sandbox/lock_api/src/mutex.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
8,238
```rust // // path_to_url or the MIT license <LICENSE-MIT or // path_to_url at your option. This file may not be // copied, modified, or distributed except according to those terms. use core::cell::UnsafeCell; use core::fmt; use core::marker::PhantomData; use core::mem; use core::ops::{Deref, DerefMut}; #[cfg(feature = "arc_lock")] use alloc::sync::Arc; #[cfg(feature = "arc_lock")] use core::mem::ManuallyDrop; #[cfg(feature = "arc_lock")] use core::ptr; #[cfg(feature = "owning_ref")] use owning_ref::StableAddress; #[cfg(feature = "serde")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; /// Basic operations for a reader-writer lock. /// /// Types implementing this trait can be used by `RwLock` to form a safe and /// fully-functioning `RwLock` type. /// /// # Safety /// /// Implementations of this trait must ensure that the `RwLock` is actually /// exclusive: an exclusive lock can't be acquired while an exclusive or shared /// lock exists, and a shared lock can't be acquire while an exclusive lock /// exists. pub unsafe trait RawRwLock { /// Initial value for an unlocked `RwLock`. // A non-constant const item is a legacy way to supply an initialized value to downstream // static items. Can hopefully be replaced with `const fn new() -> Self` at some point. #[allow(clippy::declare_interior_mutable_const)] const INIT: Self; /// Marker type which determines whether a lock guard should be `Send`. Use /// one of the `GuardSend` or `GuardNoSend` helper types here. type GuardMarker; /// Acquires a shared lock, blocking the current thread until it is able to do so. fn lock_shared(&self); /// Attempts to acquire a shared lock without blocking. fn try_lock_shared(&self) -> bool; /// Releases a shared lock. /// /// # Safety /// /// This method may only be called if a shared lock is held in the current context. unsafe fn unlock_shared(&self); /// Acquires an exclusive lock, blocking the current thread until it is able to do so. 
fn lock_exclusive(&self); /// Attempts to acquire an exclusive lock without blocking. fn try_lock_exclusive(&self) -> bool; /// Releases an exclusive lock. /// /// # Safety /// /// This method may only be called if an exclusive lock is held in the current context. unsafe fn unlock_exclusive(&self); /// Checks if this `RwLock` is currently locked in any way. #[inline] fn is_locked(&self) -> bool { let acquired_lock = self.try_lock_exclusive(); if acquired_lock { // Safety: A lock was successfully acquired above. unsafe { self.unlock_exclusive(); } } !acquired_lock } /// Check if this `RwLock` is currently exclusively locked. fn is_locked_exclusive(&self) -> bool { let acquired_lock = self.try_lock_shared(); if acquired_lock { // Safety: A shared lock was successfully acquired above. unsafe { self.unlock_shared(); } } !acquired_lock } } /// Additional methods for `RwLock`s which support fair unlocking. /// /// Fair unlocking means that a lock is handed directly over to the next waiting /// thread if there is one, without giving other threads the opportunity to /// "steal" the lock in the meantime. This is typically slower than unfair /// unlocking, but may be necessary in certain circumstances. pub unsafe trait RawRwLockFair: RawRwLock { /// Releases a shared lock using a fair unlock protocol. /// /// # Safety /// /// This method may only be called if a shared lock is held in the current context. unsafe fn unlock_shared_fair(&self); /// Releases an exclusive lock using a fair unlock protocol. /// /// # Safety /// /// This method may only be called if an exclusive lock is held in the current context. unsafe fn unlock_exclusive_fair(&self); /// Temporarily yields a shared lock to a waiting thread if there is one. /// /// This method is functionally equivalent to calling `unlock_shared_fair` followed /// by `lock_shared`, however it can be much more efficient in the case where there /// are no waiting threads. 
/// /// # Safety /// /// This method may only be called if a shared lock is held in the current context. unsafe fn bump_shared(&self) { self.unlock_shared_fair(); self.lock_shared(); } /// Temporarily yields an exclusive lock to a waiting thread if there is one. /// /// This method is functionally equivalent to calling `unlock_exclusive_fair` followed /// by `lock_exclusive`, however it can be much more efficient in the case where there /// are no waiting threads. /// /// # Safety /// /// This method may only be called if an exclusive lock is held in the current context. unsafe fn bump_exclusive(&self) { self.unlock_exclusive_fair(); self.lock_exclusive(); } } /// Additional methods for `RwLock`s which support atomically downgrading an /// exclusive lock to a shared lock. pub unsafe trait RawRwLockDowngrade: RawRwLock { /// Atomically downgrades an exclusive lock into a shared lock without /// allowing any thread to take an exclusive lock in the meantime. /// /// # Safety /// /// This method may only be called if an exclusive lock is held in the current context. unsafe fn downgrade(&self); } /// Additional methods for `RwLock`s which support locking with timeouts. /// /// The `Duration` and `Instant` types are specified as associated types so that /// this trait is usable even in `no_std` environments. pub unsafe trait RawRwLockTimed: RawRwLock { /// Duration type used for `try_lock_for`. type Duration; /// Instant type used for `try_lock_until`. type Instant; /// Attempts to acquire a shared lock until a timeout is reached. fn try_lock_shared_for(&self, timeout: Self::Duration) -> bool; /// Attempts to acquire a shared lock until a timeout is reached. fn try_lock_shared_until(&self, timeout: Self::Instant) -> bool; /// Attempts to acquire an exclusive lock until a timeout is reached. fn try_lock_exclusive_for(&self, timeout: Self::Duration) -> bool; /// Attempts to acquire an exclusive lock until a timeout is reached. 
fn try_lock_exclusive_until(&self, timeout: Self::Instant) -> bool; } /// Additional methods for `RwLock`s which support recursive read locks. /// /// These are guaranteed to succeed without blocking if /// another read lock is held at the time of the call. This allows a thread /// to recursively lock a `RwLock`. However using this method can cause /// writers to starve since readers no longer block if a writer is waiting /// for the lock. pub unsafe trait RawRwLockRecursive: RawRwLock { /// Acquires a shared lock without deadlocking in case of a recursive lock. fn lock_shared_recursive(&self); /// Attempts to acquire a shared lock without deadlocking in case of a recursive lock. fn try_lock_shared_recursive(&self) -> bool; } /// Additional methods for `RwLock`s which support recursive read locks and timeouts. pub unsafe trait RawRwLockRecursiveTimed: RawRwLockRecursive + RawRwLockTimed { /// Attempts to acquire a shared lock until a timeout is reached, without /// deadlocking in case of a recursive lock. fn try_lock_shared_recursive_for(&self, timeout: Self::Duration) -> bool; /// Attempts to acquire a shared lock until a timeout is reached, without /// deadlocking in case of a recursive lock. fn try_lock_shared_recursive_until(&self, timeout: Self::Instant) -> bool; } /// Additional methods for `RwLock`s which support atomically upgrading a shared /// lock to an exclusive lock. /// /// This requires acquiring a special "upgradable read lock" instead of a /// normal shared lock. There may only be one upgradable lock at any time, /// otherwise deadlocks could occur when upgrading. pub unsafe trait RawRwLockUpgrade: RawRwLock { /// Acquires an upgradable lock, blocking the current thread until it is able to do so. fn lock_upgradable(&self); /// Attempts to acquire an upgradable lock without blocking. fn try_lock_upgradable(&self) -> bool; /// Releases an upgradable lock. 
/// /// # Safety /// /// This method may only be called if an upgradable lock is held in the current context. unsafe fn unlock_upgradable(&self); /// Upgrades an upgradable lock to an exclusive lock. /// /// # Safety /// /// This method may only be called if an upgradable lock is held in the current context. unsafe fn upgrade(&self); /// Attempts to upgrade an upgradable lock to an exclusive lock without /// blocking. /// /// # Safety /// /// This method may only be called if an upgradable lock is held in the current context. unsafe fn try_upgrade(&self) -> bool; } /// Additional methods for `RwLock`s which support upgradable locks and fair /// unlocking. pub unsafe trait RawRwLockUpgradeFair: RawRwLockUpgrade + RawRwLockFair { /// Releases an upgradable lock using a fair unlock protocol. /// /// # Safety /// /// This method may only be called if an upgradable lock is held in the current context. unsafe fn unlock_upgradable_fair(&self); /// Temporarily yields an upgradable lock to a waiting thread if there is one. /// /// This method is functionally equivalent to calling `unlock_upgradable_fair` followed /// by `lock_upgradable`, however it can be much more efficient in the case where there /// are no waiting threads. /// /// # Safety /// /// This method may only be called if an upgradable lock is held in the current context. unsafe fn bump_upgradable(&self) { self.unlock_upgradable_fair(); self.lock_upgradable(); } } /// Additional methods for `RwLock`s which support upgradable locks and lock /// downgrading. pub unsafe trait RawRwLockUpgradeDowngrade: RawRwLockUpgrade + RawRwLockDowngrade { /// Downgrades an upgradable lock to a shared lock. /// /// # Safety /// /// This method may only be called if an upgradable lock is held in the current context. unsafe fn downgrade_upgradable(&self); /// Downgrades an exclusive lock to an upgradable lock. /// /// # Safety /// /// This method may only be called if an exclusive lock is held in the current context. 
unsafe fn downgrade_to_upgradable(&self); } /// Additional methods for `RwLock`s which support upgradable locks and locking /// with timeouts. pub unsafe trait RawRwLockUpgradeTimed: RawRwLockUpgrade + RawRwLockTimed { /// Attempts to acquire an upgradable lock until a timeout is reached. fn try_lock_upgradable_for(&self, timeout: Self::Duration) -> bool; /// Attempts to acquire an upgradable lock until a timeout is reached. fn try_lock_upgradable_until(&self, timeout: Self::Instant) -> bool; /// Attempts to upgrade an upgradable lock to an exclusive lock until a /// timeout is reached. /// /// # Safety /// /// This method may only be called if an upgradable lock is held in the current context. unsafe fn try_upgrade_for(&self, timeout: Self::Duration) -> bool; /// Attempts to upgrade an upgradable lock to an exclusive lock until a /// timeout is reached. /// /// # Safety /// /// This method may only be called if an upgradable lock is held in the current context. unsafe fn try_upgrade_until(&self, timeout: Self::Instant) -> bool; } /// A reader-writer lock /// /// This type of lock allows a number of readers or at most one writer at any /// point in time. The write portion of this lock typically allows modification /// of the underlying data (exclusive access) and the read portion of this lock /// typically allows for read-only access (shared access). /// /// The type parameter `T` represents the data that this lock protects. It is /// required that `T` satisfies `Send` to be shared across threads and `Sync` to /// allow concurrent access through readers. The RAII guards returned from the /// locking methods implement `Deref` (and `DerefMut` for the `write` methods) /// to allow access to the contained of the lock. 
pub struct RwLock<R, T: ?Sized> { raw: R, data: UnsafeCell<T>, } // Copied and modified from serde #[cfg(feature = "serde")] impl<R, T> Serialize for RwLock<R, T> where R: RawRwLock, T: Serialize + ?Sized, { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { self.read().serialize(serializer) } } #[cfg(feature = "serde")] impl<'de, R, T> Deserialize<'de> for RwLock<R, T> where R: RawRwLock, T: Deserialize<'de> + ?Sized, { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { Deserialize::deserialize(deserializer).map(RwLock::new) } } unsafe impl<R: RawRwLock + Send, T: ?Sized + Send> Send for RwLock<R, T> {} unsafe impl<R: RawRwLock + Sync, T: ?Sized + Send + Sync> Sync for RwLock<R, T> {} impl<R: RawRwLock, T> RwLock<R, T> { /// Creates a new instance of an `RwLock<T>` which is unlocked. #[cfg(has_const_fn_trait_bound)] #[inline] pub const fn new(val: T) -> RwLock<R, T> { RwLock { data: UnsafeCell::new(val), raw: R::INIT, } } /// Creates a new instance of an `RwLock<T>` which is unlocked. #[cfg(not(has_const_fn_trait_bound))] #[inline] pub fn new(val: T) -> RwLock<R, T> { RwLock { data: UnsafeCell::new(val), raw: R::INIT, } } /// Consumes this `RwLock`, returning the underlying data. #[inline] #[allow(unused_unsafe)] pub fn into_inner(self) -> T { unsafe { self.data.into_inner() } } } impl<R, T> RwLock<R, T> { /// Creates a new new instance of an `RwLock<T>` based on a pre-existing /// `RawRwLock<T>`. #[inline] pub const fn from_raw(raw_rwlock: R, val: T) -> RwLock<R, T> { RwLock { data: UnsafeCell::new(val), raw: raw_rwlock, } } /// Creates a new new instance of an `RwLock<T>` based on a pre-existing /// `RawRwLock<T>`. /// /// This allows creating a `RwLock<T>` in a constant context on stable /// Rust. /// /// This method is a legacy alias for [`from_raw`](Self::from_raw). 
#[inline] pub const fn const_new(raw_rwlock: R, val: T) -> RwLock<R, T> { Self::from_raw(raw_rwlock, val) } } impl<R: RawRwLock, T: ?Sized> RwLock<R, T> { /// Creates a new `RwLockReadGuard` without checking if the lock is held. /// /// # Safety /// /// This method must only be called if the thread logically holds a read lock. /// /// This function does not increment the read count of the lock. Calling this function when a /// guard has already been produced is undefined behaviour unless the guard was forgotten /// with `mem::forget`. #[inline] pub unsafe fn make_read_guard_unchecked(&self) -> RwLockReadGuard<'_, R, T> { RwLockReadGuard { rwlock: self, marker: PhantomData, } } /// Creates a new `RwLockReadGuard` without checking if the lock is held. /// /// # Safety /// /// This method must only be called if the thread logically holds a write lock. /// /// Calling this function when a guard has already been produced is undefined behaviour unless /// the guard was forgotten with `mem::forget`. #[inline] pub unsafe fn make_write_guard_unchecked(&self) -> RwLockWriteGuard<'_, R, T> { RwLockWriteGuard { rwlock: self, marker: PhantomData, } } /// Locks this `RwLock` with shared read access, blocking the current thread /// until it can be acquired. /// /// The calling thread will be blocked until there are no more writers which /// hold the lock. There may be other readers currently inside the lock when /// this method returns. /// /// Note that attempts to recursively acquire a read lock on a `RwLock` when /// the current thread already holds one may result in a deadlock. /// /// Returns an RAII guard which will release this thread's shared access /// once it is dropped. #[inline] pub fn read(&self) -> RwLockReadGuard<'_, R, T> { self.raw.lock_shared(); // SAFETY: The lock is held, as required. unsafe { self.make_read_guard_unchecked() } } /// Attempts to acquire this `RwLock` with shared read access. 
/// /// If the access could not be granted at this time, then `None` is returned. /// Otherwise, an RAII guard is returned which will release the shared access /// when it is dropped. /// /// This function does not block. #[inline] pub fn try_read(&self) -> Option<RwLockReadGuard<'_, R, T>> { if self.raw.try_lock_shared() { // SAFETY: The lock is held, as required. Some(unsafe { self.make_read_guard_unchecked() }) } else { None } } /// Locks this `RwLock` with exclusive write access, blocking the current /// thread until it can be acquired. /// /// This function will not return while other writers or other readers /// currently have access to the lock. /// /// Returns an RAII guard which will drop the write access of this `RwLock` /// when dropped. #[inline] pub fn write(&self) -> RwLockWriteGuard<'_, R, T> { self.raw.lock_exclusive(); // SAFETY: The lock is held, as required. unsafe { self.make_write_guard_unchecked() } } /// Attempts to lock this `RwLock` with exclusive write access. /// /// If the lock could not be acquired at this time, then `None` is returned. /// Otherwise, an RAII guard is returned which will release the lock when /// it is dropped. /// /// This function does not block. #[inline] pub fn try_write(&self) -> Option<RwLockWriteGuard<'_, R, T>> { if self.raw.try_lock_exclusive() { // SAFETY: The lock is held, as required. Some(unsafe { self.make_write_guard_unchecked() }) } else { None } } /// Returns a mutable reference to the underlying data. /// /// Since this call borrows the `RwLock` mutably, no actual locking needs to /// take place---the mutable borrow statically guarantees no locks exist. #[inline] pub fn get_mut(&mut self) -> &mut T { unsafe { &mut *self.data.get() } } /// Checks whether this `RwLock` is currently locked in any way. #[inline] pub fn is_locked(&self) -> bool { self.raw.is_locked() } /// Check if this `RwLock` is currently exclusively locked. 
#[inline] pub fn is_locked_exclusive(&self) -> bool { self.raw.is_locked_exclusive() } /// Forcibly unlocks a read lock. /// /// This is useful when combined with `mem::forget` to hold a lock without /// the need to maintain a `RwLockReadGuard` object alive, for example when /// dealing with FFI. /// /// # Safety /// /// This method must only be called if the current thread logically owns a /// `RwLockReadGuard` but that guard has be discarded using `mem::forget`. /// Behavior is undefined if a rwlock is read-unlocked when not read-locked. #[inline] pub unsafe fn force_unlock_read(&self) { self.raw.unlock_shared(); } /// Forcibly unlocks a write lock. /// /// This is useful when combined with `mem::forget` to hold a lock without /// the need to maintain a `RwLockWriteGuard` object alive, for example when /// dealing with FFI. /// /// # Safety /// /// This method must only be called if the current thread logically owns a /// `RwLockWriteGuard` but that guard has be discarded using `mem::forget`. /// Behavior is undefined if a rwlock is write-unlocked when not write-locked. #[inline] pub unsafe fn force_unlock_write(&self) { self.raw.unlock_exclusive(); } /// Returns the underlying raw reader-writer lock object. /// /// Note that you will most likely need to import the `RawRwLock` trait from /// `lock_api` to be able to call functions on the raw /// reader-writer lock. /// /// # Safety /// /// This method is unsafe because it allows unlocking a mutex while /// still holding a reference to a lock guard. pub unsafe fn raw(&self) -> &R { &self.raw } /// Returns a raw pointer to the underlying data. /// /// This is useful when combined with `mem::forget` to hold a lock without /// the need to maintain a `RwLockReadGuard` or `RwLockWriteGuard` object /// alive, for example when dealing with FFI. 
/// /// # Safety /// /// You must ensure that there are no data races when dereferencing the /// returned pointer, for example if the current thread logically owns a /// `RwLockReadGuard` or `RwLockWriteGuard` but that guard has been discarded /// using `mem::forget`. #[inline] pub fn data_ptr(&self) -> *mut T { self.data.get() } /// Creates a new `RwLockReadGuard` without checking if the lock is held. /// /// # Safety /// /// This method must only be called if the thread logically holds a read lock. /// /// This function does not increment the read count of the lock. Calling this function when a /// guard has already been produced is undefined behaviour unless the guard was forgotten /// with `mem::forget`.` #[cfg(feature = "arc_lock")] #[inline] pub unsafe fn make_arc_read_guard_unchecked(self: &Arc<Self>) -> ArcRwLockReadGuard<R, T> { ArcRwLockReadGuard { rwlock: self.clone(), marker: PhantomData, } } /// Creates a new `RwLockWriteGuard` without checking if the lock is held. /// /// # Safety /// /// This method must only be called if the thread logically holds a write lock. /// /// Calling this function when a guard has already been produced is undefined behaviour unless /// the guard was forgotten with `mem::forget`. #[cfg(feature = "arc_lock")] #[inline] pub unsafe fn make_arc_write_guard_unchecked(self: &Arc<Self>) -> ArcRwLockWriteGuard<R, T> { ArcRwLockWriteGuard { rwlock: self.clone(), marker: PhantomData, } } /// Locks this `RwLock` with read access, through an `Arc`. /// /// This method is similar to the `read` method; however, it requires the `RwLock` to be inside of an `Arc` /// and the resulting read guard has no lifetime requirements. #[cfg(feature = "arc_lock")] #[inline] pub fn read_arc(self: &Arc<Self>) -> ArcRwLockReadGuard<R, T> { self.raw.lock_shared(); // SAFETY: locking guarantee is upheld unsafe { self.make_arc_read_guard_unchecked() } } /// Attempts to lock this `RwLock` with read access, through an `Arc`. 
/// /// This method is similar to the `try_read` method; however, it requires the `RwLock` to be inside of an /// `Arc` and the resulting read guard has no lifetime requirements. #[cfg(feature = "arc_lock")] #[inline] pub fn try_read_arc(self: &Arc<Self>) -> Option<ArcRwLockReadGuard<R, T>> { if self.raw.try_lock_shared() { // SAFETY: locking guarantee is upheld Some(unsafe { self.make_arc_read_guard_unchecked() }) } else { None } } /// Locks this `RwLock` with write access, through an `Arc`. /// /// This method is similar to the `write` method; however, it requires the `RwLock` to be inside of an `Arc` /// and the resulting write guard has no lifetime requirements. #[cfg(feature = "arc_lock")] #[inline] pub fn write_arc(self: &Arc<Self>) -> ArcRwLockWriteGuard<R, T> { self.raw.lock_exclusive(); // SAFETY: locking guarantee is upheld unsafe { self.make_arc_write_guard_unchecked() } } /// Attempts to lock this `RwLock` with writ access, through an `Arc`. /// /// This method is similar to the `try_write` method; however, it requires the `RwLock` to be inside of an /// `Arc` and the resulting write guard has no lifetime requirements. #[cfg(feature = "arc_lock")] #[inline] pub fn try_write_arc(self: &Arc<Self>) -> Option<ArcRwLockWriteGuard<R, T>> { if self.raw.try_lock_exclusive() { // SAFETY: locking guarantee is upheld Some(unsafe { self.make_arc_write_guard_unchecked() }) } else { None } } } impl<R: RawRwLockFair, T: ?Sized> RwLock<R, T> { /// Forcibly unlocks a read lock using a fair unlock protocol. /// /// This is useful when combined with `mem::forget` to hold a lock without /// the need to maintain a `RwLockReadGuard` object alive, for example when /// dealing with FFI. /// /// # Safety /// /// This method must only be called if the current thread logically owns a /// `RwLockReadGuard` but that guard has be discarded using `mem::forget`. /// Behavior is undefined if a rwlock is read-unlocked when not read-locked. 
#[inline] pub unsafe fn force_unlock_read_fair(&self) { self.raw.unlock_shared_fair(); } /// Forcibly unlocks a write lock using a fair unlock protocol. /// /// This is useful when combined with `mem::forget` to hold a lock without /// the need to maintain a `RwLockWriteGuard` object alive, for example when /// dealing with FFI. /// /// # Safety /// /// This method must only be called if the current thread logically owns a /// `RwLockWriteGuard` but that guard has be discarded using `mem::forget`. /// Behavior is undefined if a rwlock is write-unlocked when not write-locked. #[inline] pub unsafe fn force_unlock_write_fair(&self) { self.raw.unlock_exclusive_fair(); } } impl<R: RawRwLockTimed, T: ?Sized> RwLock<R, T> { /// Attempts to acquire this `RwLock` with shared read access until a timeout /// is reached. /// /// If the access could not be granted before the timeout expires, then /// `None` is returned. Otherwise, an RAII guard is returned which will /// release the shared access when it is dropped. #[inline] pub fn try_read_for(&self, timeout: R::Duration) -> Option<RwLockReadGuard<'_, R, T>> { if self.raw.try_lock_shared_for(timeout) { // SAFETY: The lock is held, as required. Some(unsafe { self.make_read_guard_unchecked() }) } else { None } } /// Attempts to acquire this `RwLock` with shared read access until a timeout /// is reached. /// /// If the access could not be granted before the timeout expires, then /// `None` is returned. Otherwise, an RAII guard is returned which will /// release the shared access when it is dropped. #[inline] pub fn try_read_until(&self, timeout: R::Instant) -> Option<RwLockReadGuard<'_, R, T>> { if self.raw.try_lock_shared_until(timeout) { // SAFETY: The lock is held, as required. Some(unsafe { self.make_read_guard_unchecked() }) } else { None } } /// Attempts to acquire this `RwLock` with exclusive write access until a /// timeout is reached. 
/// /// If the access could not be granted before the timeout expires, then /// `None` is returned. Otherwise, an RAII guard is returned which will /// release the exclusive access when it is dropped. #[inline] pub fn try_write_for(&self, timeout: R::Duration) -> Option<RwLockWriteGuard<'_, R, T>> { if self.raw.try_lock_exclusive_for(timeout) { // SAFETY: The lock is held, as required. Some(unsafe { self.make_write_guard_unchecked() }) } else { None } } /// Attempts to acquire this `RwLock` with exclusive write access until a /// timeout is reached. /// /// If the access could not be granted before the timeout expires, then /// `None` is returned. Otherwise, an RAII guard is returned which will /// release the exclusive access when it is dropped. #[inline] pub fn try_write_until(&self, timeout: R::Instant) -> Option<RwLockWriteGuard<'_, R, T>> { if self.raw.try_lock_exclusive_until(timeout) { // SAFETY: The lock is held, as required. Some(unsafe { self.make_write_guard_unchecked() }) } else { None } } /// Attempts to acquire this `RwLock` with read access until a timeout is reached, through an `Arc`. /// /// This method is similar to the `try_read_for` method; however, it requires the `RwLock` to be inside of an /// `Arc` and the resulting read guard has no lifetime requirements. #[cfg(feature = "arc_lock")] #[inline] pub fn try_read_arc_for( self: &Arc<Self>, timeout: R::Duration, ) -> Option<ArcRwLockReadGuard<R, T>> { if self.raw.try_lock_shared_for(timeout) { // SAFETY: locking guarantee is upheld Some(unsafe { self.make_arc_read_guard_unchecked() }) } else { None } } /// Attempts to acquire this `RwLock` with read access until a timeout is reached, through an `Arc`. /// /// This method is similar to the `try_read_until` method; however, it requires the `RwLock` to be inside of /// an `Arc` and the resulting read guard has no lifetime requirements. 
    #[cfg(feature = "arc_lock")]
    #[inline]
    pub fn try_read_arc_until(
        self: &Arc<Self>,
        timeout: R::Instant,
    ) -> Option<ArcRwLockReadGuard<R, T>> {
        if self.raw.try_lock_shared_until(timeout) {
            // SAFETY: locking guarantee is upheld
            Some(unsafe { self.make_arc_read_guard_unchecked() })
        } else {
            None
        }
    }

    /// Attempts to acquire this `RwLock` with write access until a timeout is reached, through an `Arc`.
    ///
    /// This method is similar to the `try_write_for` method; however, it requires the `RwLock` to be inside of
    /// an `Arc` and the resulting write guard has no lifetime requirements.
    #[cfg(feature = "arc_lock")]
    #[inline]
    pub fn try_write_arc_for(
        self: &Arc<Self>,
        timeout: R::Duration,
    ) -> Option<ArcRwLockWriteGuard<R, T>> {
        if self.raw.try_lock_exclusive_for(timeout) {
            // SAFETY: locking guarantee is upheld
            Some(unsafe { self.make_arc_write_guard_unchecked() })
        } else {
            None
        }
    }

    /// Attempts to acquire this `RwLock` with write access until a timeout is reached, through an `Arc`.
    ///
    /// This method is similar to the `try_write_until` method; however, it requires the `RwLock` to be inside of
    /// an `Arc` and the resulting write guard has no lifetime requirements.
    #[cfg(feature = "arc_lock")]
    #[inline]
    pub fn try_write_arc_until(
        self: &Arc<Self>,
        timeout: R::Instant,
    ) -> Option<ArcRwLockWriteGuard<R, T>> {
        if self.raw.try_lock_exclusive_until(timeout) {
            // SAFETY: locking guarantee is upheld
            Some(unsafe { self.make_arc_write_guard_unchecked() })
        } else {
            None
        }
    }
}

impl<R: RawRwLockRecursive, T: ?Sized> RwLock<R, T> {
    /// Locks this `RwLock` with shared read access, blocking the current thread
    /// until it can be acquired.
    ///
    /// The calling thread will be blocked until there are no more writers which
    /// hold the lock. There may be other readers currently inside the lock when
    /// this method returns.
    ///
    /// Unlike `read`, this method is guaranteed to succeed without blocking if
    /// another read lock is held at the time of the call.
This allows a thread /// to recursively lock a `RwLock`. However using this method can cause /// writers to starve since readers no longer block if a writer is waiting /// for the lock. /// /// Returns an RAII guard which will release this thread's shared access /// once it is dropped. #[inline] pub fn read_recursive(&self) -> RwLockReadGuard<'_, R, T> { self.raw.lock_shared_recursive(); // SAFETY: The lock is held, as required. unsafe { self.make_read_guard_unchecked() } } /// Attempts to acquire this `RwLock` with shared read access. /// /// If the access could not be granted at this time, then `None` is returned. /// Otherwise, an RAII guard is returned which will release the shared access /// when it is dropped. /// /// This method is guaranteed to succeed if another read lock is held at the /// time of the call. See the documentation for `read_recursive` for details. /// /// This function does not block. #[inline] pub fn try_read_recursive(&self) -> Option<RwLockReadGuard<'_, R, T>> { if self.raw.try_lock_shared_recursive() { // SAFETY: The lock is held, as required. Some(unsafe { self.make_read_guard_unchecked() }) } else { None } } /// Locks this `RwLock` with shared read access, through an `Arc`. /// /// This method is similar to the `read_recursive` method; however, it requires the `RwLock` to be inside of /// an `Arc` and the resulting read guard has no lifetime requirements. #[cfg(feature = "arc_lock")] #[inline] pub fn read_arc_recursive(self: &Arc<Self>) -> ArcRwLockReadGuard<R, T> { self.raw.lock_shared_recursive(); // SAFETY: locking guarantee is upheld unsafe { self.make_arc_read_guard_unchecked() } } /// Attempts to lock this `RwLock` with shared read access, through an `Arc`. /// /// This method is similar to the `try_read_recursive` method; however, it requires the `RwLock` to be inside /// of an `Arc` and the resulting read guard has no lifetime requirements. 
#[cfg(feature = "arc_lock")] #[inline] pub fn try_read_recursive_arc(self: &Arc<Self>) -> Option<ArcRwLockReadGuard<R, T>> { if self.raw.try_lock_shared_recursive() { // SAFETY: locking guarantee is upheld Some(unsafe { self.make_arc_read_guard_unchecked() }) } else { None } } } impl<R: RawRwLockRecursiveTimed, T: ?Sized> RwLock<R, T> { /// Attempts to acquire this `RwLock` with shared read access until a timeout /// is reached. /// /// If the access could not be granted before the timeout expires, then /// `None` is returned. Otherwise, an RAII guard is returned which will /// release the shared access when it is dropped. /// /// This method is guaranteed to succeed without blocking if another read /// lock is held at the time of the call. See the documentation for /// `read_recursive` for details. #[inline] pub fn try_read_recursive_for( &self, timeout: R::Duration, ) -> Option<RwLockReadGuard<'_, R, T>> { if self.raw.try_lock_shared_recursive_for(timeout) { // SAFETY: The lock is held, as required. Some(unsafe { self.make_read_guard_unchecked() }) } else { None } } /// Attempts to acquire this `RwLock` with shared read access until a timeout /// is reached. /// /// If the access could not be granted before the timeout expires, then /// `None` is returned. Otherwise, an RAII guard is returned which will /// release the shared access when it is dropped. #[inline] pub fn try_read_recursive_until( &self, timeout: R::Instant, ) -> Option<RwLockReadGuard<'_, R, T>> { if self.raw.try_lock_shared_recursive_until(timeout) { // SAFETY: The lock is held, as required. Some(unsafe { self.make_read_guard_unchecked() }) } else { None } } /// Attempts to lock this `RwLock` with read access until a timeout is reached, through an `Arc`. /// /// This method is similar to the `try_read_recursive_for` method; however, it requires the `RwLock` to be /// inside of an `Arc` and the resulting read guard has no lifetime requirements. 
#[cfg(feature = "arc_lock")] #[inline] pub fn try_read_arc_recursive_for( self: &Arc<Self>, timeout: R::Duration, ) -> Option<ArcRwLockReadGuard<R, T>> { if self.raw.try_lock_shared_recursive_for(timeout) { // SAFETY: locking guarantee is upheld Some(unsafe { self.make_arc_read_guard_unchecked() }) } else { None } } /// Attempts to lock this `RwLock` with read access until a timeout is reached, through an `Arc`. /// /// This method is similar to the `try_read_recursive_until` method; however, it requires the `RwLock` to be /// inside of an `Arc` and the resulting read guard has no lifetime requirements. #[cfg(feature = "arc_lock")] #[inline] pub fn try_read_arc_recursive_until( self: &Arc<Self>, timeout: R::Instant, ) -> Option<ArcRwLockReadGuard<R, T>> { if self.raw.try_lock_shared_recursive_until(timeout) { // SAFETY: locking guarantee is upheld Some(unsafe { self.make_arc_read_guard_unchecked() }) } else { None } } } impl<R: RawRwLockUpgrade, T: ?Sized> RwLock<R, T> { /// Creates a new `RwLockUpgradableReadGuard` without checking if the lock is held. /// /// # Safety /// /// This method must only be called if the thread logically holds an upgradable read lock. /// /// This function does not increment the read count of the lock. Calling this function when a /// guard has already been produced is undefined behaviour unless the guard was forgotten /// with `mem::forget`. #[inline] pub unsafe fn make_upgradable_guard_unchecked(&self) -> RwLockUpgradableReadGuard<'_, R, T> { RwLockUpgradableReadGuard { rwlock: self, marker: PhantomData, } } /// Locks this `RwLock` with upgradable read access, blocking the current thread /// until it can be acquired. /// /// The calling thread will be blocked until there are no more writers or other /// upgradable reads which hold the lock. There may be other readers currently /// inside the lock when this method returns. /// /// Returns an RAII guard which will release this thread's shared access /// once it is dropped. 
    #[inline]
    pub fn upgradable_read(&self) -> RwLockUpgradableReadGuard<'_, R, T> {
        self.raw.lock_upgradable();
        // SAFETY: The lock is held, as required.
        unsafe { self.make_upgradable_guard_unchecked() }
    }

    /// Attempts to acquire this `RwLock` with upgradable read access.
    ///
    /// If the access could not be granted at this time, then `None` is returned.
    /// Otherwise, an RAII guard is returned which will release the shared access
    /// when it is dropped.
    ///
    /// This function does not block.
    #[inline]
    pub fn try_upgradable_read(&self) -> Option<RwLockUpgradableReadGuard<'_, R, T>> {
        if self.raw.try_lock_upgradable() {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.make_upgradable_guard_unchecked() })
        } else {
            None
        }
    }

    /// Creates a new `ArcRwLockUpgradableReadGuard` without checking if the lock is held.
    ///
    /// # Safety
    ///
    /// This method must only be called if the thread logically holds an upgradable read lock.
    ///
    /// This function does not increment the read count of the lock. Calling this function when a
    /// guard has already been produced is undefined behaviour unless the guard was forgotten
    /// with `mem::forget`.
    #[cfg(feature = "arc_lock")]
    #[inline]
    pub unsafe fn make_upgradable_arc_guard_unchecked(
        self: &Arc<Self>,
    ) -> ArcRwLockUpgradableReadGuard<R, T> {
        ArcRwLockUpgradableReadGuard {
            rwlock: self.clone(),
            marker: PhantomData,
        }
    }

    /// Locks this `RwLock` with upgradable read access, through an `Arc`.
    ///
    /// This method is similar to the `upgradable_read` method; however, it requires the `RwLock` to be
    /// inside of an `Arc` and the resulting read guard has no lifetime requirements.
    #[cfg(feature = "arc_lock")]
    #[inline]
    pub fn upgradable_read_arc(self: &Arc<Self>) -> ArcRwLockUpgradableReadGuard<R, T> {
        self.raw.lock_upgradable();
        // SAFETY: locking guarantee is upheld
        unsafe { self.make_upgradable_arc_guard_unchecked() }
    }

    /// Attempts to lock this `RwLock` with upgradable read access, through an `Arc`.
/// /// This method is similar to the `try_upgradable_read` method; however, it requires the `RwLock` to be /// inside of an `Arc` and the resulting read guard has no lifetime requirements. #[cfg(feature = "arc_lock")] #[inline] pub fn try_upgradable_read_arc(self: &Arc<Self>) -> Option<ArcRwLockUpgradableReadGuard<R, T>> { if self.raw.try_lock_upgradable() { // SAFETY: locking guarantee is upheld Some(unsafe { self.make_upgradable_arc_guard_unchecked() }) } else { None } } } impl<R: RawRwLockUpgradeTimed, T: ?Sized> RwLock<R, T> { /// Attempts to acquire this `RwLock` with upgradable read access until a timeout /// is reached. /// /// If the access could not be granted before the timeout expires, then /// `None` is returned. Otherwise, an RAII guard is returned which will /// release the shared access when it is dropped. #[inline] pub fn try_upgradable_read_for( &self, timeout: R::Duration, ) -> Option<RwLockUpgradableReadGuard<'_, R, T>> { if self.raw.try_lock_upgradable_for(timeout) { // SAFETY: The lock is held, as required. Some(unsafe { self.make_upgradable_guard_unchecked() }) } else { None } } /// Attempts to acquire this `RwLock` with upgradable read access until a timeout /// is reached. /// /// If the access could not be granted before the timeout expires, then /// `None` is returned. Otherwise, an RAII guard is returned which will /// release the shared access when it is dropped. #[inline] pub fn try_upgradable_read_until( &self, timeout: R::Instant, ) -> Option<RwLockUpgradableReadGuard<'_, R, T>> { if self.raw.try_lock_upgradable_until(timeout) { // SAFETY: The lock is held, as required. Some(unsafe { self.make_upgradable_guard_unchecked() }) } else { None } } /// Attempts to lock this `RwLock` with upgradable access until a timeout is reached, through an `Arc`. 
/// /// This method is similar to the `try_upgradable_read_for` method; however, it requires the `RwLock` to be /// inside of an `Arc` and the resulting read guard has no lifetime requirements. #[cfg(feature = "arc_lock")] #[inline] pub fn try_upgradable_read_arc_for( self: &Arc<Self>, timeout: R::Duration, ) -> Option<ArcRwLockUpgradableReadGuard<R, T>> { if self.raw.try_lock_upgradable_for(timeout) { // SAFETY: locking guarantee is upheld Some(unsafe { self.make_upgradable_arc_guard_unchecked() }) } else { None } } /// Attempts to lock this `RwLock` with upgradable access until a timeout is reached, through an `Arc`. /// /// This method is similar to the `try_upgradable_read_until` method; however, it requires the `RwLock` to be /// inside of an `Arc` and the resulting read guard has no lifetime requirements. #[cfg(feature = "arc_lock")] #[inline] pub fn try_upgradable_read_arc_until( self: &Arc<Self>, timeout: R::Instant, ) -> Option<ArcRwLockUpgradableReadGuard<R, T>> { if self.raw.try_lock_upgradable_until(timeout) { // SAFETY: locking guarantee is upheld Some(unsafe { self.make_upgradable_arc_guard_unchecked() }) } else { None } } } impl<R: RawRwLock, T: ?Sized + Default> Default for RwLock<R, T> { #[inline] fn default() -> RwLock<R, T> { RwLock::new(Default::default()) } } impl<R: RawRwLock, T> From<T> for RwLock<R, T> { #[inline] fn from(t: T) -> RwLock<R, T> { RwLock::new(t) } } impl<R: RawRwLock, T: ?Sized + fmt::Debug> fmt::Debug for RwLock<R, T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut d = f.debug_struct("RwLock"); match self.try_read() { Some(guard) => d.field("data", &&*guard), None => { // Additional format_args! here is to remove quotes around <locked> in debug output. d.field("data", &format_args!("<locked>")) } }; d.finish() } } /// RAII structure used to release the shared read access of a lock when /// dropped. 
#[clippy::has_significant_drop] #[must_use = "if unused the RwLock will immediately unlock"] pub struct RwLockReadGuard<'a, R: RawRwLock, T: ?Sized> { rwlock: &'a RwLock<R, T>, marker: PhantomData<(&'a T, R::GuardMarker)>, } unsafe impl<R: RawRwLock + Sync, T: Sync + ?Sized> Sync for RwLockReadGuard<'_, R, T> {} impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> { /// Returns a reference to the original reader-writer lock object. pub fn rwlock(s: &Self) -> &'a RwLock<R, T> { s.rwlock } /// Make a new `MappedRwLockReadGuard` for a component of the locked data. /// /// This operation cannot fail as the `RwLockReadGuard` passed /// in already locked the data. /// /// This is an associated function that needs to be /// used as `RwLockReadGuard::map(...)`. A method would interfere with methods of /// the same name on the contents of the locked data. #[inline] pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockReadGuard<'a, R, U> where F: FnOnce(&T) -> &U, { let raw = &s.rwlock.raw; let data = f(unsafe { &*s.rwlock.data.get() }); mem::forget(s); MappedRwLockReadGuard { raw, data, marker: PhantomData, } } /// Attempts to make a new `MappedRwLockReadGuard` for a component of the /// locked data. Returns the original guard if the closure returns `None`. /// /// This operation cannot fail as the `RwLockReadGuard` passed /// in already locked the data. /// /// This is an associated function that needs to be /// used as `RwLockReadGuard::try_map(...)`. A method would interfere with methods of /// the same name on the contents of the locked data. #[inline] pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockReadGuard<'a, R, U>, Self> where F: FnOnce(&T) -> Option<&U>, { let raw = &s.rwlock.raw; let data = match f(unsafe { &*s.rwlock.data.get() }) { Some(data) => data, None => return Err(s), }; mem::forget(s); Ok(MappedRwLockReadGuard { raw, data, marker: PhantomData, }) } /// Temporarily unlocks the `RwLock` to execute the given function. 
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the `RwLock`.
    #[inline]
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: An RwLockReadGuard always holds a shared lock.
        unsafe {
            s.rwlock.raw.unlock_shared();
        }
        defer!(s.rwlock.raw.lock_shared());
        f()
    }
}

impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> {
    /// Unlocks the `RwLock` using a fair unlock protocol.
    ///
    /// By default, `RwLock` is unfair and allows the current thread to re-lock
    /// the `RwLock` before another has the chance to acquire the lock, even if
    /// that thread has been blocked on the `RwLock` for a long time. This is
    /// the default because it allows much higher throughput as it avoids
    /// forcing a context switch on every `RwLock` unlock. This can result in one
    /// thread acquiring a `RwLock` many more times than other threads.
    ///
    /// However in some cases it can be beneficial to ensure fairness by forcing
    /// the lock to pass on to a waiting thread if there is one. This is done by
    /// using this method instead of dropping the `RwLockReadGuard` normally.
    #[inline]
    pub fn unlock_fair(s: Self) {
        // Safety: An RwLockReadGuard always holds a shared lock.
        unsafe {
            s.rwlock.raw.unlock_shared_fair();
        }
        mem::forget(s);
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// The `RwLock` is unlocked using a fair unlock protocol.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the `RwLock`.
    #[inline]
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: An RwLockReadGuard always holds a shared lock.
        unsafe {
            s.rwlock.raw.unlock_shared_fair();
        }
        defer!(s.rwlock.raw.lock_shared());
        f()
    }

    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
/// /// This method is functionally equivalent to calling `unlock_fair` followed /// by `read`, however it can be much more efficient in the case where there /// are no waiting threads. #[inline] pub fn bump(s: &mut Self) { // Safety: An RwLockReadGuard always holds a shared lock. unsafe { s.rwlock.raw.bump_shared(); } } } impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for RwLockReadGuard<'a, R, T> { type Target = T; #[inline] fn deref(&self) -> &T { unsafe { &*self.rwlock.data.get() } } } impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockReadGuard<'a, R, T> { #[inline] fn drop(&mut self) { // Safety: An RwLockReadGuard always holds a shared lock. unsafe { self.rwlock.raw.unlock_shared(); } } } impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for RwLockReadGuard<'a, R, T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Debug::fmt(&**self, f) } } impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display for RwLockReadGuard<'a, R, T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { (**self).fmt(f) } } #[cfg(feature = "owning_ref")] unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for RwLockReadGuard<'a, R, T> {} /// An RAII rwlock guard returned by the `Arc` locking operations on `RwLock`. /// /// This is similar to the `RwLockReadGuard` struct, except instead of using a reference to unlock the `RwLock` /// it uses an `Arc<RwLock>`. This has several advantages, most notably that it has an `'static` lifetime. #[cfg(feature = "arc_lock")] #[clippy::has_significant_drop] #[must_use = "if unused the RwLock will immediately unlock"] pub struct ArcRwLockReadGuard<R: RawRwLock, T: ?Sized> { rwlock: Arc<RwLock<R, T>>, marker: PhantomData<R::GuardMarker>, } #[cfg(feature = "arc_lock")] impl<R: RawRwLock, T: ?Sized> ArcRwLockReadGuard<R, T> { /// Returns a reference to the rwlock, contained in its `Arc`. 
pub fn rwlock(s: &Self) -> &Arc<RwLock<R, T>> { &s.rwlock } /// Temporarily unlocks the `RwLock` to execute the given function. /// /// This is functionally identical to the `unlocked` method on [`RwLockReadGuard`]. #[inline] pub fn unlocked<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U, { // Safety: An RwLockReadGuard always holds a shared lock. unsafe { s.rwlock.raw.unlock_shared(); } defer!(s.rwlock.raw.lock_shared()); f() } } #[cfg(feature = "arc_lock")] impl<R: RawRwLockFair, T: ?Sized> ArcRwLockReadGuard<R, T> { /// Unlocks the `RwLock` using a fair unlock protocol. /// /// This is functionally identical to the `unlock_fair` method on [`RwLockReadGuard`]. #[inline] pub fn unlock_fair(s: Self) { // Safety: An RwLockReadGuard always holds a shared lock. unsafe { s.rwlock.raw.unlock_shared_fair(); } // SAFETY: ensure the Arc has its refcount decremented let mut s = ManuallyDrop::new(s); unsafe { ptr::drop_in_place(&mut s.rwlock) }; } /// Temporarily unlocks the `RwLock` to execute the given function. /// /// This is functionally identical to the `unlocked_fair` method on [`RwLockReadGuard`]. #[inline] pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U, { // Safety: An RwLockReadGuard always holds a shared lock. unsafe { s.rwlock.raw.unlock_shared_fair(); } defer!(s.rwlock.raw.lock_shared()); f() } /// Temporarily yields the `RwLock` to a waiting thread if there is one. /// /// This is functionally identical to the `bump` method on [`RwLockReadGuard`]. #[inline] pub fn bump(s: &mut Self) { // Safety: An RwLockReadGuard always holds a shared lock. 
unsafe { s.rwlock.raw.bump_shared(); } } } #[cfg(feature = "arc_lock")] impl<R: RawRwLock, T: ?Sized> Deref for ArcRwLockReadGuard<R, T> { type Target = T; #[inline] fn deref(&self) -> &T { unsafe { &*self.rwlock.data.get() } } } #[cfg(feature = "arc_lock")] impl<R: RawRwLock, T: ?Sized> Drop for ArcRwLockReadGuard<R, T> { #[inline] fn drop(&mut self) { // Safety: An RwLockReadGuard always holds a shared lock. unsafe { self.rwlock.raw.unlock_shared(); } } } #[cfg(feature = "arc_lock")] impl<R: RawRwLock, T: fmt::Debug + ?Sized> fmt::Debug for ArcRwLockReadGuard<R, T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Debug::fmt(&**self, f) } } #[cfg(feature = "arc_lock")] impl<R: RawRwLock, T: fmt::Display + ?Sized> fmt::Display for ArcRwLockReadGuard<R, T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { (**self).fmt(f) } } /// RAII structure used to release the exclusive write access of a lock when /// dropped. #[clippy::has_significant_drop] #[must_use = "if unused the RwLock will immediately unlock"] pub struct RwLockWriteGuard<'a, R: RawRwLock, T: ?Sized> { rwlock: &'a RwLock<R, T>, marker: PhantomData<(&'a mut T, R::GuardMarker)>, } unsafe impl<R: RawRwLock + Sync, T: Sync + ?Sized> Sync for RwLockWriteGuard<'_, R, T> {} impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> { /// Returns a reference to the original reader-writer lock object. pub fn rwlock(s: &Self) -> &'a RwLock<R, T> { s.rwlock } /// Make a new `MappedRwLockWriteGuard` for a component of the locked data. /// /// This operation cannot fail as the `RwLockWriteGuard` passed /// in already locked the data. /// /// This is an associated function that needs to be /// used as `RwLockWriteGuard::map(...)`. A method would interfere with methods of /// the same name on the contents of the locked data. 
    #[inline]
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockWriteGuard<'a, R, U>
    where
        F: FnOnce(&mut T) -> &mut U,
    {
        let raw = &s.rwlock.raw;
        let data = f(unsafe { &mut *s.rwlock.data.get() });
        mem::forget(s);
        MappedRwLockWriteGuard {
            raw,
            data,
            marker: PhantomData,
        }
    }

    /// Attempts to make a new `MappedRwLockWriteGuard` for a component of the
    /// locked data. The original guard is returned if the closure returns `None`.
    ///
    /// This operation cannot fail as the `RwLockWriteGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `RwLockWriteGuard::try_map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockWriteGuard<'a, R, U>, Self>
    where
        F: FnOnce(&mut T) -> Option<&mut U>,
    {
        let raw = &s.rwlock.raw;
        let data = match f(unsafe { &mut *s.rwlock.data.get() }) {
            Some(data) => data,
            None => return Err(s),
        };
        mem::forget(s);
        Ok(MappedRwLockWriteGuard {
            raw,
            data,
            marker: PhantomData,
        })
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the `RwLock`.
    #[inline]
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
        unsafe {
            s.rwlock.raw.unlock_exclusive();
        }
        defer!(s.rwlock.raw.lock_exclusive());
        f()
    }
}

impl<'a, R: RawRwLockDowngrade + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
    /// Atomically downgrades a write lock into a read lock without allowing any
    /// writers to take exclusive access of the lock in the meantime.
    ///
    /// Note that if there are any writers currently waiting to take the lock
    /// then other readers may not be able to acquire the lock even if it was
    /// downgraded.
pub fn downgrade(s: Self) -> RwLockReadGuard<'a, R, T> { // Safety: An RwLockWriteGuard always holds an exclusive lock. unsafe { s.rwlock.raw.downgrade(); } let rwlock = s.rwlock; mem::forget(s); RwLockReadGuard { rwlock, marker: PhantomData, } } } impl<'a, R: RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> { /// Atomically downgrades a write lock into an upgradable read lock without allowing any /// writers to take exclusive access of the lock in the meantime. /// /// Note that if there are any writers currently waiting to take the lock /// then other readers may not be able to acquire the lock even if it was /// downgraded. pub fn downgrade_to_upgradable(s: Self) -> RwLockUpgradableReadGuard<'a, R, T> { // Safety: An RwLockWriteGuard always holds an exclusive lock. unsafe { s.rwlock.raw.downgrade_to_upgradable(); } let rwlock = s.rwlock; mem::forget(s); RwLockUpgradableReadGuard { rwlock, marker: PhantomData, } } } impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> { /// Unlocks the `RwLock` using a fair unlock protocol. /// /// By default, `RwLock` is unfair and allow the current thread to re-lock /// the `RwLock` before another has the chance to acquire the lock, even if /// that thread has been blocked on the `RwLock` for a long time. This is /// the default because it allows much higher throughput as it avoids /// forcing a context switch on every `RwLock` unlock. This can result in one /// thread acquiring a `RwLock` many more times than other threads. /// /// However in some cases it can be beneficial to ensure fairness by forcing /// the lock to pass on to a waiting thread if there is one. This is done by /// using this method instead of dropping the `RwLockWriteGuard` normally. #[inline] pub fn unlock_fair(s: Self) { // Safety: An RwLockWriteGuard always holds an exclusive lock. 
        unsafe {
            s.rwlock.raw.unlock_exclusive_fair();
        }
        mem::forget(s);
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// The `RwLock` is unlocked using a fair unlock protocol.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the `RwLock`.
    #[inline]
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
        unsafe {
            s.rwlock.raw.unlock_exclusive_fair();
        }
        defer!(s.rwlock.raw.lock_exclusive());
        f()
    }

    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_fair` followed
    /// by `write`, however it can be much more efficient in the case where there
    /// are no waiting threads.
    #[inline]
    pub fn bump(s: &mut Self) {
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
        unsafe {
            s.rwlock.raw.bump_exclusive();
        }
    }
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for RwLockWriteGuard<'a, R, T> {
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        unsafe { &*self.rwlock.data.get() }
    }
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> DerefMut for RwLockWriteGuard<'a, R, T> {
    #[inline]
    fn deref_mut(&mut self) -> &mut T {
        unsafe { &mut *self.rwlock.data.get() }
    }
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockWriteGuard<'a, R, T> {
    #[inline]
    fn drop(&mut self) {
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
        unsafe {
            self.rwlock.raw.unlock_exclusive();
        }
    }
}

impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for RwLockWriteGuard<'a, R, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
    for RwLockWriteGuard<'a, R, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}

#[cfg(feature = "owning_ref")]
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for RwLockWriteGuard<'a, R, T> {}

/// An RAII rwlock guard returned by the `Arc` locking operations on `RwLock`.
///
/// This is similar to the `RwLockWriteGuard` struct, except instead of using a reference to unlock the `RwLock`
/// it uses an `Arc<RwLock>`. This has several advantages, most notably that it has a `'static` lifetime.
#[cfg(feature = "arc_lock")]
#[clippy::has_significant_drop]
#[must_use = "if unused the RwLock will immediately unlock"]
pub struct ArcRwLockWriteGuard<R: RawRwLock, T: ?Sized> {
    rwlock: Arc<RwLock<R, T>>,
    marker: PhantomData<R::GuardMarker>,
}

#[cfg(feature = "arc_lock")]
impl<R: RawRwLock, T: ?Sized> ArcRwLockWriteGuard<R, T> {
    /// Returns a reference to the rwlock, contained in its `Arc`.
    pub fn rwlock(s: &Self) -> &Arc<RwLock<R, T>> {
        &s.rwlock
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// This is functionally equivalent to the `unlocked` method on [`RwLockWriteGuard`].
    #[inline]
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
        unsafe {
            s.rwlock.raw.unlock_exclusive();
        }
        defer!(s.rwlock.raw.lock_exclusive());
        f()
    }
}

#[cfg(feature = "arc_lock")]
impl<R: RawRwLockDowngrade, T: ?Sized> ArcRwLockWriteGuard<R, T> {
    /// Atomically downgrades a write lock into a read lock without allowing any
    /// writers to take exclusive access of the lock in the meantime.
    ///
    /// This is functionally equivalent to the `downgrade` method on [`RwLockWriteGuard`].
    pub fn downgrade(s: Self) -> ArcRwLockReadGuard<R, T> {
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
        unsafe {
            s.rwlock.raw.downgrade();
        }

        // SAFETY: prevent the arc's refcount from changing using ManuallyDrop and ptr::read
        let s = ManuallyDrop::new(s);
        let rwlock = unsafe { ptr::read(&s.rwlock) };

        ArcRwLockReadGuard {
            rwlock,
            marker: PhantomData,
        }
    }
}

#[cfg(feature = "arc_lock")]
impl<R: RawRwLockUpgradeDowngrade, T: ?Sized> ArcRwLockWriteGuard<R, T> {
    /// Atomically downgrades a write lock into an upgradable read lock without allowing any
    /// writers to take exclusive access of the lock in the meantime.
    ///
    /// This is functionally identical to the `downgrade_to_upgradable` method on [`RwLockWriteGuard`].
    pub fn downgrade_to_upgradable(s: Self) -> ArcRwLockUpgradableReadGuard<R, T> {
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
        unsafe {
            s.rwlock.raw.downgrade_to_upgradable();
        }

        // SAFETY: same as above — move the Arc out without touching its refcount.
        let s = ManuallyDrop::new(s);
        let rwlock = unsafe { ptr::read(&s.rwlock) };

        ArcRwLockUpgradableReadGuard {
            rwlock,
            marker: PhantomData,
        }
    }
}

#[cfg(feature = "arc_lock")]
impl<R: RawRwLockFair, T: ?Sized> ArcRwLockWriteGuard<R, T> {
    /// Unlocks the `RwLock` using a fair unlock protocol.
    ///
    /// This is functionally equivalent to the `unlock_fair` method on [`RwLockWriteGuard`].
    #[inline]
    pub fn unlock_fair(s: Self) {
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
        unsafe {
            s.rwlock.raw.unlock_exclusive_fair();
        }

        // SAFETY: prevent the Arc from leaking memory: drop the Arc itself (the guard's
        // Drop must not run, since the lock has already been released above).
        let mut s = ManuallyDrop::new(s);
        unsafe { ptr::drop_in_place(&mut s.rwlock) };
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// This is functionally equivalent to the `unlocked_fair` method on [`RwLockWriteGuard`].
    #[inline]
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
        unsafe {
            s.rwlock.raw.unlock_exclusive_fair();
        }
        // Re-acquire the lock when `f` returns or panics, so the guard is valid again.
        defer!(s.rwlock.raw.lock_exclusive());
        f()
    }

    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to the `bump` method on [`RwLockWriteGuard`].
    #[inline]
    pub fn bump(s: &mut Self) {
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
        unsafe {
            s.rwlock.raw.bump_exclusive();
        }
    }
}

#[cfg(feature = "arc_lock")]
impl<R: RawRwLock, T: ?Sized> Deref for ArcRwLockWriteGuard<R, T> {
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        unsafe { &*self.rwlock.data.get() }
    }
}

#[cfg(feature = "arc_lock")]
impl<R: RawRwLock, T: ?Sized> DerefMut for ArcRwLockWriteGuard<R, T> {
    #[inline]
    fn deref_mut(&mut self) -> &mut T {
        unsafe { &mut *self.rwlock.data.get() }
    }
}

#[cfg(feature = "arc_lock")]
impl<R: RawRwLock, T: ?Sized> Drop for ArcRwLockWriteGuard<R, T> {
    #[inline]
    fn drop(&mut self) {
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
        unsafe {
            self.rwlock.raw.unlock_exclusive();
        }
    }
}

#[cfg(feature = "arc_lock")]
impl<R: RawRwLock, T: fmt::Debug + ?Sized> fmt::Debug for ArcRwLockWriteGuard<R, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

#[cfg(feature = "arc_lock")]
impl<R: RawRwLock, T: fmt::Display + ?Sized> fmt::Display for ArcRwLockWriteGuard<R, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}

/// RAII structure used to release the upgradable read access of a lock when
/// dropped.
#[clippy::has_significant_drop]
#[must_use = "if unused the RwLock will immediately unlock"]
pub struct RwLockUpgradableReadGuard<'a, R: RawRwLockUpgrade, T: ?Sized> {
    rwlock: &'a RwLock<R, T>,
    marker: PhantomData<(&'a T, R::GuardMarker)>,
}

unsafe impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + Sync + 'a> Sync
    for RwLockUpgradableReadGuard<'a, R, T>
{
}

impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
    /// Returns a reference to the original reader-writer lock object.
    pub fn rwlock(s: &Self) -> &'a RwLock<R, T> {
        s.rwlock
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the `RwLock`.
    #[inline]
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
        unsafe {
            s.rwlock.raw.unlock_upgradable();
        }
        // Re-acquire the lock when `f` returns or panics, so the guard is valid again.
        defer!(s.rwlock.raw.lock_upgradable());
        f()
    }

    /// Atomically upgrades an upgradable read lock into an exclusive write lock,
    /// blocking the current thread until it can be acquired.
    pub fn upgrade(s: Self) -> RwLockWriteGuard<'a, R, T> {
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
        unsafe {
            s.rwlock.raw.upgrade();
        }
        let rwlock = s.rwlock;
        // Skip Drop: ownership of the (now exclusive) lock moves to the new guard.
        mem::forget(s);
        RwLockWriteGuard {
            rwlock,
            marker: PhantomData,
        }
    }

    /// Tries to atomically upgrade an upgradable read lock into an exclusive write lock.
    ///
    /// If the access could not be granted at this time, then the current guard is returned.
    pub fn try_upgrade(s: Self) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
        if unsafe { s.rwlock.raw.try_upgrade() } {
            let rwlock = s.rwlock;
            mem::forget(s);
            Ok(RwLockWriteGuard {
                rwlock,
                marker: PhantomData,
            })
        } else {
            Err(s)
        }
    }
}

impl<'a, R: RawRwLockUpgradeFair + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
    /// Unlocks the `RwLock` using a fair unlock protocol.
    ///
    /// By default, `RwLock` is unfair and allows the current thread to re-lock
    /// the `RwLock` before another has the chance to acquire the lock, even if
    /// that thread has been blocked on the `RwLock` for a long time. This is
    /// the default because it allows much higher throughput as it avoids
    /// forcing a context switch on every `RwLock` unlock. This can result in one
    /// thread acquiring a `RwLock` many more times than other threads.
    ///
    /// However in some cases it can be beneficial to ensure fairness by forcing
    /// the lock to pass on to a waiting thread if there is one. This is done by
    /// using this method instead of dropping the `RwLockUpgradableReadGuard` normally.
    #[inline]
    pub fn unlock_fair(s: Self) {
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
        unsafe {
            s.rwlock.raw.unlock_upgradable_fair();
        }
        // Skip Drop (which would unlock a second time): the lock is already released.
        mem::forget(s);
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// The `RwLock` is unlocked using a fair unlock protocol.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the `RwLock`.
    #[inline]
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
        unsafe {
            s.rwlock.raw.unlock_upgradable_fair();
        }
        // Re-acquire the lock when `f` returns or panics, so the guard is valid again.
        defer!(s.rwlock.raw.lock_upgradable());
        f()
    }

    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_fair` followed
    /// by `upgradable_read`, however it can be much more efficient in the case where there
    /// are no waiting threads.
    #[inline]
    pub fn bump(s: &mut Self) {
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
        unsafe {
            s.rwlock.raw.bump_upgradable();
        }
    }
}

impl<'a, R: RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
    /// Atomically downgrades an upgradable read lock into a shared read lock
    /// without allowing any writers to take exclusive access of the lock in the
    /// meantime.
    ///
    /// Note that if there are any writers currently waiting to take the lock
    /// then other readers may not be able to acquire the lock even if it was
    /// downgraded.
    pub fn downgrade(s: Self) -> RwLockReadGuard<'a, R, T> {
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
        unsafe {
            s.rwlock.raw.downgrade_upgradable();
        }
        let rwlock = s.rwlock;
        // Skip Drop: ownership of the (now shared) lock moves to the new guard.
        mem::forget(s);
        RwLockReadGuard {
            rwlock,
            marker: PhantomData,
        }
    }

    /// First, atomically upgrades an upgradable read lock into an exclusive write lock,
    /// blocking the current thread until it can be acquired.
    ///
    /// Then, calls the provided closure with an exclusive reference to the lock's data.
    ///
    /// Finally, atomically downgrades the lock back to an upgradable read lock.
    /// The closure's return value is returned.
    ///
    /// This function only requires a mutable reference to the guard, unlike
    /// `upgrade` which takes the guard by value.
    pub fn with_upgraded<Ret, F: FnOnce(&mut T) -> Ret>(&mut self, f: F) -> Ret {
        unsafe {
            self.rwlock.raw.upgrade();
        }

        // Safety: We just upgraded the lock, so we have mutable access to the data.
        // This will restore the state the lock was in at the start of the function.
        defer!(unsafe { self.rwlock.raw.downgrade_to_upgradable() });

        // Safety: We upgraded the lock, so we have mutable access to the data.
        // When this function returns, whether by drop or panic,
        // the drop guard will downgrade it back to an upgradable lock.
        f(unsafe { &mut *self.rwlock.data.get() })
    }

    /// First, tries to atomically upgrade an upgradable read lock into an exclusive write lock.
    ///
    /// If the access could not be granted at this time, then `None` is returned.
    ///
    /// Otherwise, calls the provided closure with an exclusive reference to the lock's data,
    /// and finally downgrades the lock back to an upgradable read lock.
    /// The closure's return value is wrapped in `Some` and returned.
    ///
    /// This function only requires a mutable reference to the guard, unlike
    /// `try_upgrade` which takes the guard by value.
    pub fn try_with_upgraded<Ret, F: FnOnce(&mut T) -> Ret>(&mut self, f: F) -> Option<Ret> {
        if unsafe { self.rwlock.raw.try_upgrade() } {
            // Safety: We just upgraded the lock, so we have mutable access to the data.
            // This will restore the state the lock was in at the start of the function.
            defer!(unsafe { self.rwlock.raw.downgrade_to_upgradable() });

            // Safety: We upgraded the lock, so we have mutable access to the data.
            // When this function returns, whether by drop or panic,
            // the drop guard will downgrade it back to an upgradable lock.
            Some(f(unsafe { &mut *self.rwlock.data.get() }))
        } else {
            None
        }
    }
}

impl<'a, R: RawRwLockUpgradeTimed + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
    /// Tries to atomically upgrade an upgradable read lock into an exclusive
    /// write lock, until a timeout is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// the current guard is returned.
    pub fn try_upgrade_for(
        s: Self,
        timeout: R::Duration,
    ) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
        if unsafe { s.rwlock.raw.try_upgrade_for(timeout) } {
            let rwlock = s.rwlock;
            mem::forget(s);
            Ok(RwLockWriteGuard {
                rwlock,
                marker: PhantomData,
            })
        } else {
            Err(s)
        }
    }

    /// Tries to atomically upgrade an upgradable read lock into an exclusive
    /// write lock, until a timeout is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// the current guard is returned.
    #[inline]
    pub fn try_upgrade_until(
        s: Self,
        timeout: R::Instant,
    ) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
        if unsafe { s.rwlock.raw.try_upgrade_until(timeout) } {
            let rwlock = s.rwlock;
            // Skip Drop: ownership of the (now exclusive) lock moves to the new guard.
            mem::forget(s);
            Ok(RwLockWriteGuard {
                rwlock,
                marker: PhantomData,
            })
        } else {
            Err(s)
        }
    }
}

impl<'a, R: RawRwLockUpgradeTimed + RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a>
    RwLockUpgradableReadGuard<'a, R, T>
{
    /// Tries to atomically upgrade an upgradable read lock into an exclusive
    /// write lock, until a timeout is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned.
    ///
    /// Otherwise, calls the provided closure with an exclusive reference to the lock's data,
    /// and finally downgrades the lock back to an upgradable read lock.
    /// The closure's return value is wrapped in `Some` and returned.
    ///
    /// This function only requires a mutable reference to the guard, unlike
    /// `try_upgrade_for` which takes the guard by value.
    pub fn try_with_upgraded_for<Ret, F: FnOnce(&mut T) -> Ret>(
        &mut self,
        timeout: R::Duration,
        f: F,
    ) -> Option<Ret> {
        if unsafe { self.rwlock.raw.try_upgrade_for(timeout) } {
            // Safety: We just upgraded the lock, so we have mutable access to the data.
            // This will restore the state the lock was in at the start of the function.
            defer!(unsafe { self.rwlock.raw.downgrade_to_upgradable() });

            // Safety: We upgraded the lock, so we have mutable access to the data.
            // When this function returns, whether by drop or panic,
            // the drop guard will downgrade it back to an upgradable lock.
            Some(f(unsafe { &mut *self.rwlock.data.get() }))
        } else {
            None
        }
    }

    /// Tries to atomically upgrade an upgradable read lock into an exclusive
    /// write lock, until a timeout is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned.
    ///
    /// Otherwise, calls the provided closure with an exclusive reference to the lock's data,
    /// and finally downgrades the lock back to an upgradable read lock.
    /// The closure's return value is wrapped in `Some` and returned.
    ///
    /// This function only requires a mutable reference to the guard, unlike
    /// `try_upgrade_until` which takes the guard by value.
    pub fn try_with_upgraded_until<Ret, F: FnOnce(&mut T) -> Ret>(
        &mut self,
        timeout: R::Instant,
        f: F,
    ) -> Option<Ret> {
        if unsafe { self.rwlock.raw.try_upgrade_until(timeout) } {
            // Safety: We just upgraded the lock, so we have mutable access to the data.
            // This will restore the state the lock was in at the start of the function.
            defer!(unsafe { self.rwlock.raw.downgrade_to_upgradable() });

            // Safety: We upgraded the lock, so we have mutable access to the data.
            // When this function returns, whether by drop or panic,
            // the drop guard will downgrade it back to an upgradable lock.
            Some(f(unsafe { &mut *self.rwlock.data.get() }))
        } else {
            None
        }
    }
}

impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> Deref for RwLockUpgradableReadGuard<'a, R, T> {
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        unsafe { &*self.rwlock.data.get() }
    }
}

impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> Drop for RwLockUpgradableReadGuard<'a, R, T> {
    #[inline]
    fn drop(&mut self) {
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
        unsafe {
            self.rwlock.raw.unlock_upgradable();
        }
    }
}

impl<'a, R: RawRwLockUpgrade + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
    for RwLockUpgradableReadGuard<'a, R, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<'a, R: RawRwLockUpgrade + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
    for RwLockUpgradableReadGuard<'a, R, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}

#[cfg(feature = "owning_ref")]
unsafe impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> StableAddress
    for RwLockUpgradableReadGuard<'a, R, T>
{
}

/// An RAII rwlock guard returned by the `Arc` locking operations on `RwLock`.
/// This is similar to the `RwLockUpgradableReadGuard` struct, except instead of using a reference to unlock the
/// `RwLock` it uses an `Arc<RwLock>`. This has several advantages, most notably that it has an `'static`
/// lifetime.
#[cfg(feature = "arc_lock")]
#[clippy::has_significant_drop]
#[must_use = "if unused the RwLock will immediately unlock"]
pub struct ArcRwLockUpgradableReadGuard<R: RawRwLockUpgrade, T: ?Sized> {
    rwlock: Arc<RwLock<R, T>>,
    marker: PhantomData<R::GuardMarker>,
}

#[cfg(feature = "arc_lock")]
impl<R: RawRwLockUpgrade, T: ?Sized> ArcRwLockUpgradableReadGuard<R, T> {
    /// Returns a reference to the rwlock, contained in its original `Arc`.
    pub fn rwlock(s: &Self) -> &Arc<RwLock<R, T>> {
        &s.rwlock
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// This is functionally identical to the `unlocked` method on [`RwLockUpgradableReadGuard`].
    #[inline]
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
        unsafe {
            s.rwlock.raw.unlock_upgradable();
        }
        // Re-acquire the lock when `f` returns or panics, so the guard is valid again.
        defer!(s.rwlock.raw.lock_upgradable());
        f()
    }

    /// Atomically upgrades an upgradable read lock into an exclusive write lock,
    /// blocking the current thread until it can be acquired.
    pub fn upgrade(s: Self) -> ArcRwLockWriteGuard<R, T> {
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
        unsafe {
            s.rwlock.raw.upgrade();
        }

        // SAFETY: avoid incrementing or decrementing the refcount using ManuallyDrop and reading the Arc out
        // of the struct
        let s = ManuallyDrop::new(s);
        let rwlock = unsafe { ptr::read(&s.rwlock) };

        ArcRwLockWriteGuard {
            rwlock,
            marker: PhantomData,
        }
    }

    /// Tries to atomically upgrade an upgradable read lock into an exclusive write lock.
    ///
    /// If the access could not be granted at this time, then the current guard is returned.
    pub fn try_upgrade(s: Self) -> Result<ArcRwLockWriteGuard<R, T>, Self> {
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
        if unsafe { s.rwlock.raw.try_upgrade() } {
            // SAFETY: same as above — move the Arc out without touching its refcount.
            let s = ManuallyDrop::new(s);
            let rwlock = unsafe { ptr::read(&s.rwlock) };

            Ok(ArcRwLockWriteGuard {
                rwlock,
                marker: PhantomData,
            })
        } else {
            Err(s)
        }
    }
}

#[cfg(feature = "arc_lock")]
impl<R: RawRwLockUpgradeFair, T: ?Sized> ArcRwLockUpgradableReadGuard<R, T> {
    /// Unlocks the `RwLock` using a fair unlock protocol.
    ///
    /// This is functionally identical to the `unlock_fair` method on [`RwLockUpgradableReadGuard`].
    #[inline]
    pub fn unlock_fair(s: Self) {
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
        unsafe {
            s.rwlock.raw.unlock_upgradable_fair();
        }

        // SAFETY: make sure we decrement the refcount properly by dropping the Arc itself
        // (the guard's Drop must not run, since the lock has already been released above).
        let mut s = ManuallyDrop::new(s);
        unsafe { ptr::drop_in_place(&mut s.rwlock) };
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// This is functionally equivalent to the `unlocked_fair` method on [`RwLockUpgradableReadGuard`].
    #[inline]
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
        unsafe {
            s.rwlock.raw.unlock_upgradable_fair();
        }
        // Re-acquire the lock when `f` returns or panics, so the guard is valid again.
        defer!(s.rwlock.raw.lock_upgradable());
        f()
    }

    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `bump` on [`RwLockUpgradableReadGuard`].
    #[inline]
    pub fn bump(s: &mut Self) {
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
        unsafe {
            s.rwlock.raw.bump_upgradable();
        }
    }
}

#[cfg(feature = "arc_lock")]
impl<R: RawRwLockUpgradeDowngrade, T: ?Sized> ArcRwLockUpgradableReadGuard<R, T> {
    /// Atomically downgrades an upgradable read lock into a shared read lock
    /// without allowing any writers to take exclusive access of the lock in the
    /// meantime.
    ///
    /// Note that if there are any writers currently waiting to take the lock
    /// then other readers may not be able to acquire the lock even if it was
    /// downgraded.
    pub fn downgrade(s: Self) -> ArcRwLockReadGuard<R, T> {
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
        unsafe {
            s.rwlock.raw.downgrade_upgradable();
        }

        // SAFETY: use ManuallyDrop and ptr::read to ensure the refcount is not changed
        let s = ManuallyDrop::new(s);
        let rwlock = unsafe { ptr::read(&s.rwlock) };

        ArcRwLockReadGuard {
            rwlock,
            marker: PhantomData,
        }
    }

    /// First, atomically upgrades an upgradable read lock into an exclusive write lock,
    /// blocking the current thread until it can be acquired.
    ///
    /// Then, calls the provided closure with an exclusive reference to the lock's data.
    ///
    /// Finally, atomically downgrades the lock back to an upgradable read lock.
    /// The closure's return value is returned.
    ///
    /// This function only requires a mutable reference to the guard, unlike
    /// `upgrade` which takes the guard by value.
    pub fn with_upgraded<Ret, F: FnOnce(&mut T) -> Ret>(&mut self, f: F) -> Ret {
        unsafe {
            self.rwlock.raw.upgrade();
        }

        // Safety: We just upgraded the lock, so we have mutable access to the data.
        // This will restore the state the lock was in at the start of the function.
        defer!(unsafe { self.rwlock.raw.downgrade_to_upgradable() });

        // Safety: We upgraded the lock, so we have mutable access to the data.
        // When this function returns, whether by drop or panic,
        // the drop guard will downgrade it back to an upgradable lock.
        f(unsafe { &mut *self.rwlock.data.get() })
    }

    /// First, tries to atomically upgrade an upgradable read lock into an exclusive write lock.
    ///
    /// If the access could not be granted at this time, then `None` is returned.
    ///
    /// Otherwise, calls the provided closure with an exclusive reference to the lock's data,
    /// and finally downgrades the lock back to an upgradable read lock.
    /// The closure's return value is wrapped in `Some` and returned.
    ///
    /// This function only requires a mutable reference to the guard, unlike
    /// `try_upgrade` which takes the guard by value.
    pub fn try_with_upgraded<Ret, F: FnOnce(&mut T) -> Ret>(&mut self, f: F) -> Option<Ret> {
        if unsafe { self.rwlock.raw.try_upgrade() } {
            // Safety: We just upgraded the lock, so we have mutable access to the data.
            // This will restore the state the lock was in at the start of the function.
            defer!(unsafe { self.rwlock.raw.downgrade_to_upgradable() });

            // Safety: We upgraded the lock, so we have mutable access to the data.
            // When this function returns, whether by drop or panic,
            // the drop guard will downgrade it back to an upgradable lock.
            Some(f(unsafe { &mut *self.rwlock.data.get() }))
        } else {
            None
        }
    }
}

#[cfg(feature = "arc_lock")]
impl<R: RawRwLockUpgradeTimed, T: ?Sized> ArcRwLockUpgradableReadGuard<R, T> {
    /// Tries to atomically upgrade an upgradable read lock into an exclusive
    /// write lock, until a timeout is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// the current guard is returned.
    pub fn try_upgrade_for(
        s: Self,
        timeout: R::Duration,
    ) -> Result<ArcRwLockWriteGuard<R, T>, Self> {
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
        if unsafe { s.rwlock.raw.try_upgrade_for(timeout) } {
            // SAFETY: same as above — move the Arc out without touching its refcount.
            let s = ManuallyDrop::new(s);
            let rwlock = unsafe { ptr::read(&s.rwlock) };

            Ok(ArcRwLockWriteGuard {
                rwlock,
                marker: PhantomData,
            })
        } else {
            Err(s)
        }
    }

    /// Tries to atomically upgrade an upgradable read lock into an exclusive
    /// write lock, until a timeout is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// the current guard is returned.
    #[inline]
    pub fn try_upgrade_until(
        s: Self,
        timeout: R::Instant,
    ) -> Result<ArcRwLockWriteGuard<R, T>, Self> {
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
        if unsafe { s.rwlock.raw.try_upgrade_until(timeout) } {
            // SAFETY: same as above — move the Arc out without touching its refcount.
            let s = ManuallyDrop::new(s);
            let rwlock = unsafe { ptr::read(&s.rwlock) };

            Ok(ArcRwLockWriteGuard {
                rwlock,
                marker: PhantomData,
            })
        } else {
            Err(s)
        }
    }
}

#[cfg(feature = "arc_lock")]
impl<R: RawRwLockUpgradeTimed + RawRwLockUpgradeDowngrade, T: ?Sized>
    ArcRwLockUpgradableReadGuard<R, T>
{
    /// Tries to atomically upgrade an upgradable read lock into an exclusive
    /// write lock, until a timeout is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned.
    ///
    /// Otherwise, calls the provided closure with an exclusive reference to the lock's data,
    /// and finally downgrades the lock back to an upgradable read lock.
    /// The closure's return value is wrapped in `Some` and returned.
    ///
    /// This function only requires a mutable reference to the guard, unlike
    /// `try_upgrade_for` which takes the guard by value.
    pub fn try_with_upgraded_for<Ret, F: FnOnce(&mut T) -> Ret>(
        &mut self,
        timeout: R::Duration,
        f: F,
    ) -> Option<Ret> {
        if unsafe { self.rwlock.raw.try_upgrade_for(timeout) } {
            // Safety: We just upgraded the lock, so we have mutable access to the data.
            // This will restore the state the lock was in at the start of the function.
            defer!(unsafe { self.rwlock.raw.downgrade_to_upgradable() });

            // Safety: We upgraded the lock, so we have mutable access to the data.
            // When this function returns, whether by drop or panic,
            // the drop guard will downgrade it back to an upgradable lock.
            Some(f(unsafe { &mut *self.rwlock.data.get() }))
        } else {
            None
        }
    }

    /// Tries to atomically upgrade an upgradable read lock into an exclusive
    /// write lock, until a timeout is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned.
    ///
    /// Otherwise, calls the provided closure with an exclusive reference to the lock's data,
    /// and finally downgrades the lock back to an upgradable read lock.
    /// The closure's return value is wrapped in `Some` and returned.
    ///
    /// This function only requires a mutable reference to the guard, unlike
    /// `try_upgrade_until` which takes the guard by value.
    pub fn try_with_upgraded_until<Ret, F: FnOnce(&mut T) -> Ret>(
        &mut self,
        timeout: R::Instant,
        f: F,
    ) -> Option<Ret> {
        if unsafe { self.rwlock.raw.try_upgrade_until(timeout) } {
            // Safety: We just upgraded the lock, so we have mutable access to the data.
            // This will restore the state the lock was in at the start of the function.
            defer!(unsafe { self.rwlock.raw.downgrade_to_upgradable() });

            // Safety: We upgraded the lock, so we have mutable access to the data.
            // When this function returns, whether by drop or panic,
            // the drop guard will downgrade it back to an upgradable lock.
            Some(f(unsafe { &mut *self.rwlock.data.get() }))
        } else {
            None
        }
    }
}

#[cfg(feature = "arc_lock")]
impl<R: RawRwLockUpgrade, T: ?Sized> Deref for ArcRwLockUpgradableReadGuard<R, T> {
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        unsafe { &*self.rwlock.data.get() }
    }
}

#[cfg(feature = "arc_lock")]
impl<R: RawRwLockUpgrade, T: ?Sized> Drop for ArcRwLockUpgradableReadGuard<R, T> {
    #[inline]
    fn drop(&mut self) {
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
        unsafe {
            self.rwlock.raw.unlock_upgradable();
        }
    }
}

#[cfg(feature = "arc_lock")]
impl<R: RawRwLockUpgrade, T: fmt::Debug + ?Sized> fmt::Debug
    for ArcRwLockUpgradableReadGuard<R, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

#[cfg(feature = "arc_lock")]
impl<R: RawRwLockUpgrade, T: fmt::Display + ?Sized> fmt::Display
    for ArcRwLockUpgradableReadGuard<R, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}

/// An RAII read lock guard returned by `RwLockReadGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedRwLockReadGuard` and `RwLockReadGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
#[clippy::has_significant_drop]
#[must_use = "if unused the RwLock will immediately unlock"]
pub struct MappedRwLockReadGuard<'a, R: RawRwLock, T: ?Sized> {
    raw: &'a R,
    data: *const T,
    marker: PhantomData<&'a T>,
}

unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync for MappedRwLockReadGuard<'a, R, T> {}
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Send for MappedRwLockReadGuard<'a, R, T> where
    R::GuardMarker: Send
{
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, R, T> {
    /// Make a new `MappedRwLockReadGuard` for a component of the locked data.
/// /// This operation cannot fail as the `MappedRwLockReadGuard` passed /// in already locked the data. /// /// This is an associated function that needs to be /// used as `MappedRwLockReadGuard::map(...)`. A method would interfere with methods of /// the same name on the contents of the locked data. #[inline] pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockReadGuard<'a, R, U> where F: FnOnce(&T) -> &U, { let raw = s.raw; let data = f(unsafe { &*s.data }); mem::forget(s); MappedRwLockReadGuard { raw, data, marker: PhantomData, } } /// Attempts to make a new `MappedRwLockReadGuard` for a component of the /// locked data. The original guard is return if the closure returns `None`. /// /// This operation cannot fail as the `MappedRwLockReadGuard` passed /// in already locked the data. /// /// This is an associated function that needs to be /// used as `MappedRwLockReadGuard::try_map(...)`. A method would interfere with methods of /// the same name on the contents of the locked data. #[inline] pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockReadGuard<'a, R, U>, Self> where F: FnOnce(&T) -> Option<&U>, { let raw = s.raw; let data = match f(unsafe { &*s.data }) { Some(data) => data, None => return Err(s), }; mem::forget(s); Ok(MappedRwLockReadGuard { raw, data, marker: PhantomData, }) } } impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, R, T> { /// Unlocks the `RwLock` using a fair unlock protocol. /// /// By default, `RwLock` is unfair and allow the current thread to re-lock /// the `RwLock` before another has the chance to acquire the lock, even if /// that thread has been blocked on the `RwLock` for a long time. This is /// the default because it allows much higher throughput as it avoids /// forcing a context switch on every `RwLock` unlock. This can result in one /// thread acquiring a `RwLock` many more times than other threads. 
/// /// However in some cases it can be beneficial to ensure fairness by forcing /// the lock to pass on to a waiting thread if there is one. This is done by /// using this method instead of dropping the `MappedRwLockReadGuard` normally. #[inline] pub fn unlock_fair(s: Self) { // Safety: A MappedRwLockReadGuard always holds a shared lock. unsafe { s.raw.unlock_shared_fair(); } mem::forget(s); } } impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for MappedRwLockReadGuard<'a, R, T> { type Target = T; #[inline] fn deref(&self) -> &T { unsafe { &*self.data } } } impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockReadGuard<'a, R, T> { #[inline] fn drop(&mut self) { // Safety: A MappedRwLockReadGuard always holds a shared lock. unsafe { self.raw.unlock_shared(); } } } impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for MappedRwLockReadGuard<'a, R, T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Debug::fmt(&**self, f) } } impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display for MappedRwLockReadGuard<'a, R, T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { (**self).fmt(f) } } #[cfg(feature = "owning_ref")] unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for MappedRwLockReadGuard<'a, R, T> { } /// An RAII write lock guard returned by `RwLockWriteGuard::map`, which can point to a /// subfield of the protected data. /// /// The main difference between `MappedRwLockWriteGuard` and `RwLockWriteGuard` is that the /// former doesn't support temporarily unlocking and re-locking, since that /// could introduce soundness issues if the locked object is modified by another /// thread. 
#[clippy::has_significant_drop]
#[must_use = "if unused the RwLock will immediately unlock"]
pub struct MappedRwLockWriteGuard<'a, R: RawRwLock, T: ?Sized> {
    // The raw lock is kept separately because `data` may point at a subfield,
    // not at the RwLock's own UnsafeCell; Drop releases the exclusive lock via `raw`.
    raw: &'a R,
    data: *mut T,
    marker: PhantomData<&'a mut T>,
}

unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync
    for MappedRwLockWriteGuard<'a, R, T>
{
}
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Send + 'a> Send for MappedRwLockWriteGuard<'a, R, T> where
    R::GuardMarker: Send
{
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> {
    /// Make a new `MappedRwLockWriteGuard` for a component of the locked data.
    ///
    /// This operation cannot fail as the `MappedRwLockWriteGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `MappedRwLockWriteGuard::map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockWriteGuard<'a, R, U>
    where
        F: FnOnce(&mut T) -> &mut U,
    {
        let raw = s.raw;
        let data = f(unsafe { &mut *s.data });
        // Suppress `s`'s Drop so the exclusive lock is not released here;
        // ownership of the lock transfers to the new guard.
        mem::forget(s);
        MappedRwLockWriteGuard {
            raw,
            data,
            marker: PhantomData,
        }
    }

    /// Attempts to make a new `MappedRwLockWriteGuard` for a component of the
    /// locked data. The original guard is returned if the closure returns `None`.
    ///
    /// This operation cannot fail as the `MappedRwLockWriteGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `MappedRwLockWriteGuard::try_map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockWriteGuard<'a, R, U>, Self>
    where
        F: FnOnce(&mut T) -> Option<&mut U>,
    {
        let raw = s.raw;
        let data = match f(unsafe { &mut *s.data }) {
            Some(data) => data,
            // On `None` the original guard is handed back intact (still locked).
            None => return Err(s),
        };
        // Lock ownership moves to the new guard; skip `s`'s Drop.
        mem::forget(s);
        Ok(MappedRwLockWriteGuard {
            raw,
            data,
            marker: PhantomData,
        })
    }
}

impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> {
    /// Unlocks the `RwLock` using a fair unlock protocol.
    ///
    /// By default, `RwLock` is unfair and allows the current thread to re-lock
    /// the `RwLock` before another has the chance to acquire the lock, even if
    /// that thread has been blocked on the `RwLock` for a long time. This is
    /// the default because it allows much higher throughput as it avoids
    /// forcing a context switch on every `RwLock` unlock. This can result in one
    /// thread acquiring a `RwLock` many more times than other threads.
    ///
    /// However in some cases it can be beneficial to ensure fairness by forcing
    /// the lock to pass on to a waiting thread if there is one. This is done by
    /// using this method instead of dropping the `MappedRwLockWriteGuard` normally.
    #[inline]
    pub fn unlock_fair(s: Self) {
        // Safety: A MappedRwLockWriteGuard always holds an exclusive lock.
        unsafe {
            s.raw.unlock_exclusive_fair();
        }
        // The lock was already released fairly; skip Drop's (unfair) unlock.
        mem::forget(s);
    }
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for MappedRwLockWriteGuard<'a, R, T> {
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        unsafe { &*self.data }
    }
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> DerefMut for MappedRwLockWriteGuard<'a, R, T> {
    #[inline]
    fn deref_mut(&mut self) -> &mut T {
        // Exclusive access is sound: the guard holds the write lock.
        unsafe { &mut *self.data }
    }
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockWriteGuard<'a, R, T> {
    #[inline]
    fn drop(&mut self) {
        // Safety: A MappedRwLockWriteGuard always holds an exclusive lock.
        unsafe {
            self.raw.unlock_exclusive();
        }
    }
}

impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
    for MappedRwLockWriteGuard<'a, R, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
    for MappedRwLockWriteGuard<'a, R, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}

#[cfg(feature = "owning_ref")]
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress
    for MappedRwLockWriteGuard<'a, R, T>
{
}
```
/content/code_sandbox/lock_api/src/rwlock.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
27,358
```rust // // path_to_url or the MIT license <LICENSE-MIT or // path_to_url at your option. This file may not be // copied, modified, or distributed except according to those terms. use crate::{ mutex::{RawMutex, RawMutexFair, RawMutexTimed}, GuardNoSend, }; use core::{ cell::{Cell, UnsafeCell}, fmt, marker::PhantomData, mem, num::NonZeroUsize, ops::Deref, sync::atomic::{AtomicUsize, Ordering}, }; #[cfg(feature = "arc_lock")] use alloc::sync::Arc; #[cfg(feature = "arc_lock")] use core::mem::ManuallyDrop; #[cfg(feature = "arc_lock")] use core::ptr; #[cfg(feature = "owning_ref")] use owning_ref::StableAddress; #[cfg(feature = "serde")] use serde::{Deserialize, Deserializer, Serialize, Serializer}; /// Helper trait which returns a non-zero thread ID. /// /// The simplest way to implement this trait is to return the address of a /// thread-local variable. /// /// # Safety /// /// Implementations of this trait must ensure that no two active threads share /// the same thread ID. However the ID of a thread that has exited can be /// re-used since that thread is no longer active. pub unsafe trait GetThreadId { /// Initial value. // A non-constant const item is a legacy way to supply an initialized value to downstream // static items. Can hopefully be replaced with `const fn new() -> Self` at some point. #[allow(clippy::declare_interior_mutable_const)] const INIT: Self; /// Returns a non-zero thread ID which identifies the current thread of /// execution. fn nonzero_thread_id(&self) -> NonZeroUsize; } /// A raw mutex type that wraps another raw mutex to provide reentrancy. /// /// Although this has the same methods as the [`RawMutex`] trait, it does /// not implement it, and should not be used in the same way, since this /// mutex can successfully acquire a lock multiple times in the same thread. /// Only use this when you know you want a raw mutex that can be locked /// reentrantly; you probably want [`ReentrantMutex`] instead. 
pub struct RawReentrantMutex<R, G> {
    // Thread ID of the current owner, or 0 when unlocked. GetThreadId
    // guarantees non-zero IDs, so 0 is a safe sentinel.
    owner: AtomicUsize,
    // Number of times the owning thread has acquired the lock.
    lock_count: Cell<usize>,
    // The underlying (non-reentrant) mutex; locked only on first acquisition.
    mutex: R,
    get_thread_id: G,
}

unsafe impl<R: RawMutex + Send, G: GetThreadId + Send> Send for RawReentrantMutex<R, G> {}
unsafe impl<R: RawMutex + Sync, G: GetThreadId + Sync> Sync for RawReentrantMutex<R, G> {}

impl<R: RawMutex, G: GetThreadId> RawReentrantMutex<R, G> {
    /// Initial value for an unlocked mutex.
    #[allow(clippy::declare_interior_mutable_const)]
    pub const INIT: Self = RawReentrantMutex {
        owner: AtomicUsize::new(0),
        lock_count: Cell::new(0),
        mutex: R::INIT,
        get_thread_id: G::INIT,
    };

    // Common reentrant-acquire path: if the current thread already owns the
    // lock, just bump the count; otherwise acquire the inner mutex via the
    // provided closure and record ownership.
    // NOTE(review): Relaxed ordering on `owner` appears to rely on the inner
    // mutex's lock/unlock providing the necessary synchronization between
    // threads — confirm against upstream parking_lot documentation.
    #[inline]
    fn lock_internal<F: FnOnce() -> bool>(&self, try_lock: F) -> bool {
        let id = self.get_thread_id.nonzero_thread_id().get();
        if self.owner.load(Ordering::Relaxed) == id {
            self.lock_count.set(
                self.lock_count
                    .get()
                    // Overflow would silently break unlock bookkeeping, so panic instead.
                    .checked_add(1)
                    .expect("ReentrantMutex lock count overflow"),
            );
        } else {
            if !try_lock() {
                return false;
            }
            self.owner.store(id, Ordering::Relaxed);
            debug_assert_eq!(self.lock_count.get(), 0);
            self.lock_count.set(1);
        }
        true
    }

    /// Acquires this mutex, blocking if it's held by another thread.
    #[inline]
    pub fn lock(&self) {
        self.lock_internal(|| {
            self.mutex.lock();
            true
        });
    }

    /// Attempts to acquire this mutex without blocking. Returns `true`
    /// if the lock was successfully acquired and `false` otherwise.
    #[inline]
    pub fn try_lock(&self) -> bool {
        self.lock_internal(|| self.mutex.try_lock())
    }

    /// Unlocks this mutex. The inner mutex may not be unlocked if
    /// this mutex was acquired previously in the current thread.
    ///
    /// # Safety
    ///
    /// This method may only be called if the mutex is held by the current thread.
    #[inline]
    pub unsafe fn unlock(&self) {
        // Decrement first; the inner mutex is only released when the count
        // returns to zero (i.e. the outermost lock is being released).
        let lock_count = self.lock_count.get() - 1;
        self.lock_count.set(lock_count);
        if lock_count == 0 {
            self.owner.store(0, Ordering::Relaxed);
            self.mutex.unlock();
        }
    }

    /// Checks whether the mutex is currently locked.
    #[inline]
    pub fn is_locked(&self) -> bool {
        self.mutex.is_locked()
    }

    /// Checks whether the mutex is currently held by the current thread.
    #[inline]
    pub fn is_owned_by_current_thread(&self) -> bool {
        let id = self.get_thread_id.nonzero_thread_id().get();
        self.owner.load(Ordering::Relaxed) == id
    }
}

impl<R: RawMutexFair, G: GetThreadId> RawReentrantMutex<R, G> {
    /// Unlocks this mutex using a fair unlock protocol. The inner mutex
    /// may not be unlocked if this mutex was acquired previously in the
    /// current thread.
    ///
    /// # Safety
    ///
    /// This method may only be called if the mutex is held by the current thread.
    #[inline]
    pub unsafe fn unlock_fair(&self) {
        let lock_count = self.lock_count.get() - 1;
        self.lock_count.set(lock_count);
        if lock_count == 0 {
            self.owner.store(0, Ordering::Relaxed);
            self.mutex.unlock_fair();
        }
    }

    /// Temporarily yields the mutex to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_fair` followed
    /// by `lock`, however it can be much more efficient in the case where there
    /// are no waiting threads.
    ///
    /// # Safety
    ///
    /// This method may only be called if the mutex is held by the current thread.
    #[inline]
    pub unsafe fn bump(&self) {
        // Only meaningful at the outermost lock level; a nested holder cannot
        // yield without corrupting the reentrancy count.
        if self.lock_count.get() == 1 {
            let id = self.owner.load(Ordering::Relaxed);
            // Clear ownership across the bump so the state is consistent
            // while the inner mutex may be held by another thread.
            self.owner.store(0, Ordering::Relaxed);
            self.lock_count.set(0);
            self.mutex.bump();
            self.owner.store(id, Ordering::Relaxed);
            self.lock_count.set(1);
        }
    }
}

impl<R: RawMutexTimed, G: GetThreadId> RawReentrantMutex<R, G> {
    /// Attempts to acquire this lock until a timeout is reached.
    #[inline]
    pub fn try_lock_until(&self, timeout: R::Instant) -> bool {
        self.lock_internal(|| self.mutex.try_lock_until(timeout))
    }

    /// Attempts to acquire this lock until a timeout is reached.
    #[inline]
    pub fn try_lock_for(&self, timeout: R::Duration) -> bool {
        self.lock_internal(|| self.mutex.try_lock_for(timeout))
    }
}

/// A mutex which can be recursively locked by a single thread.
/// /// This type is identical to `Mutex` except for the following points: /// /// - Locking multiple times from the same thread will work correctly instead of /// deadlocking. /// - `ReentrantMutexGuard` does not give mutable references to the locked data. /// Use a `RefCell` if you need this. /// /// See [`Mutex`](crate::Mutex) for more details about the underlying mutex /// primitive. pub struct ReentrantMutex<R, G, T: ?Sized> { raw: RawReentrantMutex<R, G>, data: UnsafeCell<T>, } unsafe impl<R: RawMutex + Send, G: GetThreadId + Send, T: ?Sized + Send> Send for ReentrantMutex<R, G, T> { } unsafe impl<R: RawMutex + Sync, G: GetThreadId + Sync, T: ?Sized + Send> Sync for ReentrantMutex<R, G, T> { } impl<R: RawMutex, G: GetThreadId, T> ReentrantMutex<R, G, T> { /// Creates a new reentrant mutex in an unlocked state ready for use. #[cfg(has_const_fn_trait_bound)] #[inline] pub const fn new(val: T) -> ReentrantMutex<R, G, T> { ReentrantMutex { data: UnsafeCell::new(val), raw: RawReentrantMutex { owner: AtomicUsize::new(0), lock_count: Cell::new(0), mutex: R::INIT, get_thread_id: G::INIT, }, } } /// Creates a new reentrant mutex in an unlocked state ready for use. #[cfg(not(has_const_fn_trait_bound))] #[inline] pub fn new(val: T) -> ReentrantMutex<R, G, T> { ReentrantMutex { data: UnsafeCell::new(val), raw: RawReentrantMutex { owner: AtomicUsize::new(0), lock_count: Cell::new(0), mutex: R::INIT, get_thread_id: G::INIT, }, } } /// Consumes this mutex, returning the underlying data. #[inline] pub fn into_inner(self) -> T { self.data.into_inner() } } impl<R, G, T> ReentrantMutex<R, G, T> { /// Creates a new reentrant mutex based on a pre-existing raw mutex and a /// helper to get the thread ID. 
#[inline] pub const fn from_raw(raw_mutex: R, get_thread_id: G, val: T) -> ReentrantMutex<R, G, T> { ReentrantMutex { data: UnsafeCell::new(val), raw: RawReentrantMutex { owner: AtomicUsize::new(0), lock_count: Cell::new(0), mutex: raw_mutex, get_thread_id, }, } } /// Creates a new reentrant mutex based on a pre-existing raw mutex and a /// helper to get the thread ID. /// /// This allows creating a reentrant mutex in a constant context on stable /// Rust. /// /// This method is a legacy alias for [`from_raw`](Self::from_raw). #[inline] pub const fn const_new(raw_mutex: R, get_thread_id: G, val: T) -> ReentrantMutex<R, G, T> { Self::from_raw(raw_mutex, get_thread_id, val) } } impl<R: RawMutex, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> { /// Creates a new `ReentrantMutexGuard` without checking if the lock is held. /// /// # Safety /// /// This method must only be called if the thread logically holds the lock. /// /// Calling this function when a guard has already been produced is undefined behaviour unless /// the guard was forgotten with `mem::forget`. #[inline] pub unsafe fn make_guard_unchecked(&self) -> ReentrantMutexGuard<'_, R, G, T> { ReentrantMutexGuard { remutex: &self, marker: PhantomData, } } /// Acquires a reentrant mutex, blocking the current thread until it is able /// to do so. /// /// If the mutex is held by another thread then this function will block the /// local thread until it is available to acquire the mutex. If the mutex is /// already held by the current thread then this function will increment the /// lock reference count and return immediately. Upon returning, /// the thread is the only thread with the mutex held. An RAII guard is /// returned to allow scoped unlock of the lock. When the guard goes out of /// scope, the mutex will be unlocked. #[inline] pub fn lock(&self) -> ReentrantMutexGuard<'_, R, G, T> { self.raw.lock(); // SAFETY: The lock is held, as required. 
unsafe { self.make_guard_unchecked() } } /// Attempts to acquire this lock. /// /// If the lock could not be acquired at this time, then `None` is returned. /// Otherwise, an RAII guard is returned. The lock will be unlocked when the /// guard is dropped. /// /// This function does not block. #[inline] pub fn try_lock(&self) -> Option<ReentrantMutexGuard<'_, R, G, T>> { if self.raw.try_lock() { // SAFETY: The lock is held, as required. Some(unsafe { self.make_guard_unchecked() }) } else { None } } /// Returns a mutable reference to the underlying data. /// /// Since this call borrows the `ReentrantMutex` mutably, no actual locking needs to /// take place---the mutable borrow statically guarantees no locks exist. #[inline] pub fn get_mut(&mut self) -> &mut T { unsafe { &mut *self.data.get() } } /// Checks whether the mutex is currently locked. #[inline] pub fn is_locked(&self) -> bool { self.raw.is_locked() } /// Checks whether the mutex is currently held by the current thread. #[inline] pub fn is_owned_by_current_thread(&self) -> bool { self.raw.is_owned_by_current_thread() } /// Forcibly unlocks the mutex. /// /// This is useful when combined with `mem::forget` to hold a lock without /// the need to maintain a `ReentrantMutexGuard` object alive, for example when /// dealing with FFI. /// /// # Safety /// /// This method must only be called if the current thread logically owns a /// `ReentrantMutexGuard` but that guard has be discarded using `mem::forget`. /// Behavior is undefined if a mutex is unlocked when not locked. #[inline] pub unsafe fn force_unlock(&self) { self.raw.unlock(); } /// Returns the underlying raw mutex object. /// /// Note that you will most likely need to import the `RawMutex` trait from /// `lock_api` to be able to call functions on the raw mutex. /// /// # Safety /// /// This method is unsafe because it allows unlocking a mutex while /// still holding a reference to a `ReentrantMutexGuard`. 
#[inline] pub unsafe fn raw(&self) -> &R { &self.raw.mutex } /// Returns a raw pointer to the underlying data. /// /// This is useful when combined with `mem::forget` to hold a lock without /// the need to maintain a `ReentrantMutexGuard` object alive, for example /// when dealing with FFI. /// /// # Safety /// /// You must ensure that there are no data races when dereferencing the /// returned pointer, for example if the current thread logically owns a /// `ReentrantMutexGuard` but that guard has been discarded using /// `mem::forget`. #[inline] pub fn data_ptr(&self) -> *mut T { self.data.get() } /// Creates a new `ArcReentrantMutexGuard` without checking if the lock is held. /// /// # Safety /// /// This method must only be called if the thread logically holds the lock. /// /// Calling this function when a guard has already been produced is undefined behaviour unless /// the guard was forgotten with `mem::forget`. #[cfg(feature = "arc_lock")] #[inline] pub unsafe fn make_arc_guard_unchecked(self: &Arc<Self>) -> ArcReentrantMutexGuard<R, G, T> { ArcReentrantMutexGuard { remutex: self.clone(), marker: PhantomData, } } /// Acquires a reentrant mutex through an `Arc`. /// /// This method is similar to the `lock` method; however, it requires the `ReentrantMutex` to be inside of an /// `Arc` and the resulting mutex guard has no lifetime requirements. #[cfg(feature = "arc_lock")] #[inline] pub fn lock_arc(self: &Arc<Self>) -> ArcReentrantMutexGuard<R, G, T> { self.raw.lock(); // SAFETY: locking guarantee is upheld unsafe { self.make_arc_guard_unchecked() } } /// Attempts to acquire a reentrant mutex through an `Arc`. /// /// This method is similar to the `try_lock` method; however, it requires the `ReentrantMutex` to be inside /// of an `Arc` and the resulting mutex guard has no lifetime requirements. 
#[cfg(feature = "arc_lock")] #[inline] pub fn try_lock_arc(self: &Arc<Self>) -> Option<ArcReentrantMutexGuard<R, G, T>> { if self.raw.try_lock() { // SAFETY: locking guarantee is upheld Some(unsafe { self.make_arc_guard_unchecked() }) } else { None } } } impl<R: RawMutexFair, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> { /// Forcibly unlocks the mutex using a fair unlock protocol. /// /// This is useful when combined with `mem::forget` to hold a lock without /// the need to maintain a `ReentrantMutexGuard` object alive, for example when /// dealing with FFI. /// /// # Safety /// /// This method must only be called if the current thread logically owns a /// `ReentrantMutexGuard` but that guard has be discarded using `mem::forget`. /// Behavior is undefined if a mutex is unlocked when not locked. #[inline] pub unsafe fn force_unlock_fair(&self) { self.raw.unlock_fair(); } } impl<R: RawMutexTimed, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> { /// Attempts to acquire this lock until a timeout is reached. /// /// If the lock could not be acquired before the timeout expired, then /// `None` is returned. Otherwise, an RAII guard is returned. The lock will /// be unlocked when the guard is dropped. #[inline] pub fn try_lock_for(&self, timeout: R::Duration) -> Option<ReentrantMutexGuard<'_, R, G, T>> { if self.raw.try_lock_for(timeout) { // SAFETY: The lock is held, as required. Some(unsafe { self.make_guard_unchecked() }) } else { None } } /// Attempts to acquire this lock until a timeout is reached. /// /// If the lock could not be acquired before the timeout expired, then /// `None` is returned. Otherwise, an RAII guard is returned. The lock will /// be unlocked when the guard is dropped. #[inline] pub fn try_lock_until(&self, timeout: R::Instant) -> Option<ReentrantMutexGuard<'_, R, G, T>> { if self.raw.try_lock_until(timeout) { // SAFETY: The lock is held, as required. 
Some(unsafe { self.make_guard_unchecked() }) } else { None } } /// Attempts to acquire this lock until a timeout is reached, through an `Arc`. /// /// This method is similar to the `try_lock_for` method; however, it requires the `ReentrantMutex` to be /// inside of an `Arc` and the resulting mutex guard has no lifetime requirements. #[cfg(feature = "arc_lock")] #[inline] pub fn try_lock_arc_for( self: &Arc<Self>, timeout: R::Duration, ) -> Option<ArcReentrantMutexGuard<R, G, T>> { if self.raw.try_lock_for(timeout) { // SAFETY: locking guarantee is upheld Some(unsafe { self.make_arc_guard_unchecked() }) } else { None } } /// Attempts to acquire this lock until a timeout is reached, through an `Arc`. /// /// This method is similar to the `try_lock_until` method; however, it requires the `ReentrantMutex` to be /// inside of an `Arc` and the resulting mutex guard has no lifetime requirements. #[cfg(feature = "arc_lock")] #[inline] pub fn try_lock_arc_until( self: &Arc<Self>, timeout: R::Instant, ) -> Option<ArcReentrantMutexGuard<R, G, T>> { if self.raw.try_lock_until(timeout) { // SAFETY: locking guarantee is upheld Some(unsafe { self.make_arc_guard_unchecked() }) } else { None } } } impl<R: RawMutex, G: GetThreadId, T: ?Sized + Default> Default for ReentrantMutex<R, G, T> { #[inline] fn default() -> ReentrantMutex<R, G, T> { ReentrantMutex::new(Default::default()) } } impl<R: RawMutex, G: GetThreadId, T> From<T> for ReentrantMutex<R, G, T> { #[inline] fn from(t: T) -> ReentrantMutex<R, G, T> { ReentrantMutex::new(t) } } impl<R: RawMutex, G: GetThreadId, T: ?Sized + fmt::Debug> fmt::Debug for ReentrantMutex<R, G, T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self.try_lock() { Some(guard) => f .debug_struct("ReentrantMutex") .field("data", &&*guard) .finish(), None => { struct LockedPlaceholder; impl fmt::Debug for LockedPlaceholder { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str("<locked>") } } 
f.debug_struct("ReentrantMutex") .field("data", &LockedPlaceholder) .finish() } } } } // Copied and modified from serde #[cfg(feature = "serde")] impl<R, G, T> Serialize for ReentrantMutex<R, G, T> where R: RawMutex, G: GetThreadId, T: Serialize + ?Sized, { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { self.lock().serialize(serializer) } } #[cfg(feature = "serde")] impl<'de, R, G, T> Deserialize<'de> for ReentrantMutex<R, G, T> where R: RawMutex, G: GetThreadId, T: Deserialize<'de> + ?Sized, { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { Deserialize::deserialize(deserializer).map(ReentrantMutex::new) } } /// An RAII implementation of a "scoped lock" of a reentrant mutex. When this structure /// is dropped (falls out of scope), the lock will be unlocked. /// /// The data protected by the mutex can be accessed through this guard via its /// `Deref` implementation. #[clippy::has_significant_drop] #[must_use = "if unused the ReentrantMutex will immediately unlock"] pub struct ReentrantMutexGuard<'a, R: RawMutex, G: GetThreadId, T: ?Sized> { remutex: &'a ReentrantMutex<R, G, T>, marker: PhantomData<(&'a T, GuardNoSend)>, } unsafe impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync for ReentrantMutexGuard<'a, R, G, T> { } impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> ReentrantMutexGuard<'a, R, G, T> { /// Returns a reference to the original `ReentrantMutex` object. pub fn remutex(s: &Self) -> &'a ReentrantMutex<R, G, T> { s.remutex } /// Makes a new `MappedReentrantMutexGuard` for a component of the locked data. /// /// This operation cannot fail as the `ReentrantMutexGuard` passed /// in already locked the mutex. /// /// This is an associated function that needs to be /// used as `ReentrantMutexGuard::map(...)`. A method would interfere with methods of /// the same name on the contents of the locked data. 
#[inline] pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedReentrantMutexGuard<'a, R, G, U> where F: FnOnce(&T) -> &U, { let raw = &s.remutex.raw; let data = f(unsafe { &*s.remutex.data.get() }); mem::forget(s); MappedReentrantMutexGuard { raw, data, marker: PhantomData, } } /// Attempts to make a new `MappedReentrantMutexGuard` for a component of the /// locked data. The original guard is return if the closure returns `None`. /// /// This operation cannot fail as the `ReentrantMutexGuard` passed /// in already locked the mutex. /// /// This is an associated function that needs to be /// used as `ReentrantMutexGuard::try_map(...)`. A method would interfere with methods of /// the same name on the contents of the locked data. #[inline] pub fn try_map<U: ?Sized, F>( s: Self, f: F, ) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, Self> where F: FnOnce(&T) -> Option<&U>, { let raw = &s.remutex.raw; let data = match f(unsafe { &*s.remutex.data.get() }) { Some(data) => data, None => return Err(s), }; mem::forget(s); Ok(MappedReentrantMutexGuard { raw, data, marker: PhantomData, }) } /// Temporarily unlocks the mutex to execute the given function. /// /// This is safe because `&mut` guarantees that there exist no other /// references to the data protected by the mutex. #[inline] pub fn unlocked<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U, { // Safety: A ReentrantMutexGuard always holds the lock. unsafe { s.remutex.raw.unlock(); } defer!(s.remutex.raw.lock()); f() } } impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> ReentrantMutexGuard<'a, R, G, T> { /// Unlocks the mutex using a fair unlock protocol. /// /// By default, mutexes are unfair and allow the current thread to re-lock /// the mutex before another has the chance to acquire the lock, even if /// that thread has been blocked on the mutex for a long time. This is the /// default because it allows much higher throughput as it avoids forcing a /// context switch on every mutex unlock. 
This can result in one thread /// acquiring a mutex many more times than other threads. /// /// However in some cases it can be beneficial to ensure fairness by forcing /// the lock to pass on to a waiting thread if there is one. This is done by /// using this method instead of dropping the `ReentrantMutexGuard` normally. #[inline] pub fn unlock_fair(s: Self) { // Safety: A ReentrantMutexGuard always holds the lock unsafe { s.remutex.raw.unlock_fair(); } mem::forget(s); } /// Temporarily unlocks the mutex to execute the given function. /// /// The mutex is unlocked a fair unlock protocol. /// /// This is safe because `&mut` guarantees that there exist no other /// references to the data protected by the mutex. #[inline] pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U, { // Safety: A ReentrantMutexGuard always holds the lock unsafe { s.remutex.raw.unlock_fair(); } defer!(s.remutex.raw.lock()); f() } /// Temporarily yields the mutex to a waiting thread if there is one. /// /// This method is functionally equivalent to calling `unlock_fair` followed /// by `lock`, however it can be much more efficient in the case where there /// are no waiting threads. #[inline] pub fn bump(s: &mut Self) { // Safety: A ReentrantMutexGuard always holds the lock unsafe { s.remutex.raw.bump(); } } } impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Deref for ReentrantMutexGuard<'a, R, G, T> { type Target = T; #[inline] fn deref(&self) -> &T { unsafe { &*self.remutex.data.get() } } } impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop for ReentrantMutexGuard<'a, R, G, T> { #[inline] fn drop(&mut self) { // Safety: A ReentrantMutexGuard always holds the lock. 
unsafe { self.remutex.raw.unlock(); } } } impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for ReentrantMutexGuard<'a, R, G, T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Debug::fmt(&**self, f) } } impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display for ReentrantMutexGuard<'a, R, G, T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { (**self).fmt(f) } } #[cfg(feature = "owning_ref")] unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress for ReentrantMutexGuard<'a, R, G, T> { } /// An RAII mutex guard returned by the `Arc` locking operations on `ReentrantMutex`. /// /// This is similar to the `ReentrantMutexGuard` struct, except instead of using a reference to unlock the /// `Mutex` it uses an `Arc<ReentrantMutex>`. This has several advantages, most notably that it has an `'static` /// lifetime. #[cfg(feature = "arc_lock")] #[clippy::has_significant_drop] #[must_use = "if unused the ReentrantMutex will immediately unlock"] pub struct ArcReentrantMutexGuard<R: RawMutex, G: GetThreadId, T: ?Sized> { remutex: Arc<ReentrantMutex<R, G, T>>, marker: PhantomData<GuardNoSend>, } #[cfg(feature = "arc_lock")] impl<R: RawMutex, G: GetThreadId, T: ?Sized> ArcReentrantMutexGuard<R, G, T> { /// Returns a reference to the `ReentrantMutex` this object is guarding, contained in its `Arc`. pub fn remutex(s: &Self) -> &Arc<ReentrantMutex<R, G, T>> { &s.remutex } /// Temporarily unlocks the mutex to execute the given function. /// /// This is safe because `&mut` guarantees that there exist no other /// references to the data protected by the mutex. #[inline] pub fn unlocked<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U, { // Safety: A ReentrantMutexGuard always holds the lock. 
unsafe { s.remutex.raw.unlock(); } defer!(s.remutex.raw.lock()); f() } } #[cfg(feature = "arc_lock")] impl<R: RawMutexFair, G: GetThreadId, T: ?Sized> ArcReentrantMutexGuard<R, G, T> { /// Unlocks the mutex using a fair unlock protocol. /// /// This is functionally identical to the `unlock_fair` method on [`ReentrantMutexGuard`]. #[inline] pub fn unlock_fair(s: Self) { // Safety: A ReentrantMutexGuard always holds the lock unsafe { s.remutex.raw.unlock_fair(); } // SAFETY: ensure that the Arc's refcount is decremented let mut s = ManuallyDrop::new(s); unsafe { ptr::drop_in_place(&mut s.remutex) }; } /// Temporarily unlocks the mutex to execute the given function. /// /// This is functionally identical to the `unlocked_fair` method on [`ReentrantMutexGuard`]. #[inline] pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U, { // Safety: A ReentrantMutexGuard always holds the lock unsafe { s.remutex.raw.unlock_fair(); } defer!(s.remutex.raw.lock()); f() } /// Temporarily yields the mutex to a waiting thread if there is one. /// /// This is functionally equivalent to the `bump` method on [`ReentrantMutexGuard`]. #[inline] pub fn bump(s: &mut Self) { // Safety: A ReentrantMutexGuard always holds the lock unsafe { s.remutex.raw.bump(); } } } #[cfg(feature = "arc_lock")] impl<R: RawMutex, G: GetThreadId, T: ?Sized> Deref for ArcReentrantMutexGuard<R, G, T> { type Target = T; #[inline] fn deref(&self) -> &T { unsafe { &*self.remutex.data.get() } } } #[cfg(feature = "arc_lock")] impl<R: RawMutex, G: GetThreadId, T: ?Sized> Drop for ArcReentrantMutexGuard<R, G, T> { #[inline] fn drop(&mut self) { // Safety: A ReentrantMutexGuard always holds the lock. unsafe { self.remutex.raw.unlock(); } } } /// An RAII mutex guard returned by `ReentrantMutexGuard::map`, which can point to a /// subfield of the protected data. 
/// /// The main difference between `MappedReentrantMutexGuard` and `ReentrantMutexGuard` is that the /// former doesn't support temporarily unlocking and re-locking, since that /// could introduce soundness issues if the locked object is modified by another /// thread. #[clippy::has_significant_drop] #[must_use = "if unused the ReentrantMutex will immediately unlock"] pub struct MappedReentrantMutexGuard<'a, R: RawMutex, G: GetThreadId, T: ?Sized> { raw: &'a RawReentrantMutex<R, G>, data: *const T, marker: PhantomData<&'a T>, } unsafe impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync for MappedReentrantMutexGuard<'a, R, G, T> { } impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> MappedReentrantMutexGuard<'a, R, G, T> { /// Makes a new `MappedReentrantMutexGuard` for a component of the locked data. /// /// This operation cannot fail as the `MappedReentrantMutexGuard` passed /// in already locked the mutex. /// /// This is an associated function that needs to be /// used as `MappedReentrantMutexGuard::map(...)`. A method would interfere with methods of /// the same name on the contents of the locked data. #[inline] pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedReentrantMutexGuard<'a, R, G, U> where F: FnOnce(&T) -> &U, { let raw = s.raw; let data = f(unsafe { &*s.data }); mem::forget(s); MappedReentrantMutexGuard { raw, data, marker: PhantomData, } } /// Attempts to make a new `MappedReentrantMutexGuard` for a component of the /// locked data. The original guard is return if the closure returns `None`. /// /// This operation cannot fail as the `MappedReentrantMutexGuard` passed /// in already locked the mutex. /// /// This is an associated function that needs to be /// used as `MappedReentrantMutexGuard::try_map(...)`. A method would interfere with methods of /// the same name on the contents of the locked data. 
#[inline] pub fn try_map<U: ?Sized, F>( s: Self, f: F, ) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, Self> where F: FnOnce(&T) -> Option<&U>, { let raw = s.raw; let data = match f(unsafe { &*s.data }) { Some(data) => data, None => return Err(s), }; mem::forget(s); Ok(MappedReentrantMutexGuard { raw, data, marker: PhantomData, }) } } impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> MappedReentrantMutexGuard<'a, R, G, T> { /// Unlocks the mutex using a fair unlock protocol. /// /// By default, mutexes are unfair and allow the current thread to re-lock /// the mutex before another has the chance to acquire the lock, even if /// that thread has been blocked on the mutex for a long time. This is the /// default because it allows much higher throughput as it avoids forcing a /// context switch on every mutex unlock. This can result in one thread /// acquiring a mutex many more times than other threads. /// /// However in some cases it can be beneficial to ensure fairness by forcing /// the lock to pass on to a waiting thread if there is one. This is done by /// using this method instead of dropping the `ReentrantMutexGuard` normally. #[inline] pub fn unlock_fair(s: Self) { // Safety: A MappedReentrantMutexGuard always holds the lock unsafe { s.raw.unlock_fair(); } mem::forget(s); } } impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Deref for MappedReentrantMutexGuard<'a, R, G, T> { type Target = T; #[inline] fn deref(&self) -> &T { unsafe { &*self.data } } } impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop for MappedReentrantMutexGuard<'a, R, G, T> { #[inline] fn drop(&mut self) { // Safety: A MappedReentrantMutexGuard always holds the lock. 
unsafe { self.raw.unlock(); } } } impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for MappedReentrantMutexGuard<'a, R, G, T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Debug::fmt(&**self, f) } } impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display for MappedReentrantMutexGuard<'a, R, G, T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { (**self).fmt(f) } } #[cfg(feature = "owning_ref")] unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress for MappedReentrantMutexGuard<'a, R, G, T> { } ```
/content/code_sandbox/lock_api/src/remutex.rs
rust
2016-05-13T10:59:24
2024-08-16T17:17:37
parking_lot
Amanieu/parking_lot
2,662
9,186
```qmake #Use 5 step of optimization #-optimizationpasses 5 #When not preverifing in a case-insensitive filing system, such as Windows. This tool will unpack your processed jars,(if using windows you should then use): -dontusemixedcaseclassnames #Specifies not to ignore non-public library classes. As of version 4.5, this is the default setting -dontskipnonpubliclibraryclasses # Optimization is turned off by default. Dex does not like code run # through the ProGuard optimize and preverify steps (and performs some # of these optimizations on its own). -dontoptimize -dontpreverify -dontwarn android.support.** -dontwarn com.squareup.picasso.** #Specifies to write out some more information during processing. If the program terminates with an exception, this option will print out the entire stack trace, instead of just the exception message. -verbose #The -optimizations option disables some arithmetic simplifications that Dalvik 1.0 and 1.5 can't handle. Note that the Dalvik VM also can't handle aggressive overloading (of static fields). #To understand or change this check path_to_url#/manual/optimizations.html #-optimizations !code/simplification/arithmetic,!field/*,!class/merging/* # Note that if you want to enable optimization, you cannot just # include optimization flags in your own project configuration file; # instead you will need to point to the # "proguard-android-optimize.txt" file instead of this one from your # project.properties file. #To repackage classes on a single package #-repackageclasses '' #Uncomment if using annotations to keep them. 
#-keepattributes *Annotation* #Keep classes that are referenced on the AndroidManifest -keep public class * extends android.app.Activity -keep public class * extends android.app.Application -keep public class * extends android.app.Service -keep public class * extends android.content.BroadcastReceiver -keep public class * extends android.content.ContentProvider -keep public class * extends android.app.backup.BackupAgentHelper -keep public class * extends android.preference.Preference -keep public class com.google.vending.licensing.ILicensingService -keep public class com.android.vending.licensing.ILicensingService #Compatibility library -keep public class * extends android.support.v4.app.Fragment -keep public class * extends android.app.Fragment #To maintain custom components names that are used on layouts XML. #Uncomment if having any problem with the approach below #-keep public class custom.components.package.and.name.** # keep setters in Views so that animations can still work. # see path_to_url#beans -keepclassmembers public class * extends android.view.View { void set*(***); *** get*(); } #To remove debug logs: -assumenosideeffects class android.util.Log { public static *** d(...); public static *** v(...); public static *** w(...); } #To avoid changing names of methods invoked on layout's onClick. 
# Uncomment and add specific method names if using onClick on layouts #-keepclassmembers class * { # public void onClickButton(android.view.View); #} #Maintain java native methods -keepclasseswithmembernames class * { native <methods>; } #To maintain custom components names that are used on layouts XML: -keep public class * extends android.view.View { public <init>(android.content.Context); public <init>(android.content.Context, android.util.AttributeSet); public <init>(android.content.Context, android.util.AttributeSet, int); public void set*(...); } -keepclasseswithmembers class * { public <init>(android.content.Context, android.util.AttributeSet); } -keepclasseswithmembers class * { public <init>(android.content.Context, android.util.AttributeSet, int); } #Maintain enums -keepclassmembers enum * { public static **[] values(); public static ** valueOf(java.lang.String); } #To keep parcelable classes (to serialize - deserialize objects to sent through Intents) -keep class * implements android.os.Parcelable { public static final android.os.Parcelable$Creator *; } -keep class com.squareup.okhttp.** { *;} -dontwarn okio.** -dontwarn retrofit2.** -keep class retrofit2.** { *; } -keepattributes Signature -keepattributes Exceptions # -keepclassmembers class **.R$* { public static <fields>; } #rxjava -dontwarn sun.misc.** -keepclassmembers class rx.internal.util.unsafe.*ArrayQueue*Field* { long producerIndex; long consumerIndex; } -keepclassmembers class rx.internal.util.unsafe.BaseLinkedQueueProducerNodeRef { rx.internal.util.atomic.LinkedQueueNode producerNode; } -keepclassmembers class rx.internal.util.unsafe.BaseLinkedQueueConsumerNodeRef { rx.internal.util.atomic.LinkedQueueNode consumerNode; } #ButterKnife -keep class butterknife.** { *; } -dontwarn butterknife.internal.** -keep class **$$ViewBinder { *; } -keep class com.github.mikephil.charting.** { *; } -dontwarn com.github.mikephil.charting.** ```
/content/code_sandbox/app/proguard-rules.pro
qmake
2016-05-18T03:51:41
2024-08-09T09:00:29
StockChart
AndroidJiang/StockChart
1,086
1,101
```java package com.example.yanjiang.stockchart; import org.junit.Test; import static org.junit.Assert.*; /** * To work on unit tests, switch the Test Artifact in the Build Variants view. */ public class ExampleUnitTest { @Test public void addition_isCorrect() throws Exception { assertEquals(4, 2 + 2); } } ```
/content/code_sandbox/app/src/test/java/com/example/yanjiang/stockchart/ExampleUnitTest.java
java
2016-05-18T03:51:41
2024-08-09T09:00:29
StockChart
AndroidJiang/StockChart
1,086
74
```java package com.example.yanjiang.stockchart; import android.content.Intent; import android.os.Bundle; import android.view.View; import android.widget.Button; import butterknife.Bind; import butterknife.ButterKnife; import butterknife.OnClick; public class MainActivity extends BaseActivity { @Bind(R.id.btn) Button btn; @Bind(R.id.btn_k) Button btnK; @Bind(R.id.btn_fix) Button btnFix; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_main); ButterKnife.bind(this); /* Intent intent = new Intent(MainActivity.this, KLineActivity.class); startActivity(intent);*/ } @OnClick({R.id.btn, R.id.btn_k,R.id.btn_fix}) public void onClick(View view) { switch (view.getId()) { case R.id.btn: Intent intent = new Intent(MainActivity.this, MinutesActivity.class); startActivity(intent); break; case R.id.btn_k: Intent intentK = new Intent(MainActivity.this, KLineActivity.class); startActivity(intentK); break; } } } ```
/content/code_sandbox/app/src/main/java/com/example/yanjiang/stockchart/MainActivity.java
java
2016-05-18T03:51:41
2024-08-09T09:00:29
StockChart
AndroidJiang/StockChart
1,086
230
```java package com.example.yanjiang.stockchart; import android.app.Activity; import android.content.SharedPreferences; import android.os.Bundle; import android.widget.Toast; import com.example.yanjiang.stockchart.api.ClientApi; import com.example.yanjiang.stockchart.application.App; import com.example.yanjiang.stockchart.inject.component.ActivityComponent; import com.example.yanjiang.stockchart.inject.component.DaggerActivityComponent; import com.example.yanjiang.stockchart.inject.modules.ActivityModule; import javax.inject.Inject; import butterknife.ButterKnife; import rx.subscriptions.CompositeSubscription; public class BaseActivity extends Activity { public final String TAG =this.getClass().getSimpleName(); protected CompositeSubscription mCompositeSubscription; protected Activity activity; protected Toast mToast = null; protected ActivityComponent activityComponent; @Inject public ClientApi clientApi; @Inject public SharedPreferences sharedPreferences; /*@Inject Activity activity;*/ @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); activity = this; mCompositeSubscription = new CompositeSubscription(); activityComponent = DaggerActivityComponent.builder() .appComponent(((App) getApplication()).getApplicationComponent()) .activityModule(new ActivityModule(this)) .build(); activityComponent.inject(this); } @Override public void setContentView(int layoutResID) { super.setContentView(layoutResID); ButterKnife.bind(this); } public void showToast(String content) { if (mToast == null) { mToast = Toast.makeText(this, content, Toast.LENGTH_SHORT); } else { mToast.setText(content); } mToast.show(); } @Override protected void onResume() { super.onResume(); } @Override protected void onDestroy() { super.onDestroy(); ButterKnife.unbind(BaseActivity.this); if (mCompositeSubscription.hasSubscriptions()) { mCompositeSubscription.unsubscribe(); } } } ```
/content/code_sandbox/app/src/main/java/com/example/yanjiang/stockchart/BaseActivity.java
java
2016-05-18T03:51:41
2024-08-09T09:00:29
StockChart
AndroidJiang/StockChart
1,086
398
```java package com.example.yanjiang.stockchart; import android.graphics.Color; import android.graphics.Matrix; import android.os.Bundle; import android.os.Handler; import android.os.Message; import android.support.annotation.NonNull; import android.util.Log; import com.example.yanjiang.stockchart.api.ConstantTest; import com.example.yanjiang.stockchart.bean.DataParse; import com.example.yanjiang.stockchart.bean.KLineBean; import com.example.yanjiang.stockchart.mychart.CoupleChartGestureListener; import com.example.yanjiang.stockchart.rxutils.MyUtils; import com.example.yanjiang.stockchart.rxutils.VolFormatter; import com.github.mikephil.charting.charts.BarChart; import com.github.mikephil.charting.charts.Chart; import com.github.mikephil.charting.charts.CombinedChart; import com.github.mikephil.charting.components.Legend; import com.github.mikephil.charting.components.XAxis; import com.github.mikephil.charting.components.YAxis; import com.github.mikephil.charting.data.BarData; import com.github.mikephil.charting.data.BarDataSet; import com.github.mikephil.charting.data.BarEntry; import com.github.mikephil.charting.data.CandleData; import com.github.mikephil.charting.data.CandleDataSet; import com.github.mikephil.charting.data.CandleEntry; import com.github.mikephil.charting.data.CombinedData; import com.github.mikephil.charting.data.Entry; import com.github.mikephil.charting.data.LineData; import com.github.mikephil.charting.data.LineDataSet; import com.github.mikephil.charting.highlight.Highlight; import com.github.mikephil.charting.interfaces.datasets.ILineDataSet; import com.github.mikephil.charting.listener.BarLineChartTouchListener; import com.github.mikephil.charting.listener.OnChartValueSelectedListener; import com.github.mikephil.charting.utils.Utils; import com.github.mikephil.charting.utils.ViewPortHandler; import org.json.JSONException; import org.json.JSONObject; import java.util.ArrayList; import butterknife.Bind; import butterknife.ButterKnife; 
public class KLineActivity extends BaseActivity { @Bind(R.id.combinedchart) CombinedChart combinedchart; @Bind(R.id.barchart) BarChart barChart; private DataParse mData; private ArrayList<KLineBean> kLineDatas; XAxis xAxisBar, xAxisK; YAxis axisLeftBar, axisLeftK; YAxis axisRightBar, axisRightK; BarDataSet barDataSet; private BarLineChartTouchListener mChartTouchListener; private CoupleChartGestureListener coupleChartGestureListener; float sum = 0; private Handler handler = new Handler() { @Override public void handleMessage(Message msg) { barChart.setAutoScaleMinMaxEnabled(true); combinedchart.setAutoScaleMinMaxEnabled(true); combinedchart.notifyDataSetChanged(); barChart.notifyDataSetChanged(); combinedchart.invalidate(); barChart.invalidate(); } }; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_kline); ButterKnife.bind(this); initChart(); getOffLineData(); } private void getOffLineData() { /**/ mData = new DataParse(); JSONObject object = null; try { object = new JSONObject(ConstantTest.KLINEURL); } catch (JSONException e) { e.printStackTrace(); } mData.parseKLine(object); mData.getKLineDatas(); setData(mData); } private void initChart() { barChart.setDrawBorders(true); barChart.setBorderWidth(1); barChart.setBorderColor(getResources().getColor(R.color.minute_grayLine)); barChart.setDescription(""); barChart.setDragEnabled(true); barChart.setScaleYEnabled(false); Legend barChartLegend = barChart.getLegend(); barChartLegend.setEnabled(false); //BarYAxisFormatter barYAxisFormatter=new BarYAxisFormatter(); //bar x y xAxisBar = barChart.getXAxis(); xAxisBar.setDrawLabels(true); xAxisBar.setDrawGridLines(false); xAxisBar.setDrawAxisLine(false); xAxisBar.setTextColor(getResources().getColor(R.color.minute_zhoutv)); xAxisBar.setPosition(XAxis.XAxisPosition.BOTTOM); xAxisBar.setGridColor(getResources().getColor(R.color.minute_grayLine)); axisLeftBar = barChart.getAxisLeft(); 
axisLeftBar.setAxisMinValue(0); axisLeftBar.setDrawGridLines(false); axisLeftBar.setDrawAxisLine(false); axisLeftBar.setTextColor(getResources().getColor(R.color.minute_zhoutv)); axisLeftBar.setDrawLabels(true); axisLeftBar.setSpaceTop(0); axisLeftBar.setShowOnlyMinMax(true); axisRightBar = barChart.getAxisRight(); axisRightBar.setDrawLabels(false); axisRightBar.setDrawGridLines(false); axisRightBar.setDrawAxisLine(false); /****************************************************************/ combinedchart.setDrawBorders(true); combinedchart.setBorderWidth(1); combinedchart.setBorderColor(getResources().getColor(R.color.minute_grayLine)); combinedchart.setDescription(""); combinedchart.setDragEnabled(true); combinedchart.setScaleYEnabled(false); Legend combinedchartLegend = combinedchart.getLegend(); combinedchartLegend.setEnabled(false); //bar x y xAxisK = combinedchart.getXAxis(); xAxisK.setDrawLabels(true); xAxisK.setDrawGridLines(false); xAxisK.setDrawAxisLine(false); xAxisK.setTextColor(getResources().getColor(R.color.minute_zhoutv)); xAxisK.setPosition(XAxis.XAxisPosition.BOTTOM); xAxisK.setGridColor(getResources().getColor(R.color.minute_grayLine)); axisLeftK = combinedchart.getAxisLeft(); axisLeftK.setDrawGridLines(true); axisLeftK.setDrawAxisLine(false); axisLeftK.setDrawLabels(true); axisLeftK.setTextColor(getResources().getColor(R.color.minute_zhoutv)); axisLeftK.setGridColor(getResources().getColor(R.color.minute_grayLine)); axisLeftK.setPosition(YAxis.YAxisLabelPosition.OUTSIDE_CHART); axisRightK = combinedchart.getAxisRight(); axisRightK.setDrawLabels(false); axisRightK.setDrawGridLines(true); axisRightK.setDrawAxisLine(false); axisRightK.setGridColor(getResources().getColor(R.color.minute_grayLine)); combinedchart.setDragDecelerationEnabled(true); barChart.setDragDecelerationEnabled(true); combinedchart.setDragDecelerationFrictionCoef(0.2f); barChart.setDragDecelerationFrictionCoef(0.2f); // K combinedchart.setOnChartGestureListener(new 
CoupleChartGestureListener(combinedchart, new Chart[]{barChart})); // K barChart.setOnChartGestureListener(new CoupleChartGestureListener(barChart, new Chart[]{combinedchart})); barChart.setOnChartValueSelectedListener(new OnChartValueSelectedListener() { @Override public void onValueSelected(Entry e, int dataSetIndex, Highlight h) { Log.e("%%%%", h.getXIndex() + ""); combinedchart.highlightValues(new Highlight[]{h}); } @Override public void onNothingSelected() { combinedchart.highlightValue(null); } }); combinedchart.setOnChartValueSelectedListener(new OnChartValueSelectedListener() { @Override public void onValueSelected(Entry e, int dataSetIndex, Highlight h) { barChart.highlightValues(new Highlight[]{h}); } @Override public void onNothingSelected() { barChart.highlightValue(null); } }); } private float getSum(Integer a, Integer b) { for (int i = a; i <= b; i++) { sum += mData.getKLineDatas().get(i).close; } return sum; } private float culcMaxscale(float count) { float max = 1; max = count / 127 * 5; return max; } private void setData(DataParse mData) { kLineDatas = mData.getKLineDatas(); int size = kLineDatas.size(); // // axisLeftBar.setAxisMaxValue(mData.getVolmax()); String unit = MyUtils.getVolUnit(mData.getVolmax()); int u = 1; if ("".equals(unit)) { u = 4; } else if ("".equals(unit)) { u = 8; } axisLeftBar.setValueFormatter(new VolFormatter((int) Math.pow(10, u))); // axisRightBar.setAxisMaxValue(mData.getVolmax()); Log.e("@@@", mData.getVolmax() + "da"); ArrayList<String> xVals = new ArrayList<>(); ArrayList<BarEntry> barEntries = new ArrayList<>(); ArrayList<CandleEntry> candleEntries = new ArrayList<>(); ArrayList<Entry> line5Entries = new ArrayList<>(); ArrayList<Entry> line10Entries = new ArrayList<>(); ArrayList<Entry> line30Entries = new ArrayList<>(); for (int i = 0, j = 0; i < mData.getKLineDatas().size(); i++, j++) { xVals.add(mData.getKLineDatas().get(i).date + ""); barEntries.add(new BarEntry(mData.getKLineDatas().get(i).vol, i)); 
candleEntries.add(new CandleEntry(i, mData.getKLineDatas().get(i).high, mData.getKLineDatas().get(i).low, mData.getKLineDatas().get(i).open, mData.getKLineDatas().get(i).close)); if (i >= 4) { sum = 0; line5Entries.add(new Entry(getSum(i - 4, i) / 5, i)); } if (i >= 9) { sum = 0; line10Entries.add(new Entry(getSum(i - 9, i) / 10, i)); } if (i >= 29) { sum = 0; line30Entries.add(new Entry(getSum(i - 29, i) / 30, i)); } } barDataSet = new BarDataSet(barEntries, ""); barDataSet.setBarSpacePercent(50); //bar barDataSet.setHighlightEnabled(true); barDataSet.setHighLightAlpha(255); barDataSet.setHighLightColor(Color.WHITE); barDataSet.setDrawValues(false); barDataSet.setColor(Color.RED); BarData barData = new BarData(xVals, barDataSet); barChart.setData(barData); final ViewPortHandler viewPortHandlerBar = barChart.getViewPortHandler(); viewPortHandlerBar.setMaximumScaleX(culcMaxscale(xVals.size())); Matrix touchmatrix = viewPortHandlerBar.getMatrixTouch(); final float xscale = 3; touchmatrix.postScale(xscale, 1f); CandleDataSet candleDataSet = new CandleDataSet(candleEntries, "KLine"); candleDataSet.setDrawHorizontalHighlightIndicator(false); candleDataSet.setHighlightEnabled(true); candleDataSet.setHighLightColor(Color.WHITE); candleDataSet.setValueTextSize(10f); candleDataSet.setDrawValues(false); candleDataSet.setColor(Color.RED); candleDataSet.setShadowWidth(1f); candleDataSet.setAxisDependency(YAxis.AxisDependency.LEFT); CandleData candleData = new CandleData(xVals, candleDataSet); ArrayList<ILineDataSet> sets = new ArrayList<>(); /******MA0******************************/ if(size>=30){ sets.add(setMaLine(5, xVals, line5Entries)); sets.add(setMaLine(10, xVals, line10Entries)); sets.add(setMaLine(30, xVals, line30Entries)); }else if (size>=10&&size<30){ sets.add(setMaLine(5, xVals, line5Entries)); sets.add(setMaLine(10, xVals, line10Entries)); }else if (size>=5&&size<10) { sets.add(setMaLine(5, xVals, line5Entries)); } CombinedData combinedData = new 
CombinedData(xVals); LineData lineData = new LineData(xVals, sets); combinedData.setData(candleData); combinedData.setData(lineData); combinedchart.setData(combinedData); combinedchart.moveViewToX(mData.getKLineDatas().size() - 1); final ViewPortHandler viewPortHandlerCombin = combinedchart.getViewPortHandler(); viewPortHandlerCombin.setMaximumScaleX(culcMaxscale(xVals.size())); Matrix matrixCombin = viewPortHandlerCombin.getMatrixTouch(); final float xscaleCombin = 3; matrixCombin.postScale(xscaleCombin, 1f); combinedchart.moveViewToX(mData.getKLineDatas().size() - 1); barChart.moveViewToX(mData.getKLineDatas().size() - 1); setOffset(); /**************************************************************************************** CombinedChartDemokybug (bugchenguang79) ****************************************************************************************/ handler.sendEmptyMessageDelayed(0, 300); } @NonNull private LineDataSet setMaLine(int ma, ArrayList<String> xVals, ArrayList<Entry> lineEntries) { LineDataSet lineDataSetMa = new LineDataSet(lineEntries, "ma" + ma); if (ma == 5) { lineDataSetMa.setHighlightEnabled(true); lineDataSetMa.setDrawHorizontalHighlightIndicator(false); lineDataSetMa.setHighLightColor(Color.WHITE); } else {/**/ lineDataSetMa.setHighlightEnabled(false); } lineDataSetMa.setDrawValues(false); if (ma == 5) { lineDataSetMa.setColor(Color.GREEN); } else if (ma == 10) { lineDataSetMa.setColor(Color.GRAY); } else { lineDataSetMa.setColor(Color.YELLOW); } lineDataSetMa.setLineWidth(1f); lineDataSetMa.setDrawCircles(false); lineDataSetMa.setAxisDependency(YAxis.AxisDependency.LEFT); return lineDataSetMa; } /**/ private void setOffset() { float lineLeft = combinedchart.getViewPortHandler().offsetLeft(); float barLeft = barChart.getViewPortHandler().offsetLeft(); float lineRight = combinedchart.getViewPortHandler().offsetRight(); float barRight = barChart.getViewPortHandler().offsetRight(); float barBottom = barChart.getViewPortHandler().offsetBottom(); 
float offsetLeft, offsetRight; float transLeft = 0, transRight = 0; /*setExtraLeft...AoffLeftA=20dp,BoffLeftB=30dp,A.setExtraLeftOffset(10),30*/ if (barLeft < lineLeft) { /* offsetLeft = Utils.convertPixelsToDp(lineLeft - barLeft); barChart.setExtraLeftOffset(offsetLeft);*/ transLeft = lineLeft; } else { offsetLeft = Utils.convertPixelsToDp(barLeft - lineLeft); combinedchart.setExtraLeftOffset(offsetLeft); transLeft = barLeft; } /*setExtraRight...AoffRightA=20dp,BoffRightB=30dp,A.setExtraLeftOffset(30),10*/ if (barRight < lineRight) { /* offsetRight = Utils.convertPixelsToDp(lineRight); barChart.setExtraRightOffset(offsetRight);*/ transRight = lineRight; } else { offsetRight = Utils.convertPixelsToDp(barRight); combinedchart.setExtraRightOffset(offsetRight); transRight = barRight; } barChart.setViewPortOffsets(transLeft, 15, transRight, barBottom); } } ```
/content/code_sandbox/app/src/main/java/com/example/yanjiang/stockchart/KLineActivity.java
java
2016-05-18T03:51:41
2024-08-09T09:00:29
StockChart
AndroidJiang/StockChart
1,086
3,321
```java package com.example.yanjiang.stockchart; import android.graphics.Color; import android.os.Bundle; import android.text.TextUtils; import android.util.Log; import android.util.SparseArray; import com.example.yanjiang.stockchart.api.ConstantTest; import com.example.yanjiang.stockchart.bean.DataParse; import com.example.yanjiang.stockchart.bean.MinutesBean; import com.example.yanjiang.stockchart.mychart.MyBarChart; import com.example.yanjiang.stockchart.mychart.MyBottomMarkerView; import com.example.yanjiang.stockchart.mychart.MyLeftMarkerView; import com.example.yanjiang.stockchart.mychart.MyLineChart; import com.example.yanjiang.stockchart.mychart.MyRightMarkerView; import com.example.yanjiang.stockchart.mychart.MyXAxis; import com.example.yanjiang.stockchart.mychart.MyYAxis; import com.example.yanjiang.stockchart.rxutils.VolFormatter; import com.example.yanjiang.stockchart.rxutils.MyUtils; import com.example.yanjiang.stockchart.rxutils.SchedulersCompat; import com.github.mikephil.charting.components.Legend; import com.github.mikephil.charting.components.LimitLine; import com.github.mikephil.charting.components.XAxis; import com.github.mikephil.charting.components.YAxis; import com.github.mikephil.charting.data.BarData; import com.github.mikephil.charting.data.BarDataSet; import com.github.mikephil.charting.data.BarEntry; import com.github.mikephil.charting.data.Entry; import com.github.mikephil.charting.data.LineData; import com.github.mikephil.charting.data.LineDataSet; import com.github.mikephil.charting.formatter.YAxisValueFormatter; import com.github.mikephil.charting.highlight.Highlight; import com.github.mikephil.charting.interfaces.datasets.ILineDataSet; import com.github.mikephil.charting.listener.OnChartValueSelectedListener; import com.github.mikephil.charting.utils.Utils; import org.json.JSONException; import org.json.JSONObject; import java.io.IOException; import java.text.DecimalFormat; import java.util.ArrayList; import java.util.List; import 
butterknife.Bind; import butterknife.ButterKnife; import okhttp3.ResponseBody; import rx.Subscriber; import rx.Subscription; public class MinutesActivity extends BaseActivity { @Bind(R.id.line_chart) MyLineChart lineChart; @Bind(R.id.bar_chart) MyBarChart barChart; private Subscription subscriptionMinute; private LineDataSet d1, d2; MyXAxis xAxisLine; MyYAxis axisRightLine; MyYAxis axisLeftLine; BarDataSet barDataSet; MyXAxis xAxisBar; MyYAxis axisLeftBar; MyYAxis axisRightBar; SparseArray<String> stringSparseArray; private DataParse mData; Integer sum = 0; List<Integer> listA, listB; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_minutes); ButterKnife.bind(this); initChart(); stringSparseArray = setXLabels(); /**/ //getMinutesData(); /**/ getOffLineData(); lineChart.setOnChartValueSelectedListener(new OnChartValueSelectedListener() { @Override public void onValueSelected(Entry e, int dataSetIndex, Highlight h) { // barChart.setHighlightValue(new Highlight(h.getXIndex(), 0)); barChart.highlightValue(new Highlight(h.getXIndex(), 0)); // lineChart.setHighlightValue(h); } @Override public void onNothingSelected() { barChart.highlightValue(null); } }); barChart.setOnChartValueSelectedListener(new OnChartValueSelectedListener() { @Override public void onValueSelected(Entry e, int dataSetIndex, Highlight h) { lineChart.highlightValue(new Highlight(h.getXIndex(), 0)); // lineChart.setHighlightValue(new Highlight(h.getXIndex(), 0));//highlightBValues //barChart.setHighlightValue(h); } @Override public void onNothingSelected() { lineChart.highlightValue(null); } }); /**/ /*listA = new ArrayList<>(); listB = new ArrayList<>(); for (int i = 0; i < 100; i++) { listA.add(i, i); } for (int i = 0; i < 100; i++) { if (i >= 4) { sum = 0; listB.add(i, fund(i - 4, i)); } else { listB.add(i, 0); } } for (int i = 0; i < 100; i++) { Log.e("OUT", listB.get(i) + ""); }*/ } private Integer fund(Integer a, 
Integer b) { for (int i = a; i <= b; i++) { sum += listA.get(i); } return sum; } private void initChart() { lineChart.setScaleEnabled(false); lineChart.setDrawBorders(true); lineChart.setBorderWidth(1); lineChart.setBorderColor(getResources().getColor(R.color.minute_grayLine)); lineChart.setDescription(""); Legend lineChartLegend = lineChart.getLegend(); lineChartLegend.setEnabled(false); barChart.setScaleEnabled(false); barChart.setDrawBorders(true); barChart.setBorderWidth(1); barChart.setBorderColor(getResources().getColor(R.color.minute_grayLine)); barChart.setDescription(""); Legend barChartLegend = barChart.getLegend(); barChartLegend.setEnabled(false); //x xAxisLine = lineChart.getXAxis(); xAxisLine.setDrawLabels(true); xAxisLine.setPosition(XAxis.XAxisPosition.BOTTOM); // xAxisLine.setLabelsToSkip(59); //y axisLeftLine = lineChart.getAxisLeft(); /*ybasevalue*/ axisLeftLine.setLabelCount(5, true); axisLeftLine.setDrawLabels(true); axisLeftLine.setDrawGridLines(false); /* border*/ axisLeftLine.setDrawAxisLine(false); //y axisRightLine = lineChart.getAxisRight(); axisRightLine.setLabelCount(2, true); axisRightLine.setDrawLabels(true); axisRightLine.setValueFormatter(new YAxisValueFormatter() { @Override public String getFormattedValue(float value, YAxis yAxis) { DecimalFormat mFormat = new DecimalFormat("#0.00%"); return mFormat.format(value); } }); axisRightLine.setStartAtZero(false); axisRightLine.setDrawGridLines(false); axisRightLine.setDrawAxisLine(false); // xAxisLine.setGridColor(getResources().getColor(R.color.minute_grayLine)); xAxisLine.enableGridDashedLine(10f,5f,0f); xAxisLine.setAxisLineColor(getResources().getColor(R.color.minute_grayLine)); xAxisLine.setTextColor(getResources().getColor(R.color.minute_zhoutv)); axisLeftLine.setGridColor(getResources().getColor(R.color.minute_grayLine)); axisLeftLine.setTextColor(getResources().getColor(R.color.minute_zhoutv)); axisRightLine.setAxisLineColor(getResources().getColor(R.color.minute_grayLine)); 
axisRightLine.setTextColor(getResources().getColor(R.color.minute_zhoutv)); //bar x y xAxisBar = barChart.getXAxis(); xAxisBar.setDrawLabels(false); xAxisBar.setDrawGridLines(true); xAxisBar.setDrawAxisLine(false); // xAxisBar.setPosition(XAxis.XAxisPosition.BOTTOM); xAxisBar.setGridColor(getResources().getColor(R.color.minute_grayLine)); axisLeftBar = barChart.getAxisLeft(); axisLeftBar.setAxisMinValue(0); axisLeftBar.setDrawGridLines(false); axisLeftBar.setDrawAxisLine(false); axisLeftBar.setTextColor(getResources().getColor(R.color.minute_zhoutv)); axisRightBar = barChart.getAxisRight(); axisRightBar.setDrawLabels(false); axisRightBar.setDrawGridLines(false); axisRightBar.setDrawAxisLine(false); //y this.axisLeftLine.setValueFormatter(new YAxisValueFormatter() { @Override public String getFormattedValue(float value, YAxis yAxis) { DecimalFormat mFormat = new DecimalFormat("#0.00"); return mFormat.format(value); } }); } private void setData(DataParse mData) { setMarkerView(mData); setShowLabels(stringSparseArray); Log.e("###", mData.getDatas().size() + "ee"); if (mData.getDatas().size() == 0) { lineChart.setNoDataText(""); return; } //y axisLeftLine.setAxisMinValue(mData.getMin()); axisLeftLine.setAxisMaxValue(mData.getMax()); axisRightLine.setAxisMinValue(mData.getPercentMin()); axisRightLine.setAxisMaxValue(mData.getPercentMax()); axisLeftBar.setAxisMaxValue(mData.getVolmax()); /**/ String unit = MyUtils.getVolUnit(mData.getVolmax()); int u = 1; if ("".equals(unit)) { u = 4; } else if ("".equals(unit)) { u = 8; } /**/ axisLeftBar.setValueFormatter(new VolFormatter((int) Math.pow(10, u))); axisLeftBar.setShowMaxAndUnit(unit); axisLeftBar.setDrawLabels(true); //axisLeftBar.setAxisMinValue(0);//0 axisLeftBar.setShowOnlyMinMax(true); axisRightBar.setAxisMaxValue(mData.getVolmax()); // axisRightBar.setAxisMinValue(mData.getVolmin);//0 //axisRightBar.setShowOnlyMinMax(true); // LimitLine ll = new LimitLine(0); ll.setLineWidth(1f); 
ll.setLineColor(getResources().getColor(R.color.minute_jizhun)); ll.enableDashedLine(10f, 10f, 0f); ll.setLineWidth(1); axisRightLine.addLimitLine(ll); axisRightLine.setBaseValue(0); ArrayList<Entry> lineCJEntries = new ArrayList<>(); ArrayList<Entry> lineJJEntries = new ArrayList<>(); ArrayList<String> dateList = new ArrayList<>(); ArrayList<BarEntry> barEntries = new ArrayList<>(); ArrayList<String> xVals = new ArrayList<>(); Log.e("##", Integer.toString(xVals.size())); for (int i = 0, j = 0; i < mData.getDatas().size(); i++, j++) { /* //skip if (mData.getDatas().get(i).time.equals("13:30")) { continue; }*/ MinutesBean t = mData.getDatas().get(j); if (t == null) { lineCJEntries.add(new Entry(Float.NaN, i)); lineJJEntries.add(new Entry(Float.NaN, i)); barEntries.add(new BarEntry(Float.NaN, i)); continue; } if (!TextUtils.isEmpty(stringSparseArray.get(i)) && stringSparseArray.get(i).contains("/")) { i++; } lineCJEntries.add(new Entry(mData.getDatas().get(i).cjprice, i)); lineJJEntries.add(new Entry(mData.getDatas().get(i).avprice, i)); barEntries.add(new BarEntry(mData.getDatas().get(i).cjnum, i)); // dateList.add(mData.getDatas().get(i).time); } d1 = new LineDataSet(lineCJEntries, ""); d2 = new LineDataSet(lineJJEntries, ""); d1.setDrawValues(false); d2.setDrawValues(false); barDataSet = new BarDataSet(barEntries, ""); d1.setCircleRadius(0); d2.setCircleRadius(0); d1.setColor(getResources().getColor(R.color.minute_blue)); d2.setColor(getResources().getColor(R.color.minute_yellow)); d1.setHighLightColor(Color.WHITE); d2.setHighlightEnabled(false); d1.setDrawFilled(true); barDataSet.setBarSpacePercent(50); //bar barDataSet.setHighLightColor(Color.WHITE); barDataSet.setHighLightAlpha(255); barDataSet.setDrawValues(false); barDataSet.setHighlightEnabled(true); barDataSet.setColor(Color.RED); List<Integer> list=new ArrayList<>(); list.add(Color.RED); list.add(Color.GREEN); barDataSet.setColors(list); // d1.setAxisDependency(YAxis.AxisDependency.LEFT); // 
d2.setAxisDependency(YAxis.AxisDependency.RIGHT); ArrayList<ILineDataSet> sets = new ArrayList<>(); sets.add(d1); sets.add(d2); /*LineDataChartDataif*/ LineData cd = new LineData(getMinutesCount(), sets); lineChart.setData(cd); BarData barData = new BarData(getMinutesCount(), barDataSet); barChart.setData(barData); setOffset(); lineChart.invalidate();// barChart.invalidate(); } private void getMinutesData() { String code = "sz002081"; subscriptionMinute = clientApi.getMinutes(code) .compose(SchedulersCompat.<ResponseBody>applyIoSchedulers()) .subscribe(new Subscriber<ResponseBody>() { @Override public void onCompleted() { } @Override public void onError(Throwable e) { showToast("" + e.toString()); } @Override public void onNext(ResponseBody minutes) { mData = new DataParse(); JSONObject object = null; try { object = new JSONObject(minutes.string()); } catch (JSONException e) { e.printStackTrace(); } catch (IOException e) { e.printStackTrace(); } mData.parseMinutes(object); setData(mData); } }); mCompositeSubscription.add(subscriptionMinute); } private void getOffLineData() { /**/ mData = new DataParse(); JSONObject object = null; try { object = new JSONObject(ConstantTest.MINUTESURL); } catch (JSONException e) { e.printStackTrace(); } mData.parseMinutes(object); setData(mData); } private SparseArray<String> setXLabels() { SparseArray<String> xLabels = new SparseArray<>(); xLabels.put(0, "09:30"); xLabels.put(60, "10:30"); xLabels.put(121, "11:30/13:00"); xLabels.put(182, "14:00"); xLabels.put(241, "15:00"); return xLabels; } /**/ private void setOffset() { float lineLeft = lineChart.getViewPortHandler().offsetLeft(); float barLeft = barChart.getViewPortHandler().offsetLeft(); float lineRight = lineChart.getViewPortHandler().offsetRight(); float barRight = barChart.getViewPortHandler().offsetRight(); float barBottom = barChart.getViewPortHandler().offsetBottom(); float offsetLeft, offsetRight; float transLeft = 0, transRight = 0; 
/*setExtraLeft...AoffLeftA=20dp,BoffLeftB=30dp,A.setExtraLeftOffset(10),30*/ if (barLeft < lineLeft) { //offsetLeft = Utils.convertPixelsToDp(lineLeft - barLeft); // barChart.setExtraLeftOffset(offsetLeft); transLeft = lineLeft; } else { offsetLeft = Utils.convertPixelsToDp(barLeft - lineLeft); lineChart.setExtraLeftOffset(offsetLeft); transLeft = barLeft; } /*setExtraRight...AoffRightA=20dp,BoffRightB=30dp,A.setExtraLeftOffset(30),10*/ if (barRight < lineRight) { //offsetRight = Utils.convertPixelsToDp(lineRight); //barChart.setExtraRightOffset(offsetRight); transRight = lineRight; } else { offsetRight = Utils.convertPixelsToDp(barRight); lineChart.setExtraRightOffset(offsetRight); transRight = barRight; } barChart.setViewPortOffsets(transLeft, 5, transRight, barBottom); } public void setShowLabels(SparseArray<String> labels) { xAxisLine.setXLabels(labels); xAxisBar.setXLabels(labels); } public String[] getMinutesCount() { return new String[242]; } private void setMarkerView(DataParse mData) { MyLeftMarkerView leftMarkerView = new MyLeftMarkerView(MinutesActivity.this, R.layout.mymarkerview); MyRightMarkerView rightMarkerView = new MyRightMarkerView(MinutesActivity.this, R.layout.mymarkerview); MyBottomMarkerView bottomMarkerView = new MyBottomMarkerView(MinutesActivity.this, R.layout.mymarkerview); lineChart.setMarker(leftMarkerView, rightMarkerView,bottomMarkerView, mData); barChart.setMarker(leftMarkerView, rightMarkerView,bottomMarkerView, mData); } } ```
/content/code_sandbox/app/src/main/java/com/example/yanjiang/stockchart/MinutesActivity.java
java
2016-05-18T03:51:41
2024-08-09T09:00:29
StockChart
AndroidJiang/StockChart
1,086
3,616
```java package com.example.yanjiang.stockchart.application; import android.app.Application; import com.example.yanjiang.stockchart.BuildConfig; import com.example.yanjiang.stockchart.inject.component.AppComponent; import com.example.yanjiang.stockchart.inject.component.DaggerAppComponent; import com.example.yanjiang.stockchart.inject.modules.AppModule; import com.squareup.leakcanary.LeakCanary; import org.greenrobot.eventbus.EventBus; public class App extends Application { private static final int SHOW_TIME_MIN = 1000; private static App mApp; private static EventBus sBus; private AppComponent applicationComponent; @Override public void onCreate() { super.onCreate(); if (BuildConfig.DEBUG) { LeakCanary.install(this); } initComponent(); mApp=this; sBus = EventBus.getDefault(); } public static App getApp() { return mApp; } private void initComponent() { applicationComponent = DaggerAppComponent.builder().appModule(new AppModule(this)).build(); applicationComponent.inject(this); } public AppComponent getApplicationComponent() { return applicationComponent; } public static EventBus getBus() { return sBus; } } ```
/content/code_sandbox/app/src/main/java/com/example/yanjiang/stockchart/application/App.java
java
2016-05-18T03:51:41
2024-08-09T09:00:29
StockChart
AndroidJiang/StockChart
1,086
252
```java package com.example.yanjiang.stockchart; /** * authorajiang * mail1025065158@qq.com * blogpath_to_url */ public class Test { /* private ArrayList<Integer> listA, listB; private int sum; public static void main(String[] args) { Test test=new Test(); test.listA = new ArrayList<>(); test.listB = new ArrayList<>(); for (int i = 0; i < 10; i++) { test.listA.add(i, i); } for (int i = 0; i < 10; i++) { if (i >= 4) { test.sum = 0; test.listB.add(i, test.fund(i - 4, i)); } else { test.listB.add(i, 0); } } for (int i = 0; i < 10; i++) { System.out.print(test.listB.get(i) + " "); } } public Integer fund(Integer a, Integer b) { for (int i = a; i <= b; i++) { sum += listA.get(i); } return sum; } */ /*public boolean foo(char c) { System.out.print(c); return true; } public static void main(String[] argv) { Test test=new Test(); int i = 0; for (test.foo('A'); test.foo('B') && (i < 2); test.foo('C')) { i++; test.foo('D'); } }*/ /*public static void main(String[] argv) { int[] a={2,3,1,6,4}; for(int i=0;i<a.length-1;i++){ for(int j=0;j<a.length-i-1;j++){ if(a[j]>a[j+1]){ int temp=a[j+1]; a[j+1]=a[j]; a[j]=temp; } } } for (int i:a) { System.out.print(i+","); } }*/ } ```
/content/code_sandbox/app/src/main/java/com/example/yanjiang/stockchart/Test.java
java
2016-05-18T03:51:41
2024-08-09T09:00:29
StockChart
AndroidJiang/StockChart
1,086
457
```java package com.example.yanjiang.stockchart.bean; public class MinutesBean { public String time; public float cjprice; public float cjnum; public float avprice = Float.NaN; public float per; public float cha; public float total; public int color = 0xff000000; } ```
/content/code_sandbox/app/src/main/java/com/example/yanjiang/stockchart/bean/MinutesBean.java
java
2016-05-18T03:51:41
2024-08-09T09:00:29
StockChart
AndroidJiang/StockChart
1,086
69
```java package com.example.yanjiang.stockchart.bean; /** * authorajiang * mail1025065158@qq.com * blogpath_to_url */ public class KLineBean { public String date; public float open; public float close; public float high; public float low; public float vol; } ```
/content/code_sandbox/app/src/main/java/com/example/yanjiang/stockchart/bean/KLineBean.java
java
2016-05-18T03:51:41
2024-08-09T09:00:29
StockChart
AndroidJiang/StockChart
1,086
70
```java package com.example.yanjiang.stockchart.service; import android.app.IntentService; import android.content.Context; import android.content.Intent; import android.os.Environment; import android.util.Log; import com.example.yanjiang.stockchart.api.Constant; import com.example.yanjiang.stockchart.api.DownLoadApi; import com.example.yanjiang.stockchart.application.App; import com.example.yanjiang.stockchart.event.ProgressUpdateEvent; import com.example.yanjiang.stockchart.inject.component.DaggerServiceComponent; import com.example.yanjiang.stockchart.inject.component.ServiceComponent; import com.example.yanjiang.stockchart.inject.modules.ServiceModule; import com.example.yanjiang.stockchart.rxutils.CommonUtil; import com.example.yanjiang.stockchart.rxutils.SchedulersCompat; import org.greenrobot.eventbus.Subscribe; import org.greenrobot.eventbus.ThreadMode; import java.io.File; import java.io.IOException; import javax.inject.Inject; import okhttp3.ResponseBody; import rx.Observable; import rx.Subscriber; import rx.Subscription; import rx.functions.Func1; import rx.schedulers.Schedulers; /** * authorajiang * mail1025065158@qq.com * blogpath_to_url */ public class DownLoadService extends IntentService { private static final String SERVICE_NAME = DownLoadService.class.getName(); private ServiceComponent serviceComponent; @Inject DownLoadApi downLoadApi; private Subscription subscription; /** * Creates an IntentService. Invoked by your subclass's constructor. * * @param name Used to name the worker thread, important only for debugging. 
*/ public DownLoadService() { super(SERVICE_NAME); } public DownLoadService(String name) { super(name); } @Override public void onCreate() { super.onCreate(); App.getBus().register(this); serviceComponent = DaggerServiceComponent.builder() .appComponent(((App) getApplication()).getApplicationComponent()) .serviceModule(new ServiceModule(this)) .build(); serviceComponent.inject(this); } @Override protected void onHandleIntent(Intent intent) { subscription = downLoadApi.getDownApk() .subscribeOn(Schedulers.io()) .flatMap(new Func1<ResponseBody, Observable<Boolean>>() { @Override public Observable<Boolean> call(ResponseBody responseBody) { Boolean writtenToDisk = com.example.yanjiang.stockchart.rxutils.FileUtils.writeResponseBodyToDisk(responseBody, getApplicationContext()); return Observable.just(writtenToDisk); } }) .compose(SchedulersCompat.<Boolean>applyIoSchedulers()) .subscribe(new Subscriber<Boolean>() { @Override public void onCompleted() { // /* App app = (App) getApplication(); app.addPatch();*/ /* try { //String patchPath = CommonUtil.getApatchDownloadPath(getApplicationContext()); String patchPath = Constant.EXTERNALPATH + Constant.APATCH_PATH;///storage/emulated/0/out.apatch App.getPatchManager().addPatch(patchPath); // File f = new File(patchPath); if (f.exists()) { Log.e("@@@","!!!!"); //boolean result = new File(patchPath).delete(); *//* if (!result) Log.e("@@@", patchPath + " delete fail");*//* } } catch (IOException e) { e.printStackTrace(); }*/ } @Override public void onError(Throwable e) { } @Override public void onNext(Boolean responseBody) { } }); } /** * */ public static void stopTask(Context context) { if (context != null) { Intent intent = new Intent(context, DownLoadService.class); context.stopService(intent); } } /**/ @Subscribe(threadMode = ThreadMode.MAIN) public void updateProgress(ProgressUpdateEvent progressUpdateEvent) { Log.e("yan", progressUpdateEvent.getbytesRead() + ""); } } ```
/content/code_sandbox/app/src/main/java/com/example/yanjiang/stockchart/service/DownLoadService.java
java
2016-05-18T03:51:41
2024-08-09T09:00:29
StockChart
AndroidJiang/StockChart
1,086
830
```java package com.example.yanjiang.stockchart.bean; import android.util.SparseArray; import org.json.JSONArray; import org.json.JSONObject; import java.util.ArrayList; public class DataParse { private ArrayList<MinutesBean> datas = new ArrayList<>(); private ArrayList<KLineBean> kDatas = new ArrayList<>(); private float baseValue; private float permaxmin; private float volmax; private SparseArray<String> dayLabels; private String code = "sz002081"; private int decreasingColor; private int increasingColor; private String stockExchange; private SparseArray<String> xValuesLabel=new SparseArray<>(); private int firstDay = 10; public void parseMinutes(JSONObject object) { JSONArray jsonArray = object.optJSONObject("data").optJSONObject(code).optJSONObject("data").optJSONArray("data"); String date = object.optJSONObject("data").optJSONObject(code).optJSONObject("data").optString("date"); if (date.length() == 0) { return; } /**/ baseValue = (float) object.optJSONObject("data").optJSONObject(code).optJSONObject("qt").optJSONArray(code).optDouble(4); int count = jsonArray.length(); for (int i = 0; i < count; i++) { String[] t = jsonArray.optString(i).split(" ");/* "0930 9.50 4707",*/ MinutesBean minutesData = new MinutesBean(); minutesData.time = t[0].substring(0, 2) + ":" + t[0].substring(2); minutesData.cjprice = Float.parseFloat(t[1]); if (i != 0) { String[] pre_t = jsonArray.optString(i - 1).split(" "); minutesData.cjnum = Integer.parseInt(t[2]) - Integer.parseInt(pre_t[2]); minutesData.total = minutesData.cjnum * minutesData.cjprice + datas.get(i - 1).total; minutesData.avprice = (minutesData.total) / Integer.parseInt(t[2]); } else { minutesData.cjnum = Integer.parseInt(t[2]); minutesData.avprice = minutesData.cjprice; minutesData.total = minutesData.cjnum * minutesData.cjprice; } minutesData.cha = minutesData.cjprice - baseValue; minutesData.per = (minutesData.cha / baseValue); double cha = minutesData.cjprice - baseValue; if (Math.abs(cha) > permaxmin) { permaxmin 
= (float) Math.abs(cha); } volmax = Math.max(minutesData.cjnum, volmax); datas.add(minutesData); } if (permaxmin == 0) { permaxmin = baseValue * 0.02f; } } public void parseKLine(JSONObject obj) { ArrayList<KLineBean> kLineBeans = new ArrayList<>(); JSONObject data = obj.optJSONObject("data").optJSONObject(code); JSONArray list = data.optJSONArray("day"); if (list != null) { int count = list.length(); for (int i = 0; i < count; i++) { JSONArray dayData = list.optJSONArray(i); KLineBean kLineData = new KLineBean(); kLineBeans.add(kLineData); kLineData.date = dayData.optString(0); kLineData.open = (float) dayData.optDouble(1); kLineData.close = (float) dayData.optDouble(2); kLineData.high = (float) dayData.optDouble(3); kLineData.low = (float) dayData.optDouble(4); kLineData.vol = (float) dayData.optDouble(5); volmax = Math.max(kLineData.vol, volmax); xValuesLabel.put(i, kLineData.date); } } kDatas.addAll(kLineBeans); } public float getMin() { return baseValue - permaxmin; } public float getMax() { return baseValue + permaxmin; } public float getPercentMax() { return permaxmin / baseValue; } public float getPercentMin() { return -getPercentMax(); } public float getVolmax() { return volmax; } public ArrayList<MinutesBean> getDatas() { return datas; } public ArrayList<KLineBean> getKLineDatas() { return kDatas; } public SparseArray<String> getXValuesLabel() { return xValuesLabel; } } ```
/content/code_sandbox/app/src/main/java/com/example/yanjiang/stockchart/bean/DataParse.java
java
2016-05-18T03:51:41
2024-08-09T09:00:29
StockChart
AndroidJiang/StockChart
1,086
992
```java package com.example.yanjiang.stockchart.interceptor; import java.io.IOException; import okhttp3.Interceptor; import okhttp3.Response; public class ProgressInterceptor implements Interceptor { // private ProgressListener progressListener; /* public ProgressInterceptor(ProgressListener progressListener) { this.progressListener = progressListener; }*/ @Override public Response intercept(Chain chain) throws IOException { Response originalResponse = chain.proceed(chain.request()); return originalResponse.newBuilder().body(new ProgressResponseBody(originalResponse.body())).build(); } } ```
/content/code_sandbox/app/src/main/java/com/example/yanjiang/stockchart/interceptor/ProgressInterceptor.java
java
2016-05-18T03:51:41
2024-08-09T09:00:29
StockChart
AndroidJiang/StockChart
1,086
116
```java package com.example.yanjiang.stockchart.interceptor; import java.io.IOException; import okhttp3.Interceptor; import okhttp3.MediaType; import okhttp3.Request; import okhttp3.Response; import okhttp3.ResponseBody; import okio.Buffer; /** * Created by yanjiang on 2016/3/7. */ public class LoggingInterceptor implements Interceptor { private static final String F_BREAK = " %n"; private static final String F_URL = " %s"; private static final String F_TIME = " in %.1fms"; private static final String F_HEADERS = "%s"; private static final String F_RESPONSE = F_BREAK + "Response: %d"; private static final String F_BODY = "body: %s"; private static final String F_BREAKER = F_BREAK + "-------------------------------------------" + F_BREAK; private static final String F_REQUEST_WITHOUT_BODY = F_URL + F_TIME + F_BREAK + F_HEADERS; private static final String F_RESPONSE_WITHOUT_BODY = F_RESPONSE + F_BREAK + F_HEADERS + F_BREAKER; private static final String F_REQUEST_WITH_BODY = F_URL + F_TIME + F_BREAK + F_HEADERS + F_BODY + F_BREAK; private static final String F_RESPONSE_WITH_BODY = F_RESPONSE + F_BREAK + F_HEADERS + F_BODY + F_BREAK + F_BREAKER; @Override public Response intercept(Chain chain) throws IOException { Request request = chain.request(); long t1 = System.nanoTime(); Response response = chain.proceed(request); long t2 = System.nanoTime(); MediaType contentType = null; String bodyString = null; if (response.body() != null) { contentType = response.body().contentType(); bodyString = response.body().string(); } double time = (t2 - t1) / 1e6d; if ("GET".equals(request.method())) { System.out.println(String.format("GET " + F_REQUEST_WITHOUT_BODY + F_RESPONSE_WITH_BODY, request.url(), time, request.headers(), response.code(), response.headers(), stringifyResponseBody(bodyString))); } else if ("POST".equals(request.method())) { System.out.println(String.format("POST " + F_REQUEST_WITH_BODY + F_RESPONSE_WITH_BODY, request.url(), time, request.headers(), 
stringifyRequestBody(request), response.code(), response.headers(), stringifyResponseBody(bodyString))); } else if ("PUT".equals(request.method())) { System.out.println(String.format("PUT " + F_REQUEST_WITH_BODY + F_RESPONSE_WITH_BODY, request.url(), time, request.headers(), request.body().toString(), response.code(), response.headers(), stringifyResponseBody(bodyString))); } else if ("DELETE".equals(request.method())) { System.out.println(String.format("DELETE " + F_REQUEST_WITHOUT_BODY + F_RESPONSE_WITHOUT_BODY, request.url(), time, request.headers(), response.code(), response.headers())); } if (response.body() != null) { ResponseBody body = ResponseBody.create(contentType, bodyString); return response.newBuilder().body(body).build(); } else { return response; } } private static String stringifyRequestBody(Request request) { try { final Request copy = request.newBuilder().build(); final Buffer buffer = new Buffer(); copy.body().writeTo(buffer); return buffer.readUtf8(); } catch (final IOException e) { return "did not work"; } } public String stringifyResponseBody(String responseBody) { return responseBody; } } ```
/content/code_sandbox/app/src/main/java/com/example/yanjiang/stockchart/interceptor/LoggingInterceptor.java
java
2016-05-18T03:51:41
2024-08-09T09:00:29
StockChart
AndroidJiang/StockChart
1,086
723
```java package com.example.yanjiang.stockchart.interceptor; import android.content.Context; import android.content.SharedPreferences; import android.support.annotation.Nullable; import android.util.Base64; import java.io.IOException; import java.util.Random; import okhttp3.Interceptor; import okhttp3.Request; import okhttp3.Response; import okhttp3.ResponseBody; /** * Created by yanjiang on 2016/3/21. */ public class HttpInterceptor implements Interceptor { private static String mBoundry; private static final int BOUNDARY_LENGTH = 32; private Context context; private static final String BOUNDARY_ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_"; public HttpInterceptor(Context context) { this.context = context; } @Override public Response intercept(Chain chain) throws IOException { Request request = chain.request(); return chain.proceed(request); } private Response addCookie(Chain chain) throws IOException { SharedPreferences sharedPreferences = context.getSharedPreferences("cookie", Context.MODE_PRIVATE); String cookie = sharedPreferences.getString("cookie", ""); Request request = chain.request(); Response response; if (!"".equals(cookie)) { Request compressedRequest = request.newBuilder() .header("Content-type","application/x-www-form-urlencoded; charset=UTF-8") .header("cookie", cookie.substring(0,cookie.length()-1)) .build(); response = chain.proceed(compressedRequest); }else{ response = chain.proceed(request); } return response; } private static String setBoundary() { StringBuilder sb = new StringBuilder(); Random random = new Random(); for (int i = 0; i < BOUNDARY_LENGTH; ++i) sb.append(BOUNDARY_ALPHABET.charAt(random.nextInt(BOUNDARY_ALPHABET.length()))); return sb.toString(); } public String getBoundary() { return mBoundry; } @Nullable private Response getBase64Response(Response response) throws IOException { if (response.body() != null) { String bodyString = response.body().string(); /**/ String pp = new String( 
Base64.decode(bodyString, Base64.DEFAULT)); return response.newBuilder() .body(ResponseBody.create(response.body().contentType(), pp)) .build(); } return null; } //requestBody /* private RequestBody encode(final RequestBody body) { return new RequestBody() { @Override public MediaType contentType() { Log.e("yan","type"+body.contentType()); return MediaType.parse("text/plain"); } @Override public void writeTo(BufferedSink sink) throws IOException { Buffer buffer = new Buffer(); body.writeTo(buffer); byte[] encoded = Base64.encode(buffer.readByteArray(), Base64.DEFAULT); sink.write(encoded); buffer.close(); sink.close(); } }; } public static String bodyToString(final RequestBody request){ try { final RequestBody copy = request; final Buffer buffer = new Buffer(); if(copy != null) copy.writeTo(buffer); else return ""; return buffer.readUtf8(); } catch (final IOException e) { return "did not work"; } }*/ } ```
/content/code_sandbox/app/src/main/java/com/example/yanjiang/stockchart/interceptor/HttpInterceptor.java
java
2016-05-18T03:51:41
2024-08-09T09:00:29
StockChart
AndroidJiang/StockChart
1,086
683
```java package com.example.yanjiang.stockchart.interceptor; import com.example.yanjiang.stockchart.application.App; import com.example.yanjiang.stockchart.event.ProgressUpdateEvent; import java.io.IOException; import okhttp3.MediaType; import okhttp3.ResponseBody; import okio.Buffer; import okio.BufferedSource; import okio.ForwardingSource; import okio.Okio; import okio.Source; public class ProgressResponseBody extends ResponseBody { private final ResponseBody responseBody; // private final ProgressListener progressListener; private BufferedSource bufferedSource; public ProgressResponseBody(ResponseBody responseBody) { this.responseBody = responseBody; // this.progressListener = progressListener; } @Override public MediaType contentType() { return responseBody.contentType(); } @Override public long contentLength() { return responseBody.contentLength(); } @Override public BufferedSource source() { if(bufferedSource == null) { bufferedSource = Okio.buffer(source(responseBody.source())); } return bufferedSource; } private Source source(Source source) { return new ForwardingSource(source) { long totalBytesRead = 0L; @Override public long read(Buffer sink, long byteCount) throws IOException { long bytesRead = super.read(sink, byteCount); // read() returns the number of bytes read, or -1 if this source is exhausted. totalBytesRead += bytesRead != -1 ? bytesRead : 0; App.getBus().post(new ProgressUpdateEvent(totalBytesRead, responseBody.contentLength(), bytesRead == -1)); // progressListener.update(totalBytesRead, responseBody.contentLength(), bytesRead == -1); return bytesRead; } }; } } ```
/content/code_sandbox/app/src/main/java/com/example/yanjiang/stockchart/interceptor/ProgressResponseBody.java
java
2016-05-18T03:51:41
2024-08-09T09:00:29
StockChart
AndroidJiang/StockChart
1,086
370
```java package com.example.yanjiang.stockchart.inject.component; import android.content.Context; import android.content.SharedPreferences; import com.example.yanjiang.stockchart.api.ClientApi; import com.example.yanjiang.stockchart.api.DownLoadApi; import com.example.yanjiang.stockchart.application.App; import com.example.yanjiang.stockchart.inject.modules.AppModule; import com.example.yanjiang.stockchart.inject.modules.ClientApiModule; import javax.inject.Singleton; import dagger.Component; @Singleton @Component(modules = {AppModule.class, ClientApiModule.class}) public interface AppComponent { Context context(); ClientApi clientApi(); DownLoadApi downLoadApi(); SharedPreferences sharedPreferences(); void inject(App application); } ```
/content/code_sandbox/app/src/main/java/com/example/yanjiang/stockchart/inject/component/AppComponent.java
java
2016-05-18T03:51:41
2024-08-09T09:00:29
StockChart
AndroidJiang/StockChart
1,086
144
```java package com.example.yanjiang.stockchart.inject.component; import android.app.Activity; import com.example.yanjiang.stockchart.BaseActivity; import com.example.yanjiang.stockchart.inject.modules.ActivityModule; import com.example.yanjiang.stockchart.inject.others.PerActivity; import dagger.Component; @PerActivity @Component(dependencies = AppComponent.class, modules = ActivityModule.class) public interface ActivityComponent { Activity getActivityContext(); void inject(BaseActivity mBaseActivity); } ```
/content/code_sandbox/app/src/main/java/com/example/yanjiang/stockchart/inject/component/ActivityComponent.java
java
2016-05-18T03:51:41
2024-08-09T09:00:29
StockChart
AndroidJiang/StockChart
1,086
96
```java package com.example.yanjiang.stockchart.inject.component; import android.app.Activity; import com.example.yanjiang.stockchart.inject.modules.FragmentModule; import com.example.yanjiang.stockchart.inject.others.PerFragment; import dagger.Component; @PerFragment @Component(modules = FragmentModule.class, dependencies = AppComponent.class) public interface FragmentComponent { Activity getActivity(); // void inject(BaseFragment mBaseFragment); } ```
/content/code_sandbox/app/src/main/java/com/example/yanjiang/stockchart/inject/component/FragmentComponent.java
java
2016-05-18T03:51:41
2024-08-09T09:00:29
StockChart
AndroidJiang/StockChart
1,086
86
```java package com.example.yanjiang.stockchart.inject.others; import java.lang.annotation.Retention; import javax.inject.Scope; import static java.lang.annotation.RetentionPolicy.RUNTIME; @Scope @Retention(RUNTIME) public @interface PerFragment { } ```
/content/code_sandbox/app/src/main/java/com/example/yanjiang/stockchart/inject/others/PerFragment.java
java
2016-05-18T03:51:41
2024-08-09T09:00:29
StockChart
AndroidJiang/StockChart
1,086
51